
dmaengine: idxd: Clean up descriptors with fault error
author    Dave Jiang <dave.jiang@intel.com>
          Tue, 27 Oct 2020 17:34:40 +0000 (10:34 -0700)
committer Vinod Koul <vkoul@kernel.org>
          Fri, 30 Oct 2020 08:40:36 +0000 (14:10 +0530)
Add code to "complete" a descriptor when the descriptor or its completion
address hits a fault error while SVA mode is being used. This error can be
triggered by bad programming from the user. A lock is introduced to protect
the descriptor completion lists, since the fault handler runs from a work
queue after being scheduled in the interrupt handler.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Link: https://lore.kernel.org/r/160382008092.3911367.12766483427643278985.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/irq.c
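
For readers unfamiliar with the pattern, here is a minimal, self-contained
sketch of the deferral scheme the patch relies on: the interrupt thread
allocates a small work item with GFP_ATOMIC and queues it on a workqueue,
and the work function later walks a spinlock-protected descriptor list to
find and complete the entry whose address matches the fault. All names in
the sketch (demo_dev, demo_desc, demo_fault, demo_schedule_fault) are
hypothetical and only illustrate the technique; the actual idxd
implementation is in the diff below.

/*
 * Illustrative sketch only; demo_dev, demo_desc, demo_fault and
 * demo_schedule_fault are made-up names, not idxd driver symbols.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_desc {
        struct list_head list;
        u64 hw_addr;                    /* address the device may fault on */
        void (*complete)(struct demo_desc *desc, bool failed);
};

struct demo_dev {
        spinlock_t list_lock;           /* protects work_list */
        struct list_head work_list;     /* descriptors awaiting completion */
        struct workqueue_struct *wq;
};

struct demo_fault {
        struct work_struct work;
        struct demo_dev *dev;
        u64 addr;
};

static void demo_fault_work(struct work_struct *work)
{
        struct demo_fault *fault = container_of(work, struct demo_fault, work);
        struct demo_dev *dev = fault->dev;
        struct demo_desc *desc, *n;
        unsigned long flags;

        /* Walk the list under the lock and complete the matching entry. */
        spin_lock_irqsave(&dev->list_lock, flags);
        list_for_each_entry_safe(desc, n, &dev->work_list, list) {
                if (desc->hw_addr == fault->addr) {
                        list_del(&desc->list);
                        spin_unlock_irqrestore(&dev->list_lock, flags);
                        desc->complete(desc, true);     /* report the failure */
                        goto out;
                }
        }
        spin_unlock_irqrestore(&dev->list_lock, flags);
out:
        kfree(fault);
}

/* Called from the threaded interrupt handler when a fault address is reported. */
static int demo_schedule_fault(struct demo_dev *dev, u64 fault_addr)
{
        struct demo_fault *fault;

        fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
        if (!fault)
                return -ENOMEM;

        fault->dev = dev;
        fault->addr = fault_addr;
        INIT_WORK(&fault->work, demo_fault_work);
        queue_work(dev->wq, &fault->work);
        return 0;
}

The _irqsave locking in the sketch mirrors the list_lock added to struct
idxd_irq_entry below: the same list is also manipulated from the interrupt
thread, so both paths must take the lock.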

drivers/dma/idxd/idxd.h
index dcac3bb..7e54209 100644
@@ -34,6 +34,11 @@ struct idxd_irq_entry {
        int id;
        struct llist_head pending_llist;
        struct list_head work_list;
+       /*
+        * Lock to protect access between irq thread process descriptor
+        * and irq thread processing error descriptor.
+        */
+       spinlock_t list_lock;
 };
 
 struct idxd_group {
drivers/dma/idxd/init.c
index 1639f3b..c24106e 100644
@@ -97,6 +97,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        for (i = 0; i < msixcnt; i++) {
                idxd->irq_entries[i].id = i;
                idxd->irq_entries[i].idxd = idxd;
+               spin_lock_init(&idxd->irq_entries[i].list_lock);
        }
 
        msix = &idxd->msix_entries[0];
drivers/dma/idxd/irq.c
index 17a65a1..593a2f6 100644
 #include "idxd.h"
 #include "registers.h"
 
+enum irq_work_type {
+       IRQ_WORK_NORMAL = 0,
+       IRQ_WORK_PROCESS_FAULT,
+};
+
+struct idxd_fault {
+       struct work_struct work;
+       u64 addr;
+       struct idxd_device *idxd;
+};
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+                                enum irq_work_type wtype,
+                                int *processed, u64 data);
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+                                    enum irq_work_type wtype,
+                                    int *processed, u64 data);
+
 static void idxd_device_reinit(struct work_struct *work)
 {
        struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -44,6 +62,46 @@ static void idxd_device_reinit(struct work_struct *work)
        idxd_device_wqs_clear_state(idxd);
 }
 
+static void idxd_device_fault_work(struct work_struct *work)
+{
+       struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
+       struct idxd_irq_entry *ie;
+       int i;
+       int processed;
+       int irqcnt = fault->idxd->num_wq_irqs + 1;
+
+       for (i = 1; i < irqcnt; i++) {
+               ie = &fault->idxd->irq_entries[i];
+               irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
+                                     &processed, fault->addr);
+               if (processed)
+                       break;
+
+               irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
+                                         &processed, fault->addr);
+               if (processed)
+                       break;
+       }
+
+       kfree(fault);
+}
+
+static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
+                                             u64 fault_addr)
+{
+       struct idxd_fault *fault;
+
+       fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
+       if (!fault)
+               return -ENOMEM;
+
+       fault->addr = fault_addr;
+       fault->idxd = idxd;
+       INIT_WORK(&fault->work, idxd_device_fault_work);
+       queue_work(idxd->wq, &fault->work);
+       return 0;
+}
+
 irqreturn_t idxd_irq_handler(int vec, void *data)
 {
        struct idxd_irq_entry *irq_entry = data;
@@ -125,6 +183,15 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
        if (!err)
                goto out;
 
+       /*
+        * This case should rarely happen and typically is due to software
+        * programming error by the driver.
+        */
+       if (idxd->sw_err.valid &&
+           idxd->sw_err.desc_valid &&
+           idxd->sw_err.fault_addr)
+               idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
+
        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
        if (gensts.state == IDXD_DEVICE_STATE_HALT) {
                idxd->state = IDXD_DEV_HALTED;
@@ -152,57 +219,110 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
        return IRQ_HANDLED;
 }
 
+static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+{
+       /*
+        * Completion address can be bad as well. Check fault address match for descriptor
+        * and completion address.
+        */
+       if ((u64)desc->hw == fault_addr ||
+           (u64)desc->completion == fault_addr) {
+               idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+               return true;
+       }
+
+       return false;
+}
+
+static bool complete_desc(struct idxd_desc *desc)
+{
+       if (desc->completion->status) {
+               idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+               return true;
+       }
+
+       return false;
+}
+
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
-                                    int *processed)
+                                    enum irq_work_type wtype,
+                                    int *processed, u64 data)
 {
        struct idxd_desc *desc, *t;
        struct llist_node *head;
        int queued = 0;
+       bool completed = false;
+       unsigned long flags;
 
        *processed = 0;
        head = llist_del_all(&irq_entry->pending_llist);
        if (!head)
-               return 0;
+               goto out;
 
        llist_for_each_entry_safe(desc, t, head, llnode) {
-               if (desc->completion->status) {
-                       idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+               if (wtype == IRQ_WORK_NORMAL)
+                       completed = complete_desc(desc);
+               else if (wtype == IRQ_WORK_PROCESS_FAULT)
+                       completed = process_fault(desc, data);
+
+               if (completed) {
                        idxd_free_desc(desc->wq, desc);
                        (*processed)++;
+                       if (wtype == IRQ_WORK_PROCESS_FAULT)
+                               break;
                } else {
-                       list_add_tail(&desc->list, &irq_entry->work_list);
+                       spin_lock_irqsave(&irq_entry->list_lock, flags);
+                       list_add_tail(&desc->list,
+                                     &irq_entry->work_list);
+                       spin_unlock_irqrestore(&irq_entry->list_lock, flags);
                        queued++;
                }
        }
 
+ out:
        return queued;
 }
 
 static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
-                                int *processed)
+                                enum irq_work_type wtype,
+                                int *processed, u64 data)
 {
        struct list_head *node, *next;
        int queued = 0;
+       bool completed = false;
+       unsigned long flags;
 
        *processed = 0;
+       spin_lock_irqsave(&irq_entry->list_lock, flags);
        if (list_empty(&irq_entry->work_list))
-               return 0;
+               goto out;
 
        list_for_each_safe(node, next, &irq_entry->work_list) {
                struct idxd_desc *desc =
                        container_of(node, struct idxd_desc, list);
 
-               if (desc->completion->status) {
+               spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+               if (wtype == IRQ_WORK_NORMAL)
+                       completed = complete_desc(desc);
+               else if (wtype == IRQ_WORK_PROCESS_FAULT)
+                       completed = process_fault(desc, data);
+
+               if (completed) {
+                       spin_lock_irqsave(&irq_entry->list_lock, flags);
                        list_del(&desc->list);
-                       /* process and callback */
-                       idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+                       spin_unlock_irqrestore(&irq_entry->list_lock, flags);
                        idxd_free_desc(desc->wq, desc);
                        (*processed)++;
+                       if (wtype == IRQ_WORK_PROCESS_FAULT)
+                               return queued;
                } else {
                        queued++;
                }
+               spin_lock_irqsave(&irq_entry->list_lock, flags);
        }
 
+ out:
+       spin_unlock_irqrestore(&irq_entry->list_lock, flags);
        return queued;
 }
 
@@ -230,12 +350,14 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
         * 5. Repeat until no more descriptors.
         */
        do {
-               rc = irq_process_work_list(irq_entry, &processed);
+               rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
+                                          &processed, 0);
                total += processed;
                if (rc != 0)
                        continue;
 
-               rc = irq_process_pending_llist(irq_entry, &processed);
+               rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
+                                              &processed, 0);
                total += processed;
        } while (rc != 0);