@@ -22,11 +22,6 @@ struct idxd_fault {
struct idxd_device *idxd;
};
-static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- int *processed, u64 data);
-static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- int *processed, u64 data);
-
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -177,18 +172,15 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
-static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- int *processed, u64 data)
+static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
struct idxd_desc *desc, *t;
struct llist_node *head;
- int queued = 0;
unsigned long flags;
- *processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
- goto out;
+ return;
llist_for_each_entry_safe(desc, t, head, llnode) {
u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
@@ -200,35 +192,25 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
- (*processed)++;
continue;
}
complete_desc(desc, IDXD_COMPLETE_NORMAL);
- (*processed)++;
} else {
spin_lock_irqsave(&irq_entry->list_lock, flags);
list_add_tail(&desc->list,
&irq_entry->work_list);
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- queued++;
}
}
-
- out:
- return queued;
}
-static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- int *processed, u64 data)
+static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
{
- int queued = 0;
unsigned long flags;
LIST_HEAD(flist);
struct idxd_desc *desc, *n;
- *processed = 0;
-
/*
	 * This lock protects the list from corruption by accesses outside of
	 * the irq handler thread.
@@ -236,16 +218,13 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
spin_lock_irqsave(&irq_entry->list_lock, flags);
if (list_empty(&irq_entry->work_list)) {
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- return 0;
+ return;
}
list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
if (desc->completion->status) {
list_del(&desc->list);
- (*processed)++;
list_add_tail(&desc->list, &flist);
- } else {
- queued++;
}
}
@@ -265,13 +244,11 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
complete_desc(desc, IDXD_COMPLETE_NORMAL);
}
-
- return queued;
}
-static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
+irqreturn_t idxd_wq_thread(int irq, void *data)
{
- int rc, processed, total = 0;
+ struct idxd_irq_entry *irq_entry = data;
/*
* There are two lists we are processing. The pending_llist is where
@@ -290,29 +267,9 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
* and process the completed entries.
* 4. If the entry is still waiting on hardware, list_add_tail() to
* the work_list.
- * 5. Repeat until no more descriptors.
*/
- do {
- rc = irq_process_work_list(irq_entry, &processed, 0);
- total += processed;
- if (rc != 0)
- continue;
-
- rc = irq_process_pending_llist(irq_entry, &processed, 0);
- total += processed;
- } while (rc != 0);
-
- return total;
-}
-
-irqreturn_t idxd_wq_thread(int irq, void *data)
-{
- struct idxd_irq_entry *irq_entry = data;
- int processed;
-
- processed = idxd_desc_process(irq_entry);
- if (processed == 0)
- return IRQ_NONE;
+ irq_process_work_list(irq_entry);
+ irq_process_pending_llist(irq_entry);
return IRQ_HANDLED;
}
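
For quick reference, this is the interrupt thread that the hunks above produce, assembled only from the added and surrounding context lines (the in-function comment block describing the two lists is omitted); a readability sketch, not a substitute for the applied patch:

irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;

	/* Complete descriptors previously parked on the work_list. */
	irq_process_work_list(irq_entry);

	/*
	 * Then walk the pending_llist: completed descriptors are completed
	 * normally (or as aborted), while entries still waiting on hardware
	 * are moved to the work_list via list_add_tail() for a later pass.
	 */
	irq_process_pending_llist(irq_entry);

	return IRQ_HANDLED;
}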