@@ -1332,8 +1332,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
- wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
- enic_wq_service, NULL);
+ wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do);
if (budget > 0)
rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do);
@@ -1435,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
wq_irq = wq->index;
cq = enic_cq_wq(enic, wq_irq);
intr = enic_msix_wq_intr(enic, wq_irq);
- wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
- enic_wq_service, NULL);
+
+ wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do);
vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
0 /* don't unmask intr */,
@@ -6,8 +6,12 @@
#include "enic.h"
#include "enic_wq.h"
-static void cq_desc_dec(const struct cq_desc *desc_arg, u8 *type, u8 *color,
- u16 *q_number, u16 *completed_index)
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
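+/* Decode a WQ completion descriptor.  @ext_wq selects the wider
+ * ENET_CQ_DESC_COMP_NDX_MASK used by extended WQs; otherwise the default
+ * CQ_DESC_COMP_NDX_MASK is applied to the completed index.
+ */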
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+ u8 *type, u8 *color, u16 *q_number,
+ u16 *completed_index)
{
const struct cq_desc *desc = desc_arg;
const u8 type_color = desc->type_color;
@@ -25,48 +29,13 @@ static void cq_desc_dec(const struct cq_desc *desc_arg, u8 *type, u8 *color,
*type = type_color & CQ_DESC_TYPE_MASK;
*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
- CQ_DESC_COMP_NDX_MASK;
-}
-
-unsigned int vnic_cq_service(struct vnic_cq *cq, unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev,
- struct cq_desc *cq_desc, u8 type,
- u16 q_number, u16 completed_index,
- void *opaque), void *opaque)
-{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
- if ((*q_service)(cq->vdev, cq_desc, type, q_number,
- completed_index, opaque))
- break;
-
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
- work_done++;
- if (work_done >= work_to_do)
- break;
- }
-
- return work_done;
+ if (ext_wq)
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ ENET_CQ_DESC_COMP_NDX_MASK;
+ else
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
}
void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
@@ -94,15 +63,15 @@ static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
enic_free_wq_buf(wq, buf);
}
-int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
- u16 q_number, u16 completed_index, void *opaque)
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
{
struct enic *enic = vnic_dev_priv(vdev);
spin_lock(&enic->wq[q_number].lock);
vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
- completed_index, enic_wq_free_buf, opaque);
+ completed_index, enic_wq_free_buf, NULL);
if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
&& vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
@@ -112,7 +81,37 @@ int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
}
spin_unlock(&enic->wq[q_number].lock);
-
- return 0;
}
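+
+/* Service up to @work_to_do completions on the WQ completion queue selected
+ * by @cq_index and return the number of entries processed.
+ */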
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ struct cq_desc *cq_desc;
+ u8 type, color;
+ bool ext_wq;
+
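+	/* Completions from an extended WQ carry a wider completed index and
+	 * must be decoded with ENET_CQ_DESC_COMP_NDX_MASK.
+	 */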
+ ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+
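+	/* A descriptor is new only while its color bit differs from
+	 * last_color; stop at the first stale entry or once the budget is
+	 * exhausted.
+	 */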
+ while (color != cq->last_color) {
+ enic_wq_service(cq->vdev, cq_desc, type, q_number,
+ completed_index);
+
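+		/* Advance to_clean; last_color is toggled when the ring
+		 * wraps around.
+		 */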
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
@@ -2,13 +2,6 @@
* Copyright 2025 Cisco Systems, Inc. All rights reserved.
*/
-unsigned int vnic_cq_service(struct vnic_cq *cq, unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev,
- struct cq_desc *cq_desc, u8 type,
- u16 q_number, u16 completed_index,
- void *opaque), void *opaque);
-
void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
-
-int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
- u16 q_number, u16 completed_index, void *opaque);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);