--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -165,23 +165,9 @@ int ena_xmit_common(struct ena_adapter *adapter,
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
- u32 i;
- int rc;
-
adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
if (!adapter->netdev->rx_cpu_rmap)
return -ENOMEM;
- for (i = 0; i < adapter->num_io_queues; i++) {
- int irq_idx = ENA_IO_IRQ_IDX(i);
-
- rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- pci_irq_vector(adapter->pdev, irq_idx));
- if (rc) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- return rc;
- }
- }
#endif /* CONFIG_RFS_ACCEL */
return 0;
}
@@ -1712,7 +1698,12 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
for (i = 0; i < io_queue_count; i++) {
irq_idx = ENA_IO_IRQ_IDX(i);
irq = &adapter->irq_tbl[irq_idx];
+#ifdef CONFIG_RFS_ACCEL
+ netif_napi_set_irq(&adapter->ena_napi[i].napi, irq->vector,
+ NAPIF_IRQ_ARFS_RMAP);
+#else
netif_napi_set_irq(&adapter->ena_napi[i].napi, irq->vector, 0);
+#endif
}
return rc;
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -11192,11 +11192,8 @@ static void bnxt_free_irq(struct bnxt *bp)
static int bnxt_request_irq(struct bnxt *bp)
{
- int i, j, rc = 0;
+ int i, rc = 0;
unsigned long flags = 0;
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
rc = bnxt_setup_int_mode(bp);
if (rc) {
@@ -11204,28 +11201,22 @@ static int bnxt_request_irq(struct bnxt *bp)
rc);
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
- rmap = bp->dev->rx_cpu_rmap;
-#endif
- for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
-#ifdef CONFIG_RFS_ACCEL
- if (rmap && bp->bnapi[i]->rx_ring) {
- rc = irq_cpu_rmap_add(rmap, irq->vector);
- if (rc)
- netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
- j);
- j++;
- }
-#endif
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
bp->bnapi[i]);
if (rc)
break;
+#ifdef CONFIG_RFS_ACCEL
+ netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector,
+ NAPIF_IRQ_ARFS_RMAP);
+#else
netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector, 0);
+#endif
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -590,14 +590,13 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
}
/**
- * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
+ * ice_set_cpu_rx_rmap - allocate CPU reverse map for a VSI
* @vsi: the VSI to be forwarded to
*/
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -614,13 +613,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
if (unlikely(!netdev->rx_cpu_rmap))
return -EINVAL;
- ice_for_each_q_vector(vsi, i)
- if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- vsi->q_vectors[i]->irq.virq)) {
- ice_free_cpu_rx_rmap(vsi);
- return -EINVAL;
- }
-
return 0;
}
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2735,7 +2735,12 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+#ifdef CONFIG_RFS_ACCEL
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq,
+ NAPIF_IRQ_ARFS_RMAP);
+#else
netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq, 0);
+#endif
}
}
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1944,12 +1944,24 @@ static void qede_napi_disable_remove(struct qede_dev *edev)
static void qede_napi_add_enable(struct qede_dev *edev)
{
+ struct qede_fastpath *fp;
int i;
/* Add NAPI objects */
for_each_queue(i) {
- netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
- napi_enable(&edev->fp_array[i].napi);
+ fp = &edev->fp_array[i];
+ netif_napi_add(edev->ndev, &fp->napi, qede_poll);
+ napi_enable(&fp->napi);
+#ifdef CONFIG_RFS_ACCEL
+ if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX))
+ netif_napi_set_irq(&fp->napi,
+ edev->int_info.msix[i].vector,
+ NAPIF_IRQ_ARFS_RMAP);
+#endif
}
}
@@ -1983,18 +1995,6 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
}
for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
-#ifdef CONFIG_RFS_ACCEL
- struct qede_fastpath *fp = &edev->fp_array[i];
-
- if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
- rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
- edev->int_info.msix[i].vector);
- if (rc) {
- DP_ERR(edev, "Failed to add CPU rmap\n");
- qede_free_arfs(edev);
- }
- }
-#endif
rc = request_irq(edev->int_info.msix[i].vector,
qede_msix_fp_int, 0, edev->fp_array[i].name,
&edev->fp_array[i]);
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2004,6 +2004,15 @@ static void ef4_init_napi_channel(struct ef4_channel *channel)
channel->napi_dev = efx->net_dev;
netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
+
+ if (efx->interrupt_mode == EF4_INT_MODE_MSIX &&
+ channel->channel < efx->n_rx_channels) {
+#ifdef CONFIG_RFS_ACCEL
+ netif_napi_set_irq(&channel->napi_str, channel->irq,
+ NAPIF_IRQ_ARFS_RMAP);
+#else
+ netif_napi_set_irq(&channel->napi_str, channel->irq, 0);
+#endif
+ }
}
static void ef4_init_napi(struct ef4_nic *efx)
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -115,16 +115,6 @@ int ef4_nic_init_interrupt(struct ef4_nic *efx)
goto fail2;
}
++n_irqs;
-
-#ifdef CONFIG_RFS_ACCEL
- if (efx->interrupt_mode == EF4_INT_MODE_MSIX &&
- channel->channel < efx->n_rx_channels) {
- rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
- channel->irq);
- if (rc)
- goto fail2;
- }
-#endif
}
return 0;
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -1321,6 +1321,15 @@ static void efx_init_napi_channel(struct efx_channel *channel)
channel->napi_dev = efx->net_dev;
netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
+ channel->channel < efx->n_rx_channels) {
+#ifdef CONFIG_RFS_ACCEL
+ netif_napi_set_irq(&channel->napi_str, channel->irq,
+ NAPIF_IRQ_ARFS_RMAP);
+#else
+ netif_napi_set_irq(&channel->napi_str, channel->irq, 0);
+#endif
+ }
}
void efx_siena_init_napi(struct efx_nic *efx)
--- a/drivers/net/ethernet/sfc/siena/nic.c
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -117,16 +117,6 @@ int efx_siena_init_interrupt(struct efx_nic *efx)
goto fail2;
}
++n_irqs;
-
-#ifdef CONFIG_RFS_ACCEL
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
- channel->channel < efx->n_rx_channels) {
- rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
- channel->irq);
- if (rc)
- goto fail2;
- }
-#endif
}
efx->irqs_hooked = true;
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -354,6 +354,18 @@ struct napi_config {
unsigned int napi_id;
};
+/* IRQ binding flags for netif_napi_set_irq(). Defined unconditionally
+ * (an empty enum is invalid C) so callers need not guard references to
+ * the flag; the core acts on it only when CONFIG_RFS_ACCEL is enabled.
+ */
+enum {
+ NAPI_IRQ_ARFS_RMAP, /* Core handles RMAP updates */
+};
+
+enum {
+ NAPIF_IRQ_ARFS_RMAP = BIT(NAPI_IRQ_ARFS_RMAP),
+};
+
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6707,8 +6707,22 @@ EXPORT_SYMBOL(netif_queue_set_napi);
void netif_napi_set_irq(struct napi_struct *napi, int irq, unsigned long flags)
{
napi->irq = irq;
napi->irq_flags = flags;
+
+#ifdef CONFIG_RFS_ACCEL
+ /* Core updates the ARFS rmap entry for this IRQ on the driver's behalf. */
+ if (napi->dev->rx_cpu_rmap && (flags & NAPIF_IRQ_ARFS_RMAP)) {
+ int rc = irq_cpu_rmap_add(napi->dev->rx_cpu_rmap, irq);
+
+ if (rc) {
+ netdev_warn(napi->dev, "Unable to update ARFS map (%d).\n",
+ rc);
+ free_irq_cpu_rmap(napi->dev->rx_cpu_rmap);
+ napi->dev->rx_cpu_rmap = NULL;
+ }
+ }
+#endif
}
EXPORT_SYMBOL(netif_napi_set_irq);
Add a new napi->irq flag: NAPIF_IRQ_ARFS_RMAP. A driver can use the flag
when binding an irq to a napi:

	netif_napi_set_irq(napi, irq, NAPIF_IRQ_ARFS_RMAP);

and the core will update the ARFS rmap with the assigned IRQ affinity.

Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c  | 19 ++++---------
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 25 ++++++-----------
 drivers/net/ethernet/intel/ice/ice_arfs.c     | 10 +------
 drivers/net/ethernet/intel/ice/ice_lib.c      |  5 ++++
 drivers/net/ethernet/qlogic/qede/qede_main.c  | 28 +++++++++----------
 drivers/net/ethernet/sfc/falcon/efx.c         |  9 ++++++
 drivers/net/ethernet/sfc/falcon/nic.c         | 10 -------
 drivers/net/ethernet/sfc/siena/efx_channels.c |  9 ++++++
 drivers/net/ethernet/sfc/siena/nic.c          | 10 -------
 include/linux/netdevice.h                     | 12 ++++++++
 net/core/dev.c                                | 14 ++++++++++
 11 files changed, 77 insertions(+), 74 deletions(-)
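
A minimal sketch of the driver-side flow this enables (the foo_* names,
the struct foo_adapter layout, and its napi array are illustrative only,
not taken from any in-tree driver):

	static int foo_setup_io_irqs(struct foo_adapter *adapter)
	{
		int i;

	#ifdef CONFIG_RFS_ACCEL
		/* Allocate the rmap once; the core fills it in below. */
		adapter->netdev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(adapter->num_io_queues);
		if (!adapter->netdev->rx_cpu_rmap)
			return -ENOMEM;
	#endif

		for (i = 0; i < adapter->num_io_queues; i++) {
			/* NAPIs were registered with netif_napi_add()
			 * earlier, so napi->dev is valid here.
			 */
	#ifdef CONFIG_RFS_ACCEL
			netif_napi_set_irq(&adapter->napi[i],
					   pci_irq_vector(adapter->pdev, i),
					   NAPIF_IRQ_ARFS_RMAP);
	#else
			netif_napi_set_irq(&adapter->napi[i],
					   pci_irq_vector(adapter->pdev, i),
					   0);
	#endif
		}
		return 0;
	}

Note that the core dereferences napi->dev->rx_cpu_rmap, so the rmap must
be allocated and netif_napi_add() already called before the driver passes
NAPIF_IRQ_ARFS_RMAP to netif_napi_set_irq().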