diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -90,6 +90,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct napi_config *napi_config;
int irq, err = 0;
int timestamp_en = 0;
bool assigned_eq = false;
@@ -100,11 +101,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
*cq->mcq.set_ci_db = 0;
*cq->mcq.arm_db = 0;
memset(cq->buf, 0, cq->buf_size);
+ napi_config = cq->napi.config;
if (cq->type == RX) {
if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
cq->vector)) {
- cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+ cq->vector = cpumask_first(&napi_config->affinity_mask);
err = mlx4_assign_eq(mdev->dev, priv->port,
&cq->vector);
@@ -150,7 +152,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
- netif_napi_set_irq(&cq->napi, irq, 0);
+ netif_napi_set_irq(&cq->napi, irq, NAPIF_IRQ_AFFINITY);
napi_enable(&cq->napi);
netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi);
break;
@@ -158,7 +160,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->mcq.comp = mlx4_en_rx_irq;
netif_napi_add_config(cq->dev, &cq->napi, mlx4_en_poll_rx_cq,
cq_idx);
- netif_napi_set_irq(&cq->napi, irq, 0);
+ netif_napi_set_irq(&cq->napi, irq, NAPIF_IRQ_AFFINITY);
napi_enable(&cq->napi);
netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1596,24 +1596,6 @@ static void mlx4_en_linkstate_work(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
-static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
-{
- struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
- int numa_node = priv->mdev->dev->numa_node;
-
- if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
- ring->affinity_mask);
- return 0;
-}
-
-static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
-{
- free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
-}
-
static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
int tx_ring_idx)
{
@@ -1663,16 +1645,9 @@ int mlx4_en_start_port(struct net_device *dev)
for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
- err = mlx4_en_init_affinity_hint(priv, i);
- if (err) {
- en_err(priv, "Failed preparing IRQ affinity hint\n");
- goto cq_err;
- }
-
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed activating Rx CQ\n");
- mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
@@ -1688,7 +1663,6 @@ int mlx4_en_start_port(struct net_device *dev)
if (err) {
en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
- mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
@@ -1874,10 +1848,9 @@ int mlx4_en_start_port(struct net_device *dev)
mac_err:
mlx4_en_put_qp(priv);
cq_err:
- while (rx_index--) {
+ while (rx_index--)
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
- mlx4_en_free_affinity_hint(priv, rx_index);
- }
+
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
@@ -2011,8 +1984,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
napi_synchronize(&cq->napi);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
-
- mlx4_en_free_affinity_hint(priv, i);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -233,23 +233,6 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
slave_event(dev, slave, eqe);
}
-#if defined(CONFIG_SMP)
-static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
-{
- int hint_err;
- struct mlx4_dev *dev = &priv->dev;
- struct mlx4_eq *eq = &priv->eq_table.eq[vec];
-
- if (!cpumask_available(eq->affinity_mask) ||
- cpumask_empty(eq->affinity_mask))
- return;
-
- hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
- if (hint_err)
- mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
-}
-#endif
-
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_eqe eqe;
@@ -1123,8 +1106,6 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
- free_cpumask_var(eq_table->eq[i].affinity_mask);
- irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
@@ -1516,9 +1497,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
*prequested_vector = -1;
} else {
-#if defined(CONFIG_SMP)
- mlx4_set_eq_affinity_hint(priv, *prequested_vector);
-#endif
eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
priv->eq_table.eq[*prequested_vector].have_irq = 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2923,36 +2923,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
return err;
}
-static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
-{
- int requested_cpu = 0;
- struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_eq *eq;
- int off = 0;
- int i;
-
- if (eqn > dev->caps.num_comp_vectors)
- return -EINVAL;
-
- for (i = 1; i < port; i++)
- off += mlx4_get_eqs_per_port(dev, i);
-
- requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
-
- /* Meaning EQs are shared, and this call comes from the second port */
- if (requested_cpu < 0)
- return 0;
-
- eq = &priv->eq_table.eq[eqn];
-
- if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_set_cpu(requested_cpu, eq->affinity_mask);
-
- return 0;
-}
-
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2997,19 +2967,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
priv->eq_table.eq[i].irq =
entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
- if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+ if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
dev->caps.num_ports);
- /* We don't set affinity hint when there
- * aren't enough EQs
- */
- } else {
+ else
set_bit(port,
priv->eq_table.eq[i].actv_ports.ports);
- if (mlx4_init_affinity_hint(dev, port + 1, i))
- mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
- i);
- }
+
/* We divide the Eqs evenly between the two ports.
* (dev->caps.num_comp_vectors / dev->caps.num_ports)
* refers to the number of Eqs per port
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -403,7 +403,6 @@ struct mlx4_eq {
struct mlx4_eq_tasklet tasklet_ctx;
struct mlx4_active_ports actv_ports;
u32 ref_count;
- cpumask_var_t affinity_mask;
};
struct mlx4_slave_eqe {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -357,7 +357,6 @@ struct mlx4_en_rx_ring {
unsigned long dropped;
unsigned long alloc_fail;
int hwtstamp_rx_filter;
- cpumask_var_t affinity_mask;
struct xdp_rxq_info xdp_rxq;
};
Delete the driver CPU affinity info and use the core's napi config
instead.

Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
---
 drivers/net/ethernet/mellanox/mlx4/en_cq.c    |  8 ++--
 .../net/ethernet/mellanox/mlx4/en_netdev.c    | 33 +--------------
 drivers/net/ethernet/mellanox/mlx4/eq.c       | 22 ----------
 drivers/net/ethernet/mellanox/mlx4/main.c     | 42 ++-----------------
 drivers/net/ethernet/mellanox/mlx4/mlx4.h     |  1 -
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h  |  1 -
 6 files changed, 10 insertions(+), 97 deletions(-)
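
For reference, a minimal sketch of the pattern the driver is converted to,
assuming the per-NAPI config API used in the diff above
(netif_napi_add_config(), netif_napi_set_irq() with NAPIF_IRQ_AFFINITY, and
napi->config->affinity_mask). The my_drv_* names and the queue structure are
hypothetical, not mlx4 code:

#include <linux/netdevice.h>

struct my_drv_rx_queue {
	struct napi_struct napi;
	int irq;
	/* ... ring state ... */
};

static int my_drv_poll_rx(struct napi_struct *napi, int budget)
{
	/* Stub poll handler; a real driver would process RX completions. */
	napi_complete_done(napi, 0);
	return 0;
}

static void my_drv_activate_rx_queue(struct net_device *dev,
				     struct my_drv_rx_queue *rxq, int qid)
{
	/* Bind the NAPI context to persistent per-queue config slot 'qid';
	 * the core keeps settings such as the affinity mask there across
	 * ifdown/ifup cycles.
	 */
	netif_napi_add_config(dev, &rxq->napi, my_drv_poll_rx, qid);

	/* Tell the core which IRQ backs this NAPI and opt in to
	 * core-managed IRQ affinity; this replaces the driver-side cpumask
	 * allocation and irq_update_affinity_hint() calls removed here.
	 */
	netif_napi_set_irq(&rxq->napi, rxq->irq, NAPIF_IRQ_AFFINITY);

	napi_enable(&rxq->napi);
	netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, &rxq->napi);
}

If the driver still needs the configured CPU itself, e.g. to pick an EQ
vector as the en_cq.c RX hunk does, it can read it back with
cpumask_first(&rxq->napi.config->affinity_mask).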