@@ -430,7 +430,7 @@ struct mlx5e_txqsq {
u16 cc;
u16 skb_fifo_cc;
u32 dma_fifo_cc;
- struct dim dim; /* Adaptive Moderation */
+ struct dim *dim; /* Adaptive Moderation */

/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
@@ -722,7 +722,7 @@ struct mlx5e_rq {
int ix;
unsigned int hw_mtu;
- struct dim dim; /* Dynamic Interrupt Moderation */
+ struct dim *dim; /* Dynamic Interrupt Moderation */

/* XDP */
struct bpf_prog __rcu *xdp_prog;
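
Note on the two hunks above: both queue structures in en.h trade an embedded struct dim for a pointer. The hunks don't state the motivation themselves, but the shape of the change suggests two wins: the moderation state can be allocated per queue on the queue's own NUMA node, and it gains a lifetime independent of the hot SQ/RQ structures, which now carry only an 8-byte pointer. A minimal sketch of the shape change, with stand-in types rather than the real definitions from include/linux/dim.h:

    struct dim_state { int stub; };          /* stand-in for struct dim */

    struct queue_before {
        struct dim_state dim;    /* full state embedded in the hot struct */
    };

    struct queue_after {
        struct dim_state *dim;   /* 8 bytes here; the state itself is
                                  * allocated separately, NUMA-local,
                                  * and freed independently */
    };
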
@@ -44,7 +44,7 @@ mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
void mlx5e_rx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct mlx5e_rq *rq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
@@ -54,7 +54,7 @@ void mlx5e_rx_dim_work(struct work_struct *work)
void mlx5e_tx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+ struct mlx5e_txqsq *sq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
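
Note: once dim is out-of-line, container_of() on the work item can still recover the struct dim, but not the owning queue; there is no longer a fixed offset from dim to mlx5e_rq/mlx5e_txqsq. The workers in en_dim.c therefore follow the priv back-pointer that the allocation sites below fill in. A self-contained sketch of both lookup patterns (simplified types; userspace container_of for illustration):

    #include <stddef.h>

    struct work_struct { int pending; };     /* stand-in */

    struct dim {                             /* stand-in */
        struct work_struct work;
        void *priv;                          /* owner, set at alloc time */
    };

    struct queue { struct dim *dim; };       /* pointer member, as in the patch */

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void dim_work_fn(struct work_struct *work)
    {
        struct dim *dim = container_of(work, struct dim, work);

        /* Embedded member (old code):
         *     struct queue *q = container_of(dim, struct queue, dim);
         * Pointer member (new code): no fixed offset exists, so: */
        struct queue *q = dim->priv;
        (void)q;
    }
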
@@ -962,11 +962,20 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
}
}

- INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
- rq->dim.mode = params->rx_cq_moderation.cq_period_mode;
+ rq->dim = kvzalloc_node(sizeof(*rq->dim), GFP_KERNEL, node);
+ if (!rq->dim) {
+ err = -ENOMEM;
+ goto err_unreg_xdp_rxq_info;
+ }
+
+ rq->dim->priv = rq;
+ INIT_WORK(&rq->dim->work, mlx5e_rx_dim_work);
+ rq->dim->mode = params->rx_cq_moderation.cq_period_mode;

return 0;

+err_unreg_xdp_rxq_info:
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
err_destroy_page_pool:
page_pool_destroy(rq->page_pool);
err_free_by_rq_type:
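
Note: the new allocation slots into mlx5e_alloc_rq()'s existing goto-unwind ladder; err_unreg_xdp_rxq_info releases the XDP registration acquired before the dim allocation and then falls through to the older labels, so everything is released in reverse order of acquisition. A generic sketch of the idiom, kernel context assumed; step_a_register()/step_a_unregister() are hypothetical placeholders, while kvzalloc_node() is the real kernel allocator the patch uses:

    struct dim { int stub; };                /* stand-in */
    struct queue { struct dim *dim; };

    int step_a_register(struct queue *q);    /* hypothetical */
    void step_a_unregister(struct queue *q); /* hypothetical */

    static int example_alloc(struct queue *q, int node)
    {
        int err;

        err = step_a_register(q);            /* earlier resource */
        if (err)
            return err;

        /* Zeroed, NUMA-node-local allocation, as in the patch. */
        q->dim = kvzalloc_node(sizeof(*q->dim), GFP_KERNEL, node);
        if (!q->dim) {
            err = -ENOMEM;
            goto err_unreg_a;                /* undo only step A */
        }

        return 0;

    err_unreg_a:
        step_a_unregister(q);
        return err;
    }
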
@@ -1014,6 +1023,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5e_free_wqe_alloc_info(rq);
}

+ kvfree(rq->dim);
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
@@ -1341,7 +1351,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->dim.work);
+ cancel_work_sync(&rq->dim->work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
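
Note: the ordering across the two RQ teardown functions is what keeps the new kvfree() safe. In outline:

    /*
     * mlx5e_close_rq(rq):  cancel_work_sync(&rq->dim->work)
     *                      -- no dim work can be in flight past this point
     * mlx5e_free_rq(rq):   kvfree(rq->dim)
     *                      -- freed only once the worker cannot run again
     *
     * Reversing the two would let a queued work item touch freed memory.
     */
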
@@ -1616,12 +1626,20 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;

+ sq->dim = kvzalloc_node(sizeof(*sq->dim), GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!sq->dim) {
+ err = -ENOMEM;
+ goto err_free_txqsq_db;
+ }
+
- INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
- sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
+ sq->dim->priv = sq;
+ INIT_WORK(&sq->dim->work, mlx5e_tx_dim_work);
+ sq->dim->mode = params->tx_cq_moderation.cq_period_mode;

return 0;

+err_free_txqsq_db:
+ mlx5e_free_txqsq_db(sq);
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
@@ -1630,6 +1648,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
+ kvfree(sq->dim);
mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
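
Note: the SQ side mirrors the RQ changes, splitting the dim lifetime across the same three stages. If kvzalloc_node() fails in mlx5e_alloc_txqsq(), err_free_txqsq_db undoes only the db allocation; mlx5e_free_txqsq() and its kvfree() run only for queues whose setup fully succeeded. In outline:

    /*
     * mlx5e_alloc_txqsq():  sq->dim = kvzalloc_node(...);
     *                       on failure: goto err_free_txqsq_db
     * mlx5e_close_txqsq():  cancel_work_sync(&sq->dim->work)
     * mlx5e_free_txqsq():   kvfree(sq->dim)
     */
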
@@ -1841,7 +1860,7 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};

- cancel_work_sync(&sq->dim.work);
+ cancel_work_sync(&sq->dim->work);
cancel_work_sync(&sq->recover_work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
@@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
return;

dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(&sq->dim, dim_sample);
+ net_dim(sq->dim, dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
@@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
return;

dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(&rq->dim, dim_sample);
+ net_dim(rq->dim, dim_sample);
}
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
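
Note: in the en_txrx.c handlers the only functional change is passing the pointer itself to net_dim(). Both handlers return early on a per-queue state bit (the test_bit() check sits just above each quoted return;), so dim is never dereferenced on a queue where adaptive moderation wasn't set up. A reconstruction of the TX handler's shape under that reading; the state-bit name is taken from current kernels, not from the quoted hunks:

    static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
    {
        struct mlx5e_sq_stats *stats = sq->stats;
        struct dim_sample dim_sample = {};

        if (unlikely(!test_bit(MLX5E_SQ_STATE_DIM, &sq->state)))
            return;                      /* dim may be unused on this queue */

        dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
                          &dim_sample);
        net_dim(sq->dim, dim_sample);    /* pointer passed straight through */
    }
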