Message ID | 20221223005645.8709-1-gakula@marvell.com |
---|---|
State | Changes Requested |
Delegated to: | Netdev Maintainers |
Series | [net] octeontx2-pf: Fix lmtst Id used in aura free |
On Fri, Dec 23, 2022 at 6:27 AM Geetha sowjanya <gakula@marvell.com> wrote:
>
> The current code uses a per_cpu pointer to get the lmtst_id mapped to
> the core on which aura_free() is executed. Using the per_cpu pointer
> without disabling preemption causes a mismatch between the lmtst_id and
> the core on which the pointer gets freed. This patch fixes the issue by
> disabling preemption around aura_free.
>
> This patch also addresses a memory reservation issue. Currently NIX and
> NPA queue context memory is allocated with the GFP_KERNEL flag, which
> in turn allocates from memory reserved for CMA_DMA. Sizing the CMA_DMA
> memory is getting difficult due to this dependency: the more interfaces
> are enabled, the larger the CMA_DMA memory requirement.
>
> To fix this issue, the GFP_KERNEL flag is replaced with GFP_ATOMIC, so
> that the memory is allocated from unreserved memory.
>
> Fixes: ef6c8da71eaf ("octeontx2-pf: cn10K: Reserve LMTST lines per core")

Two separate issues are being fixed. I think these two fixes should be
separate patches.

> Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
> Signed-off-by: Geetha sowjanya <gakula@marvell.com>
> ---
>  .../ethernet/marvell/octeontx2/af/common.h   |  2 +-
>  .../marvell/octeontx2/nic/otx2_common.c      | 30 +++++++++++++------
>  2 files changed, 22 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
> index 8931864ee110..4b4be9ca4d2f 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
> @@ -61,7 +61,7 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q,
>  	qmem->entry_sz = entry_sz;
>  	qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
>  	qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova,
> -				     GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
> +				     GFP_ATOMIC, DMA_ATTR_FORCE_CONTIGUOUS);
>  	if (!qmem->base)
>  		return -ENOMEM;
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
> index 9e10e7471b88..88f8772a61cd 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
> @@ -1012,6 +1012,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
>  	rbpool = cq->rbpool;
>  	free_ptrs = cq->pool_ptrs;
>
> +	get_cpu();
>  	while (cq->pool_ptrs) {
>  		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
>  			/* Schedule a WQ if we fails to free atleast half of the
> @@ -1031,6 +1032,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
>  		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
>  		cq->pool_ptrs--;
>  	}
> +	put_cpu();
>  	cq->refill_task_sched = false;
>  }
>
> @@ -1368,6 +1370,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
>  	if (err)
>  		goto fail;
>
> +	get_cpu();
>  	/* Allocate pointers and free them to aura/pool */
>  	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
>  		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
> @@ -1376,18 +1379,24 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
>  		sq = &qset->sq[qidx];
>  		sq->sqb_count = 0;
>  		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
> -		if (!sq->sqb_ptrs)
> -			return -ENOMEM;
> +		if (!sq->sqb_ptrs) {
> +			err = -ENOMEM;
> +			goto err_mem;
> +		}
>
>  		for (ptr = 0; ptr < num_sqbs; ptr++) {
> -			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
> -				return -ENOMEM;
> +			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
> +			if (err)
> +				goto err_mem;
>  			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
>  			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
>  		}
>  	}
>
> -	return 0;
> +err_mem:
> +	put_cpu();
> +	return err ? -ENOMEM : 0;
> +
>  fail:
>  	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
>  	otx2_aura_pool_free(pfvf);
> @@ -1426,18 +1435,21 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
>  	if (err)
>  		goto fail;
>
> +	get_cpu();
>  	/* Allocate pointers and free them to aura/pool */
>  	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
>  		pool = &pfvf->qset.pool[pool_id];
>  		for (ptr = 0; ptr < num_ptrs; ptr++) {
> -			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
> -				return -ENOMEM;
> +			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
> +			if (err)
> +				goto err_mem;
>  			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
> 						   bufptr + OTX2_HEAD_ROOM);
>  		}
>  	}
> -
> -	return 0;
> +err_mem:
> +	put_cpu();
> +	return err ? -ENOMEM : 0;
>  fail:
>  	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
>  	otx2_aura_pool_free(pfvf);
> --
> 2.25.1
>
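The first issue is easiest to see in the cn10k aura-free path. The sketch below is not the driver's exact code (the struct and field names `otx2_lmt_info`, `hw.lmt_info` and `lmt_id` are used here only for illustration); it shows why the per-CPU lookup and the LMTST store have to stay on the same core, which is what the added `get_cpu()`/`put_cpu()` pair enforces.

```c
/*
 * Illustrative sketch only -- not the driver's exact cn10k code.  The
 * LMTST line looked up through a per-CPU pointer is only valid while the
 * task cannot migrate, so preemption must stay disabled from the lookup
 * until the store has been issued.
 */
static void aura_freeptr_sketch(struct otx2_nic *pfvf, int aura, u64 buf)
{
	struct otx2_lmt_info *lmt_info;	/* hypothetical per-CPU LMTST state */
	int cpu;

	cpu = get_cpu();	/* disables preemption, returns current CPU */
	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);

	/*
	 * ... build the free-pointer descriptor for 'aura'/'buf' and trigger
	 * the LMTST using lmt_info->lmt_id.  Without get_cpu(), the task
	 * could migrate here and issue the store from a core other than the
	 * one lmt_id was reserved for.
	 */

	put_cpu();		/* re-enables preemption */
}
```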
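Disabling preemption in the pool-init paths has a knock-on effect on error handling: the old early `return -ENOMEM` statements would now return with preemption still disabled, which is why the diff funnels every failure through a label that calls `put_cpu()` first. A condensed sketch of the resulting flow (names taken from the diff, loop details trimmed):

```c
/*
 * Condensed sketch of the reworked init flow; see otx2_rq_aura_pool_init()
 * in the diff above for the real version.
 */
static int pool_init_sketch(struct otx2_nic *pfvf, struct otx2_pool *pool,
			    int pool_id, int num_ptrs)
{
	dma_addr_t bufptr;
	int err = 0;
	int ptr;

	get_cpu();			/* aura_freeptr() needs a stable CPU */

	for (ptr = 0; ptr < num_ptrs; ptr++) {
		err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
		if (err)
			goto err_mem;	/* never return with preemption off */
		pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
					   bufptr + OTX2_HEAD_ROOM);
	}

err_mem:
	put_cpu();			/* runs on both success and failure */
	return err ? -ENOMEM : 0;
}
```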