
[net-next,07/15] qed: simplify initialization of the chains with an external PBL

Message ID 20200722155349.747-8-alobakin@marvell.com (mailing list archive)
State Superseded
Series qed/qede: improve chain API and add XDP_REDIRECT support

Commit Message

Alexander Lobakin July 22, 2020, 3:53 p.m. UTC
Fill the PBL table parameters for chains with an external PBL earlier, in
qed_chain_init_params(), rather than in the allocation routine itself. This
simplifies the allocation code and makes it possible to extend the use of
struct qed_chain_ext_pbl to other chain types.
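[Editor's note, not part of the patch: a minimal sketch of how the external-PBL
path looks after this change, using only the qed_chain_init_params() signature
and the qed_chain_ext_pbl fields visible in the hunks below. The wrapper
function, variable names and the enum values chosen for intended_use/cnt_type
are assumptions made for the example; qed_chain_init_params() itself is a
helper local to qed_chain.c.]

/* Illustrative sketch only; assumes a context inside qed_chain.c where the
 * static qed_chain_init_params() helper is visible.
 */
static void example_setup_external_pbl(struct qed_chain *chain,
				       void *pbl_virt, dma_addr_t pbl_phys,
				       u32 page_cnt, u8 elem_size)
{
	/* The PBL table memory is owned by the caller; only its addresses
	 * are handed over via qed_chain_ext_pbl.
	 */
	struct qed_chain_ext_pbl ext_pbl = {
		.p_pbl_virt = pbl_virt,
		.p_pbl_phys = pbl_phys,
	};

	/* The init helper now records the external table pointers and sets
	 * chain->b_external_pbl up front, so qed_chain_alloc_pbl() later
	 * skips its own dma_alloc_coherent() of the table and only
	 * allocates the chain pages.
	 */
	qed_chain_init_params(chain, page_cnt, elem_size,
			      QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* assumed */
			      QED_CHAIN_MODE_PBL,
			      QED_CHAIN_CNT_TYPE_U16,		/* assumed */
			      &ext_pbl);
}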

Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
---
 drivers/net/ethernet/qlogic/qed/qed_chain.c | 37 +++++++++++----------
 1 file changed, 19 insertions(+), 18 deletions(-)

Patch

diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
index e2c5741ed160..2a61007442ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -9,7 +9,8 @@  static void qed_chain_init_params(struct qed_chain *chain,
 				  u32 page_cnt, u8 elem_size,
 				  enum qed_chain_use_mode intended_use,
 				  enum qed_chain_mode mode,
-				  enum qed_chain_cnt_type cnt_type)
+				  enum qed_chain_cnt_type cnt_type,
+				  const struct qed_chain_ext_pbl *ext_pbl)
 {
 	memset(chain, 0, sizeof(*chain));
 
@@ -29,6 +30,13 @@  static void qed_chain_init_params(struct qed_chain *chain,
 	chain->page_cnt = page_cnt;
 	chain->capacity = chain->usable_per_page * page_cnt;
 	chain->size = chain->elem_per_page * page_cnt;
+
+	if (ext_pbl && ext_pbl->p_pbl_virt) {
+		chain->pbl_sp.table_virt = ext_pbl->p_pbl_virt;
+		chain->pbl_sp.table_phys = ext_pbl->p_pbl_phys;
+
+		chain->b_external_pbl = true;
+	}
 }
 
 static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
@@ -228,8 +236,7 @@  static int qed_chain_alloc_single(struct qed_dev *cdev,
 	return 0;
 }
 
-static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
-			       struct qed_chain_ext_pbl *ext_pbl)
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
 {
 	struct device *dev = &cdev->pdev->dev;
 	struct addr_tbl_entry *addr_tbl;
@@ -251,21 +258,14 @@  static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
 
 	chain->pbl.pp_addr_tbl = addr_tbl;
 
-	if (ext_pbl) {
-		size = 0;
-		pbl_virt = ext_pbl->p_pbl_virt;
-		pbl_phys = ext_pbl->p_pbl_phys;
+	if (chain->b_external_pbl)
+		goto alloc_pages;
 
-		chain->b_external_pbl = true;
-	} else {
-		size = array_size(page_cnt, sizeof(*pbl_virt));
-		if (unlikely(size == SIZE_MAX))
-			return -EOVERFLOW;
-
-		pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys,
-					      GFP_KERNEL);
-	}
+	size = array_size(page_cnt, sizeof(*pbl_virt));
+	if (unlikely(size == SIZE_MAX))
+		return -EOVERFLOW;
 
+	pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
 	if (!pbl_virt)
 		return -ENOMEM;
 
@@ -273,6 +273,7 @@  static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
 	chain->pbl_sp.table_phys = pbl_phys;
 	chain->pbl_sp.table_size = size;
 
+alloc_pages:
 	for (i = 0; i < page_cnt; i++) {
 		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
 					  GFP_KERNEL);
@@ -323,7 +324,7 @@  int qed_chain_alloc(struct qed_dev *cdev,
 	}
 
 	qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode,
-			      cnt_type);
+			      cnt_type, ext_pbl);
 
 	switch (mode) {
 	case QED_CHAIN_MODE_NEXT_PTR:
@@ -333,7 +334,7 @@  int qed_chain_alloc(struct qed_dev *cdev,
 		rc = qed_chain_alloc_single(cdev, chain);
 		break;
 	case QED_CHAIN_MODE_PBL:
-		rc = qed_chain_alloc_pbl(cdev, chain, ext_pbl);
+		rc = qed_chain_alloc_pbl(cdev, chain);
 		break;
 	default:
 		return -EINVAL;