[net-next,4/6] net: mvneta: Allocate page for the descriptor

Message ID 20180706131949.2684-5-gregory.clement@bootlin.com (mailing list archive)
State New, archived

Commit Message

Gregory CLEMENT July 6, 2018, 1:19 p.m. UTC
Instead of trying to allocate the exact amount of memory for each
descriptor, use one full page per descriptor. This simplifies the
allocation management and improves the performance of the driver.

Based on the work of Yelena Krivosheev <yelena@marvell.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
---
 drivers/net/ethernet/marvell/mvneta.c    | 66 ++++++++++--------------
 drivers/net/ethernet/marvell/mvneta_bm.h |  3 --
 2 files changed, 26 insertions(+), 43 deletions(-)
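
For context, the refill path after this patch boils down to: allocate
one page, DMA-map the whole page, and hand the bus address to the
descriptor. Below is a minimal standalone sketch of that pattern, with
error handling as in the patch; the helper name and the explicit dev
argument are illustrative only, not part of the driver:

	/* Sketch: page-per-descriptor refill (illustrative helper) */
	static int rx_refill_page(struct device *dev,
				  struct mvneta_rx_desc *desc,
				  struct mvneta_rx_queue *rxq,
				  struct mvneta_port *pp, gfp_t gfp_mask)
	{
		struct page *page = __dev_alloc_page(gfp_mask);
		dma_addr_t phys;

		if (!page)
			return -ENOMEM;

		/* Map the full page; no per-MTU size computation needed */
		phys = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, phys)) {
			__free_page(page);	/* not mapped yet: plain free */
			return -ENOMEM;
		}

		mvneta_rx_desc_fill(desc, phys + pp->rx_offset_correction,
				    page, rxq);
		return 0;
	}

Compared with the removed mvneta_frag_alloc()/mvneta_frag_free() pair,
there is no size-dependent branch between netdev_alloc_frag() and
kmalloc(), and the free path is the same for every MTU.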

Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 2c575c6732ce..12739fb60732 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1791,47 +1791,30 @@  static void mvneta_txq_done(struct mvneta_port *pp,
 	}
 }
 
-void *mvneta_frag_alloc(unsigned int frag_size)
-{
-	if (likely(frag_size <= PAGE_SIZE))
-		return netdev_alloc_frag(frag_size);
-	else
-		return kmalloc(frag_size, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
-
-void mvneta_frag_free(unsigned int frag_size, void *data)
-{
-	if (likely(frag_size <= PAGE_SIZE))
-		skb_free_frag(data);
-	else
-		kfree(data);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_free);
-
 /* Refill processing for SW buffer management */
-static int mvneta_rx_refill(struct mvneta_port *pp,
-			    struct mvneta_rx_desc *rx_desc,
-			    struct mvneta_rx_queue *rxq)
-
+/* Allocate page per descriptor */
+static inline int mvneta_rx_refill(struct mvneta_port *pp,
+				   struct mvneta_rx_desc *rx_desc,
+				   struct mvneta_rx_queue *rxq,
+				   gfp_t gfp_mask)
 {
 	dma_addr_t phys_addr;
-	void *data;
+	struct page *page;
 
-	data = mvneta_frag_alloc(pp->frag_size);
-	if (!data)
+	page = __dev_alloc_page(gfp_mask);
+	if (!page)
 		return -ENOMEM;
 
-	phys_addr = dma_map_single(pp->dev->dev.parent, data,
-				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
-				   DMA_FROM_DEVICE);
+	/* map page for use */
+	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-		mvneta_frag_free(pp->frag_size, data);
+		__free_page(page);
 		return -ENOMEM;
 	}
 
 	phys_addr += pp->rx_offset_correction;
-	mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
+	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
 	return 0;
 }
 
@@ -1897,7 +1880,7 @@  static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 
 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
-		mvneta_frag_free(pp->frag_size, data);
+		__free_page(data);
 	}
 }
 
@@ -1924,6 +1907,7 @@  static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
 		struct sk_buff *skb;
 		unsigned char *data;
+		struct page *page;
 		dma_addr_t phys_addr;
 		u32 rx_status, frag_size;
 		int rx_bytes, err, index;
@@ -1932,7 +1916,10 @@  static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
 		index = rx_desc - rxq->descs;
-		data = rxq->buf_virt_addr[index];
+		page = (struct page *)rxq->buf_virt_addr[index];
+		data = page_address(page);
+		/* Prefetch header */
+		prefetch(data);
 		phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
 
 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
@@ -1975,7 +1962,7 @@  static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 		}
 
 		/* Refill processing */
-		err = mvneta_rx_refill(pp, rx_desc, rxq);
+		err = mvneta_rx_refill(pp, rx_desc, rxq, GFP_KERNEL);
 		if (err) {
 			netdev_err(dev, "Linux processing - Can't refill\n");
 			rxq->refill_err++;
@@ -2767,9 +2754,11 @@  static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
 	for (i = 0; i < num; i++) {
 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
-		if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
-			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs  filled\n",
-				__func__, rxq->id, i, num);
+		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
+				     GFP_KERNEL) != 0) {
+			netdev_err(pp->dev,
+				   "%s:rxq %d, %d of %d buffs  filled\n",
+				   __func__, rxq->id, i, num);
 			break;
 		}
 	}
@@ -3183,8 +3172,6 @@  static int mvneta_change_mtu(struct net_device *dev, int mtu)
 		mvneta_bm_update_mtu(pp, mtu);
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
-	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
-	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret) {
@@ -3672,8 +3659,7 @@  static int mvneta_open(struct net_device *dev)
 	int ret;
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
-	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
-	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	pp->frag_size = PAGE_SIZE;
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret)
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index 9358626e51ec..c8425d35c049 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -130,9 +130,6 @@  struct mvneta_bm_pool {
 };
 
 /* Declarations and definitions */
-void *mvneta_frag_alloc(unsigned int frag_size);
-void mvneta_frag_free(unsigned int frag_size, void *data);
-
 #if IS_ENABLED(CONFIG_MVNETA_BM)
 struct mvneta_bm *mvneta_bm_get(struct device_node *node);
 void mvneta_bm_put(struct mvneta_bm *priv);