[net-next,v2,5/6] tsnep: Add RX queue info for XDP support

Message ID: 20221208054045.3600-6-gerhard@engleder-embedded.com (mailing list archive)
State: Changes Requested
Delegated to: Netdev Maintainers
Series: tsnep: XDP support

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 11 of 11 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 126 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Gerhard Engleder Dec. 8, 2022, 5:40 a.m. UTC
Register xdp_rxq_info with page_pool memory model. This is needed for
XDP buffer handling.

Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
---
 drivers/net/ethernet/engleder/tsnep.h      |  5 ++--
 drivers/net/ethernet/engleder/tsnep_main.c | 34 +++++++++++++++++-----
 2 files changed, 30 insertions(+), 9 deletions(-)
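
For reference, a condensed sketch of the pattern this patch introduces:
register the queue's xdp_rxq_info at ring init, attach the page_pool as
its memory model, and unregister it again before the pool is destroyed
at ring cleanup. All calls below are taken from the diff; the
surrounding function structure is elided.

    /* ring init: register the RX queue for XDP, then tell the XDP
     * core to recycle this queue's buffers through its page_pool
     */
    retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
                              rx->queue_index, napi_id);
    if (retval)
            goto failed;
    retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                        rx->page_pool);
    if (retval)
            goto failed;

    /* ring cleanup: unregister before the page_pool is destroyed */
    if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
            xdp_rxq_info_unreg(&rx->xdp_rxq);
    if (rx->page_pool)
            page_pool_destroy(rx->page_pool);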

Comments

Fijalkowski, Maciej Dec. 8, 2022, 12:59 p.m. UTC | #1
On Thu, Dec 08, 2022 at 06:40:44AM +0100, Gerhard Engleder wrote:
> Register xdp_rxq_info with page_pool memory model. This is needed for
> XDP buffer handling.
> 
> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
> ---
>  drivers/net/ethernet/engleder/tsnep.h      |  5 ++--
>  drivers/net/ethernet/engleder/tsnep_main.c | 34 +++++++++++++++++-----
>  2 files changed, 30 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
> index 0e7fc36a64e1..70bc133d4a9d 100644
> --- a/drivers/net/ethernet/engleder/tsnep.h
> +++ b/drivers/net/ethernet/engleder/tsnep.h
> @@ -127,6 +127,7 @@ struct tsnep_rx {
>  	u32 owner_counter;
>  	int increment_owner_counter;
>  	struct page_pool *page_pool;
> +	struct xdp_rxq_info xdp_rxq;

this occupies a full cacheline, did you make sure that you don't break
the tsnep_rx layout by having xdp_rxq_info in the middle of it?

>  
>  	u32 packets;
>  	u32 bytes;
> @@ -139,11 +140,11 @@ struct tsnep_queue {
>  	struct tsnep_adapter *adapter;
>  	char name[IFNAMSIZ + 9];
>  
> +	struct napi_struct napi;
> +
>  	struct tsnep_tx *tx;
>  	struct tsnep_rx *rx;
>  
> -	struct napi_struct napi;

why this move?

> -
>  	int irq;
>  	u32 irq_mask;
>  	void __iomem *irq_delay_addr;
> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
> index ebfc08c1c46d..2b662a98b62a 100644
> --- a/drivers/net/ethernet/engleder/tsnep_main.c
> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
> @@ -806,6 +806,9 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>  		entry->page = NULL;
>  	}
>  
> +	if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
> +		xdp_rxq_info_unreg(&rx->xdp_rxq);
> +
>  	if (rx->page_pool)
>  		page_pool_destroy(rx->page_pool);
>  
> @@ -821,7 +824,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>  	}
>  }
>  
> -static int tsnep_rx_ring_init(struct tsnep_rx *rx)
> +static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
>  {
>  	struct device *dmadev = rx->adapter->dmadev;
>  	struct tsnep_rx_entry *entry;
> @@ -864,6 +867,15 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
>  		goto failed;
>  	}
>  
> +	retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
> +				  rx->queue_index, napi_id);
> +	if (retval)
> +		goto failed;
> +	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
> +					    rx->page_pool);
> +	if (retval)
> +		goto failed;
> +
>  	for (i = 0; i < TSNEP_RING_SIZE; i++) {
>  		entry = &rx->entry[i];
>  		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
> @@ -1112,7 +1124,8 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
>  }
>  
>  static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
> -			 int queue_index, struct tsnep_rx *rx)
> +			 unsigned int napi_id, int queue_index,
> +			 struct tsnep_rx *rx)
>  {
>  	dma_addr_t dma;
>  	int retval;
> @@ -1122,7 +1135,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
>  	rx->addr = addr;
>  	rx->queue_index = queue_index;
>  
> -	retval = tsnep_rx_ring_init(rx);
> +	retval = tsnep_rx_ring_init(rx, napi_id);
>  	if (retval)
>  		return retval;
>  
> @@ -1250,6 +1263,7 @@ int tsnep_netdev_open(struct net_device *netdev)
>  {
>  	struct tsnep_adapter *adapter = netdev_priv(netdev);
>  	int i;
> +	unsigned int napi_id;
>  	void __iomem *addr;
>  	int tx_queue_index = 0;
>  	int rx_queue_index = 0;
> @@ -1257,6 +1271,11 @@ int tsnep_netdev_open(struct net_device *netdev)
>  
>  	for (i = 0; i < adapter->num_queues; i++) {
>  		adapter->queue[i].adapter = adapter;
> +
> +		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
> +			       tsnep_poll);
> +		napi_id = adapter->queue[i].napi.napi_id;
> +
>  		if (adapter->queue[i].tx) {
>  			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
>  			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
> @@ -1267,7 +1286,7 @@ int tsnep_netdev_open(struct net_device *netdev)
>  		}
>  		if (adapter->queue[i].rx) {
>  			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
> -			retval = tsnep_rx_open(adapter, addr,
> +			retval = tsnep_rx_open(adapter, addr, napi_id,
>  					       rx_queue_index,
>  					       adapter->queue[i].rx);
>  			if (retval)
> @@ -1299,8 +1318,6 @@ int tsnep_netdev_open(struct net_device *netdev)
>  		goto phy_failed;
>  
>  	for (i = 0; i < adapter->num_queues; i++) {
> -		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
> -			       tsnep_poll);
>  		napi_enable(&adapter->queue[i].napi);
>  
>  		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
> @@ -1321,6 +1338,8 @@ int tsnep_netdev_open(struct net_device *netdev)
>  			tsnep_rx_close(adapter->queue[i].rx);
>  		if (adapter->queue[i].tx)
>  			tsnep_tx_close(adapter->queue[i].tx);
> +
> +		netif_napi_del(&adapter->queue[i].napi);
>  	}
>  	return retval;
>  }
> @@ -1339,7 +1358,6 @@ int tsnep_netdev_close(struct net_device *netdev)
>  		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
>  
>  		napi_disable(&adapter->queue[i].napi);
> -		netif_napi_del(&adapter->queue[i].napi);
>  
>  		tsnep_free_irq(&adapter->queue[i], i == 0);
>  
> @@ -1347,6 +1365,8 @@ int tsnep_netdev_close(struct net_device *netdev)
>  			tsnep_rx_close(adapter->queue[i].rx);
>  		if (adapter->queue[i].tx)
>  			tsnep_tx_close(adapter->queue[i].tx);
> +
> +		netif_napi_del(&adapter->queue[i].napi);
>  	}
>  
>  	return 0;
> -- 
> 2.30.2
>
Gerhard Engleder Dec. 8, 2022, 8:32 p.m. UTC | #2
On 08.12.22 13:59, Maciej Fijalkowski wrote:
> On Thu, Dec 08, 2022 at 06:40:44AM +0100, Gerhard Engleder wrote:
>> Register xdp_rxq_info with page_pool memory model. This is needed for
>> XDP buffer handling.
>>
>> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
>> ---
>>   drivers/net/ethernet/engleder/tsnep.h      |  5 ++--
>>   drivers/net/ethernet/engleder/tsnep_main.c | 34 +++++++++++++++++-----
>>   2 files changed, 30 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
>> index 0e7fc36a64e1..70bc133d4a9d 100644
>> --- a/drivers/net/ethernet/engleder/tsnep.h
>> +++ b/drivers/net/ethernet/engleder/tsnep.h
>> @@ -127,6 +127,7 @@ struct tsnep_rx {
>>   	u32 owner_counter;
>>   	int increment_owner_counter;
>>   	struct page_pool *page_pool;
>> +	struct xdp_rxq_info xdp_rxq;
> 
> this occupies a full cacheline, did you make sure that you don't break
> the tsnep_rx layout by having xdp_rxq_info in the middle of it?

Actually I have done no cacheline optimisation for this structure so far.
I saw that igb/igc put xdp_rxq_info at the end. Is that best practice,
to keep other variables out of the same cacheline as xdp_rxq?

>>   
>>   	u32 packets;
>>   	u32 bytes;
>> @@ -139,11 +140,11 @@ struct tsnep_queue {
>>   	struct tsnep_adapter *adapter;
>>   	char name[IFNAMSIZ + 9];
>>   
>> +	struct napi_struct napi;
>> +
>>   	struct tsnep_tx *tx;
>>   	struct tsnep_rx *rx;
>>   
>> -	struct napi_struct napi;
> 
> why this move?

I reordered it because napi is now initialised before tx/rx. A cosmetic
move.

Gerhard
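
For illustration, the igb/igc-style placement discussed above would
amount to something like the following sketch (only the fields visible
in the diff are shown, everything else is elided; struct xdp_rxq_info
is declared ____cacheline_aligned, which is why it occupies a full
cacheline wherever it lands):

    struct tsnep_rx {
            /* ... hot RX-path fields ... */
            u32 owner_counter;
            int increment_owner_counter;
            struct page_pool *page_pool;

            u32 packets;
            u32 bytes;
            /* ... */

            /* placed last: the alignment padding forced by the
             * cacheline-aligned member then sits after the hot
             * fields instead of splitting them
             */
            struct xdp_rxq_info xdp_rxq;
    };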
Saeed Mahameed Dec. 9, 2022, 12:53 a.m. UTC | #3
On 08 Dec 21:32, Gerhard Engleder wrote:
>On 08.12.22 13:59, Maciej Fijalkowski wrote:
>>On Thu, Dec 08, 2022 at 06:40:44AM +0100, Gerhard Engleder wrote:
>>>Register xdp_rxq_info with page_pool memory model. This is needed for
>>>XDP buffer handling.
>>>
>>>Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
>>>---
>>>  drivers/net/ethernet/engleder/tsnep.h      |  5 ++--
>>>  drivers/net/ethernet/engleder/tsnep_main.c | 34 +++++++++++++++++-----
>>>  2 files changed, 30 insertions(+), 9 deletions(-)
>>>
>>>diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
>>>index 0e7fc36a64e1..70bc133d4a9d 100644
>>>--- a/drivers/net/ethernet/engleder/tsnep.h
>>>+++ b/drivers/net/ethernet/engleder/tsnep.h
>>>@@ -127,6 +127,7 @@ struct tsnep_rx {
>>>  	u32 owner_counter;
>>>  	int increment_owner_counter;
>>>  	struct page_pool *page_pool;
>>>+	struct xdp_rxq_info xdp_rxq;
>>
>>this occupies a full cacheline, did you make sure that you don't break
>>the tsnep_rx layout by having xdp_rxq_info in the middle of it?
>
>Actually I have done no cacheline optimisation for this structure so far.
>I saw that igb/igc put xdp_rxq_info at the end. Is that best practice,
>to keep other variables out of the same cacheline as xdp_rxq?

As a rule of thumb, organize the structure members in the order they are
accessed in the data path.. but that does not remove the need for some
layout testing, via pahole for example..
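
As a concrete example of such layout testing, pahole can print the
struct from an object file built with debug info (hypothetical
invocation; the object path is illustrative):

    $ pahole -C tsnep_rx drivers/net/ethernet/engleder/tsnep_main.o

The output lists every member with its offset and size, marks each
64-byte cacheline boundary, and summarizes holes and padding at the
end, which makes it easy to see whether hot RX fields end up sharing
a cacheline with xdp_rxq.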

It's up to you and the maintainer of this driver to decide how critical
this is.

Reviewed-by: Saeed Mahameed <saeed@kernel.org>
Gerhard Engleder Dec. 9, 2022, 8:11 a.m. UTC | #4
On 09.12.22 01:53, Saeed Mahameed wrote:
> On 08 Dec 21:32, Gerhard Engleder wrote:
>> On 08.12.22 13:59, Maciej Fijalkowski wrote:
>>> On Thu, Dec 08, 2022 at 06:40:44AM +0100, Gerhard Engleder wrote:
>>>> Register xdp_rxq_info with page_pool memory model. This is needed for
>>>> XDP buffer handling.
>>>>
>>>> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
>>>> ---
>>>>  drivers/net/ethernet/engleder/tsnep.h      |  5 ++--
>>>>  drivers/net/ethernet/engleder/tsnep_main.c | 34 +++++++++++++++++-----
>>>>  2 files changed, 30 insertions(+), 9 deletions(-)
>>>>
>>>> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
>>>> index 0e7fc36a64e1..70bc133d4a9d 100644
>>>> --- a/drivers/net/ethernet/engleder/tsnep.h
>>>> +++ b/drivers/net/ethernet/engleder/tsnep.h
>>>> @@ -127,6 +127,7 @@ struct tsnep_rx {
>>>>      u32 owner_counter;
>>>>      int increment_owner_counter;
>>>>      struct page_pool *page_pool;
>>>> +    struct xdp_rxq_info xdp_rxq;
>>>
>>> this occupies a full cacheline, did you make sure that you don't break
>>> the tsnep_rx layout by having xdp_rxq_info in the middle of it?
>>
>> Actually I have done no cacheline optimisation for this structure so far.
>> I saw that igb/igc put xdp_rxq_info at the end. Is that best practice,
>> to keep other variables out of the same cacheline as xdp_rxq?
> 
> As a rule of thumb, organize the structure members in the order they are
> accessed in the data path.. but that does not remove the need for some
> layout testing, via pahole for example..
> It's up to you and the maintainer of this driver to decide how critical
> this is.
> 
> Reviewed-by: Saeed Mahameed <saeed@kernel.org>

Thanks for the clarification; I will think about it.

I wrote the driver and I'm responsible for keeping it working. Does that
make me the maintainer?

Thanks for the review!

Gerhard

Patch

diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 0e7fc36a64e1..70bc133d4a9d 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -127,6 +127,7 @@  struct tsnep_rx {
 	u32 owner_counter;
 	int increment_owner_counter;
 	struct page_pool *page_pool;
+	struct xdp_rxq_info xdp_rxq;
 
 	u32 packets;
 	u32 bytes;
@@ -139,11 +140,11 @@  struct tsnep_queue {
 	struct tsnep_adapter *adapter;
 	char name[IFNAMSIZ + 9];
 
+	struct napi_struct napi;
+
 	struct tsnep_tx *tx;
 	struct tsnep_rx *rx;
 
-	struct napi_struct napi;
-
 	int irq;
 	u32 irq_mask;
 	void __iomem *irq_delay_addr;
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index ebfc08c1c46d..2b662a98b62a 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -806,6 +806,9 @@  static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 		entry->page = NULL;
 	}
 
+	if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+		xdp_rxq_info_unreg(&rx->xdp_rxq);
+
 	if (rx->page_pool)
 		page_pool_destroy(rx->page_pool);
 
@@ -821,7 +824,7 @@  static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 	}
 }
 
-static int tsnep_rx_ring_init(struct tsnep_rx *rx)
+static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
 {
 	struct device *dmadev = rx->adapter->dmadev;
 	struct tsnep_rx_entry *entry;
@@ -864,6 +867,15 @@  static int tsnep_rx_ring_init(struct tsnep_rx *rx)
 		goto failed;
 	}
 
+	retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
+				  rx->queue_index, napi_id);
+	if (retval)
+		goto failed;
+	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					    rx->page_pool);
+	if (retval)
+		goto failed;
+
 	for (i = 0; i < TSNEP_RING_SIZE; i++) {
 		entry = &rx->entry[i];
 		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
@@ -1112,7 +1124,8 @@  static bool tsnep_rx_pending(struct tsnep_rx *rx)
 }
 
 static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
-			 int queue_index, struct tsnep_rx *rx)
+			 unsigned int napi_id, int queue_index,
+			 struct tsnep_rx *rx)
 {
 	dma_addr_t dma;
 	int retval;
@@ -1122,7 +1135,7 @@  static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
 	rx->addr = addr;
 	rx->queue_index = queue_index;
 
-	retval = tsnep_rx_ring_init(rx);
+	retval = tsnep_rx_ring_init(rx, napi_id);
 	if (retval)
 		return retval;
 
@@ -1250,6 +1263,7 @@  int tsnep_netdev_open(struct net_device *netdev)
 {
 	struct tsnep_adapter *adapter = netdev_priv(netdev);
 	int i;
+	unsigned int napi_id;
 	void __iomem *addr;
 	int tx_queue_index = 0;
 	int rx_queue_index = 0;
@@ -1257,6 +1271,11 @@  int tsnep_netdev_open(struct net_device *netdev)
 
 	for (i = 0; i < adapter->num_queues; i++) {
 		adapter->queue[i].adapter = adapter;
+
+		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
+			       tsnep_poll);
+		napi_id = adapter->queue[i].napi.napi_id;
+
 		if (adapter->queue[i].tx) {
 			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
 			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
@@ -1267,7 +1286,7 @@  int tsnep_netdev_open(struct net_device *netdev)
 		}
 		if (adapter->queue[i].rx) {
 			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
-			retval = tsnep_rx_open(adapter, addr,
+			retval = tsnep_rx_open(adapter, addr, napi_id,
 					       rx_queue_index,
 					       adapter->queue[i].rx);
 			if (retval)
@@ -1299,8 +1318,6 @@  int tsnep_netdev_open(struct net_device *netdev)
 		goto phy_failed;
 
 	for (i = 0; i < adapter->num_queues; i++) {
-		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
-			       tsnep_poll);
 		napi_enable(&adapter->queue[i].napi);
 
 		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -1321,6 +1338,8 @@  int tsnep_netdev_open(struct net_device *netdev)
 			tsnep_rx_close(adapter->queue[i].rx);
 		if (adapter->queue[i].tx)
 			tsnep_tx_close(adapter->queue[i].tx);
+
+		netif_napi_del(&adapter->queue[i].napi);
 	}
 	return retval;
 }
@@ -1339,7 +1358,6 @@  int tsnep_netdev_close(struct net_device *netdev)
 		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
 
 		napi_disable(&adapter->queue[i].napi);
-		netif_napi_del(&adapter->queue[i].napi);
 
 		tsnep_free_irq(&adapter->queue[i], i == 0);
 
@@ -1347,6 +1365,8 @@  int tsnep_netdev_close(struct net_device *netdev)
 			tsnep_rx_close(adapter->queue[i].rx);
 		if (adapter->queue[i].tx)
 			tsnep_tx_close(adapter->queue[i].tx);
+
+		netif_napi_del(&adapter->queue[i].napi);
 	}
 
 	return 0;