
[net-next,v1,1/9] net: Introduce new fields for napi and queue associations

Message ID 169059161688.3736.18170697577939556255.stgit@anambiarhost.jf.intel.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series Introduce NAPI queues support

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 5471 this patch: 5473
netdev/cc_maintainers warning 3 maintainers not CCed: daniel@iogearbox.net edumazet@google.com pabeni@redhat.com
netdev/build_clang fail Errors and warnings before: 2258 this patch: 958
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 5711 this patch: 4386
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 125 lines checked
netdev/kdoc fail Errors and warnings before: 0 this patch: 2
netdev/source_inline success Was 0 now: 0

Commit Message

Nambiar, Amritha July 29, 2023, 12:46 a.m. UTC
Add a napi pointer to the netdev queue structures to track the napi
instance associated with each queue. This establishes the
queue<->napi mapping.

Introduce new napi fields 'napi_rxq_list' and 'napi_txq_list' for
the rx and tx queue set associated with the napi, along with
functions to associate a queue with its napi and to remove the
association. This exposes the queue/queue-set on the corresponding
irq line for each napi instance.


Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
---
 include/linux/netdevice.h |   19 ++++++++++++++++
 net/core/dev.c            |   52 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 71 insertions(+)
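
To illustrate how a driver would consume the new API, here is a minimal
sketch (not part of this series); the driver structure, field names and
my_open() are illustrative assumptions, and the only interfaces taken from
the patch are netif_napi_add_queue() and the NAPI_QUEUE_RX/NAPI_QUEUE_TX
enum values:

/* Hypothetical driver sketch, not from this patch: after registering
 * each NAPI context with netif_napi_add(), record which rx/tx queue
 * pair that NAPI instance services. struct my_priv, struct my_q_pair
 * and my_open() are illustrative names only.
 */
#include <linux/netdevice.h>

struct my_q_pair {
	struct napi_struct napi;
};

struct my_priv {
	unsigned int num_q_pairs;
	struct my_q_pair *qp;
};

static int my_open(struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < priv->num_q_pairs; i++) {
		struct my_q_pair *qp = &priv->qp[i];

		/* assumes queue pair i uses rx queue i and tx queue i */
		err = netif_napi_add_queue(&qp->napi, i, NAPI_QUEUE_RX);
		if (err)
			return err;

		err = netif_napi_add_queue(&qp->napi, i, NAPI_QUEUE_TX);
		if (err)
			return err;
	}

	return 0;
}

The associations are torn down implicitly: __netif_napi_del(), extended
later in this patch, walks the per-NAPI queue lists and unlinks the
entries.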

Comments

kernel test robot July 29, 2023, 9:55 a.m. UTC | #1
Hi Amritha,

kernel test robot noticed the following build errors:

[auto build test ERROR on net-next/main]

url:    https://github.com/intel-lab-lkp/linux/commits/Amritha-Nambiar/net-Introduce-new-fields-for-napi-and-queue-associations/20230729-083646
base:   net-next/main
patch link:    https://lore.kernel.org/r/169059161688.3736.18170697577939556255.stgit%40anambiarhost.jf.intel.com
patch subject: [net-next PATCH v1 1/9] net: Introduce new fields for napi and queue associations
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20230729/202307291714.SUP7uQyV-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce: (https://download.01.org/0day-ci/archive/20230729/202307291714.SUP7uQyV-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202307291714.SUP7uQyV-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from drivers/infiniband/sw/rxe/rxe_comp.c:11:
>> drivers/infiniband/sw/rxe/rxe_queue.h:53:6: error: redeclaration of 'enum queue_type'
      53 | enum queue_type {
         |      ^~~~~~~~~~
   In file included from include/net/sock.h:46,
                    from include/linux/tcp.h:19,
                    from include/linux/ipv6.h:94,
                    from include/net/ipv6.h:12,
                    from include/rdma/ib_verbs.h:25,
                    from drivers/infiniband/sw/rxe/rxe.h:17,
                    from drivers/infiniband/sw/rxe/rxe_comp.c:9:
   include/linux/netdevice.h:348:6: note: originally defined here
     348 | enum queue_type {
         |      ^~~~~~~~~~


vim +53 drivers/infiniband/sw/rxe/rxe_queue.h

8700e3e7c4857d Moni Shoua  2016-06-16   9  
ae6e843fe08d0e Bob Pearson 2021-09-14  10  /* Implements a simple circular buffer that is shared between user
ae6e843fe08d0e Bob Pearson 2021-09-14  11   * and the driver and can be resized. The requested element size is
ae6e843fe08d0e Bob Pearson 2021-09-14  12   * rounded up to a power of 2 and the number of elements in the buffer
ae6e843fe08d0e Bob Pearson 2021-09-14  13   * is also rounded up to a power of 2. Since the queue is empty when
ae6e843fe08d0e Bob Pearson 2021-09-14  14   * the producer and consumer indices match the maximum capacity of the
ae6e843fe08d0e Bob Pearson 2021-09-14  15   * queue is one less than the number of element slots.
5bcf5a59c41e19 Bob Pearson 2021-05-27  16   *
5bcf5a59c41e19 Bob Pearson 2021-05-27  17   * Notes:
ae6e843fe08d0e Bob Pearson 2021-09-14  18   *   - The driver indices are always masked off to q->index_mask
5bcf5a59c41e19 Bob Pearson 2021-05-27  19   *     before storing so do not need to be checked on reads.
ae6e843fe08d0e Bob Pearson 2021-09-14  20   *   - The user whether user space or kernel is generally
ae6e843fe08d0e Bob Pearson 2021-09-14  21   *     not trusted so its parameters are masked to make sure
ae6e843fe08d0e Bob Pearson 2021-09-14  22   *     they do not access the queue out of bounds on reads.
ae6e843fe08d0e Bob Pearson 2021-09-14  23   *   - The driver indices for queues must not be written
ae6e843fe08d0e Bob Pearson 2021-09-14  24   *     by user so a local copy is used and a shared copy is
ae6e843fe08d0e Bob Pearson 2021-09-14  25   *     stored when the local copy is changed.
5bcf5a59c41e19 Bob Pearson 2021-05-27  26   *   - By passing the type in the parameter list separate from q
5bcf5a59c41e19 Bob Pearson 2021-05-27  27   *     the compiler can eliminate the switch statement when the
ae6e843fe08d0e Bob Pearson 2021-09-14  28   *     actual queue type is known when the function is called at
ae6e843fe08d0e Bob Pearson 2021-09-14  29   *     compile time.
ae6e843fe08d0e Bob Pearson 2021-09-14  30   *   - These queues are lock free. The user and driver must protect
ae6e843fe08d0e Bob Pearson 2021-09-14  31   *     changes to their end of the queues with locks if more than one
ae6e843fe08d0e Bob Pearson 2021-09-14  32   *     CPU can be accessing it at the same time.
8700e3e7c4857d Moni Shoua  2016-06-16  33   */
8700e3e7c4857d Moni Shoua  2016-06-16  34  
ae6e843fe08d0e Bob Pearson 2021-09-14  35  /**
ae6e843fe08d0e Bob Pearson 2021-09-14  36   * enum queue_type - type of queue
ae6e843fe08d0e Bob Pearson 2021-09-14  37   * @QUEUE_TYPE_TO_CLIENT:	Queue is written by rxe driver and
a77a52385e9a76 Bob Pearson 2023-02-14  38   *				read by client which may be a user space
a77a52385e9a76 Bob Pearson 2023-02-14  39   *				application or a kernel ulp.
a77a52385e9a76 Bob Pearson 2023-02-14  40   *				Used by rxe internals only.
ae6e843fe08d0e Bob Pearson 2021-09-14  41   * @QUEUE_TYPE_FROM_CLIENT:	Queue is written by client and
a77a52385e9a76 Bob Pearson 2023-02-14  42   *				read by rxe driver.
a77a52385e9a76 Bob Pearson 2023-02-14  43   *				Used by rxe internals only.
a77a52385e9a76 Bob Pearson 2023-02-14  44   * @QUEUE_TYPE_FROM_ULP:	Queue is written by kernel ulp and
a77a52385e9a76 Bob Pearson 2023-02-14  45   *				read by rxe driver.
a77a52385e9a76 Bob Pearson 2023-02-14  46   *				Used by kernel verbs APIs only on
a77a52385e9a76 Bob Pearson 2023-02-14  47   *				behalf of ulps.
a77a52385e9a76 Bob Pearson 2023-02-14  48   * @QUEUE_TYPE_TO_ULP:		Queue is written by rxe driver and
a77a52385e9a76 Bob Pearson 2023-02-14  49   *				read by kernel ulp.
a77a52385e9a76 Bob Pearson 2023-02-14  50   *				Used by kernel verbs APIs only on
a77a52385e9a76 Bob Pearson 2023-02-14  51   *				behalf of ulps.
ae6e843fe08d0e Bob Pearson 2021-09-14  52   */
59daff49f25fbb Bob Pearson 2021-05-27 @53  enum queue_type {
ae6e843fe08d0e Bob Pearson 2021-09-14  54  	QUEUE_TYPE_TO_CLIENT,
ae6e843fe08d0e Bob Pearson 2021-09-14  55  	QUEUE_TYPE_FROM_CLIENT,
a77a52385e9a76 Bob Pearson 2023-02-14  56  	QUEUE_TYPE_FROM_ULP,
a77a52385e9a76 Bob Pearson 2023-02-14  57  	QUEUE_TYPE_TO_ULP,
59daff49f25fbb Bob Pearson 2021-05-27  58  };
59daff49f25fbb Bob Pearson 2021-05-27  59
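For context, the failure is a plain C tag clash rather than anything
NAPI-specific: rxe_comp.c ends up seeing both the new enum from
netdevice.h and the rxe driver's pre-existing one. A minimal reproduction
(illustrative only, intentionally does not compile):

/* Both tag definitions below are copied from the files named in the
 * robot report; compiling them in one translation unit reproduces the
 * "redeclaration of 'enum queue_type'" error. This snippet exists to
 * show the failure and is expected not to build.
 */

/* include/linux/netdevice.h (added by this patch) */
enum queue_type {
	NAPI_QUEUE_RX,
	NAPI_QUEUE_TX,
};

/* drivers/infiniband/sw/rxe/rxe_queue.h (pre-existing) */
enum queue_type {	/* error: redeclaration of 'enum queue_type' */
	QUEUE_TYPE_TO_CLIENT,
	QUEUE_TYPE_FROM_CLIENT,
	QUEUE_TYPE_FROM_ULP,
	QUEUE_TYPE_TO_ULP,
};

One possible way out would be a more namespaced tag for the netdev enum
(for example something like 'enum netdev_queue_type'); that is only a
suggestion here, the thread itself does not settle on a fix.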
Simon Horman July 30, 2023, 5:10 p.m. UTC | #2
On Fri, Jul 28, 2023 at 05:46:56PM -0700, Amritha Nambiar wrote:

...

> diff --git a/net/core/dev.c b/net/core/dev.c
> index b58674774a57..875023ab614c 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -6389,6 +6389,42 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
>  }
>  EXPORT_SYMBOL(dev_set_threaded);
>  
> +/**
> + * netif_napi_add_queue - Associate queue with the napi
> + * @napi: NAPI context
> + * @queue_index: Index of queue
> + * @queue_type: queue type as RX or TX

Hi Amritha,

a minor nit from my side: @queue_type -> @type

> + *
> + * Add queue with its corresponding napi context
> + */
> +int netif_napi_add_queue(struct napi_struct *napi, unsigned int queue_index,
> +			 enum queue_type type)
> +{

...
Nambiar, Amritha July 31, 2023, 10:57 p.m. UTC | #3
On 7/30/2023 10:10 AM, Simon Horman wrote:
> On Fri, Jul 28, 2023 at 05:46:56PM -0700, Amritha Nambiar wrote:
> 
> ...
> 
>> diff --git a/net/core/dev.c b/net/core/dev.c
>> index b58674774a57..875023ab614c 100644
>> --- a/net/core/dev.c
>> +++ b/net/core/dev.c
>> @@ -6389,6 +6389,42 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
>>   }
>>   EXPORT_SYMBOL(dev_set_threaded);
>>   
>> +/**
>> + * netif_napi_add_queue - Associate queue with the napi
>> + * @napi: NAPI context
>> + * @queue_index: Index of queue
>> + * @queue_type: queue type as RX or TX
> 
> Hi Amritha,
> 
> a minor nit from my side: @queue_type -> @type

Will fix in the next version. Thanks.

> 
>> + *
>> + * Add queue with its corresponding napi context
>> + */
>> +int netif_napi_add_queue(struct napi_struct *napi, unsigned int queue_index,
>> +			 enum queue_type type)
>> +{
> 
> ...
>

Patch

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 84c36a7f873f..7299872bfdff 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -342,6 +342,14 @@  struct gro_list {
  */
 #define GRO_HASH_BUCKETS	8
 
+/*
+ * napi queue container type
+ */
+enum queue_type {
+	NAPI_QUEUE_RX,
+	NAPI_QUEUE_TX,
+};
+
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
  */
@@ -376,6 +384,8 @@  struct napi_struct {
 	/* control-path-only fields follow */
 	struct list_head	dev_list;
 	struct hlist_node	napi_hash_node;
+	struct list_head	napi_rxq_list;
+	struct list_head	napi_txq_list;
 };
 
 enum {
@@ -651,6 +661,9 @@  struct netdev_queue {
 
 	unsigned long		state;
 
+	/* NAPI instance for the queue */
+	struct napi_struct      *napi;
+	struct list_head        q_list;
 #ifdef CONFIG_BQL
 	struct dql		dql;
 #endif
@@ -796,6 +809,9 @@  struct netdev_rx_queue {
 #ifdef CONFIG_XDP_SOCKETS
 	struct xsk_buff_pool            *pool;
 #endif
+	struct list_head		q_list;
+	/* NAPI instance for the queue */
+	struct napi_struct		*napi;
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -2618,6 +2634,9 @@  static inline void *netdev_priv(const struct net_device *dev)
  */
 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
 
+int netif_napi_add_queue(struct napi_struct *napi, unsigned int queue_index,
+			 enum queue_type type);
+
 /* Default NAPI poll() weight
  * Device drivers are strongly advised to not use bigger value
  */
diff --git a/net/core/dev.c b/net/core/dev.c
index b58674774a57..875023ab614c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6389,6 +6389,42 @@  int dev_set_threaded(struct net_device *dev, bool threaded)
 }
 EXPORT_SYMBOL(dev_set_threaded);
 
+/**
+ * netif_napi_add_queue - Associate queue with the napi
+ * @napi: NAPI context
+ * @queue_index: Index of queue
+ * @queue_type: queue type as RX or TX
+ *
+ * Add queue with its corresponding napi context
+ */
+int netif_napi_add_queue(struct napi_struct *napi, unsigned int queue_index,
+			 enum queue_type type)
+{
+	struct net_device *dev = napi->dev;
+	struct netdev_rx_queue *rxq;
+	struct netdev_queue *txq;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (type) {
+	case NAPI_QUEUE_RX:
+		rxq = __netif_get_rx_queue(dev, queue_index);
+		rxq->napi = napi;
+		list_add_rcu(&rxq->q_list, &napi->napi_rxq_list);
+		break;
+	case NAPI_QUEUE_TX:
+		txq = netdev_get_tx_queue(dev, queue_index);
+		txq->napi = napi;
+		list_add_rcu(&txq->q_list, &napi->napi_txq_list);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(netif_napi_add_queue);
+
 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 			   int (*poll)(struct napi_struct *, int), int weight)
 {
@@ -6424,6 +6460,9 @@  void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 	 */
 	if (dev->threaded && napi_kthread_create(napi))
 		dev->threaded = 0;
+
+	INIT_LIST_HEAD(&napi->napi_rxq_list);
+	INIT_LIST_HEAD(&napi->napi_txq_list);
 }
 EXPORT_SYMBOL(netif_napi_add_weight);
 
@@ -6485,6 +6524,18 @@  static void flush_gro_hash(struct napi_struct *napi)
 	}
 }
 
+static void napi_del_queues(struct napi_struct *napi)
+{
+	struct netdev_rx_queue *rx_queue, *rxq;
+	struct netdev_queue *tx_queue, *txq;
+
+	list_for_each_entry_safe(rx_queue, rxq, &napi->napi_rxq_list, q_list)
+		list_del_rcu(&rx_queue->q_list);
+
+	list_for_each_entry_safe(tx_queue, txq, &napi->napi_txq_list, q_list)
+		list_del_rcu(&tx_queue->q_list);
+}
+
 /* Must be called in process context */
 void __netif_napi_del(struct napi_struct *napi)
 {
@@ -6502,6 +6553,7 @@  void __netif_napi_del(struct napi_struct *napi)
 		kthread_stop(napi->thread);
 		napi->thread = NULL;
 	}
+	napi_del_queues(napi);
 }
 EXPORT_SYMBOL(__netif_napi_del);
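
Since the queue entries are linked with list_add_rcu() and unlinked with
list_del_rcu(), a lockless reader can walk a NAPI instance's queue list
under rcu_read_lock(). A hedged sketch of such a reader (the helper name
is hypothetical and not part of this patch):

/* Hypothetical reader of the new per-NAPI rx queue list; only the
 * q_list/napi_rxq_list fields added by this patch are assumed.
 */
#include <linux/netdevice.h>
#include <linux/rculist.h>

static unsigned int my_count_napi_rx_queues(struct napi_struct *napi)
{
	struct netdev_rx_queue *rxq;
	unsigned int n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rxq, &napi->napi_rxq_list, q_list)
		n++;
	rcu_read_unlock();

	return n;
}

Note that the q_list member lives directly in struct netdev_queue and
struct netdev_rx_queue, so a given queue can sit on at most one NAPI
instance's list at a time.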