@@ -346,6 +346,14 @@ struct gro_list {
*/
#define GRO_HASH_BUCKETS 8
+/*
+ * NAPI queue type: identifies whether a queue associated with a napi is RX or TX
+ */
+enum q_type {
+ QUEUE_RX,
+ QUEUE_TX,
+};
+
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
@@ -380,6 +388,8 @@ struct napi_struct {
/* control-path-only fields follow */
struct list_head dev_list;
struct hlist_node napi_hash_node;
+ struct list_head napi_rxq_list;
+ struct list_head napi_txq_list;
};
enum {
@@ -655,6 +665,9 @@ struct netdev_queue {
unsigned long state;
+ /* NAPI instance for the queue */
+ struct napi_struct *napi;
+ struct list_head q_list;
#ifdef CONFIG_BQL
struct dql dql;
#endif
@@ -2609,6 +2622,9 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+int netif_napi_add_queue(struct napi_struct *napi, unsigned int queue_index,
+ enum q_type type);
+
/* Default NAPI poll() weight
* Device drivers are strongly advised to not use bigger value
*/
@@ -21,6 +21,9 @@ struct netdev_rx_queue {
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
+ struct list_head q_list;
+ /* NAPI instance for the queue */
+ struct napi_struct *napi;
} ____cacheline_aligned_in_smp;
/*
@@ -6391,6 +6391,42 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
}
EXPORT_SYMBOL(dev_set_threaded);
+/**
+ * netif_napi_add_queue - Associate queue with the napi
+ * @napi: NAPI context
+ * @queue_index: Index of queue
+ * @type: queue type, either QUEUE_RX or QUEUE_TX
+ *
+ * Add the queue to the napi's list. Return: 0 on success, -EINVAL on error.
+ */
+int netif_napi_add_queue(struct napi_struct *napi, unsigned int queue_index,
+ enum q_type type)
+{
+ struct net_device *dev = napi->dev;
+ struct netdev_rx_queue *rxq;
+ struct netdev_queue *txq;
+
+ if (!dev)
+ return -EINVAL;
+
+ switch (type) {
+ case QUEUE_RX:
+ rxq = __netif_get_rx_queue(dev, queue_index);
+ rxq->napi = napi;
+ list_add_rcu(&rxq->q_list, &napi->napi_rxq_list);
+ break;
+ case QUEUE_TX:
+ txq = netdev_get_tx_queue(dev, queue_index);
+ txq->napi = napi;
+ list_add_rcu(&txq->q_list, &napi->napi_txq_list);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(netif_napi_add_queue);
+
void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
@@ -6426,6 +6462,9 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
*/
if (dev->threaded && napi_kthread_create(napi))
dev->threaded = 0;
+
+ INIT_LIST_HEAD(&napi->napi_rxq_list);
+ INIT_LIST_HEAD(&napi->napi_txq_list);
}
EXPORT_SYMBOL(netif_napi_add_weight);
@@ -6487,6 +6526,18 @@ static void flush_gro_hash(struct napi_struct *napi)
}
}
+static void napi_del_queues(struct napi_struct *napi)
+{
+ struct netdev_rx_queue *rx_queue, *rxq;
+ struct netdev_queue *tx_queue, *txq;
+
+ list_for_each_entry_safe(rx_queue, rxq, &napi->napi_rxq_list, q_list)
+ list_del_rcu(&rx_queue->q_list);
+
+ list_for_each_entry_safe(tx_queue, txq, &napi->napi_txq_list, q_list)
+ list_del_rcu(&tx_queue->q_list);
+}
+
/* Must be called in process context */
void __netif_napi_del(struct napi_struct *napi)
{
@@ -6504,6 +6555,7 @@ void __netif_napi_del(struct napi_struct *napi)
kthread_stop(napi->thread);
napi->thread = NULL;
}
+ napi_del_queues(napi);
}
EXPORT_SYMBOL(__netif_napi_del);
Add the napi pointer in the netdev queue for tracking the napi instance
for each queue. This achieves the queue<->napi mapping. Introduce new
napi fields 'napi_rxq_list' and 'napi_txq_list' for the rx and tx queue
sets associated with the napi. Add functions to associate a queue with
its napi and to handle their removal as well. This lists the
queue/queue-set on the corresponding irq line for each napi instance.

Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
---
 include/linux/netdevice.h     | 16 ++++++++++++++++
 include/net/netdev_rx_queue.h |  3 +++
 net/core/dev.c                | 52 ++++++++++++++++++++++++++++++++++++
 3 files changed, 71 insertions(+)