Message ID | 168564135094.7284.9691772825401908320.stgit@anambiarhost.jf.intel.com |
---|---|
State | RFC |
Delegated to: | Netdev Maintainers |
Series | Introduce napi queues support |
On Thu, Jun 01, 2023 at 10:42:30AM -0700, Amritha Nambiar wrote:
> After the napi context is initialized, map the napi instance
> with the queue/queue-set on the corresponding irq line.
>
> Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>

Hi Amritha,

some minor feedback from my side.

...

> diff --git a/net/core/dev.c b/net/core/dev.c
> index 9ee8eb3ef223..ba712119ec85 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -6366,6 +6366,40 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
>  }
>  EXPORT_SYMBOL(dev_set_threaded);
>  
> +/**
> + * netif_napi_add_queue - Associate queue with the napi
> + * @napi: NAPI context
> + * @queue_index: Index of queue
> + * @napi_container_type: queue type as RX or TX

s/@napi_container_type:/@type:/

> + *
> + * Add queue with its corresponding napi context
> + */
> +int netif_napi_add_queue(struct napi_struct *napi, u16 queue_index,
> +			 enum napi_container_type type)
> +{
> +	struct napi_queue *napi_queue;
> +
> +	napi_queue = kzalloc(sizeof(*napi_queue), GFP_KERNEL);
> +	if (!napi_queue)
> +		return -ENOMEM;
> +
> +	napi_queue->queue_index = queue_index;
> +
> +	switch (type) {
> +	case NAPI_RX_CONTAINER:
> +		list_add_rcu(&napi_queue->q_list, &napi->napi_rxq_list);
> +		break;
> +	case NAPI_TX_CONTAINER:
> +		list_add_rcu(&napi_queue->q_list, &napi->napi_txq_list);
> +		break;
> +	default:

Perhaps napi_queue is leaked here.

> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL(netif_napi_add_queue);
> +
>  void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
>  			   int (*poll)(struct napi_struct *, int), int weight)
>  {
On Thu, 2023-06-01 at 10:42 -0700, Amritha Nambiar wrote:
> After the napi context is initialized, map the napi instance
> with the queue/queue-set on the corresponding irq line.
>
> Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
> ---
>  drivers/net/ethernet/intel/ice/ice_lib.c  | 57 +++++++++++++++++++++++++++++
>  drivers/net/ethernet/intel/ice/ice_lib.h  |  4 ++
>  drivers/net/ethernet/intel/ice/ice_main.c |  4 ++
>  include/linux/netdevice.h                 | 11 ++++++
>  net/core/dev.c                            | 34 +++++++++++++++++
>  5 files changed, 109 insertions(+), 1 deletion(-)

...

> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> index 49f64401af7c..a562db712c6e 100644
> --- a/include/linux/netdevice.h
> +++ b/include/linux/netdevice.h
> @@ -342,6 +342,14 @@ struct gro_list {
>   */
>  #define GRO_HASH_BUCKETS	8
>  
> +/*
> + * napi queue container type
> + */
> +enum napi_container_type {
> +	NAPI_RX_CONTAINER,
> +	NAPI_TX_CONTAINER,
> +};
> +
>  struct napi_queue {
>  	struct list_head q_list;
>  	u16 queue_index;
> @@ -2622,6 +2630,9 @@ static inline void *netdev_priv(const struct net_device *dev)
>   */
>  #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
>  
> +int netif_napi_add_queue(struct napi_struct *napi, u16 queue_index,
> +			 enum napi_container_type);
> +
>  /* Default NAPI poll() weight
>   * Device drivers are strongly advised to not use bigger value
>   */
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 9ee8eb3ef223..ba712119ec85 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -6366,6 +6366,40 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
>  }
>  EXPORT_SYMBOL(dev_set_threaded);
>  
> +/**
> + * netif_napi_add_queue - Associate queue with the napi
> + * @napi: NAPI context
> + * @queue_index: Index of queue
> + * @napi_container_type: queue type as RX or TX
> + *
> + * Add queue with its corresponding napi context
> + */
> +int netif_napi_add_queue(struct napi_struct *napi, u16 queue_index,
> +			 enum napi_container_type type)
> +{
> +	struct napi_queue *napi_queue;
> +
> +	napi_queue = kzalloc(sizeof(*napi_queue), GFP_KERNEL);
> +	if (!napi_queue)
> +		return -ENOMEM;
> +
> +	napi_queue->queue_index = queue_index;
> +
> +	switch (type) {
> +	case NAPI_RX_CONTAINER:
> +		list_add_rcu(&napi_queue->q_list, &napi->napi_rxq_list);
> +		break;
> +	case NAPI_TX_CONTAINER:
> +		list_add_rcu(&napi_queue->q_list, &napi->napi_txq_list);
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL(netif_napi_add_queue);
> +
>  void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
>  			    int (*poll)(struct napi_struct *, int), int weight)
>  {

I think these later 2 chunks are a better fit for the previous patch,
so that here there will be only driver-related changes.

Also it looks like the napi-queue APIs are going to grow a bit. Perhaps
it would be useful to move all that new code into a separate file?
dev.c is already pretty big.

Thanks!

Paolo
On 6/2/2023 8:42 AM, Simon Horman wrote:
> On Thu, Jun 01, 2023 at 10:42:30AM -0700, Amritha Nambiar wrote:
>> After the napi context is initialized, map the napi instance
>> with the queue/queue-set on the corresponding irq line.
>>
>> Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
>
> Hi Amritha,
>
> some minor feedback from my side.
>
> ...
>
>> diff --git a/net/core/dev.c b/net/core/dev.c
>> index 9ee8eb3ef223..ba712119ec85 100644
>> --- a/net/core/dev.c
>> +++ b/net/core/dev.c
>> @@ -6366,6 +6366,40 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
>>  }
>>  EXPORT_SYMBOL(dev_set_threaded);
>>  
>> +/**
>> + * netif_napi_add_queue - Associate queue with the napi
>> + * @napi: NAPI context
>> + * @queue_index: Index of queue
>> + * @napi_container_type: queue type as RX or TX
>
> s/@napi_container_type:/@type:/

Will fix.

>> + *
>> + * Add queue with its corresponding napi context
>> + */
>> +int netif_napi_add_queue(struct napi_struct *napi, u16 queue_index,
>> +			 enum napi_container_type type)
>> +{
>> +	struct napi_queue *napi_queue;
>> +
>> +	napi_queue = kzalloc(sizeof(*napi_queue), GFP_KERNEL);
>> +	if (!napi_queue)
>> +		return -ENOMEM;
>> +
>> +	napi_queue->queue_index = queue_index;
>> +
>> +	switch (type) {
>> +	case NAPI_RX_CONTAINER:
>> +		list_add_rcu(&napi_queue->q_list, &napi->napi_rxq_list);
>> +		break;
>> +	case NAPI_TX_CONTAINER:
>> +		list_add_rcu(&napi_queue->q_list, &napi->napi_txq_list);
>> +		break;
>> +	default:
>
> Perhaps napi_queue is leaked here.

My bad. Will fix in the next version.

>> +		return -EINVAL;
>> +	}
>> +
>> +	return 0;
>> +}
>> +EXPORT_SYMBOL(netif_napi_add_queue);
>> +
>>  void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
>>  			    int (*poll)(struct napi_struct *, int), int weight)
>>  {
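Taken together, Simon's two comments imply a version of the function
along these lines. This is only a sketch of the acknowledged fixes (the
@type kernel-doc rename and a kfree() on the unknown-type path), not
code from any posted revision:

/**
 * netif_napi_add_queue - Associate queue with the napi
 * @napi: NAPI context
 * @queue_index: Index of queue
 * @type: queue type as RX or TX
 *
 * Add queue with its corresponding napi context
 */
int netif_napi_add_queue(struct napi_struct *napi, u16 queue_index,
			 enum napi_container_type type)
{
	struct napi_queue *napi_queue;

	napi_queue = kzalloc(sizeof(*napi_queue), GFP_KERNEL);
	if (!napi_queue)
		return -ENOMEM;

	napi_queue->queue_index = queue_index;

	switch (type) {
	case NAPI_RX_CONTAINER:
		list_add_rcu(&napi_queue->q_list, &napi->napi_rxq_list);
		break;
	case NAPI_TX_CONTAINER:
		list_add_rcu(&napi_queue->q_list, &napi->napi_txq_list);
		break;
	default:
		/* the leak Simon spotted: free the allocation before erroring out */
		kfree(napi_queue);
		return -EINVAL;
	}

	return 0;
}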
On 6/2/2023 11:31 PM, Paolo Abeni wrote:
> On Thu, 2023-06-01 at 10:42 -0700, Amritha Nambiar wrote:
>> After the napi context is initialized, map the napi instance
>> with the queue/queue-set on the corresponding irq line.
>>
>> Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
>> ---
>>  drivers/net/ethernet/intel/ice/ice_lib.c  | 57 +++++++++++++++++++++++++++++
>>  drivers/net/ethernet/intel/ice/ice_lib.h  |  4 ++
>>  drivers/net/ethernet/intel/ice/ice_main.c |  4 ++
>>  include/linux/netdevice.h                 | 11 ++++++
>>  net/core/dev.c                            | 34 +++++++++++++++++
>>  5 files changed, 109 insertions(+), 1 deletion(-)

...

> I think these later 2 chunks are a better fit for the previous patch,
> so that here there will be only driver-related changes.

So if the later chunks are moved to the previous patch, wouldn't git
bisect and build throw warnings, as the kernel API
(netif_napi_add_queue) would only be defined but not used? The function
netif_napi_add_queue is invoked only by the driver code. Hence, I had
to move the kernel function definition into this patch, which has all
the driver code.

> Also it looks like the napi-queue APIs are going to grow a bit.
> Perhaps it would be useful to move all that new code into a separate
> file? dev.c is already pretty big.

Are you suggesting moving just the napi-queue related new code into a
separate file, but keeping all the other napi stuff in dev.c? In that
case, currently, the new file would contain only two function
definitions, for netif_napi_add_queue and delete (if the napi-queue
specific APIs do not grow). I agree there may be more generic napi APIs
in future (not just queue-info related), but wouldn't it look better if
all the napi code were in a new file?

> Thanks!
>
> Paolo
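For illustration, the "delete" counterpart Amritha mentions, living in
a split-out file along the lines Paolo suggests, could look roughly
like the sketch below. The file name (net/core/napi_queues.c) and the
helper name netif_napi_del_queues() are hypothetical, invented here;
the kfree_rcu() use also assumes struct napi_queue gains a
struct rcu_head member named "rcu", which the posted struct does not
yet have:

/* net/core/napi_queues.c (hypothetical): napi <-> queue mapping helpers
 * split out of dev.c.
 */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Unlink and free every queue entry hanging off a napi instance.
 * Entries were published with list_add_rcu(), so they are unlinked
 * with list_del_rcu() and freed only after an RCU grace period.
 */
void netif_napi_del_queues(struct napi_struct *napi)
{
	struct napi_queue *q, *tmp;

	list_for_each_entry_safe(q, tmp, &napi->napi_rxq_list, q_list) {
		list_del_rcu(&q->q_list);
		kfree_rcu(q, rcu);	/* assumes an rcu_head in struct napi_queue */
	}
	list_for_each_entry_safe(q, tmp, &napi->napi_txq_list, q_list) {
		list_del_rcu(&q->q_list);
		kfree_rcu(q, rcu);
	}
}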
After the napi context is initialized, map the napi instance
with the queue/queue-set on the corresponding irq line.

Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
---
 drivers/net/ethernet/intel/ice/ice_lib.c  | 57 +++++++++++++++++++++++++++++
 drivers/net/ethernet/intel/ice/ice_lib.h  |  4 ++
 drivers/net/ethernet/intel/ice/ice_main.c |  4 ++
 include/linux/netdevice.h                 | 11 ++++++
 net/core/dev.c                            | 34 +++++++++++++++++
 5 files changed, 109 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5ddb95d1073a..58f68363119f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2478,6 +2478,12 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
 		goto unroll_vector_base;
 
 	ice_vsi_map_rings_to_vectors(vsi);
+
+	/* Associate q_vector rings to napi */
+	ret = ice_vsi_add_napi_queues(vsi);
+	if (ret)
+		goto unroll_vector_base;
+
 	vsi->stat_offsets_loaded = false;
 
 	if (ice_is_xdp_ena_vsi(vsi)) {
@@ -2957,6 +2963,57 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
 		synchronize_irq(vsi->q_vectors[i]->irq.virq);
 }
 
+/**
+ * ice_q_vector_add_napi_queues - Add queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector
+ * Returns 0 on success or < 0 on error
+ */
+int ice_q_vector_add_napi_queues(struct ice_q_vector *q_vector)
+{
+	struct ice_rx_ring *rx_ring;
+	struct ice_tx_ring *tx_ring;
+	int ret;
+
+	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+		ret = netif_napi_add_queue(&q_vector->napi, rx_ring->q_index,
+					   NAPI_RX_CONTAINER);
+		if (ret)
+			return ret;
+	}
+	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+		ret = netif_napi_add_queue(&q_vector->napi, tx_ring->q_index,
+					   NAPI_TX_CONTAINER);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * ice_vsi_add_napi_queues
+ * @vsi: VSI pointer
+ *
+ * Associate queue[s] with napi for all vectors
+ * Returns 0 on success or < 0 on error
+ */
+int ice_vsi_add_napi_queues(struct ice_vsi *vsi)
+{
+	int i, ret = 0;
+
+	if (!vsi->netdev)
+		return ret;
+
+	ice_for_each_q_vector(vsi, i) {
+		ret = ice_q_vector_add_napi_queues(vsi->q_vectors[i]);
+		if (ret)
+			return ret;
+	}
+	return ret;
+}
+
 /**
  * ice_napi_del - Remove NAPI handler for the VSI
  * @vsi: VSI for which NAPI handler is to be removed
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index e985766e6bb5..623b5f738a5c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -93,6 +93,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
 
+int ice_q_vector_add_napi_queues(struct ice_q_vector *q_vector);
+
+int ice_vsi_add_napi_queues(struct ice_vsi *vsi);
+
 void ice_napi_del(struct ice_vsi *vsi);
 
 int ice_vsi_release(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 62e91512aeab..c66ff1473aeb 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3348,9 +3348,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	ice_for_each_q_vector(vsi, v_idx)
+	ice_for_each_q_vector(vsi, v_idx) {
 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
 			       ice_napi_poll);
+		ice_q_vector_add_napi_queues(vsi->q_vectors[v_idx]);
+	}
 }
 
 /**
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 49f64401af7c..a562db712c6e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -342,6 +342,14 @@ struct gro_list {
  */
 #define GRO_HASH_BUCKETS	8
 
+/*
+ * napi queue container type
+ */
+enum napi_container_type {
+	NAPI_RX_CONTAINER,
+	NAPI_TX_CONTAINER,
+};
+
 struct napi_queue {
 	struct list_head q_list;
 	u16 queue_index;
@@ -2622,6 +2630,9 @@ static inline void *netdev_priv(const struct net_device *dev)
  */
 #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
 
+int netif_napi_add_queue(struct napi_struct *napi, u16 queue_index,
+			 enum napi_container_type);
+
 /* Default NAPI poll() weight
  * Device drivers are strongly advised to not use bigger value
  */
diff --git a/net/core/dev.c b/net/core/dev.c
index 9ee8eb3ef223..ba712119ec85 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6366,6 +6366,40 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
 }
 EXPORT_SYMBOL(dev_set_threaded);
 
+/**
+ * netif_napi_add_queue - Associate queue with the napi
+ * @napi: NAPI context
+ * @queue_index: Index of queue
+ * @napi_container_type: queue type as RX or TX
+ *
+ * Add queue with its corresponding napi context
+ */
+int netif_napi_add_queue(struct napi_struct *napi, u16 queue_index,
+			 enum napi_container_type type)
+{
+	struct napi_queue *napi_queue;
+
+	napi_queue = kzalloc(sizeof(*napi_queue), GFP_KERNEL);
+	if (!napi_queue)
+		return -ENOMEM;
+
+	napi_queue->queue_index = queue_index;
+
+	switch (type) {
+	case NAPI_RX_CONTAINER:
+		list_add_rcu(&napi_queue->q_list, &napi->napi_rxq_list);
+		break;
+	case NAPI_TX_CONTAINER:
+		list_add_rcu(&napi_queue->q_list, &napi->napi_txq_list);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(netif_napi_add_queue);
+
 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 			   int (*poll)(struct napi_struct *, int), int weight)
 {
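Since the queue entries are published with list_add_rcu(), a consumer
would be expected to walk the lists under rcu_read_lock(). A minimal
hypothetical reader, assuming nothing beyond the fields shown in this
patch (the function name and the pr_info() reporting are invented here;
no reader is part of this series):

/* Walk the RX queue list of a napi instance without taking a lock.
 * The list was populated with list_add_rcu(), so
 * list_for_each_entry_rcu() under rcu_read_lock() is the matching
 * traversal.
 */
static void napi_report_rx_queues(struct napi_struct *napi)
{
	struct napi_queue *q;

	rcu_read_lock();
	list_for_each_entry_rcu(q, &napi->napi_rxq_list, q_list)
		pr_info("napi %u -> rx queue %u\n", napi->napi_id,
			q->queue_index);
	rcu_read_unlock();
}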