@@ -3672,7 +3672,7 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
u16 index, bool is_rxqs_map);
int netif_show_xps_queue(struct net_device *dev, unsigned long **mask,
- u16 index);
+ u16 index, bool is_rxqs_map);

/**
* netif_attr_test_mask - Test a CPU or Rx queue set in a mask
@@ -3773,7 +3773,8 @@ static inline int __netif_set_xps_queue(struct net_device *dev,
}

static inline int netif_show_xps_queue(struct net_device *dev,
- unsigned long **mask, u16 index)
+ unsigned long **mask, u16 index,
+ bool is_rxqs_map)
{
return 0;
}
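The added bool selects which of the two XPS maps the helper walks. For illustration, a minimal caller-side sketch of the two call forms (names and bitmap sizing follow the callers changed below; allocation checks and freeing elided; not part of the applied diff):

    unsigned long *mask;
    int ret;

    /* Tx queue -> CPUs map (xps_cpus): bitmap indexed by CPU id. */
    mask = bitmap_zalloc(nr_cpu_ids, GFP_KERNEL);
    ret = netif_show_xps_queue(dev, &mask, index, false);

    /* Tx queue -> Rx queues map (xps_rxqs): bitmap indexed by Rx queue. */
    mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
    ret = netif_show_xps_queue(dev, &mask, index, true);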
@@ -2832,7 +2832,7 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
EXPORT_SYMBOL(netif_set_xps_queue);

int netif_show_xps_queue(struct net_device *dev, unsigned long **mask,
- u16 index)
+ u16 index, bool is_rxqs_map)
{
const unsigned long *possible_mask = NULL;
int j, num_tc = 1, tc = 0, ret = 0;
@@ -2859,12 +2859,17 @@ int netif_show_xps_queue(struct net_device *dev, unsigned long **mask,
}
}

- dev_maps = rcu_dereference(dev->xps_cpus_map);
+ if (is_rxqs_map) {
+ dev_maps = rcu_dereference(dev->xps_rxqs_map);
+ nr_ids = dev->num_rx_queues;
+ } else {
+ dev_maps = rcu_dereference(dev->xps_cpus_map);
+ nr_ids = nr_cpu_ids;
+ if (num_possible_cpus() > 1)
+ possible_mask = cpumask_bits(cpu_possible_mask);
+ }
if (!dev_maps)
goto out_no_map;
- nr_ids = nr_cpu_ids;
- if (num_possible_cpus() > 1)
- possible_mask = cpumask_bits(cpu_possible_mask);
for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
j < nr_ids;) {
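Past this point the walk is map-agnostic: each ID j (a CPU or an Rx queue, depending on is_rxqs_map) owns num_tc consecutive entries in the map, and the entry for this queue's traffic class sits at j * num_tc + tc. A sketch of the loop body, reconstructed from the xps_rxqs_show loop removed below (simplified; the helper writes into *mask):

    int i, tci = j * num_tc + tc;
    struct xps_map *map;

    map = rcu_dereference(dev_maps->attr_map[tci]);
    if (map) {
        for (i = map->len; i--;)
            if (map->queues[i] == index) {
                set_bit(j, *mask); /* this Tx queue is mapped to ID j */
                break;
            }
    }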
@@ -1329,7 +1329,7 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf)
if (!mask)
return -ENOMEM;

- ret = netif_show_xps_queue(dev, &mask, index);
+ ret = netif_show_xps_queue(dev, &mask, index, false);
if (ret) {
bitmap_free(mask);
return ret;
@@ -1379,45 +1379,20 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
struct net_device *dev = queue->dev;
- struct xps_dev_maps *dev_maps;
unsigned long *mask, index;
- int j, len, num_tc = 1, tc = 0;
+ int len, ret;

index = get_netdev_queue_index(queue);

- if (dev->num_tc) {
- num_tc = dev->num_tc;
- tc = netdev_txq_to_tc(dev, index);
- if (tc < 0)
- return -EINVAL;
- }
mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
if (!mask)
return -ENOMEM;

- rcu_read_lock();
- dev_maps = rcu_dereference(dev->xps_rxqs_map);
- if (!dev_maps)
- goto out_no_maps;
-
- for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
- j < dev->num_rx_queues;) {
- int i, tci = j * num_tc + tc;
- struct xps_map *map;
-
- map = rcu_dereference(dev_maps->attr_map[tci]);
- if (!map)
- continue;
-
- for (i = map->len; i--;) {
- if (map->queues[i] == index) {
- set_bit(j, mask);
- break;
- }
- }
+ ret = netif_show_xps_queue(dev, &mask, index, true);
+ if (ret) {
+ bitmap_free(mask);
+ return ret;
}
-out_no_maps:
- rcu_read_unlock();

len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
bitmap_free(mask);
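Userspace-visible behaviour is intended to stay the same: reading /sys/class/net/<iface>/queues/tx-<n>/xps_cpus or xps_rxqs still prints the bitmap rendered by bitmap_print_to_pagebuf above; only the duplicated walk and the locking behind the read change.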
Accesses to dev->xps_rxqs_map (when using dev->num_tc) should be
protected by the xps_map mutex, to avoid possible race conditions when
dev->num_tc is updated while the map is accessed. Make use of the now
available netif_show_xps_queue helper which does just that.

This also helps to keep xps_cpus_show and xps_rxqs_show synced as their
logic is the same (as in __netif_set_xps_queue, the function allocating
and setting them up).

Fixes: 8af2c06ff4b1 ("net-sysfs: Add interface for Rx queue(s) map per Tx queue")
Signed-off-by: Antoine Tenart <atenart@kernel.org>
---
 include/linux/netdevice.h |  5 +++--
 net/core/dev.c            | 15 ++++++++++-----
 net/core/net-sysfs.c      | 37 ++++++-------------------------
 3 files changed, 19 insertions(+), 38 deletions(-)
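For reference, a sketch of the serialization the helper is described as providing (an assumption based on the commit message: xps_map_mutex is taken to be the same mutex __netif_set_xps_queue holds while rewriting the maps; the exact lock placement lives in net/core/dev.c and is outside the hunks above):

    int netif_show_xps_queue(struct net_device *dev, unsigned long **mask,
                             u16 index, bool is_rxqs_map)
    {
            int ret = 0;

            mutex_lock(&xps_map_mutex);  /* freezes dev->num_tc and the maps */
            rcu_read_lock();             /* the maps themselves are RCU-protected */
            /* resolve num_tc/tc, pick xps_cpus_map or xps_rxqs_map, walk it */
            rcu_read_unlock();
            mutex_unlock(&xps_map_mutex);

            return ret;
    }

Without that serialization, a concurrent netdev_set_num_tc() can change dev->num_tc between the tc lookup and the map walk, so tci = j * num_tc + tc may index the wrong entries.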