Retrieve this info via the field 'irq_affinity_managed' of 'struct device'
in queue map helpers.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq-pci.c     | 1 +
 block/blk-mq-rdma.c    | 3 +++
 block/blk-mq-virtio.c  | 1 +
 include/linux/blk-mq.h | 3 ++-
 4 files changed, 7 insertions(+), 1 deletion(-)

--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -37,6 +37,7 @@ int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
 		for_each_cpu(cpu, mask)
 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
 	}
+	qmap->use_managed_irq = pdev->dev.irq_affinity_managed;
 
 	return 0;
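
For context (not part of this patch): blk_mq_pci_map_queues() is normally
invoked from a driver's ->map_queues() callback, so the flag above gets
populated without any driver-side change. A minimal sketch of that caller
shape, where the 'foo' names are hypothetical:

	/* Hypothetical driver code, for illustration only. */
	static int foo_map_queues(struct blk_mq_tag_set *set)
	{
		struct foo_dev *foo = set->driver_data;	/* hypothetical driver data */

		/* Fills qmap->use_managed_irq from foo->pdev->dev.irq_affinity_managed */
		return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					     foo->pdev, 0);
	}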

--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -36,6 +36,9 @@ int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 			map->mq_map[cpu] = map->queue_offset + queue;
 	}
 
+	/* So far RDMA doesn't use managed irq */
+	map->use_managed_irq = false;
+
 	return 0;
 
 fallback:

--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -38,6 +38,7 @@ int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
 		for_each_cpu(cpu, mask)
 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
 	}
+	qmap->use_managed_irq = vdev->dev.irq_affinity_managed;
 
 	return 0;
 fallback:
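
The virtio side mirrors the PCI case; the usual caller is a virtio driver's
->map_queues() callback. A sketch, again with hypothetical 'foo' names:

	/* Hypothetical virtio driver code, for illustration only. */
	static int foo_virtio_map_queues(struct blk_mq_tag_set *set)
	{
		struct foo_vdev *vfoo = set->driver_data;	/* hypothetical */

		/* Fills qmap->use_managed_irq from vfoo->vdev->dev.irq_affinity_managed */
		return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
						vfoo->vdev, 0);
	}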

--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -192,7 +192,8 @@ struct blk_mq_hw_ctx {
 struct blk_mq_queue_map {
 	unsigned int *mq_map;
 	unsigned int nr_queues;
-	unsigned int queue_offset;
+	unsigned int queue_offset:31;
+	unsigned int use_managed_irq:1;
 };
 
 /**
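
Narrowing queue_offset to 31 bits frees one bit for the new flag without
growing the struct. With the bit in place, core code can ask per queue map
whether its irqs are managed, for instance when deciding whether a hw queue
must be drained before its CPUs go offline. A sketch of a possible consumer;
this helper is made up here and not part of the patch:

	/* Hypothetical helper, for illustration only. */
	static bool blk_mq_hctx_use_managed_irq(struct blk_mq_hw_ctx *hctx)
	{
		return hctx->queue->tag_set->map[HCTX_TYPE_DEFAULT].use_managed_irq;
	}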