@@ -482,6 +482,7 @@ static int init_vq(struct virtio_blk *vblk)
unsigned short num_vqs;
struct virtio_device *vdev = vblk->vdev;
struct irq_affinity desc = { 0, };
+ int node = dev_to_node(&vdev->dev);
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
struct virtio_blk_config, num_queues,
@@ -491,7 +492,8 @@ static int init_vq(struct virtio_blk *vblk)
num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
- vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
+ vblk->vqs = kmalloc_array_node(num_vqs, sizeof(*vblk->vqs),
+ GFP_KERNEL, node);
if (!vblk->vqs)
return -ENOMEM;
@@ -683,6 +685,7 @@ module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
static int virtblk_probe(struct virtio_device *vdev)
{
+ int node = dev_to_node(&vdev->dev);
struct virtio_blk *vblk;
struct request_queue *q;
int err, index;
@@ -714,7 +717,7 @@ static int virtblk_probe(struct virtio_device *vdev)
/* We need an extra sg elements at head and tail. */
sg_elems += 2;
- vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+ vdev->priv = vblk = kmalloc_node(sizeof(*vblk), GFP_KERNEL, node);
if (!vblk) {
err = -ENOMEM;
goto out_free_index;
Allocate frequently-accessed data structures from the NUMA node associated with this device to avoid slow cross-NUMA-node memory accesses. Memory allocations are made NUMA-aware only when they meet both of the following criteria: 1. The allocation happens during probe. If it happened in the data path then hopefully we would be executing on a CPU in the same NUMA node as the device. If the CPU were not in the right NUMA node then it is unclear whether forcing memory allocations to use the device's NUMA node would increase or decrease performance. 2. The memory will be frequently accessed from the data path. There is no need to worry about data that is not accessed from performance-critical code paths. Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> --- drivers/block/virtio_blk.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)