@@ -223,15 +223,11 @@ struct vring_virtqueue {
#endif
};
-static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+static struct virtqueue *__vring_new_virtqueue(struct virtio_device *vdev,
+ unsigned int index,
struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name,
- struct device *dma_dev);
+ struct vq_transport_config *tp_cfg,
+ struct virtio_vq_config *cfg);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);
@@ -240,6 +236,8 @@ static void vring_free(struct virtqueue *_vq);
*/
#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)
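+/*
+ * Fetch the per-virtqueue entry from the arrays in struct virtio_vq_config,
+ * keyed by vq->vq.index; cfg_vq_get() treats a missing (NULL) array as false.
+ */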
+#define cfg_vq_val(cfg, vq, key) ((cfg)->key[(vq)->vq.index])
+#define cfg_vq_get(cfg, vq, key) ((cfg)->key ? cfg_vq_val(cfg, vq, key) : false)
static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
unsigned int total_sg)
@@ -1138,32 +1136,28 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
return 0;
}
-static struct virtqueue *vring_create_virtqueue_split(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name,
- struct device *dma_dev)
+static struct virtqueue *vring_create_virtqueue_split(struct virtio_device *vdev,
+ unsigned int index,
+ struct vq_transport_config *tp_cfg,
+ struct virtio_vq_config *cfg)
{
struct vring_virtqueue_split vring_split = {};
struct virtqueue *vq;
int err;
- err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
- may_reduce_num, dma_dev);
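+	/*
+	 * Resolve the default DMA device up front; both __vring_new_virtqueue()
+	 * and the error path below read it back from tp_cfg->dma_dev.
+	 */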
+ tp_cfg->dma_dev = tp_cfg->dma_dev ? : vdev->dev.parent;
+
+ err = vring_alloc_queue_split(&vring_split, vdev,
+ tp_cfg->num,
+ tp_cfg->vring_align,
+ tp_cfg->may_reduce_num,
+ tp_cfg->dma_dev);
if (err)
return NULL;
- vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
- context, notify, callback, name, dma_dev);
+ vq = __vring_new_virtqueue(vdev, index, &vring_split, tp_cfg, cfg);
if (!vq) {
- vring_free_split(&vring_split, vdev, dma_dev);
+ vring_free_split(&vring_split, vdev, tp_cfg->dma_dev);
return NULL;
}
@@ -2050,38 +2044,33 @@ static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name,
- struct device *dma_dev)
+static struct virtqueue *vring_create_virtqueue_packed(struct virtio_device *vdev,
+ unsigned int index,
+ struct vq_transport_config *tp_cfg,
+ struct virtio_vq_config *cfg)
{
struct vring_virtqueue_packed vring_packed = {};
struct vring_virtqueue *vq;
+ struct device *dma_dev;
int err;
- if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
+ dma_dev = tp_cfg->dma_dev ? : vdev->dev.parent;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, tp_cfg->num, dma_dev))
goto err_ring;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
goto err_vq;
- vq->vq.callback = callback;
- vq->vq.vdev = vdev;
- vq->vq.name = name;
vq->vq.index = index;
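+	/* cfg_vq_val() keys off vq->vq.index, which is set just above. */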
+ vq->vq.callback = cfg_vq_val(cfg, vq, callbacks);
+ vq->vq.vdev = vdev;
+ vq->vq.name = cfg_vq_val(cfg, vq, names);
vq->vq.reset = false;
vq->we_own_ring = true;
- vq->notify = notify;
- vq->weak_barriers = weak_barriers;
+ vq->notify = tp_cfg->notify;
+ vq->weak_barriers = tp_cfg->weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
vq->broken = true;
#else
@@ -2094,7 +2083,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->do_unmap = vq->use_dma_api;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
- !context;
+ !cfg_vq_get(cfg, vq, ctx);
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
@@ -2104,9 +2093,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (err)
goto err_state_extra;
- virtqueue_vring_init_packed(&vring_packed, !!callback);
+ virtqueue_vring_init_packed(&vring_packed, !!cfg_vq_val(cfg, vq, callbacks));
- virtqueue_init(vq, num);
+ virtqueue_init(vq, tp_cfg->num);
virtqueue_vring_attach_packed(vq, &vring_packed);
spin_lock(&vdev->vqs_list_lock);
@@ -2599,15 +2588,11 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
-static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+static struct virtqueue *__vring_new_virtqueue(struct virtio_device *vdev,
+ unsigned int index,
struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name,
- struct device *dma_dev)
+ struct vq_transport_config *tp_cfg,
+ struct virtio_vq_config *cfg)
{
struct vring_virtqueue *vq;
int err;
@@ -2620,26 +2605,26 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
return NULL;
vq->packed_ring = false;
- vq->vq.callback = callback;
- vq->vq.vdev = vdev;
- vq->vq.name = name;
vq->vq.index = index;
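+	/* vq->vq.index must be valid before cfg_vq_val() is used below. */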
+ vq->vq.callback = cfg_vq_val(cfg, vq, callbacks);
+ vq->vq.vdev = vdev;
+ vq->vq.name = cfg_vq_val(cfg, vq, names);
vq->vq.reset = false;
vq->we_own_ring = false;
- vq->notify = notify;
- vq->weak_barriers = weak_barriers;
+ vq->notify = tp_cfg->notify;
+ vq->weak_barriers = tp_cfg->weak_barriers;
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
vq->broken = true;
#else
vq->broken = false;
#endif
- vq->dma_dev = dma_dev;
+ vq->dma_dev = tp_cfg->dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
vq->premapped = false;
vq->do_unmap = vq->use_dma_api;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
- !context;
+ !cfg_vq_get(cfg, vq, ctx);
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
@@ -2667,36 +2652,10 @@ struct virtqueue *vring_create_virtqueue(struct virtio_device *vdev,
struct vq_transport_config *tp_cfg,
struct virtio_vq_config *cfg)
{
- struct device *dma_dev;
- unsigned int num;
- unsigned int vring_align;
- bool weak_barriers;
- bool may_reduce_num;
- bool context;
- bool (*notify)(struct virtqueue *_);
- void (*callback)(struct virtqueue *_);
- const char *name;
-
- dma_dev = tp_cfg->dma_dev ? : vdev->dev.parent;
-
- num = tp_cfg->num;
- vring_align = tp_cfg->vring_align;
- weak_barriers = tp_cfg->weak_barriers;
- may_reduce_num = tp_cfg->may_reduce_num;
- notify = tp_cfg->notify;
-
- name = cfg->names[index];
- callback = cfg->callbacks[index];
- context = cfg->ctx ? cfg->ctx[index] : false;
-
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
- return vring_create_virtqueue_packed(index, num, vring_align,
- vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, dma_dev);
+ return vring_create_virtqueue_packed(vdev, index, tp_cfg, cfg);
- return vring_create_virtqueue_split(index, num, vring_align,
- vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, dma_dev);
+ return vring_create_virtqueue_split(vdev, index, tp_cfg, cfg);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
@@ -2837,30 +2796,14 @@ struct virtqueue *vring_new_virtqueue(struct virtio_device *vdev,
struct virtio_vq_config *cfg)
{
struct vring_virtqueue_split vring_split = {};
- unsigned int num;
- unsigned int vring_align;
- bool weak_barriers;
- bool context;
- bool (*notify)(struct virtqueue *_);
- void (*callback)(struct virtqueue *_);
- const char *name;
-
- num = tp_cfg->num;
- vring_align = tp_cfg->vring_align;
- weak_barriers = tp_cfg->weak_barriers;
- notify = tp_cfg->notify;
-
- name = cfg->names[index];
- callback = cfg->callbacks[index];
- context = cfg->ctx ? cfg->ctx[index] : false;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
- vring_init(&vring_split.vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
- context, notify, callback, name,
- vdev->dev.parent);
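+	/* As before, this path always maps through vdev->dev.parent. */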
+ tp_cfg->dma_dev = vdev->dev.parent;
+
+ vring_init(&vring_split.vring, tp_cfg->num, pages, tp_cfg->vring_align);
+ return __vring_new_virtqueue(vdev, index, &vring_split, tp_cfg, cfg);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
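
A rough sketch of a transport-side caller under the new calling convention, for
illustration only: the helper names example_create_vq() and example_notify(),
the ring size and the alignment are assumptions, not part of this patch. The
field names of struct vq_transport_config and struct virtio_vq_config, and the
(vdev, index, tp_cfg, cfg) argument order, follow the hunks above.

	static bool example_notify(struct virtqueue *vq)
	{
		/* Transport-specific kick; a real transport writes its doorbell here. */
		return true;
	}

	static struct virtqueue *example_create_vq(struct virtio_device *vdev,
						   unsigned int index,
						   struct virtio_vq_config *cfg)
	{
		struct vq_transport_config tp_cfg = {
			.num		= 256,			/* illustrative ring size */
			.vring_align	= SMP_CACHE_BYTES,	/* illustrative alignment */
			.weak_barriers	= true,
			.may_reduce_num	= true,
			.notify		= example_notify,
			.dma_dev	= NULL,	/* NULL: fall back to vdev->dev.parent */
		};

		/* names[index], callbacks[index] and ctx[index] are taken from cfg. */
		return vring_create_virtqueue(vdev, index, &tp_cfg, cfg);
	}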