@@ -43,17 +43,6 @@ static inline bool virt_queue__available(struct virt_queue *vq)
return vq->vring.avail->idx != vq->last_avail_idx;
}
-/*
- * Warning: on 32-bit hosts, shifting pfn left may cause a truncation of pfn values
- * higher than 4GB - thus, pointing to the wrong area in guest virtual memory space
- * and breaking the virt queue which owns this pfn.
- */
-static inline void *guest_pfn_to_host(struct kvm *kvm, u32 pfn)
-{
- return guest_flat_to_host(kvm, (unsigned long)pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT);
-}
-
-
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len);
bool virtio_queue__should_signal(struct virt_queue *vq);
@@ -81,7 +70,8 @@ struct virtio_ops {
u8 *(*get_config)(struct kvm *kvm, void *dev);
u32 (*get_host_features)(struct kvm *kvm, void *dev);
void (*set_guest_features)(struct kvm *kvm, void *dev, u32 features);
- int (*init_vq)(struct kvm *kvm, void *dev, u32 vq, u32 pfn);
+ int (*init_vq)(struct kvm *kvm, void *dev, u32 vq, u32 page_size,
+ u32 align, u32 pfn);
int (*notify_vq)(struct kvm *kvm, void *dev, u32 vq);
int (*get_pfn_vq)(struct kvm *kvm, void *dev, u32 vq);
int (*get_size_vq)(struct kvm *kvm, void *dev, u32 vq);
@@ -1254,7 +1254,8 @@ static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
p9dev->features = features;
}
-static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
+ u32 pfn)
{
struct p9_dev *p9dev = dev;
struct p9_dev_job *job;
@@ -1265,10 +1266,10 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
queue = &p9dev->vqs[vq];
queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ p = guest_flat_to_host(kvm, queue->pfn * page_size);
job = &p9dev->jobs[vq];
- vring_init(&queue->vring, VIRTQUEUE_NUM, p, VIRTIO_PCI_VRING_ALIGN);
+ vring_init(&queue->vring, VIRTQUEUE_NUM, p, align);
*job = (struct p9_dev_job) {
.vq = queue,
@@ -193,7 +193,8 @@ static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
bdev->features = features;
}
-static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
+ u32 pfn)
{
struct bln_dev *bdev = dev;
struct virt_queue *queue;
@@ -203,10 +204,10 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
queue = &bdev->vqs[vq];
queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ p = guest_flat_to_host(kvm, queue->pfn * page_size);
thread_pool__init_job(&bdev->jobs[vq], kvm, virtio_bln_do_io, queue);
- vring_init(&queue->vring, VIRTIO_BLN_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
+ vring_init(&queue->vring, VIRTIO_BLN_QUEUE_SIZE, p, align);
return 0;
}
@@ -156,7 +156,8 @@ static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
bdev->features = features;
}
-static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
+ u32 pfn)
{
struct blk_dev *bdev = dev;
struct virt_queue *queue;
@@ -166,9 +167,9 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
queue = &bdev->vqs[vq];
queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ p = guest_flat_to_host(kvm, queue->pfn * page_size);
- vring_init(&queue->vring, VIRTIO_BLK_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
+ vring_init(&queue->vring, VIRTIO_BLK_QUEUE_SIZE, p, align);
return 0;
}
@@ -128,7 +128,8 @@ static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
/* Unused */
}
-static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
+ u32 pfn)
{
struct virt_queue *queue;
void *p;
@@ -139,9 +140,9 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
queue = &cdev.vqs[vq];
queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ p = guest_flat_to_host(kvm, queue->pfn * page_size);
- vring_init(&queue->vring, VIRTIO_CONSOLE_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
+ vring_init(&queue->vring, VIRTIO_CONSOLE_QUEUE_SIZE, p, align);
if (vq == VIRTIO_CONSOLE_TX_QUEUE)
thread_pool__init_job(&cdev.jobs[vq], kvm, virtio_console_handle_callback, queue);
@@ -163,25 +163,25 @@ static void virtio_mmio_config_out(u64 addr, void *data, u32 len,
case VIRTIO_MMIO_GUEST_PAGE_SIZE:
val = ioport__read32(data);
vmmio->hdr.guest_page_size = val;
- /* FIXME: set guest page size */
break;
case VIRTIO_MMIO_QUEUE_NUM:
val = ioport__read32(data);
vmmio->hdr.queue_num = val;
- /* FIXME: set vq size */
vdev->ops->set_size_vq(vmmio->kvm, vmmio->dev,
vmmio->hdr.queue_sel, val);
break;
case VIRTIO_MMIO_QUEUE_ALIGN:
val = ioport__read32(data);
vmmio->hdr.queue_align = val;
- /* FIXME: set used ring alignment */
break;
case VIRTIO_MMIO_QUEUE_PFN:
val = ioport__read32(data);
virtio_mmio_init_ioeventfd(vmmio->kvm, vdev, vmmio->hdr.queue_sel);
vdev->ops->init_vq(vmmio->kvm, vmmio->dev,
- vmmio->hdr.queue_sel, val);
+ vmmio->hdr.queue_sel,
+ vmmio->hdr.guest_page_size,
+ vmmio->hdr.queue_align,
+ val);
break;
case VIRTIO_MMIO_QUEUE_NOTIFY:
val = ioport__read32(data);
@@ -320,7 +320,8 @@ static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
ndev->features = features;
}
-static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
+ u32 pfn)
{
struct vhost_vring_state state = { .index = vq };
struct vhost_vring_addr addr;
@@ -333,10 +334,9 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
queue = &ndev->vqs[vq];
queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ p = guest_flat_to_host(kvm, queue->pfn * page_size);
- /* FIXME: respect pci and mmio vring alignment */
- vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
+ vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, align);
if (ndev->vhost_fd == 0)
return 0;
@@ -197,7 +197,9 @@ static bool virtio_pci__io_out(struct ioport *ioport, struct kvm *kvm, u16 port,
case VIRTIO_PCI_QUEUE_PFN:
val = ioport__read32(data);
virtio_pci__init_ioeventfd(kvm, vdev, vpci->queue_selector);
- vdev->ops->init_vq(kvm, vpci->dev, vpci->queue_selector, val);
+ vdev->ops->init_vq(kvm, vpci->dev, vpci->queue_selector,
+ 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT,
+ VIRTIO_PCI_VRING_ALIGN, val);
break;
case VIRTIO_PCI_QUEUE_SEL:
vpci->queue_selector = ioport__read16(data);
@@ -86,7 +86,8 @@ static void virtio_rng_do_io(struct kvm *kvm, void *param)
rdev->vdev.ops->signal_vq(kvm, &rdev->vdev, vq - rdev->vqs);
}
-static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
+ u32 pfn)
{
struct rng_dev *rdev = dev;
struct virt_queue *queue;
@@ -97,11 +98,11 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
queue = &rdev->vqs[vq];
queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ p = guest_flat_to_host(kvm, queue->pfn * page_size);
job = &rdev->jobs[vq];
- vring_init(&queue->vring, VIRTIO_RNG_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
+ vring_init(&queue->vring, VIRTIO_RNG_QUEUE_SIZE, p, align);
*job = (struct rng_dev_job) {
.vq = queue,
@@ -49,7 +49,8 @@ static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
sdev->features = features;
}
-static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
+ u32 pfn)
{
struct vhost_vring_state state = { .index = vq };
struct vhost_vring_addr addr;
@@ -62,9 +63,9 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
queue = &sdev->vqs[vq];
queue->pfn = pfn;
- p = guest_pfn_to_host(kvm, queue->pfn);
+ p = guest_flat_to_host(kvm, queue->pfn * page_size);
- vring_init(&queue->vring, VIRTIO_SCSI_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
+ vring_init(&queue->vring, VIRTIO_SCSI_QUEUE_SIZE, p, align);
if (sdev->vhost_fd == 0)
return 0;
virtio-based PCI devices deal only with 4k memory granules, making direct
use of the VIRTIO_PCI_VRING_ALIGN and VIRTIO_PCI_QUEUE_ADDR_SHIFT constants
when initialising the virtqueues for a device. For MMIO-based devices, the
guest page size is arbitrary and may differ from that of the host (this is
the case on AArch64, where both 4k and 64k pages are supported).

This patch fixes the virtio drivers to honour the guest page size passed
when configuring the virtio device and align the virtqueues accordingly.

Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 tools/kvm/include/kvm/virtio.h | 14 ++------------
 tools/kvm/virtio/9p.c          |  7 ++++---
 tools/kvm/virtio/balloon.c     |  7 ++++---
 tools/kvm/virtio/blk.c         |  7 ++++---
 tools/kvm/virtio/console.c     |  7 ++++---
 tools/kvm/virtio/mmio.c        |  8 ++++----
 tools/kvm/virtio/net.c         |  8 ++++----
 tools/kvm/virtio/pci.c         |  4 +++-
 tools/kvm/virtio/rng.c         |  7 ++++---
 tools/kvm/virtio/scsi.c        |  7 ++++---
 10 files changed, 37 insertions(+), 39 deletions(-)
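For reviewers, below is a minimal, self-contained sketch (not code from this
patch) of the idea the new init_vq() signature enables: the transport supplies
the guest page size and ring alignment, and the queue PFN is scaled by that
page size to obtain the guest physical address. The helper name
demo_pfn_to_gpa() and the hard-coded values are illustrative assumptions only;
the u64 widening shows how the 32-bit truncation warned about in the removed
guest_pfn_to_host() comment can be avoided for RAM above 4GB.

/*
 * Illustrative sketch only -- not lkvm code.  Shows a transport-agnostic
 * PFN-to-guest-physical-address calculation driven by a per-transport
 * page size and ring alignment.
 */
#include <stdint.h>
#include <stdio.h>

/* PCI transport: fixed 4k granule and legacy ring alignment. */
#define DEMO_PCI_PAGE_SIZE   (1U << 12)   /* 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT */
#define DEMO_PCI_VRING_ALIGN 4096U        /* VIRTIO_PCI_VRING_ALIGN */

/*
 * Widening to u64 before the multiply keeps PFNs that map above 4GB
 * intact even on a 32-bit host.
 */
static uint64_t demo_pfn_to_gpa(uint32_t pfn, uint32_t page_size)
{
	return (uint64_t)pfn * page_size;
}

int main(void)
{
	/* PCI case: page size and alignment are constants of the transport. */
	uint32_t pci_pfn = 0x140000;	/* guest RAM at 5GB, beyond 4GB */
	printf("PCI  vring at gpa 0x%llx, align %u\n",
	       (unsigned long long)demo_pfn_to_gpa(pci_pfn, DEMO_PCI_PAGE_SIZE),
	       DEMO_PCI_VRING_ALIGN);

	/* MMIO case: the guest programs both values through the MMIO header. */
	uint32_t guest_page_size = 64 * 1024;	/* e.g. AArch64 with 64k pages */
	uint32_t queue_align = 64 * 1024;
	uint32_t mmio_pfn = 0x14000;		/* same 5GB address in 64k units */
	printf("MMIO vring at gpa 0x%llx, align %u\n",
	       (unsigned long long)demo_pfn_to_gpa(mmio_pfn, guest_page_size),
	       queue_align);

	return 0;
}

In the patch itself, the PCI transport passes 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT
and VIRTIO_PCI_VRING_ALIGN into init_vq(), while the MMIO transport passes the
guest-programmed vmmio->hdr.guest_page_size and vmmio->hdr.queue_align, so each
device's init_vq() can compute the ring address and alignment without hard-coding
PCI constants.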