@@ -112,6 +112,8 @@ static inline bool virt_queue__available(struct virt_queue *vq)
return virtio_guest_to_host_u16(vq, vq->vring.avail->idx) != vq->last_avail_idx;
}
+void virt_queue__used_idx_advance(struct virt_queue *queue, u16 jump);
+struct vring_used_elem *virt_queue__set_used_elem_no_update(struct virt_queue *queue, u32 head, u32 len, u16 offset);
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len);
bool virtio_queue__should_signal(struct virt_queue *vq);
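The two new prototypes split the old publish step in half: virt_queue__set_used_elem_no_update() stages a used element at an offset past the current used index without making it visible, and virt_queue__used_idx_advance() publishes a whole batch with one barrier-protected index update. A minimal caller sketch, assuming kvm/virtio.h is included (publish_batch and its heads/lens arrays are hypothetical, shown purely for illustration):

static void publish_batch(struct virt_queue *vq, u32 *heads, u32 *lens, u16 n)
{
	u16 i;

	/*
	 * Stage each element just past the current used->idx; none of them
	 * is visible to the guest yet.
	 */
	for (i = 0; i < n; i++)
		virt_queue__set_used_elem_no_update(vq, heads[i], lens[i], i);

	/* One barrier-protected bump of used->idx publishes all n elements. */
	virt_queue__used_idx_advance(vq, n);
}

The virtio-net receive path further down uses exactly this pattern to hand one frame to the guest across several descriptor chains.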
@@ -21,22 +21,17 @@ const char* virtio_trans_name(enum virtio_trans trans)
return "unknown";
}
-struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
+void virt_queue__used_idx_advance(struct virt_queue *queue, u16 jump)
{
- struct vring_used_elem *used_elem;
u16 idx = virtio_guest_to_host_u16(queue, queue->vring.used->idx);
- used_elem = &queue->vring.used->ring[idx % queue->vring.num];
- used_elem->id = virtio_host_to_guest_u32(queue, head);
- used_elem->len = virtio_host_to_guest_u32(queue, len);
-
-	/*
-	 * Use wmb to assure that used elem was updated with head and len.
-	 * We need a wmb here since we can't advance idx unless we're ready
-	 * to pass the used element to the guest.
-	 */
+	/*
+	 * Use wmb to ensure the used elements staged by the caller are fully
+	 * written before idx is advanced: the guest must not observe the new
+	 * idx while any of those elements is still stale.
+	 */
wmb();
- idx++;
+ idx += jump;
queue->vring.used->idx = virtio_host_to_guest_u16(queue, idx);
/*
@@ -45,6 +40,29 @@ struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32
* an updated idx.
*/
wmb();
+}
+
+struct vring_used_elem *
+virt_queue__set_used_elem_no_update(struct virt_queue *queue, u32 head,
+ u32 len, u16 offset)
+{
+ struct vring_used_elem *used_elem;
+ u16 idx = virtio_guest_to_host_u16(queue, queue->vring.used->idx);
+
+ idx += offset;
+ used_elem = &queue->vring.used->ring[idx % queue->vring.num];
+ used_elem->id = virtio_host_to_guest_u32(queue, head);
+ used_elem->len = virtio_host_to_guest_u32(queue, len);
+
+ return used_elem;
+}
+
+struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len)
+{
+ struct vring_used_elem *used_elem;
+
+ used_elem = virt_queue__set_used_elem_no_update(queue, head, len, 0);
+ virt_queue__used_idx_advance(queue, 1);
return used_elem;
}
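To make the offset arithmetic concrete (the numbers are illustrative only): with queue->vring.num == 256 and used->idx currently at 254, staging elements at offsets 0 through 3 writes ring slots 254, 255, 0 and 1, since idx is free-running and only reduced modulo the ring size when indexing; a single virt_queue__used_idx_advance(queue, 4) then publishes all four. The first wmb() orders the element writes before the index update, and the second orders the index update before whatever the caller does next, typically signalling the guest.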
@@ -80,14 +80,12 @@ static void virtio_net_fix_tx_hdr(struct virtio_net_hdr *hdr, struct net_dev *nd
hdr->csum_offset = virtio_guest_to_host_u16(&ndev->vdev, hdr->csum_offset);
}
-static void virtio_net_fix_rx_hdr(struct virtio_net_hdr_mrg_rxbuf *hdr, struct net_dev *ndev)
+static void virtio_net_fix_rx_hdr(struct virtio_net_hdr *hdr, struct net_dev *ndev)
{
- hdr->hdr.hdr_len = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr.hdr_len);
- hdr->hdr.gso_size = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr.gso_size);
- hdr->hdr.csum_start = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr.csum_start);
- hdr->hdr.csum_offset = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr.csum_offset);
- if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF))
- hdr->num_buffers = virtio_host_to_guest_u16(&ndev->vdev, hdr->num_buffers);
+ hdr->hdr_len = virtio_host_to_guest_u16(&ndev->vdev, hdr->hdr_len);
+ hdr->gso_size = virtio_host_to_guest_u16(&ndev->vdev, hdr->gso_size);
+ hdr->csum_start = virtio_host_to_guest_u16(&ndev->vdev, hdr->csum_start);
+ hdr->csum_offset = virtio_host_to_guest_u16(&ndev->vdev, hdr->csum_offset);
}
static void *virtio_net_rx_thread(void *p)
@@ -123,7 +121,7 @@ static void *virtio_net_rx_thread(void *p)
.iov_len = sizeof(buffer),
};
struct virtio_net_hdr_mrg_rxbuf *hdr;
- int i;
+ u16 num_buffers;
len = ndev->ops->rx(&dummy_iov, 1, ndev);
if (len < 0) {
@@ -132,7 +130,7 @@ static void *virtio_net_rx_thread(void *p)
goto out_err;
}
- copied = i = 0;
+ copied = num_buffers = 0;
head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
hdr = iov[0].iov_base;
while (copied < len) {
@@ -140,19 +138,20 @@ static void *virtio_net_rx_thread(void *p)
memcpy_toiovec(iov, buffer + copied, iovsize);
copied += iovsize;
- if (i++ == 0)
- virtio_net_fix_rx_hdr(hdr, ndev);
- if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF)) {
- u16 num_buffers = virtio_guest_to_host_u16(vq, hdr->num_buffers);
- hdr->num_buffers = virtio_host_to_guest_u16(vq, num_buffers + 1);
- }
- virt_queue__set_used_elem(vq, head, iovsize);
+ virt_queue__set_used_elem_no_update(vq, head, iovsize, num_buffers++);
if (copied == len)
break;
while (!virt_queue__available(vq))
sleep(0);
head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
}
+
+ virtio_net_fix_rx_hdr(&hdr->hdr, ndev);
+ if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF))
+ hdr->num_buffers = virtio_host_to_guest_u16(vq, num_buffers);
+
+ virt_queue__used_idx_advance(vq, num_buffers);
+
/* We should interrupt guest right now, otherwise latency is huge. */
if (virtio_queue__should_signal(vq))
ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, id);
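A hedged worked example of the receive-side change (how many chains a frame spans depends entirely on the buffers the guest posted): if a frame returned by ndev->ops->rx() spans three of the guest's receive chains, the copy loop stages used elements at offsets 0, 1 and 2; after the loop, the first chain's header is byte-swapped once and, when VIRTIO_NET_F_MRG_RXBUF was negotiated, its num_buffers field is set to 3, then virt_queue__used_idx_advance(vq, 3) makes all three chains visible together before the guest is signalled. Previously each chain was published individually via virt_queue__set_used_elem(), so the guest could observe a partially delivered mergeable packet whose num_buffers was still being incremented.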