@@ -238,6 +238,13 @@ static void vp_slave_set_vring_kick(int fd)
pvq_node->kickfd = fd;
}
+static void vp_slave_set_vring_call(int fd)
+{
+ PeerVqNode *pvq_node = QLIST_FIRST(&vp_slave->pvq_list);
+ if (pvq_node)
+ pvq_node->callfd = fd;
+}
+
static int vp_slave_can_read(void *opaque)
{
return VHOST_USER_HDR_SIZE;
@@ -325,6 +332,17 @@ static void vp_slave_read(void *opaque, const uint8_t *buf, int size)
*/
qemu_set_nonblock(fds[0]);
break;
+ case VHOST_USER_SET_VRING_CALL:
+ /* consume the fd */
+ qemu_chr_fe_get_msgfds(chr_be, fds, 1);
+ vp_slave_set_vring_call(fds[0]);
+ /*
+ * This is a non-blocking eventfd.
+ * The receive function forces it to be blocking,
+ * so revert it back to non-blocking.
+ */
+ qemu_set_nonblock(fds[0]);
+ break;
default:
error_report("vhost-pci-slave does not support msg request = %d",
msg.request);
@@ -11,6 +11,7 @@ typedef struct PeerVqNode {
uint16_t last_avail_idx;
uint32_t vring_num;
int kickfd;
+ int callfd;
struct vhost_vring_addr addr;
QLIST_ENTRY(PeerVqNode) node;
} PeerVqNode;
Update the callfd to the head node in the list. This version of
vhost-pci expects the master device to use polling to receive packets,
so callfd is not used yet; it is recorded here for use by a future
implementation.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
 hw/virtio/vhost-pci-slave.c         | 18 ++++++++++++++++++
 include/hw/virtio/vhost-pci-slave.h |  1 +
 2 files changed, 19 insertions(+)