Message ID | 7b21118256af2cb3d0dfe45b1e4ef9683fabccb5.1629131628.git.elena.ufimtseva@oracle.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | vfio-user implementation | expand |
On Mon, Aug 16, 2021 at 09:42:47AM -0700, Elena Ufimtseva wrote: > diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c > index 2c9fcb2fa9..29a874c066 100644 > --- a/hw/vfio/pci.c > +++ b/hw/vfio/pci.c > @@ -3406,11 +3406,72 @@ type_init(register_vfio_pci_dev_type) > * vfio-user routines. > */ > > -static int vfio_user_pci_process_req(void *opaque, char *buf, VFIOUserFDs *fds) > +static int vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg) > { > + PCIDevice *pdev = &vdev->pdev; > + char *buf; > + int size = msg->count + sizeof(VFIOUserDMARW); The caller has only checked that hdr->size is large enough for VFIOUserHdr, not VFIOUserDMARW. We must not access VFIOUserDMARW fields until this has been checked. The `size` variable should be declared size_t to avoid signedness issues. Even then, this can overflow on 32-bit hosts, so I suggest moving this arithmetic expression below the msg->count > vfio_user_max_xfer() check. That way it's clear that overflow cannot happen. > + > + if (msg->hdr.flags & VFIO_USER_NO_REPLY) { > + return -EINVAL; > + } > + if (msg->count > vfio_user_max_xfer()) { > + return -E2BIG; > + } Does vfio-user allow the request to be smaller than the reply? In other words, is it okay that we're not checking msg->count against hdr->size? > + > + buf = g_malloc0(size); > + memcpy(buf, msg, sizeof(*msg)); > + > + pci_dma_read(pdev, msg->offset, buf + sizeof(*msg), msg->count); The vfio-user spec doesn't go into errors, but pci_dma_read() can return errors. Hmm... 
> + > + vfio_user_send_reply(vdev->vbasedev.proxy, buf, size); > + g_free(buf); > return 0; > } > > +static int vfio_user_dma_write(VFIOPCIDevice *vdev, > + VFIOUserDMARW *msg) > +{ > + PCIDevice *pdev = &vdev->pdev; > + char *buf = (char *)msg + sizeof(*msg); Or: char *buf = msg->data; > + > + /* make sure transfer count isn't larger than the message data */ > + if (msg->count > msg->hdr.size - sizeof(*msg)) { > + return -E2BIG; > + } msg->count cannot be accessed until we have checked that msg->hdr.size is large enough for VFIOUserDMARW. Adding the check also eliminates the underflow in the subtraction if msg->hdr.size was smaller than sizeof(VFIOUserDMARW). > + > + pci_dma_write(pdev, msg->offset, buf, msg->count); > + > + if ((msg->hdr.flags & VFIO_USER_NO_REPLY) == 0) { > + vfio_user_send_reply(vdev->vbasedev.proxy, (char *)msg, > + sizeof(msg->hdr)); > + } > + return 0; > +} > + > +static int vfio_user_pci_process_req(void *opaque, char *buf, VFIOUserFDs *fds) > +{ > + VFIOPCIDevice *vdev = opaque; > + VFIOUserHdr *hdr = (VFIOUserHdr *)buf; > + int ret; > + > + if (fds->recv_fds != 0) { > + return -EINVAL; Where are the fds closed?
On Wed, Sep 08, 2021 at 10:51:11AM +0100, Stefan Hajnoczi wrote: > > + > > + buf = g_malloc0(size); > > + memcpy(buf, msg, sizeof(*msg)); > > + > > + pci_dma_read(pdev, msg->offset, buf + sizeof(*msg), msg->count); > > The vfio-user spec doesn't go into errors but pci_dma_read() can return > errors. Hmm... It's certainly under-specified in the spec, but in terms of the library, we do return EINVAL if we decide something invalid happened... regards john
diff --git a/hw/vfio/user-protocol.h b/hw/vfio/user-protocol.h index ca53fce5f4..c5d9473f8f 100644 --- a/hw/vfio/user-protocol.h +++ b/hw/vfio/user-protocol.h @@ -171,6 +171,17 @@ typedef struct { char data[]; } VFIOUserRegionRW; +/* + * VFIO_USER_DMA_READ + * VFIO_USER_DMA_WRITE + */ +typedef struct { + VFIOUserHdr hdr; + uint64_t offset; + uint32_t count; + char data[]; +} VFIOUserDMARW; + /*imported from struct vfio_bitmap */ typedef struct { uint64_t pgsize; diff --git a/hw/vfio/user.h b/hw/vfio/user.h index 7786ab57c5..32e8b70d28 100644 --- a/hw/vfio/user.h +++ b/hw/vfio/user.h @@ -65,6 +65,7 @@ typedef struct VFIOProxy { VFIOProxy *vfio_user_connect_dev(SocketAddress *addr, Error **errp); void vfio_user_disconnect(VFIOProxy *proxy); +uint64_t vfio_user_max_xfer(void); void vfio_user_set_reqhandler(VFIODevice *vbasdev, int (*handler)(void *opaque, char *buf, VFIOUserFDs *fds), diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 2c9fcb2fa9..29a874c066 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -3406,11 +3406,72 @@ type_init(register_vfio_pci_dev_type) * vfio-user routines. 
*/ -static int vfio_user_pci_process_req(void *opaque, char *buf, VFIOUserFDs *fds) +static int vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg) { + PCIDevice *pdev = &vdev->pdev; + char *buf; + int size = msg->count + sizeof(VFIOUserDMARW); + + if (msg->hdr.flags & VFIO_USER_NO_REPLY) { + return -EINVAL; + } + if (msg->count > vfio_user_max_xfer()) { + return -E2BIG; + } + + buf = g_malloc0(size); + memcpy(buf, msg, sizeof(*msg)); + + pci_dma_read(pdev, msg->offset, buf + sizeof(*msg), msg->count); + + vfio_user_send_reply(vdev->vbasedev.proxy, buf, size); + g_free(buf); return 0; } +static int vfio_user_dma_write(VFIOPCIDevice *vdev, + VFIOUserDMARW *msg) +{ + PCIDevice *pdev = &vdev->pdev; + char *buf = (char *)msg + sizeof(*msg); + + /* make sure transfer count isn't larger than the message data */ + if (msg->count > msg->hdr.size - sizeof(*msg)) { + return -E2BIG; + } + + pci_dma_write(pdev, msg->offset, buf, msg->count); + + if ((msg->hdr.flags & VFIO_USER_NO_REPLY) == 0) { + vfio_user_send_reply(vdev->vbasedev.proxy, (char *)msg, + sizeof(msg->hdr)); + } + return 0; +} + +static int vfio_user_pci_process_req(void *opaque, char *buf, VFIOUserFDs *fds) +{ + VFIOPCIDevice *vdev = opaque; + VFIOUserHdr *hdr = (VFIOUserHdr *)buf; + int ret; + + if (fds->recv_fds != 0) { + return -EINVAL; + } + switch (hdr->command) { + case VFIO_USER_DMA_READ: + ret = vfio_user_dma_read(vdev, (VFIOUserDMARW *)hdr); + break; + case VFIO_USER_DMA_WRITE: + ret = vfio_user_dma_write(vdev, (VFIOUserDMARW *)hdr); + break; + default: + error_printf("vfio_user_process_req unknown cmd %d\n", hdr->command); + ret = -ENOSYS; + } + return ret; +} + /* * Emulated devices don't use host hot reset */ diff --git a/hw/vfio/user.c b/hw/vfio/user.c index 06bcd46e60..fcc041959c 100644 --- a/hw/vfio/user.c +++ b/hw/vfio/user.c @@ -54,6 +54,11 @@ static void vfio_user_send_recv(VFIOProxy *proxy, VFIOUserHdr *msg, * Functions called by main, CPU, or iothread threads */ +uint64_t 
vfio_user_max_xfer(void) +{ + return max_xfer_size; +} + static void vfio_user_shutdown(VFIOProxy *proxy) { qio_channel_shutdown(proxy->ioc, QIO_CHANNEL_SHUTDOWN_READ, NULL); @@ -251,7 +256,7 @@ void vfio_user_recv(void *opaque) *reply->msg = msg; data = (char *)reply->msg + sizeof(msg); } else { - if (msg.size > max_xfer_size) { + if (msg.size > max_xfer_size + sizeof(VFIOUserDMARW)) { error_setg(&local_err, "vfio_user_recv request larger than max"); goto fatal; }