Message ID | 20230228093937.2515-2-longpeng2@huawei.com (mailing list archive) |
---|---|
State | New, archived |
Series | virtio-pci: optimize set_guest_notifier |
On Tue, Feb 28, 2023 at 05:39:35PM +0800, Longpeng(Mike) wrote:
> From: Longpeng <longpeng2@huawei.com>
>
> The kvm_irqchip_commit_routes() is a time-intensive operation, it needs
> scan and update all irqfds that are already assigned during each invocation,
> so more vectors means need more time to process them.

I think the real reason is it's the write side of RCU.

> For virtio-pci, we
> can just submit once when enabling vectors of a virtio-pci device.
>
> This can reduce the downtime when migrating a VM with vhost-vdpa devices.
>
> Signed-off-by: Longpeng <longpeng2@huawei.com>
> ---
>  hw/virtio/virtio-pci.c | 24 +++++++++++++++++++++---
>  1 file changed, 21 insertions(+), 3 deletions(-)
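To make the change of call pattern concrete, here is a minimal sketch (illustrative only, using the kvm_irqchip_* helpers visible in the patch; the function names, the `virqs` array and the omitted error handling are assumptions, not code from QEMU). The commit step is the expensive part (as noted above, it is the RCU write side), so the patch moves it from once per vector to once per device:

```c
/*
 * Sketch only, not code from the patch: it contrasts the old per-vector
 * pattern with the batched one.  Function names, the "virqs" array and
 * the omitted error handling are assumptions for illustration.
 */

/* Old pattern: one begin/commit pair per vector, i.e. N commits. */
static void add_routes_one_by_one(KVMState *s, PCIDevice *dev,
                                  int nvectors, int *virqs)
{
    for (int v = 0; v < nvectors; v++) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(s);

        virqs[v] = kvm_irqchip_add_msi_route(&c, v, dev);
        kvm_irqchip_commit_route_changes(&c);   /* RCU write side, N times */
    }
}

/* Batched pattern (what the patch does): stage all routes, commit once. */
static void add_routes_batched(KVMState *s, PCIDevice *dev,
                               int nvectors, int *virqs)
{
    KVMRouteChange c = kvm_irqchip_begin_route_changes(s);

    for (int v = 0; v < nvectors; v++) {
        virqs[v] = kvm_irqchip_add_msi_route(&c, v, dev);  /* cheap staging */
    }
    kvm_irqchip_commit_route_changes(&c);       /* RCU write side, once */
}
```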
On Tue, Feb 28, 2023 at 05:39:35PM +0800, Longpeng(Mike) wrote:
> From: Longpeng <longpeng2@huawei.com>
>
> The kvm_irqchip_commit_routes() is a time-intensive operation, it needs
> scan and update all irqfds that are already assigned during each invocation,
> so more vectors means need more time to process them. For virtio-pci, we
> can just submit once when enabling vectors of a virtio-pci device.
>
> This can reduce the downtime when migrating a VM with vhost-vdpa devices.

can in what sense? does it or does it not? by how much?

> Signed-off-by: Longpeng <longpeng2@huawei.com>
> ---
>  hw/virtio/virtio-pci.c | 24 +++++++++++++++++++++---
>  1 file changed, 21 insertions(+), 3 deletions(-)
On 2023/2/28 18:17, Michael S. Tsirkin wrote:
> On Tue, Feb 28, 2023 at 05:39:35PM +0800, Longpeng(Mike) wrote:
>> From: Longpeng <longpeng2@huawei.com>
>>
>> The kvm_irqchip_commit_routes() is a time-intensive operation, it needs
>> scan and update all irqfds that are already assigned during each invocation,
>> so more vectors means need more time to process them.
>
> I think the real reason is it's the write side of RCU.
>

Yes, so we can reduce the number of times it is invoked this way.

I'll send other optimizations in the next step, including irqbypass,
kvm_irqfd, etc.
On 2023/2/28 18:18, Michael S. Tsirkin wrote:
> On Tue, Feb 28, 2023 at 05:39:35PM +0800, Longpeng(Mike) wrote:
>> From: Longpeng <longpeng2@huawei.com>
>>
>> The kvm_irqchip_commit_routes() is a time-intensive operation, it needs
>> scan and update all irqfds that are already assigned during each invocation,
>> so more vectors means need more time to process them. For virtio-pci, we
>> can just submit once when enabling vectors of a virtio-pci device.
>>
>> This can reduce the downtime when migrating a VM with vhost-vdpa devices.
>
> can in what sense? does it or does it not? by how much?
>

I've replied in patch 3.
On 2023/2/28 19:20, Longpeng (Mike, Cloud Infrastructure Service Product Dept.) wrote:
>
>
> On 2023/2/28 18:17, Michael S. Tsirkin wrote:
>> On Tue, Feb 28, 2023 at 05:39:35PM +0800, Longpeng(Mike) wrote:
>>> From: Longpeng <longpeng2@huawei.com>
>>>
>>> The kvm_irqchip_commit_routes() is a time-intensive operation, it needs
>>> scan and update all irqfds that are already assigned during each
>>> invocation,
>>> so more vectors means need more time to process them.
>>
>> I think the real reason is it's the write side of RCU.
>>
>
> Yes, so we can reduce the number of times it is invoked this way.
>
> I'll send other optimizations in the next step, including irqbypass,
> kvm_irqfd, etc.
>

Iterating the irqfds list is also time-consuming: every commit iterates all
existing irqfds, so the time complexity is O(n^2) without this patch.
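As a rough illustration of the O(n^2) point (a toy cost model, not measured QEMU behaviour; it only assumes, as described above, that each commit walks every irqfd already assigned): committing after each of n vectors walks 1 + 2 + ... + n = n(n + 1)/2 irqfds in total, while a single batched commit walks roughly n.

```c
/*
 * Toy cost model only; "n" is a hypothetical vector count and the walk
 * counts follow from the assumption stated above, not from measurement.
 */
#include <stdio.h>

int main(void)
{
    int n = 64;                      /* hypothetical number of MSI-X vectors */
    long per_vector_walks = 0;

    for (int v = 1; v <= n; v++) {
        per_vector_walks += v;       /* commit after vector v walks v irqfds */
    }

    printf("per-vector commits: ~%ld irqfd walks, single batched commit: ~%d\n",
           per_vector_walks, n);     /* prints ~2080 vs ~64 for n = 64 */
    return 0;
}
```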
On Tue, Feb 28, 2023 at 07:39:06PM +0800, Longpeng (Mike, Cloud Infrastructure Service Product Dept.) wrote:
>
>
> On 2023/2/28 19:20, Longpeng (Mike, Cloud Infrastructure Service Product
> Dept.) wrote:
> >
> >
> > On 2023/2/28 18:17, Michael S. Tsirkin wrote:
> > > On Tue, Feb 28, 2023 at 05:39:35PM +0800, Longpeng(Mike) wrote:
> > > > From: Longpeng <longpeng2@huawei.com>
> > > >
> > > > The kvm_irqchip_commit_routes() is a time-intensive operation, it needs
> > > > scan and update all irqfds that are already assigned during each
> > > > invocation,
> > > > so more vectors means need more time to process them.
> > >
> > > I think the real reason is it's the write side of RCU.
> > >
> >
> > Yes, so we can reduce the number of times it is invoked this way.
> >
> > I'll send other optimizations in the next step, including irqbypass,
> > kvm_irqfd, etc.
> >
>
> Iterating the irqfds list is also time-consuming: every commit iterates all
> existing irqfds, so the time complexity is O(n^2) without this patch.

Sounds good, pls include this in the commit log.
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 247325c193..22e76e3902 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -49,6 +49,19 @@
  * configuration space */
 #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
 
+/* Protected by the BQL */
+static KVMRouteChange virtio_pci_route_change;
+
+static inline void virtio_pci_begin_route_changes(void)
+{
+    virtio_pci_route_change = kvm_irqchip_begin_route_changes(kvm_state);
+}
+
+static inline void virtio_pci_commit_route_changes(void)
+{
+    kvm_irqchip_commit_route_changes(&virtio_pci_route_change);
+}
+
 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                                VirtIOPCIProxy *dev);
 static void virtio_pci_reset(DeviceState *qdev);
@@ -790,12 +803,11 @@ static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
     int ret;
 
     if (irqfd->users == 0) {
-        KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
-        ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev);
+        ret = kvm_irqchip_add_msi_route(&virtio_pci_route_change, vector,
+                                        &proxy->pci_dev);
         if (ret < 0) {
             return ret;
         }
-        kvm_irqchip_commit_route_changes(&c);
         irqfd->virq = ret;
     }
     irqfd->users++;
@@ -903,12 +915,18 @@ static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
     int ret = 0;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
 
+    virtio_pci_begin_route_changes();
+
     for (queue_no = 0; queue_no < nvqs; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
+            virtio_pci_commit_route_changes();
             return -1;
         }
         ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
     }
+
+    virtio_pci_commit_route_changes();
+
     return ret;
 }