| Message ID | 20230714183800.3112449-2-dmitry.torokhov@gmail.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | [v2,1/2] kvm/vfio: ensure kvg instance stays around in kvm_vfio_group_add() |
On Fri, 14 Jul 2023 11:37:57 -0700
Dmitry Torokhov <dmitry.torokhov@gmail.com> wrote:

> Stop taking the kv->lock mutex in kvm_vfio_update_coherency() and instead
> call it with this mutex held: the callers of the function usually
> already have it taken (and released) before calling
> kvm_vfio_update_coherency(). This avoids bouncing the lock up and down.
>
> The exception is kvm_vfio_release(), where we do not take the lock, but
> it is executed when the very last reference to kvm_device is being
> dropped, so there are no concerns about concurrency.
>
> Suggested-by: Alex Williamson <alex.williamson@redhat.com>
> Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
> ---
>
> v2: new patch.
>
>  virt/kvm/vfio.c | 25 +++++++++----------------
>  1 file changed, 9 insertions(+), 16 deletions(-)
>
> diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
> index cd46d7ef98d6..9868e7ccb5fb 100644
> --- a/virt/kvm/vfio.c
> +++ b/virt/kvm/vfio.c
> @@ -122,8 +122,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
>          bool noncoherent = false;
>          struct kvm_vfio_group *kvg;
>
> -        mutex_lock(&kv->lock);
> -
>          list_for_each_entry(kvg, &kv->group_list, node) {
>                  if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
>                          noncoherent = true;
> @@ -139,8 +137,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
>                  else
>                          kvm_arch_unregister_noncoherent_dma(dev->kvm);
>          }
> -
> -        mutex_unlock(&kv->lock);
>  }
>
>  static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
> @@ -157,7 +153,7 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
>          /* Ensure the FD is a vfio group FD.*/
>          if (!kvm_vfio_file_is_group(filp)) {
>                  ret = -EINVAL;
> -                goto err_fput;
> +                goto out_fput;
>          }
>
>          mutex_lock(&kv->lock);
> @@ -165,30 +161,27 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
>          list_for_each_entry(kvg, &kv->group_list, node) {
>                  if (kvg->file == filp) {
>                          ret = -EEXIST;
> -                        goto err_unlock;
> +                        goto out_unlock;
>                  }
>          }
>
>          kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
>          if (!kvg) {
>                  ret = -ENOMEM;
> -                goto err_unlock;
> +                goto out_unlock;
>          }
>
> -        kvg->file = filp;
> +        kvg->file = get_file(filp);
>          list_add_tail(&kvg->node, &kv->group_list);
>
>          kvm_arch_start_assignment(dev->kvm);
>          kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
> -
> -        mutex_unlock(&kv->lock);
> -
>          kvm_vfio_update_coherency(dev);
>
> -        return 0;
> -err_unlock:
> +        ret = 0;

Nit, let's initialize ret = 0 when it's declared to avoid this.

Series looks good to me otherwise.  Thanks,

Alex

> +out_unlock:
>          mutex_unlock(&kv->lock);
> -err_fput:
> +out_fput:
>          fput(filp);
>          return ret;
>  }
> @@ -224,12 +217,12 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
>                  break;
>          }
>
> +        kvm_vfio_update_coherency(dev);
> +
>          mutex_unlock(&kv->lock);
>
>          fdput(f);
>
> -        kvm_vfio_update_coherency(dev);
> -
>          return ret;
>  }
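For readers unfamiliar with the goto-unwind idiom Alex is referring to, here is a minimal, self-contained sketch of the suggested shape. This is a made-up userspace stand-in (`add_entry()` is hypothetical, not kernel code): initializing the return code to 0 at declaration lets the success path fall through to the cleanup label without a separate `ret = 0;` assignment.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel pattern: ret starts at 0
 * (success) and is only overwritten on the error paths.
 */
static int add_entry(int fd)
{
	int ret = 0;	/* success unless an error path overrides it */
	char *entry;

	if (fd < 0) {
		ret = -EINVAL;
		goto out;
	}

	entry = malloc(16);
	if (!entry) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... register the entry somewhere, then release our copy ... */
	free(entry);
out:
	return ret;	/* no "ret = 0;" needed before the label */
}

int main(void)
{
	printf("add_entry(3)  = %d\n", add_entry(3));
	printf("add_entry(-1) = %d\n", add_entry(-1));
	return 0;
}
```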
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index cd46d7ef98d6..9868e7ccb5fb 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -122,8 +122,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
         bool noncoherent = false;
         struct kvm_vfio_group *kvg;
 
-        mutex_lock(&kv->lock);
-
         list_for_each_entry(kvg, &kv->group_list, node) {
                 if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
                         noncoherent = true;
@@ -139,8 +137,6 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
                 else
                         kvm_arch_unregister_noncoherent_dma(dev->kvm);
         }
-
-        mutex_unlock(&kv->lock);
 }
 
 static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
@@ -157,7 +153,7 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
         /* Ensure the FD is a vfio group FD.*/
         if (!kvm_vfio_file_is_group(filp)) {
                 ret = -EINVAL;
-                goto err_fput;
+                goto out_fput;
         }
 
         mutex_lock(&kv->lock);
@@ -165,30 +161,27 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
         list_for_each_entry(kvg, &kv->group_list, node) {
                 if (kvg->file == filp) {
                         ret = -EEXIST;
-                        goto err_unlock;
+                        goto out_unlock;
                 }
         }
 
         kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
         if (!kvg) {
                 ret = -ENOMEM;
-                goto err_unlock;
+                goto out_unlock;
         }
 
-        kvg->file = filp;
+        kvg->file = get_file(filp);
         list_add_tail(&kvg->node, &kv->group_list);
 
         kvm_arch_start_assignment(dev->kvm);
         kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
-
-        mutex_unlock(&kv->lock);
-
         kvm_vfio_update_coherency(dev);
 
-        return 0;
-err_unlock:
+        ret = 0;
+out_unlock:
         mutex_unlock(&kv->lock);
-err_fput:
+out_fput:
         fput(filp);
         return ret;
 }
@@ -224,12 +217,12 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
                 break;
         }
 
+        kvm_vfio_update_coherency(dev);
+
         mutex_unlock(&kv->lock);
 
         fdput(f);
 
-        kvm_vfio_update_coherency(dev);
-
         return ret;
 }
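One way to make the new calling convention self-documenting would be a lockdep assertion in kvm_vfio_update_coherency(). This is an assumption on my part, not something the patch adds; the kvm_vfio_release() path, which deliberately runs lockless on the final reference drop, would need to be exempted (for example via an unasserted `__kvm_vfio_update_coherency()` variant, a hypothetical name).

```c
/* Hypothetical follow-up sketch, not part of this patch: callers that
 * forget to take kv->lock would now trip lockdep instead of silently
 * racing the group_list walk.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;

	lockdep_assert_held(&kv->lock);

	/* ... existing body: walk kv->group_list and update
	 * the noncoherent DMA registration, unchanged ...
	 */
}
```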
Stop taking the kv->lock mutex in kvm_vfio_update_coherency() and instead
call it with this mutex held: the callers of the function usually
already have it taken (and released) before calling
kvm_vfio_update_coherency(). This avoids bouncing the lock up and down.

The exception is kvm_vfio_release(), where we do not take the lock, but
it is executed when the very last reference to kvm_device is being
dropped, so there are no concerns about concurrency.

Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
---

v2: new patch.

 virt/kvm/vfio.c | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)
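To make the "bouncing" concrete, the change in kvm_vfio_group_add() amounts to the following shape (paraphrased from the diff above, with the list manipulation elided):

```c
/* Before: the helper took kv->lock itself, so the caller had to drop
 * the lock first, and the mutex was released and immediately re-acquired.
 */
mutex_lock(&kv->lock);
/* ... add the group to kv->group_list ... */
mutex_unlock(&kv->lock);
kvm_vfio_update_coherency(dev);	/* re-takes and drops kv->lock */

/* After: one critical section covers both the list update and the
 * coherency recalculation; the helper no longer touches the lock.
 */
mutex_lock(&kv->lock);
/* ... add the group to kv->group_list ... */
kvm_vfio_update_coherency(dev);	/* called with kv->lock held */
mutex_unlock(&kv->lock);
```

The same reordering appears in kvm_vfio_group_del(), where the coherency update moves inside the locked region rather than running after the unlock.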