Message ID | 20161115151950.1e8ab7d6@t450s.home (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 11/16/2016 3:49 AM, Alex Williamson wrote: > On Tue, 15 Nov 2016 20:59:54 +0530 > Kirti Wankhede <kwankhede@nvidia.com> wrote: > ... >> @@ -854,7 +857,28 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, >> */ >> if (dma->task->mm != current->mm) >> break; >> + >> unmapped += dma->size; >> + >> + if (iommu->external_domain && !RB_EMPTY_ROOT(&dma->pfn_list)) { >> + struct vfio_iommu_type1_dma_unmap nb_unmap; >> + >> + nb_unmap.iova = dma->iova; >> + nb_unmap.size = dma->size; >> + >> + /* >> + * Notifier callback would call vfio_unpin_pages() which >> + * would acquire iommu->lock. Release lock here and >> + * reacquire it again. >> + */ >> + mutex_unlock(&iommu->lock); >> + blocking_notifier_call_chain(&iommu->notifier, >> + VFIO_IOMMU_NOTIFY_DMA_UNMAP, >> + &nb_unmap); >> + mutex_lock(&iommu->lock); >> + if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list))) >> + break; >> + } > > > Why exactly do we need to notify per vfio_dma rather than per unmap > request? If we do the latter we can send the notify first, limiting us > to races where a page is pinned between the notify and the locking, > whereas here, even our dma pointer is suspect once we re-acquire the > lock, we don't technically know if another unmap could have removed > that already. Perhaps something like this (untested): > There are checks to validate the unmap request, such as the v2 check and whether the calling task is allowed to unmap. Before these checks run, it is not certain that the entire requested unmap range will actually be unmapped. The notify call should be placed at the point where it is certain that the range passed to the notifier is definitely going to be removed. My change does that. 
Thanks, Kirti > diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c > index ee9a680..8504501 100644 > --- a/drivers/vfio/vfio_iommu_type1.c > +++ b/drivers/vfio/vfio_iommu_type1.c > @@ -785,6 +785,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, > struct vfio_dma *dma; > size_t unmapped = 0; > int ret = 0; > + struct vfio_iommu_type1_dma_unmap nb_unmap = { .iova = unmap->iova, > + .size = unmap->size }; > > mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1; > > @@ -795,6 +797,14 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, > > WARN_ON(mask & PAGE_MASK); > > + /* > + * Notify anyone (mdev vendor drivers) to invalidate and unmap > + * iovas within the range we're about to unmap. Vendor drivers MUST > + * unpin pages in response to an invalidation. > + */ > + blocking_notifier_call_chain(&iommu->notifier, > + VFIO_IOMMU_NOTIFY_DMA_UNMAP, &nb_unmap); > + > mutex_lock(&iommu->lock); > > /* > @@ -853,25 +863,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, > > unmapped += dma->size; > > - if (iommu->external_domain && !RB_EMPTY_ROOT(&dma->pfn_list)) { > - struct vfio_iommu_type1_dma_unmap nb_unmap; > + WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)); > > - nb_unmap.iova = dma->iova; > - nb_unmap.size = dma->size; > - > - /* > - * Notifier callback would call vfio_unpin_pages() which > - * would acquire iommu->lock. Release lock here and > - * reacquire it again. > - */ > - mutex_unlock(&iommu->lock); > - blocking_notifier_call_chain(&iommu->notifier, > - VFIO_IOMMU_NOTIFY_DMA_UNMAP, > - &nb_unmap); > - mutex_lock(&iommu->lock); > - if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list))) > - break; > - } > vfio_remove_dma(iommu, dma); > } > > > >> vfio_remove_dma(iommu, dma); >> } >> -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index ee9a680..8504501 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -785,6 +785,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, struct vfio_dma *dma; size_t unmapped = 0; int ret = 0; + struct vfio_iommu_type1_dma_unmap nb_unmap = { .iova = unmap->iova, + .size = unmap->size }; mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1; @@ -795,6 +797,14 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, WARN_ON(mask & PAGE_MASK); + /* + * Notify anyone (mdev vendor drivers) to invalidate and unmap + * iovas within the range we're about to unmap. Vendor drivers MUST + * unpin pages in response to an invalidation. + */ + blocking_notifier_call_chain(&iommu->notifier, + VFIO_IOMMU_NOTIFY_DMA_UNMAP, &nb_unmap); + mutex_lock(&iommu->lock); /* @@ -853,25 +863,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, unmapped += dma->size; - if (iommu->external_domain && !RB_EMPTY_ROOT(&dma->pfn_list)) { - struct vfio_iommu_type1_dma_unmap nb_unmap; + WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)); - nb_unmap.iova = dma->iova; - nb_unmap.size = dma->size; - - /* - * Notifier callback would call vfio_unpin_pages() which - * would acquire iommu->lock. Release lock here and - * reacquire it again. - */ - mutex_unlock(&iommu->lock); - blocking_notifier_call_chain(&iommu->notifier, - VFIO_IOMMU_NOTIFY_DMA_UNMAP, - &nb_unmap); - mutex_lock(&iommu->lock); - if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list))) - break; - } vfio_remove_dma(iommu, dma); }