| Message ID | 20170417131926.54af5181@t450s.home (mailing list archive) |
|---|---|
| State | New, archived |
On 4/18/2017 12:49 AM, Alex Williamson wrote: > On Tue, 18 Apr 2017 00:35:06 +0530 > Kirti Wankhede <kwankhede@nvidia.com> wrote: > >> On 4/17/2017 8:02 PM, Alex Williamson wrote: >>> On Mon, 17 Apr 2017 14:47:54 +0800 >>> Peter Xu <peterx@redhat.com> wrote: >>> >>>> On Sun, Apr 16, 2017 at 07:42:27PM -0600, Alex Williamson wrote: >>>> >>>> [...] >>>> >>>>> -static void vfio_lock_acct(struct task_struct *task, long npage) >>>>> +static int vfio_lock_acct(struct task_struct *task, long npage, bool lock_cap) >>>>> { >>>>> - struct vwork *vwork; >>>>> struct mm_struct *mm; >>>>> bool is_current; >>>>> + int ret; >>>>> >>>>> if (!npage) >>>>> - return; >>>>> + return 0; >>>>> >>>>> is_current = (task->mm == current->mm); >>>>> >>>>> mm = is_current ? task->mm : get_task_mm(task); >>>>> if (!mm) >>>>> - return; /* process exited */ >>>>> + return -ESRCH; /* process exited */ >>>>> >>>>> - if (down_write_trylock(&mm->mmap_sem)) { >>>>> - mm->locked_vm += npage; >>>>> - up_write(&mm->mmap_sem); >>>>> - if (!is_current) >>>>> - mmput(mm); >>>>> - return; >>>>> - } >>>>> + ret = down_write_killable(&mm->mmap_sem); >>>>> + if (!ret) { >>>>> + if (npage < 0 || lock_cap) { >>>> >>>> Nit: maybe we can avoid passing in lock_cap in all the callers of >>>> vfio_lock_acct() and fetch it via has_capability() only if npage < 0? >>>> IMHO that'll keep the vfio_lock_acct() interface cleaner, and we won't >>>> need to pass in "false" any time when doing unpins. >>> >>> Unfortunately vfio_pin_pages_remote() needs to know about lock_cap >>> since it tests whether the user is exceeding their locked memory >>> limit. The other callers could certainly get away with >>> vfio_lock_acct() testing the capability itself but that would add a >>> redundant call for the most common user. I'm not a big fan of passing >>> a lock_cap bool either, but it seemed the best fix for now. The >>> cleanest alternative I can up with is this (untested): >>> >> >> In my opinion, passing 'bool lock_cap' looks much clean and simple. >> >> Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com> > > Well shoot, I was just starting to warm up to the bool*. I like that > we're not presuming the polarity for the callers we expect to be > removing pages and I generally just dislike passing fixed bool > parameters to change the function behavior. I've cleaned it up a bit > further and was starting to do some testing on this which I'd propose > for v5. Does it change your opinion? If passing fixed bool parameter is the concern then I would lean towards Peter's suggestion. vfio_pin_pages_remote() will check lock capability outside vfio_lock_acct() and again in vfio_lock_acct(). At other places, it will be takes care within vfio_lock_acct() Thanks, Kirti > > commit cd61c5f507d614ac14b75b0a548c8738deff88ea > Author: Alex Williamson <alex.williamson@redhat.com> > Date: Thu Apr 13 14:10:15 2017 -0600 > > vfio/type1: Remove locked page accounting workqueue > > If the mmap_sem is contented then the vfio type1 IOMMU backend will > defer locked page accounting updates to a workqueue task. This has a > few problems and depending on which side the user tries to play, they > might be over-penalized for unmaps that haven't yet been accounted or > race the workqueue to enter more mappings than they're allowed. The > original intent of this workqueue mechanism seems to be focused on > reducing latency through the ioctl, but we cannot do so at the cost > of correctness. Remove this workqueue mechanism and update the > callers to allow for failure. 
We can also now recheck the limit under > write lock to make sure we don't exceed it. > > vfio_pin_pages_remote() also now necessarily includes an unwind path > which we can jump to directly if the consecutive page pinning finds > that we're exceeding the user's memory limits. This avoids the > current lazy approach which does accounting and mapping up to the > fault, only to return an error on the next iteration to unwind the > entire vfio_dma. > > Cc: stable@vger.kernel.org > Signed-off-by: Alex Williamson <alex.williamson@redhat.com> > > diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c > index 32d2633092a3..a8a079ba9477 100644 > --- a/drivers/vfio/vfio_iommu_type1.c > +++ b/drivers/vfio/vfio_iommu_type1.c > @@ -246,69 +246,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) > return ret; > } > > -struct vwork { > - struct mm_struct *mm; > - long npage; > - struct work_struct work; > -}; > - > -/* delayed decrement/increment for locked_vm */ > -static void vfio_lock_acct_bg(struct work_struct *work) > -{ > - struct vwork *vwork = container_of(work, struct vwork, work); > - struct mm_struct *mm; > - > - mm = vwork->mm; > - down_write(&mm->mmap_sem); > - mm->locked_vm += vwork->npage; > - up_write(&mm->mmap_sem); > - mmput(mm); > - kfree(vwork); > -} > - > -static void vfio_lock_acct(struct task_struct *task, long npage) > +static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) > { > - struct vwork *vwork; > struct mm_struct *mm; > bool is_current; > + int ret; > > if (!npage) > - return; > + return 0; > > is_current = (task->mm == current->mm); > > mm = is_current ? task->mm : get_task_mm(task); > if (!mm) > - return; /* process exited */ > + return -ESRCH; /* process exited */ > > - if (down_write_trylock(&mm->mmap_sem)) { > - mm->locked_vm += npage; > - up_write(&mm->mmap_sem); > - if (!is_current) > - mmput(mm); > - return; > - } > + ret = down_write_killable(&mm->mmap_sem); > + if (!ret) { > + if (npage > 0) { > + if (lock_cap ? !*lock_cap : > + !has_capability(task, CAP_IPC_LOCK)) { > + unsigned long limit; > + > + limit = task_rlimit(task, > + RLIMIT_MEMLOCK) >> PAGE_SHIFT; > + > + if (mm->locked_vm + npage > limit) > + ret = -ENOMEM; > + } > + } > + > + if (!ret) > + mm->locked_vm += npage; > > - if (is_current) { > - mm = get_task_mm(task); > - if (!mm) > - return; > + up_write(&mm->mmap_sem); > } > > - /* > - * Couldn't get mmap_sem lock, so must setup to update > - * mm->locked_vm later. 
If locked_vm were atomic, we > - * wouldn't need this silliness > - */ > - vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL); > - if (WARN_ON(!vwork)) { > + if (!is_current) > mmput(mm); > - return; > - } > - INIT_WORK(&vwork->work, vfio_lock_acct_bg); > - vwork->mm = mm; > - vwork->npage = npage; > - schedule_work(&vwork->work); > + > + return ret; > } > > /* > @@ -405,7 +382,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, > static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, > long npage, unsigned long *pfn_base) > { > - unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; > + unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; > bool lock_cap = capable(CAP_IPC_LOCK); > long ret, pinned = 0, lock_acct = 0; > bool rsvd; > @@ -442,8 +419,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, > /* Lock all the consecutive pages from pfn_base */ > for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; > pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { > - unsigned long pfn = 0; > - > ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); > if (ret) > break; > @@ -460,14 +435,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, > put_pfn(pfn, dma->prot); > pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", > __func__, limit << PAGE_SHIFT); > - break; > + ret = -ENOMEM; > + goto unpin_out; > } > lock_acct++; > } > } > > out: > - vfio_lock_acct(current, lock_acct); > + ret = vfio_lock_acct(current, lock_acct, &lock_cap); > + > +unpin_out: > + if (ret) { > + if (!rsvd) { > + for (pfn = *pfn_base ; pinned ; pfn++, pinned--) > + put_pfn(pfn, dma->prot); > + } > + > + return ret; > + } > > return pinned; > } > @@ -488,7 +474,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, > } > > if (do_accounting) > - vfio_lock_acct(dma->task, locked - unlocked); > + vfio_lock_acct(dma->task, locked - unlocked, NULL); > > return unlocked; > } > @@ -522,8 +508,14 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, > goto pin_page_exit; > } > > - if (!rsvd && do_accounting) > - vfio_lock_acct(dma->task, 1); > + if (!rsvd && do_accounting) { > + ret = vfio_lock_acct(dma->task, 1, &lock_cap); > + if (ret) { > + put_pfn(*pfn_base, dma->prot); > + goto pin_page_exit; > + } > + } > + > ret = 1; > > pin_page_exit: > @@ -543,7 +535,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, > unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); > > if (do_accounting) > - vfio_lock_acct(dma->task, -unlocked); > + vfio_lock_acct(dma->task, -unlocked, NULL); > > return unlocked; > } > @@ -740,7 +732,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, > > dma->iommu_mapped = false; > if (do_accounting) { > - vfio_lock_acct(dma->task, -unlocked); > + vfio_lock_acct(dma->task, -unlocked, NULL); > return 0; > } > return unlocked; > @@ -1382,7 +1374,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) > if (!is_invalid_reserved_pfn(vpfn->pfn)) > locked++; > } > - vfio_lock_acct(dma->task, locked - unlocked); > + vfio_lock_acct(dma->task, locked - unlocked, NULL); > } > } > > > Patch 2/2 would clearly change the &lock_cap in > vfio_pin_page_external() to a NULL, so only _remote passes a pointer > there. Thanks, > > Alex >
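For readers skimming the quoted v5 draft: the bool *lock_cap argument is effectively a tri-state contract. Passing NULL asks vfio_lock_acct() to resolve CAP_IPC_LOCK itself via has_capability(), and only when it matters (npage > 0); passing a pointer supplies a result the caller already computed, as vfio_pin_pages_remote() does because it needs the same answer for its own limit check. A minimal user-space sketch of that pattern, with hypothetical names rather than the driver's real API, might look like this:

#include <stdbool.h>
#include <stddef.h>
#include <errno.h>

/* Hypothetical stand-in for has_capability(task, CAP_IPC_LOCK). */
static bool lookup_lock_cap(void)
{
	return false;
}

/*
 * Hypothetical accounting helper mirroring the bool *lock_cap contract:
 * NULL means "look the capability up here, but only when npage > 0";
 * non-NULL means "the caller already knows the answer".
 */
static int acct_locked(long *locked_vm, long npage, long limit,
		       const bool *lock_cap)
{
	if (npage > 0) {
		bool cap = lock_cap ? *lock_cap : lookup_lock_cap();

		if (!cap && *locked_vm + npage > limit)
			return -ENOMEM;	/* would exceed the memlock limit */
	}

	*locked_vm += npage;
	return 0;
}

In this sketch the unpin paths pass NULL with a negative npage, so they never pay for the capability lookup at all, which is the point Peter raised about keeping the interface clean for callers that only ever decrement.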
On Tue, 18 Apr 2017 01:02:12 +0530 Kirti Wankhede <kwankhede@nvidia.com> wrote: > On 4/18/2017 12:49 AM, Alex Williamson wrote: > > On Tue, 18 Apr 2017 00:35:06 +0530 > > Kirti Wankhede <kwankhede@nvidia.com> wrote: > > > >> On 4/17/2017 8:02 PM, Alex Williamson wrote: > >>> On Mon, 17 Apr 2017 14:47:54 +0800 > >>> Peter Xu <peterx@redhat.com> wrote: > >>> > >>>> On Sun, Apr 16, 2017 at 07:42:27PM -0600, Alex Williamson wrote: > >>>> > >>>> [...] > >>>> > >>>>> -static void vfio_lock_acct(struct task_struct *task, long npage) > >>>>> +static int vfio_lock_acct(struct task_struct *task, long npage, bool lock_cap) > >>>>> { > >>>>> - struct vwork *vwork; > >>>>> struct mm_struct *mm; > >>>>> bool is_current; > >>>>> + int ret; > >>>>> > >>>>> if (!npage) > >>>>> - return; > >>>>> + return 0; > >>>>> > >>>>> is_current = (task->mm == current->mm); > >>>>> > >>>>> mm = is_current ? task->mm : get_task_mm(task); > >>>>> if (!mm) > >>>>> - return; /* process exited */ > >>>>> + return -ESRCH; /* process exited */ > >>>>> > >>>>> - if (down_write_trylock(&mm->mmap_sem)) { > >>>>> - mm->locked_vm += npage; > >>>>> - up_write(&mm->mmap_sem); > >>>>> - if (!is_current) > >>>>> - mmput(mm); > >>>>> - return; > >>>>> - } > >>>>> + ret = down_write_killable(&mm->mmap_sem); > >>>>> + if (!ret) { > >>>>> + if (npage < 0 || lock_cap) { > >>>> > >>>> Nit: maybe we can avoid passing in lock_cap in all the callers of > >>>> vfio_lock_acct() and fetch it via has_capability() only if npage < 0? > >>>> IMHO that'll keep the vfio_lock_acct() interface cleaner, and we won't > >>>> need to pass in "false" any time when doing unpins. > >>> > >>> Unfortunately vfio_pin_pages_remote() needs to know about lock_cap > >>> since it tests whether the user is exceeding their locked memory > >>> limit. The other callers could certainly get away with > >>> vfio_lock_acct() testing the capability itself but that would add a > >>> redundant call for the most common user. I'm not a big fan of passing > >>> a lock_cap bool either, but it seemed the best fix for now. The > >>> cleanest alternative I can up with is this (untested): > >>> > >> > >> In my opinion, passing 'bool lock_cap' looks much clean and simple. > >> > >> Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com> > > > > Well shoot, I was just starting to warm up to the bool*. I like that > > we're not presuming the polarity for the callers we expect to be > > removing pages and I generally just dislike passing fixed bool > > parameters to change the function behavior. I've cleaned it up a bit > > further and was starting to do some testing on this which I'd propose > > for v5. Does it change your opinion? > > If passing fixed bool parameter is the concern then I would lean towards > Peter's suggestion. vfio_pin_pages_remote() will check lock capability > outside vfio_lock_acct() and again in vfio_lock_acct(). At other places, > it will be takes care within vfio_lock_acct() Sorry, I don't see that as a viable option. Testing for CAP_IPC_LOCK in both vfio_pin_pages_remote() and vfio_lock_acct() results in over a 10% performance hit on the mapping path with a custom micro-benchmark. In fact, it suggests we should probably pass that from even higher in the call stack. 
Thanks, Alex > > > > commit cd61c5f507d614ac14b75b0a548c8738deff88ea > > Author: Alex Williamson <alex.williamson@redhat.com> > > Date: Thu Apr 13 14:10:15 2017 -0600 > > > > vfio/type1: Remove locked page accounting workqueue > > > > If the mmap_sem is contented then the vfio type1 IOMMU backend will > > defer locked page accounting updates to a workqueue task. This has a > > few problems and depending on which side the user tries to play, they > > might be over-penalized for unmaps that haven't yet been accounted or > > race the workqueue to enter more mappings than they're allowed. The > > original intent of this workqueue mechanism seems to be focused on > > reducing latency through the ioctl, but we cannot do so at the cost > > of correctness. Remove this workqueue mechanism and update the > > callers to allow for failure. We can also now recheck the limit under > > write lock to make sure we don't exceed it. > > > > vfio_pin_pages_remote() also now necessarily includes an unwind path > > which we can jump to directly if the consecutive page pinning finds > > that we're exceeding the user's memory limits. This avoids the > > current lazy approach which does accounting and mapping up to the > > fault, only to return an error on the next iteration to unwind the > > entire vfio_dma. > > > > Cc: stable@vger.kernel.org > > Signed-off-by: Alex Williamson <alex.williamson@redhat.com> > > > > diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c > > index 32d2633092a3..a8a079ba9477 100644 > > --- a/drivers/vfio/vfio_iommu_type1.c > > +++ b/drivers/vfio/vfio_iommu_type1.c > > @@ -246,69 +246,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) > > return ret; > > } > > > > -struct vwork { > > - struct mm_struct *mm; > > - long npage; > > - struct work_struct work; > > -}; > > - > > -/* delayed decrement/increment for locked_vm */ > > -static void vfio_lock_acct_bg(struct work_struct *work) > > -{ > > - struct vwork *vwork = container_of(work, struct vwork, work); > > - struct mm_struct *mm; > > - > > - mm = vwork->mm; > > - down_write(&mm->mmap_sem); > > - mm->locked_vm += vwork->npage; > > - up_write(&mm->mmap_sem); > > - mmput(mm); > > - kfree(vwork); > > -} > > - > > -static void vfio_lock_acct(struct task_struct *task, long npage) > > +static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) > > { > > - struct vwork *vwork; > > struct mm_struct *mm; > > bool is_current; > > + int ret; > > > > if (!npage) > > - return; > > + return 0; > > > > is_current = (task->mm == current->mm); > > > > mm = is_current ? task->mm : get_task_mm(task); > > if (!mm) > > - return; /* process exited */ > > + return -ESRCH; /* process exited */ > > > > - if (down_write_trylock(&mm->mmap_sem)) { > > - mm->locked_vm += npage; > > - up_write(&mm->mmap_sem); > > - if (!is_current) > > - mmput(mm); > > - return; > > - } > > + ret = down_write_killable(&mm->mmap_sem); > > + if (!ret) { > > + if (npage > 0) { > > + if (lock_cap ? 
!*lock_cap : > > + !has_capability(task, CAP_IPC_LOCK)) { > > + unsigned long limit; > > + > > + limit = task_rlimit(task, > > + RLIMIT_MEMLOCK) >> PAGE_SHIFT; > > + > > + if (mm->locked_vm + npage > limit) > > + ret = -ENOMEM; > > + } > > + } > > + > > + if (!ret) > > + mm->locked_vm += npage; > > > > - if (is_current) { > > - mm = get_task_mm(task); > > - if (!mm) > > - return; > > + up_write(&mm->mmap_sem); > > } > > > > - /* > > - * Couldn't get mmap_sem lock, so must setup to update > > - * mm->locked_vm later. If locked_vm were atomic, we > > - * wouldn't need this silliness > > - */ > > - vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL); > > - if (WARN_ON(!vwork)) { > > + if (!is_current) > > mmput(mm); > > - return; > > - } > > - INIT_WORK(&vwork->work, vfio_lock_acct_bg); > > - vwork->mm = mm; > > - vwork->npage = npage; > > - schedule_work(&vwork->work); > > + > > + return ret; > > } > > > > /* > > @@ -405,7 +382,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, > > static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, > > long npage, unsigned long *pfn_base) > > { > > - unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; > > + unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; > > bool lock_cap = capable(CAP_IPC_LOCK); > > long ret, pinned = 0, lock_acct = 0; > > bool rsvd; > > @@ -442,8 +419,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, > > /* Lock all the consecutive pages from pfn_base */ > > for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; > > pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { > > - unsigned long pfn = 0; > > - > > ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); > > if (ret) > > break; > > @@ -460,14 +435,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, > > put_pfn(pfn, dma->prot); > > pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", > > __func__, limit << PAGE_SHIFT); > > - break; > > + ret = -ENOMEM; > > + goto unpin_out; > > } > > lock_acct++; > > } > > } > > > > out: > > - vfio_lock_acct(current, lock_acct); > > + ret = vfio_lock_acct(current, lock_acct, &lock_cap); > > + > > +unpin_out: > > + if (ret) { > > + if (!rsvd) { > > + for (pfn = *pfn_base ; pinned ; pfn++, pinned--) > > + put_pfn(pfn, dma->prot); > > + } > > + > > + return ret; > > + } > > > > return pinned; > > } > > @@ -488,7 +474,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, > > } > > > > if (do_accounting) > > - vfio_lock_acct(dma->task, locked - unlocked); > > + vfio_lock_acct(dma->task, locked - unlocked, NULL); > > > > return unlocked; > > } > > @@ -522,8 +508,14 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, > > goto pin_page_exit; > > } > > > > - if (!rsvd && do_accounting) > > - vfio_lock_acct(dma->task, 1); > > + if (!rsvd && do_accounting) { > > + ret = vfio_lock_acct(dma->task, 1, &lock_cap); > > + if (ret) { > > + put_pfn(*pfn_base, dma->prot); > > + goto pin_page_exit; > > + } > > + } > > + > > ret = 1; > > > > pin_page_exit: > > @@ -543,7 +535,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, > > unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); > > > > if (do_accounting) > > - vfio_lock_acct(dma->task, -unlocked); > > + vfio_lock_acct(dma->task, -unlocked, NULL); > > > > return unlocked; > > } > > @@ -740,7 +732,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, > > > > dma->iommu_mapped = false; 
> > if (do_accounting) { > > - vfio_lock_acct(dma->task, -unlocked); > > + vfio_lock_acct(dma->task, -unlocked, NULL); > > return 0; > > } > > return unlocked; > > @@ -1382,7 +1374,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) > > if (!is_invalid_reserved_pfn(vpfn->pfn)) > > locked++; > > } > > - vfio_lock_acct(dma->task, locked - unlocked); > > + vfio_lock_acct(dma->task, locked - unlocked, NULL); > > } > > } > > > > > > Patch 2/2 would clearly change the &lock_cap in > > vfio_pin_page_external() to a NULL, so only _remote passes a pointer > > there. Thanks, > > > > Alex > >
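The performance argument above is about where capable(CAP_IPC_LOCK) gets evaluated, not whether it does. A rough, self-contained sketch of the "pass it from higher in the call stack" idea (hypothetical names; in the driver the chain runs roughly from the map ioctl down to vfio_pin_pages_remote()) resolves the capability once per map request and threads the cached value down:

#include <stdbool.h>
#include <errno.h>

/* Hypothetical stand-ins for capable(CAP_IPC_LOCK) and RLIMIT_MEMLOCK. */
static bool query_lock_cap(void)          { return false; }
static unsigned long memlock_limit(void)  { return 16384; }

struct pin_ctx {
	bool lock_cap;		/* capability, resolved once per request */
	unsigned long limit;	/* locked-memory limit, in pages */
	unsigned long locked_vm;
};

/* Per-chunk accounting: no capability lookup in the hot path. */
static int pin_chunk(struct pin_ctx *ctx, unsigned long npage)
{
	if (!ctx->lock_cap && ctx->locked_vm + npage > ctx->limit)
		return -ENOMEM;

	ctx->locked_vm += npage;
	return 0;
}

/* One map request: the expensive lookup happens exactly once. */
static int do_map(unsigned long total_pages, unsigned long chunk)
{
	struct pin_ctx ctx = {
		.lock_cap = query_lock_cap(),
		.limit    = memlock_limit(),
	};
	unsigned long done, n;
	int ret;

	for (done = 0; done < total_pages; done += n) {
		n = total_pages - done < chunk ? total_pages - done : chunk;
		ret = pin_chunk(&ctx, n);
		if (ret)
			return ret;
	}
	return 0;
}

This is only an illustration of the caching idea, not the driver's actual structure; the patch under review stops one level lower and caches the result in vfio_pin_pages_remote().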
On Mon, Apr 17, 2017 at 03:32:20PM -0600, Alex Williamson wrote: > On Tue, 18 Apr 2017 01:02:12 +0530 > Kirti Wankhede <kwankhede@nvidia.com> wrote: > > > On 4/18/2017 12:49 AM, Alex Williamson wrote: > > > On Tue, 18 Apr 2017 00:35:06 +0530 > > > Kirti Wankhede <kwankhede@nvidia.com> wrote: > > > > > >> On 4/17/2017 8:02 PM, Alex Williamson wrote: > > >>> On Mon, 17 Apr 2017 14:47:54 +0800 > > >>> Peter Xu <peterx@redhat.com> wrote: > > >>> > > >>>> On Sun, Apr 16, 2017 at 07:42:27PM -0600, Alex Williamson wrote: > > >>>> > > >>>> [...] > > >>>> > > >>>>> -static void vfio_lock_acct(struct task_struct *task, long npage) > > >>>>> +static int vfio_lock_acct(struct task_struct *task, long npage, bool lock_cap) > > >>>>> { > > >>>>> - struct vwork *vwork; > > >>>>> struct mm_struct *mm; > > >>>>> bool is_current; > > >>>>> + int ret; > > >>>>> > > >>>>> if (!npage) > > >>>>> - return; > > >>>>> + return 0; > > >>>>> > > >>>>> is_current = (task->mm == current->mm); > > >>>>> > > >>>>> mm = is_current ? task->mm : get_task_mm(task); > > >>>>> if (!mm) > > >>>>> - return; /* process exited */ > > >>>>> + return -ESRCH; /* process exited */ > > >>>>> > > >>>>> - if (down_write_trylock(&mm->mmap_sem)) { > > >>>>> - mm->locked_vm += npage; > > >>>>> - up_write(&mm->mmap_sem); > > >>>>> - if (!is_current) > > >>>>> - mmput(mm); > > >>>>> - return; > > >>>>> - } > > >>>>> + ret = down_write_killable(&mm->mmap_sem); > > >>>>> + if (!ret) { > > >>>>> + if (npage < 0 || lock_cap) { > > >>>> > > >>>> Nit: maybe we can avoid passing in lock_cap in all the callers of > > >>>> vfio_lock_acct() and fetch it via has_capability() only if npage < 0? > > >>>> IMHO that'll keep the vfio_lock_acct() interface cleaner, and we won't > > >>>> need to pass in "false" any time when doing unpins. > > >>> > > >>> Unfortunately vfio_pin_pages_remote() needs to know about lock_cap > > >>> since it tests whether the user is exceeding their locked memory > > >>> limit. The other callers could certainly get away with > > >>> vfio_lock_acct() testing the capability itself but that would add a > > >>> redundant call for the most common user. I'm not a big fan of passing > > >>> a lock_cap bool either, but it seemed the best fix for now. The > > >>> cleanest alternative I can up with is this (untested): > > >>> > > >> > > >> In my opinion, passing 'bool lock_cap' looks much clean and simple. > > >> > > >> Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com> > > > > > > Well shoot, I was just starting to warm up to the bool*. I like that > > > we're not presuming the polarity for the callers we expect to be > > > removing pages and I generally just dislike passing fixed bool > > > parameters to change the function behavior. I've cleaned it up a bit > > > further and was starting to do some testing on this which I'd propose > > > for v5. Does it change your opinion? > > > > If passing fixed bool parameter is the concern then I would lean towards > > Peter's suggestion. vfio_pin_pages_remote() will check lock capability > > outside vfio_lock_acct() and again in vfio_lock_acct(). At other places, > > it will be takes care within vfio_lock_acct() > > Sorry, I don't see that as a viable option. Testing for CAP_IPC_LOCK in > both vfio_pin_pages_remote() and vfio_lock_acct() results in over a > 10% performance hit on the mapping path with a custom micro-benchmark. > In fact, it suggests we should probably pass that from even higher in > the call stack. Thanks, Sorry I wasn't aware of such a performance degradation with such a change. 
Then I would be perfectly fine with either the current patch or the new one you proposed (with bool *). Thanks,
On 4/18/2017 8:24 AM, Peter Xu wrote: > On Mon, Apr 17, 2017 at 03:32:20PM -0600, Alex Williamson wrote: >> On Tue, 18 Apr 2017 01:02:12 +0530 >> Kirti Wankhede <kwankhede@nvidia.com> wrote: >> >>> On 4/18/2017 12:49 AM, Alex Williamson wrote: >>>> On Tue, 18 Apr 2017 00:35:06 +0530 >>>> Kirti Wankhede <kwankhede@nvidia.com> wrote: >>>> >>>>> On 4/17/2017 8:02 PM, Alex Williamson wrote: >>>>>> On Mon, 17 Apr 2017 14:47:54 +0800 >>>>>> Peter Xu <peterx@redhat.com> wrote: >>>>>> >>>>>>> On Sun, Apr 16, 2017 at 07:42:27PM -0600, Alex Williamson wrote: >>>>>>> >>>>>>> [...] >>>>>>> >>>>>>>> -static void vfio_lock_acct(struct task_struct *task, long npage) >>>>>>>> +static int vfio_lock_acct(struct task_struct *task, long npage, bool lock_cap) >>>>>>>> { >>>>>>>> - struct vwork *vwork; >>>>>>>> struct mm_struct *mm; >>>>>>>> bool is_current; >>>>>>>> + int ret; >>>>>>>> >>>>>>>> if (!npage) >>>>>>>> - return; >>>>>>>> + return 0; >>>>>>>> >>>>>>>> is_current = (task->mm == current->mm); >>>>>>>> >>>>>>>> mm = is_current ? task->mm : get_task_mm(task); >>>>>>>> if (!mm) >>>>>>>> - return; /* process exited */ >>>>>>>> + return -ESRCH; /* process exited */ >>>>>>>> >>>>>>>> - if (down_write_trylock(&mm->mmap_sem)) { >>>>>>>> - mm->locked_vm += npage; >>>>>>>> - up_write(&mm->mmap_sem); >>>>>>>> - if (!is_current) >>>>>>>> - mmput(mm); >>>>>>>> - return; >>>>>>>> - } >>>>>>>> + ret = down_write_killable(&mm->mmap_sem); >>>>>>>> + if (!ret) { >>>>>>>> + if (npage < 0 || lock_cap) { >>>>>>> >>>>>>> Nit: maybe we can avoid passing in lock_cap in all the callers of >>>>>>> vfio_lock_acct() and fetch it via has_capability() only if npage < 0? >>>>>>> IMHO that'll keep the vfio_lock_acct() interface cleaner, and we won't >>>>>>> need to pass in "false" any time when doing unpins. >>>>>> >>>>>> Unfortunately vfio_pin_pages_remote() needs to know about lock_cap >>>>>> since it tests whether the user is exceeding their locked memory >>>>>> limit. The other callers could certainly get away with >>>>>> vfio_lock_acct() testing the capability itself but that would add a >>>>>> redundant call for the most common user. I'm not a big fan of passing >>>>>> a lock_cap bool either, but it seemed the best fix for now. The >>>>>> cleanest alternative I can up with is this (untested): >>>>>> >>>>> >>>>> In my opinion, passing 'bool lock_cap' looks much clean and simple. >>>>> >>>>> Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com> >>>> >>>> Well shoot, I was just starting to warm up to the bool*. I like that >>>> we're not presuming the polarity for the callers we expect to be >>>> removing pages and I generally just dislike passing fixed bool >>>> parameters to change the function behavior. I've cleaned it up a bit >>>> further and was starting to do some testing on this which I'd propose >>>> for v5. Does it change your opinion? >>> >>> If passing fixed bool parameter is the concern then I would lean towards >>> Peter's suggestion. vfio_pin_pages_remote() will check lock capability >>> outside vfio_lock_acct() and again in vfio_lock_acct(). At other places, >>> it will be takes care within vfio_lock_acct() >> >> Sorry, I don't see that as a viable option. Testing for CAP_IPC_LOCK in >> both vfio_pin_pages_remote() and vfio_lock_acct() results in over a >> 10% performance hit on the mapping path with a custom micro-benchmark. >> In fact, it suggests we should probably pass that from even higher in >> the call stack. Thanks, > > Sorry I wasn't aware of such a performance degradation with such a > change. 
Then I would be perfectly fine with either current patch, or > the new one you proposed (with bool *). Thanks, > Sorry, even I wasn't aware of. Looking at v5 version now. Thanks, Kirti
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 32d2633092a3..a8a079ba9477 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -246,69 +246,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
 	return ret;
 }
 
-struct vwork {
-	struct mm_struct *mm;
-	long npage;
-	struct work_struct work;
-};
-
-/* delayed decrement/increment for locked_vm */
-static void vfio_lock_acct_bg(struct work_struct *work)
-{
-	struct vwork *vwork = container_of(work, struct vwork, work);
-	struct mm_struct *mm;
-
-	mm = vwork->mm;
-	down_write(&mm->mmap_sem);
-	mm->locked_vm += vwork->npage;
-	up_write(&mm->mmap_sem);
-	mmput(mm);
-	kfree(vwork);
-}
-
-static void vfio_lock_acct(struct task_struct *task, long npage)
+static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
 {
-	struct vwork *vwork;
 	struct mm_struct *mm;
 	bool is_current;
+	int ret;
 
 	if (!npage)
-		return;
+		return 0;
 
 	is_current = (task->mm == current->mm);
 
 	mm = is_current ? task->mm : get_task_mm(task);
 	if (!mm)
-		return; /* process exited */
+		return -ESRCH; /* process exited */
 
-	if (down_write_trylock(&mm->mmap_sem)) {
-		mm->locked_vm += npage;
-		up_write(&mm->mmap_sem);
-		if (!is_current)
-			mmput(mm);
-		return;
-	}
+	ret = down_write_killable(&mm->mmap_sem);
+	if (!ret) {
+		if (npage > 0) {
+			if (lock_cap ? !*lock_cap :
+			    !has_capability(task, CAP_IPC_LOCK)) {
+				unsigned long limit;
+
+				limit = task_rlimit(task,
+						    RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+				if (mm->locked_vm + npage > limit)
+					ret = -ENOMEM;
+			}
+		}
+
+		if (!ret)
+			mm->locked_vm += npage;
 
-	if (is_current) {
-		mm = get_task_mm(task);
-		if (!mm)
-			return;
+		up_write(&mm->mmap_sem);
 	}
 
-	/*
-	 * Couldn't get mmap_sem lock, so must setup to update
-	 * mm->locked_vm later. If locked_vm were atomic, we
-	 * wouldn't need this silliness
-	 */
-	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-	if (WARN_ON(!vwork)) {
+	if (!is_current)
 		mmput(mm);
-		return;
-	}
-	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
-	vwork->mm = mm;
-	vwork->npage = npage;
-	schedule_work(&vwork->work);
+
+	return ret;
 }
 
 /*
@@ -405,7 +382,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 				  long npage, unsigned long *pfn_base)
 {
-	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	bool lock_cap = capable(CAP_IPC_LOCK);
 	long ret, pinned = 0, lock_acct = 0;
 	bool rsvd;
@@ -442,8 +419,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 	/* Lock all the consecutive pages from pfn_base */
 	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
 	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
-		unsigned long pfn = 0;
-
 		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
 		if (ret)
 			break;
@@ -460,14 +435,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 				put_pfn(pfn, dma->prot);
 				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
 					__func__, limit << PAGE_SHIFT);
-				break;
+				ret = -ENOMEM;
+				goto unpin_out;
 			}
 			lock_acct++;
 		}
 	}
 
 out:
-	vfio_lock_acct(current, lock_acct);
+	ret = vfio_lock_acct(current, lock_acct, &lock_cap);
+
+unpin_out:
+	if (ret) {
+		if (!rsvd) {
+			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
+				put_pfn(pfn, dma->prot);
+		}
+
+		return ret;
+	}
 
 	return pinned;
 }
@@ -488,7 +474,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
 	}
 
 	if (do_accounting)
-		vfio_lock_acct(dma->task, locked - unlocked);
+		vfio_lock_acct(dma->task, locked - unlocked, NULL);
 
 	return unlocked;
 }
@@ -522,8 +508,14 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
 		goto pin_page_exit;
 	}
 
-	if (!rsvd && do_accounting)
-		vfio_lock_acct(dma->task, 1);
+	if (!rsvd && do_accounting) {
+		ret = vfio_lock_acct(dma->task, 1, &lock_cap);
+		if (ret) {
+			put_pfn(*pfn_base, dma->prot);
+			goto pin_page_exit;
+		}
+	}
+
 	ret = 1;
 
 pin_page_exit:
@@ -543,7 +535,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
 	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
 
 	if (do_accounting)
-		vfio_lock_acct(dma->task, -unlocked);
+		vfio_lock_acct(dma->task, -unlocked, NULL);
 
 	return unlocked;
 }
@@ -740,7 +732,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 
 	dma->iommu_mapped = false;
 	if (do_accounting) {
-		vfio_lock_acct(dma->task, -unlocked);
+		vfio_lock_acct(dma->task, -unlocked, NULL);
 		return 0;
 	}
 	return unlocked;
@@ -1382,7 +1374,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
 			if (!is_invalid_reserved_pfn(vpfn->pfn))
 				locked++;
 		}
-		vfio_lock_acct(dma->task, locked - unlocked);
+		vfio_lock_acct(dma->task, locked - unlocked, NULL);
 	}
 }
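Taken in isolation, the control flow vfio_pin_pages_remote() gains in this patch is a generic pin-then-unwind pattern: pin consecutive pages, attempt the accounting last, and on any failure release exactly the pages pinned so far by walking the same contiguous range. A minimal stand-alone sketch of that pattern (hypothetical helper names, not the driver's API) looks like this:

#include <errno.h>

/* Hypothetical per-page pin/unpin and accounting stand-ins. */
static int  pin_one(unsigned long pfn)   { (void)pfn; return 0; }
static void unpin_one(unsigned long pfn) { (void)pfn; }
static int  account(long npage)          { (void)npage; return 0; }

/*
 * Pin npage consecutive pages starting at base_pfn, do the accounting
 * last, and on any failure undo exactly the pages pinned so far.
 */
static long pin_range(unsigned long base_pfn, long npage)
{
	long pinned;
	int ret;

	for (pinned = 0; pinned < npage; pinned++) {
		ret = pin_one(base_pfn + pinned);
		if (ret)
			goto unwind;
	}

	ret = account(pinned);
	if (ret)
		goto unwind;

	return pinned;

unwind:
	while (pinned--)
		unpin_one(base_pfn + pinned);
	return ret;
}

The design point the commit message makes is that doing this unwind eagerly, under the caller's error path, replaces the old lazy behaviour of accounting and mapping up to the fault and only unwinding the whole vfio_dma on the next iteration.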