| Message ID | 20230818150845.96679-4-thomas.hellstrom@linux.intel.com |
|---|---|
| State | New, archived |
| Series | drm/xe: Support optional pinning of userptr pages |
On Fri, Aug 18, 2023 at 05:08:44PM +0200, Thomas Hellström wrote:
> Account these pages against RLIMIT_MEMLOCK following how RDMA does this
> with CAP_IPC_LOCK bypassing the limit.
>
> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>

Patch LGTM, but nits on naming + a possible assert.

> ---
>  drivers/gpu/drm/xe/xe_vm.c | 43 ++++++++++++++++++++++++++++++++++++--
>  1 file changed, 41 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index ecbcad696b60..d9c000689002 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -34,6 +34,33 @@
>
>  #define TEST_VM_ASYNC_OPS_ERROR
>
> +/*
> + * Perform userptr PIN accounting against RLIMIT_MEMLOCK for now, similarly
> + * to how RDMA does this.
> + */
> +static int xe_vma_mlock_alloc(struct xe_vma *vma, unsigned long num_pages)
> +{

xe_vma_userptr_mlock_alloc? Or maybe even xe_vma_userptr_mlock_reserve?

> +        unsigned long lock_limit, new_pinned;
> +        struct mm_struct *mm = vma->userptr.notifier.mm;
> +

Could this be a candidate for the new assert macros, to ensure that the vma
is a userptr + pinned? Not sure if that has merged yet.

> +        if (!can_do_mlock())
> +                return -EPERM;
> +
> +        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> +        new_pinned = atomic64_add_return(num_pages, &mm->pinned_vm);
> +        if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
> +                atomic64_sub(num_pages, &mm->pinned_vm);
> +                return -ENOMEM;
> +        }
> +
> +        return 0;
> +}
> +
> +static void xe_vma_mlock_free(struct xe_vma *vma, unsigned long num_pages)
> +{

xe_vma_userptr_mlock_free? Or maybe even xe_vma_userptr_mlock_release?

Same for the assert here.

Anyway, I'll leave addressing these nits up to you. With that:

Reviewed-by: Matthew Brost <matthew.brost@intel.com>

> +        atomic64_sub(num_pages, &vma->userptr.notifier.mm->pinned_vm);
> +}
> +
>  /**
>   * xe_vma_userptr_check_repin() - Advisory check for repin needed
>   * @vma: The userptr vma
> @@ -89,9 +116,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
>                                                 !read_only);
>                  pages = vma->userptr.pinned_pages;
>          } else {
> +                if (xe_vma_is_pinned(vma)) {
> +                        ret = xe_vma_mlock_alloc(vma, num_pages);
> +                        if (ret)
> +                                return ret;
> +                }
> +
>                  pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
> -                if (!pages)
> -                        return -ENOMEM;
> +                if (!pages) {
> +                        ret = -ENOMEM;
> +                        goto out_account;
> +                }
>          }
>
>          pinned = ret = 0;
> @@ -187,6 +222,9 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
>  mm_closed:
>          kvfree(pages);
>          vma->userptr.pinned_pages = NULL;
> +out_account:
> +        if (xe_vma_is_pinned(vma))
> +                xe_vma_mlock_free(vma, num_pages);
>          return ret;
>  }
>
> @@ -1004,6 +1042,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
>                  unpin_user_pages_dirty_lock(vma->userptr.pinned_pages,
>                                              vma->userptr.num_pinned,
>                                              !read_only);
> +                xe_vma_mlock_free(vma, xe_vma_size(vma) >> PAGE_SHIFT);
>                  kvfree(vma->userptr.pinned_pages);
>          }
>
> --
> 2.41.0
>
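For illustration, here is a minimal sketch of what the rename plus asserts suggested above could look like. The helper names follow Matthew's proposed xe_vma_userptr_mlock_reserve()/xe_vma_userptr_mlock_release(), and the xe_assert(), xe_vma_vm(), xe_vma_is_userptr() and xe_vma_is_pinned() helpers are assumptions about the driver at this point in the series, not something confirmed by the patch itself:

```c
/*
 * Sketch only: the names follow the rename proposed in review, and the
 * xe_assert()/xe_vma_vm()/xe_vma_is_userptr()/xe_vma_is_pinned() helpers
 * are assumed to exist; the accounting body is taken from the patch above.
 */
static int xe_vma_userptr_mlock_reserve(struct xe_vma *vma,
					unsigned long num_pages)
{
	struct mm_struct *mm = vma->userptr.notifier.mm;
	unsigned long lock_limit, new_pinned;

	/* Accounting only makes sense for pinned userptr VMAs. */
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_pinned(vma));

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	new_pinned = atomic64_add_return(num_pages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(num_pages, &mm->pinned_vm);
		return -ENOMEM;
	}

	return 0;
}

static void xe_vma_userptr_mlock_release(struct xe_vma *vma,
					 unsigned long num_pages)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_pinned(vma));

	atomic64_sub(num_pages, &vma->userptr.notifier.mm->pinned_vm);
}
```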
On 8/20/23 05:43, Matthew Brost wrote:
> On Fri, Aug 18, 2023 at 05:08:44PM +0200, Thomas Hellström wrote:
>> Account these pages against RLIMIT_MEMLOCK following how RDMA does this
>> with CAP_IPC_LOCK bypassing the limit.
>>
>> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>
> Patch LGTM, but nits on naming + a possible assert.
>
>> ---
>>  drivers/gpu/drm/xe/xe_vm.c | 43 ++++++++++++++++++++++++++++++++++++--
>>  1 file changed, 41 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index ecbcad696b60..d9c000689002 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -34,6 +34,33 @@
>>
>>  #define TEST_VM_ASYNC_OPS_ERROR
>>
>> +/*
>> + * Perform userptr PIN accounting against RLIMIT_MEMLOCK for now, similarly
>> + * to how RDMA does this.
>> + */
>> +static int xe_vma_mlock_alloc(struct xe_vma *vma, unsigned long num_pages)
>> +{
>
> xe_vma_userptr_mlock_alloc? Or maybe even xe_vma_userptr_mlock_reserve?
>
>> +        unsigned long lock_limit, new_pinned;
>> +        struct mm_struct *mm = vma->userptr.notifier.mm;
>> +
>
> Could this be a candidate for the new assert macros, to ensure that the vma
> is a userptr + pinned? Not sure if that has merged yet.
>
>> +        if (!can_do_mlock())
>> +                return -EPERM;
>> +
>> +        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>> +        new_pinned = atomic64_add_return(num_pages, &mm->pinned_vm);
>> +        if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
>> +                atomic64_sub(num_pages, &mm->pinned_vm);
>> +                return -ENOMEM;
>> +        }
>> +
>> +        return 0;
>> +}
>> +
>> +static void xe_vma_mlock_free(struct xe_vma *vma, unsigned long num_pages)
>> +{
>
> xe_vma_userptr_mlock_free? Or maybe even xe_vma_userptr_mlock_release?
>
> Same for the assert here.
>
> Anyway, I'll leave addressing these nits up to you. With that:
>
> Reviewed-by: Matthew Brost <matthew.brost@intel.com>

OK, thanks. I'll take a look at addressing those.

>
>> +        atomic64_sub(num_pages, &vma->userptr.notifier.mm->pinned_vm);
>> +}
>> +
>>  /**
>>   * xe_vma_userptr_check_repin() - Advisory check for repin needed
>>   * @vma: The userptr vma
>> @@ -89,9 +116,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
>>                                                 !read_only);
>>                  pages = vma->userptr.pinned_pages;
>>          } else {
>> +                if (xe_vma_is_pinned(vma)) {
>> +                        ret = xe_vma_mlock_alloc(vma, num_pages);
>> +                        if (ret)
>> +                                return ret;
>> +                }
>> +
>>                  pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
>> -                if (!pages)
>> -                        return -ENOMEM;
>> +                if (!pages) {
>> +                        ret = -ENOMEM;
>> +                        goto out_account;
>> +                }
>>          }
>>
>>          pinned = ret = 0;
>> @@ -187,6 +222,9 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
>>  mm_closed:
>>          kvfree(pages);
>>          vma->userptr.pinned_pages = NULL;
>> +out_account:
>> +        if (xe_vma_is_pinned(vma))
>> +                xe_vma_mlock_free(vma, num_pages);
>>          return ret;
>>  }
>>
>> @@ -1004,6 +1042,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
>>                  unpin_user_pages_dirty_lock(vma->userptr.pinned_pages,
>>                                              vma->userptr.num_pinned,
>>                                              !read_only);
>> +                xe_vma_mlock_free(vma, xe_vma_size(vma) >> PAGE_SHIFT);
>>                  kvfree(vma->userptr.pinned_pages);
>>          }
>>
>> --
>> 2.41.0
>>
```diff
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ecbcad696b60..d9c000689002 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -34,6 +34,33 @@
 
 #define TEST_VM_ASYNC_OPS_ERROR
 
+/*
+ * Perform userptr PIN accounting against RLIMIT_MEMLOCK for now, similarly
+ * to how RDMA does this.
+ */
+static int xe_vma_mlock_alloc(struct xe_vma *vma, unsigned long num_pages)
+{
+        unsigned long lock_limit, new_pinned;
+        struct mm_struct *mm = vma->userptr.notifier.mm;
+
+        if (!can_do_mlock())
+                return -EPERM;
+
+        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+        new_pinned = atomic64_add_return(num_pages, &mm->pinned_vm);
+        if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+                atomic64_sub(num_pages, &mm->pinned_vm);
+                return -ENOMEM;
+        }
+
+        return 0;
+}
+
+static void xe_vma_mlock_free(struct xe_vma *vma, unsigned long num_pages)
+{
+        atomic64_sub(num_pages, &vma->userptr.notifier.mm->pinned_vm);
+}
+
 /**
  * xe_vma_userptr_check_repin() - Advisory check for repin needed
  * @vma: The userptr vma
@@ -89,9 +116,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
                                                !read_only);
                 pages = vma->userptr.pinned_pages;
         } else {
+                if (xe_vma_is_pinned(vma)) {
+                        ret = xe_vma_mlock_alloc(vma, num_pages);
+                        if (ret)
+                                return ret;
+                }
+
                 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
-                if (!pages)
-                        return -ENOMEM;
+                if (!pages) {
+                        ret = -ENOMEM;
+                        goto out_account;
+                }
         }
 
         pinned = ret = 0;
@@ -187,6 +222,9 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 mm_closed:
         kvfree(pages);
         vma->userptr.pinned_pages = NULL;
+out_account:
+        if (xe_vma_is_pinned(vma))
+                xe_vma_mlock_free(vma, num_pages);
         return ret;
 }
 
@@ -1004,6 +1042,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
                 unpin_user_pages_dirty_lock(vma->userptr.pinned_pages,
                                             vma->userptr.num_pinned,
                                             !read_only);
+                xe_vma_mlock_free(vma, xe_vma_size(vma) >> PAGE_SHIFT);
                 kvfree(vma->userptr.pinned_pages);
         }
 
```
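Since the change above charges pinned userptr pages against RLIMIT_MEMLOCK, user space that wants to pin a large range may need to check or raise its memlock limit first (or hold CAP_IPC_LOCK). The following is a minimal userspace sketch using only standard POSIX rlimit calls; the helper name is illustrative and no xe-specific API is assumed:

```c
#include <stddef.h>
#include <stdio.h>
#include <sys/resource.h>

/*
 * Minimal sketch: check and, if possible, raise the soft RLIMIT_MEMLOCK
 * before asking the driver to pin a large userptr range. Only standard
 * POSIX rlimit calls are used here.
 */
static int ensure_memlock_limit(size_t bytes_needed)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return -1;

	if (rl.rlim_cur >= bytes_needed)
		return 0;	/* soft limit already large enough */

	if (rl.rlim_max != RLIM_INFINITY && rl.rlim_max < bytes_needed) {
		fprintf(stderr,
			"hard RLIMIT_MEMLOCK (%llu) below required %zu bytes\n",
			(unsigned long long)rl.rlim_max, bytes_needed);
		return -1;	/* would need CAP_IPC_LOCK or an admin to raise it */
	}

	/* Raise the soft limit up to the hard limit. */
	rl.rlim_cur = bytes_needed;
	return setrlimit(RLIMIT_MEMLOCK, &rl);
}
```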
Account these pages against RLIMIT_MEMLOCK following how RDMA does this
with CAP_IPC_LOCK bypassing the limit.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c | 43 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 41 insertions(+), 2 deletions(-)
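The commit message notes that CAP_IPC_LOCK bypasses the limit, mirroring RDMA's pinned-page accounting. A process that expects to rely on that bypass can check whether it actually holds the capability; the sketch below assumes libcap's cap_get_proc()/cap_get_flag() interface (link with -lcap) and is not part of any xe API:

```c
#include <stdbool.h>
#include <stdio.h>
#include <sys/capability.h>	/* libcap; link with -lcap */

/*
 * Returns true if the calling process has CAP_IPC_LOCK in its effective set,
 * i.e. the kernel-side RLIMIT_MEMLOCK check above would be bypassed.
 * Error handling is kept minimal for brevity.
 */
static bool has_ipc_lock(void)
{
	cap_flag_value_t val = CAP_CLEAR;
	cap_t caps = cap_get_proc();

	if (!caps)
		return false;

	if (cap_get_flag(caps, CAP_IPC_LOCK, CAP_EFFECTIVE, &val))
		val = CAP_CLEAR;

	cap_free(caps);
	return val == CAP_SET;
}

int main(void)
{
	if (has_ipc_lock())
		printf("CAP_IPC_LOCK held: pinned userptr pages bypass RLIMIT_MEMLOCK\n");
	else
		printf("CAP_IPC_LOCK not held: pinned userptr pages count against RLIMIT_MEMLOCK\n");
	return 0;
}
```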