Message ID | 20230818150845.96679-2-thomas.hellstrom@linux.intel.com (mailing list archive)
State | New, archived |
Series | drm/xe: Support optional pinning of userptr pages
On Fri, Aug 18, 2023 at 05:08:42PM +0200, Thomas Hellström wrote:
> Use onion error unwind since that makes the function easier to read
> and extend. No functional change.
>
> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>

Reviewed-by: Matthew Brost <matthew.brost@intel.com>

> ---
>  drivers/gpu/drm/xe/xe_vm.c | 37 +++++++++++++++++++------------------
>  1 file changed, 19 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 2e99f865d7ec..8bf7f62e6548 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -116,19 +116,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
>  		kthread_unuse_mm(vma->userptr.notifier.mm);
>  		mmput(vma->userptr.notifier.mm);
>  	}
> -mm_closed:
>  	if (ret)
> -		goto out;
> +		goto out_release_pages;
>
>  	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
>  						pinned, 0,
>  						(u64)pinned << PAGE_SHIFT,
>  						xe_sg_segment_size(xe->drm.dev),
>  						GFP_KERNEL);
> -	if (ret) {
> -		vma->userptr.sg = NULL;
> -		goto out;
> -	}
> +	if (ret)
> +		goto out_release_pages;
> +
>  	vma->userptr.sg = &vma->userptr.sgt;
>
>  	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
> @@ -136,11 +134,8 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
>  			      DMA_BIDIRECTIONAL,
>  			      DMA_ATTR_SKIP_CPU_SYNC |
>  			      DMA_ATTR_NO_KERNEL_MAPPING);
> -	if (ret) {
> -		sg_free_table(vma->userptr.sg);
> -		vma->userptr.sg = NULL;
> -		goto out;
> -	}
> +	if (ret)
> +		goto out_free_sg;
>
>  	for (i = 0; i < pinned; ++i) {
>  		if (!read_only) {
> @@ -152,17 +147,23 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
>  		mark_page_accessed(pages[i]);
>  	}
>
> -out:
>  	release_pages(pages, pinned);
>  	kvfree(pages);
>
> -	if (!(ret < 0)) {
> -		vma->userptr.notifier_seq = notifier_seq;
> -		if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
> -			goto retry;
> -	}
> +	vma->userptr.notifier_seq = notifier_seq;
> +	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
> +		goto retry;
> +
> +	return 0;
>
> -	return ret < 0 ? ret : 0;
> +out_free_sg:
> +	sg_free_table(vma->userptr.sg);
> +	vma->userptr.sg = NULL;
> +out_release_pages:
> +	release_pages(pages, pinned);
> +mm_closed:
> +	kvfree(pages);
> +	return ret;
>  }
>
>  static bool preempt_fences_waiting(struct xe_vm *vm)
> --
> 2.41.0
>
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 2e99f865d7ec..8bf7f62e6548 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -116,19 +116,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 		kthread_unuse_mm(vma->userptr.notifier.mm);
 		mmput(vma->userptr.notifier.mm);
 	}
-mm_closed:
 	if (ret)
-		goto out;
+		goto out_release_pages;

 	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
 						pinned, 0,
 						(u64)pinned << PAGE_SHIFT,
 						xe_sg_segment_size(xe->drm.dev),
 						GFP_KERNEL);
-	if (ret) {
-		vma->userptr.sg = NULL;
-		goto out;
-	}
+	if (ret)
+		goto out_release_pages;
+
 	vma->userptr.sg = &vma->userptr.sgt;

 	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
@@ -136,11 +134,8 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 			      DMA_BIDIRECTIONAL,
 			      DMA_ATTR_SKIP_CPU_SYNC |
 			      DMA_ATTR_NO_KERNEL_MAPPING);
-	if (ret) {
-		sg_free_table(vma->userptr.sg);
-		vma->userptr.sg = NULL;
-		goto out;
-	}
+	if (ret)
+		goto out_free_sg;

 	for (i = 0; i < pinned; ++i) {
 		if (!read_only) {
@@ -152,17 +147,23 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 		mark_page_accessed(pages[i]);
 	}

-out:
 	release_pages(pages, pinned);
 	kvfree(pages);

-	if (!(ret < 0)) {
-		vma->userptr.notifier_seq = notifier_seq;
-		if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
-			goto retry;
-	}
+	vma->userptr.notifier_seq = notifier_seq;
+	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
+		goto retry;
+
+	return 0;

-	return ret < 0 ? ret : 0;
+out_free_sg:
+	sg_free_table(vma->userptr.sg);
+	vma->userptr.sg = NULL;
+out_release_pages:
+	release_pages(pages, pinned);
+mm_closed:
+	kvfree(pages);
+	return ret;
 }

 static bool preempt_fences_waiting(struct xe_vm *vm)
Use onion error unwind since that makes the function easier to read
and extend. No functional change.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)
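For readers unfamiliar with the idiom the commit message names, below is a
minimal, self-contained user-space sketch of onion error unwind. It is not xe
driver code; the function and variable names (demo_acquire_two, a, b) are
hypothetical. The point is that each acquired resource gets a matching unwind
label, and a failure jumps to the label that releases exactly the layers
acquired so far, in reverse order:

/*
 * Hypothetical illustration of onion error unwind, not xe code.
 * Layer 1 (a) is acquired first; if layer 2 (b) fails, we jump to
 * the label that peels back only layer 1.
 */
#include <stdlib.h>

static int demo_acquire_two(void)
{
	char *a, *b;
	int ret;

	a = malloc(32);			/* layer 1 */
	if (!a)
		return -1;		/* nothing acquired yet, plain return */

	b = malloc(64);			/* layer 2 */
	if (!b) {
		ret = -1;
		goto out_free_a;	/* unwind layer 1 only */
	}

	/* ... use a and b ... */

	free(b);			/* success path tears down in reverse */
	free(a);
	return 0;

out_free_a:
	free(a);
	return ret;
}

int main(void)
{
	return demo_acquire_two() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Compared with a single catch-all label, the patch above gains one label per
failure point (out_free_sg, out_release_pages, mm_closed), so each error path
releases exactly what had been set up when it failed.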