@@ -116,19 +116,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 		kthread_unuse_mm(vma->userptr.notifier.mm);
 		mmput(vma->userptr.notifier.mm);
 	}
 
-mm_closed:
 	if (ret)
-		goto out;
+		goto out_release_pages;
 
 	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
 						pinned, 0,
 						(u64)pinned << PAGE_SHIFT,
 						xe_sg_segment_size(xe->drm.dev),
 						GFP_KERNEL);
-	if (ret) {
-		vma->userptr.sg = NULL;
-		goto out;
-	}
+	if (ret)
+		goto out_release_pages;
+
 	vma->userptr.sg = &vma->userptr.sgt;
 	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
@@ -136,11 +134,8 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 			      DMA_BIDIRECTIONAL,
 			      DMA_ATTR_SKIP_CPU_SYNC |
 			      DMA_ATTR_NO_KERNEL_MAPPING);
-	if (ret) {
-		sg_free_table(vma->userptr.sg);
-		vma->userptr.sg = NULL;
-		goto out;
-	}
+	if (ret)
+		goto out_free_sg;
 
 	for (i = 0; i < pinned; ++i) {
 		if (!read_only) {
@@ -152,17 +147,23 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 		mark_page_accessed(pages[i]);
 	}
 
-out:
 	release_pages(pages, pinned);
 	kvfree(pages);
 
-	if (!(ret < 0)) {
-		vma->userptr.notifier_seq = notifier_seq;
-		if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
-			goto retry;
-	}
+	vma->userptr.notifier_seq = notifier_seq;
+	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
+		goto retry;
+
+	return 0;
 
-	return ret < 0 ? ret : 0;
+out_free_sg:
+	sg_free_table(vma->userptr.sg);
+	vma->userptr.sg = NULL;
+out_release_pages:
+	release_pages(pages, pinned);
+mm_closed:
+	kvfree(pages);
+	return ret;
 }
 
 static bool preempt_fences_waiting(struct xe_vm *vm)
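
The restructured exit paths follow the usual kernel goto-unwind convention: each failure site jumps to a label that undoes only what has already been acquired, the labels appear in reverse order of acquisition and fall through into one another, and the temporary pages array is freed on the success path as well as on every error path, which removes the old "ret < 0" special-casing at a single exit. Below is a minimal standalone sketch of the same shape (not the driver code itself); build_table(), map_table(), and free_table() are hypothetical stand-ins for sg_alloc_table_from_pages_segment(), dma_map_sgtable(), and sg_free_table().

#include <errno.h>
#include <stdlib.h>

struct obj {
	void *table;	/* persistent resource, kept on success */
};

/* Hypothetical acquire/release steps; each acquire can fail. */
static int build_table(struct obj *o)
{
	o->table = malloc(32);
	return o->table ? 0 : -ENOMEM;
}

static void free_table(struct obj *o)
{
	free(o->table);
	o->table = NULL;
}

static int map_table(struct obj *o)
{
	return 0;	/* pretend the mapping step succeeded */
}

static int setup(struct obj *o)
{
	void *tmp;
	int ret;

	/* Scratch buffer freed on success and failure alike,
	 * like the pages array above. */
	tmp = malloc(64);
	if (!tmp)
		return -ENOMEM;

	ret = build_table(o);		/* like sg_alloc_table_from_pages_segment() */
	if (ret)
		goto out_free_tmp;	/* nothing else to undo yet */

	ret = map_table(o);		/* like dma_map_sgtable() */
	if (ret)
		goto out_free_table;	/* undo only what succeeded */

	free(tmp);			/* success path also frees the scratch */
	return 0;

out_free_table:
	free_table(o);			/* undo build_table() */
out_free_tmp:
	free(tmp);			/* labels fall through, newest first */
	return ret;
}

int main(void)
{
	struct obj o = { 0 };

	return setup(&o) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The label order mirrors the patch: the deepest failure unwinds everything, shallower failures skip the cleanup for resources they never obtained, and every exit returns through one tail that releases the scratch allocation.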