@@ -38,6 +38,7 @@ xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
if (!range)
return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&range->garbage_collector_link);
xe_vm_get(gpusvm_to_vm(gpusvm));
return &range->base;
@@ -54,6 +55,24 @@ static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
return container_of(r, struct xe_svm_range, base);
}
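+/*
+ * Mark the range as unmapped and queue it on the VM's garbage collector list.
+ * A range is only queued once; the actual GPU unbind and destruction of the
+ * range are deferred to the garbage collector worker.
+ */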
+static void
+xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct xe_device *xe = vm->xe;
+
+ drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ if (list_empty(&range->garbage_collector_link))
+ list_add_tail(&range->garbage_collector_link,
+ &vm->svm.garbage_collector.range_list);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
+ &vm->svm.garbage_collector.work);
+}
+
static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
const struct mmu_notifier_range *mmu_range,
@@ -98,7 +117,9 @@ xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
xe_svm_assert_in_notifier(vm);
drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
- /* TODO: Add range to garbage collector if VM is not closed */
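+ /*
+ * Defer destruction of UNMAPped ranges to the garbage collector and
+ * skip it entirely if the VM is already closed.
+ */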
+ if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
+ xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
+ mmu_range);
}
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
@@ -198,6 +219,63 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_svm_range_notifier_event_end(vm, r, mmu_range);
}
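+/*
+ * Tear down a single SVM range. The GPU unbind is still a TODO here, so for
+ * now the range is only removed from GPU SVM.
+ */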
+static int __xe_svm_garbage_collector(struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ /* TODO: Do unbind */
+
+ drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
+
+ return 0;
+}
+
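+/*
+ * Drain the garbage collector list, destroying each range in turn. Callers
+ * must hold vm->lock for writing; on failure the VM is killed and the error
+ * returned.
+ */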
+static int xe_svm_garbage_collector(struct xe_vm *vm)
+{
+ struct xe_svm_range *range;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (xe_vm_is_closed_or_banned(vm))
+ return -ENOENT;
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ for (;;) {
+ range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
+ typeof(*range),
+ garbage_collector_link);
+ if (!range)
+ break;
+
+ list_del(&range->garbage_collector_link);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ err = __xe_svm_garbage_collector(vm, range);
+ if (err) {
+ drm_warn(&vm->xe->drm,
+ "Garbage collection failed: %pe\n",
+ ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
+ }
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ }
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ return 0;
+}
+
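+/* Worker entry point: take vm->lock and run the garbage collector. */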
+static void xe_svm_garbage_collector_work_func(struct work_struct *w)
+{
+ struct xe_vm *vm = container_of(w, struct xe_vm,
+ svm.garbage_collector.work);
+
+ down_write(&vm->lock);
+ xe_svm_garbage_collector(vm);
+ up_write(&vm->lock);
+}
+
static const struct drm_gpusvm_ops gpusvm_ops = {
.range_alloc = xe_svm_range_alloc,
.range_free = xe_svm_range_free,
@@ -222,6 +300,11 @@ int xe_svm_init(struct xe_vm *vm)
{
int err;
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
current->mm, NULL, 0, vm->size,
SZ_512M, &gpusvm_ops, fault_chunk_sizes,
@@ -243,6 +326,7 @@ int xe_svm_init(struct xe_vm *vm)
void xe_svm_close(struct xe_vm *vm)
{
xe_assert(vm->xe, xe_vm_is_closed(vm));
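+ /* Flush any pending garbage collector work before the VM is torn down */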
+ flush_work(&vm->svm.garbage_collector.work);
}
/**
@@ -292,7 +376,10 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
retry:
- /* TODO: Run garbage collector */
+ /* Always process UNMAPs first so the view of SVM ranges is current */
+ err = xe_svm_garbage_collector(vm);
+ if (err)
+ return err;
r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
xe_vma_start(vma), xe_vma_end(vma),
@@ -19,6 +19,11 @@ struct xe_vma;
struct xe_svm_range {
/** @base: base drm_gpusvm_range */
struct drm_gpusvm_range base;
+ /**
+ * @garbage_collector_link: Link into the VM's garbage collector SVM range
+ * list. Protected by the VM's garbage collector lock.
+ */
+ struct list_head garbage_collector_link;
/**
* @tile_present: Tile mask of binding is present for this range.
* Protected by GPU SVM notifier lock.
@@ -3175,6 +3175,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
}
+ /* Ensure all UNMAPs are visible */
+ if (xe_vm_in_fault_mode(vm))
+ flush_work(&vm->svm.garbage_collector.work);
+
err = down_write_killable(&vm->lock);
if (err)
goto put_exec_queue;
@@ -146,6 +146,24 @@ struct xe_vm {
struct {
/** @svm.gpusvm: base GPUSVM used to track fault allocations */
struct drm_gpusvm gpusvm;
+ /**
+ * @svm.garbage_collector: Garbage collector which is used to unmap
+ * SVM ranges' GPU bindings and destroy the ranges.
+ */
+ struct {
+ /** @svm.garbage_collector.lock: Protects the range list */
+ spinlock_t lock;
+ /**
+ * @svm.garbage_collector.range_list: List of SVM ranges
+ * in the garbage collector.
+ */
+ struct list_head range_list;
+ /**
+ * @svm.garbage_collector.work: Work item in which the
+ * garbage collector runs.
+ */
+ struct work_struct work;
+ } garbage_collector;
} svm;
struct xe_device *xe;