@@ -434,3 +434,18 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
	return err;
}
+
+/**
+ * xe_svm_has_mapping() - SVM has mappings
+ * @vm: The VM.
+ * @start: Start address (inclusive).
+ * @end: End address (exclusive).
+ *
+ * Check if an address range has SVM mappings.
+ *
+ * Return: True if the address range has an SVM mapping, False otherwise
+ */
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
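+	/*
+	 * Delegate to the common GPU SVM code, which tracks this VM's
+	 * mapped ranges and checks them for overlap with [start, end).
+	 */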
+	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
+}
@@ -57,6 +57,8 @@ void xe_svm_close(struct xe_vm *vm);
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic);
+
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
@@ -86,6 +88,12 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
{
	return 0;
}
+
+static inline
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
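+	/* SVM support is not compiled in, so no mappings can exist. */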
+	return false;
+}
#endif

/**
@@ -2485,6 +2485,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
			struct xe_vma *old =
				gpuva_to_vma(op->base.remap.unmap->va);
			bool skip = xe_vma_is_cpu_addr_mirror(old);
+			u64 start = xe_vma_start(old), end = xe_vma_end(old);
+
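+			/*
+			 * prev/next describe the pieces of the old VMA that
+			 * remain bound after the remap, so narrow the checked
+			 * range to the hole that is actually being unmapped.
+			 */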
+			if (op->base.remap.prev)
+				start = op->base.remap.prev->va.addr +
+					op->base.remap.prev->va.range;
+			if (op->base.remap.next)
+				end = op->base.remap.next->va.addr;
+
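+			/*
+			 * If the region being removed from a CPU address
+			 * mirror VMA still has GPU SVM mappings, the remap
+			 * cannot proceed; reject it with -EBUSY.
+			 */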
+			if (xe_vma_is_cpu_addr_mirror(old) &&
+			    xe_svm_has_mapping(vm, start, end))
+				return -EBUSY;

			op->remap.start = xe_vma_start(old);
			op->remap.range = xe_vma_size(old);
@@ -2566,6 +2577,11 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
		case DRM_GPUVA_OP_UNMAP:
			vma = gpuva_to_vma(op->base.unmap.va);
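+
+			/*
+			 * Unbinding a CPU address mirror VMA while GPU SVM
+			 * still has mappings in its range is not supported;
+			 * reject the bind with -EBUSY until those mappings
+			 * have been invalidated.
+			 */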
+			if (xe_vma_is_cpu_addr_mirror(vma) &&
+			    xe_svm_has_mapping(vm, xe_vma_start(vma),
+					       xe_vma_end(vma)))
+				return -EBUSY;
+
			if (!xe_vma_is_cpu_addr_mirror(vma))
				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
			break;