@@ -950,6 +950,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
u64 start, u64 end,
bool read_only,
bool is_null,
+ bool pin,
u8 tile_mask)
{
struct xe_vma *vma;
@@ -981,6 +982,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.flags |= XE_VMA_READ_ONLY;
if (is_null)
vma->gpuva.flags |= DRM_GPUVA_SPARSE;
+ if (pin)
+ vma->gpuva.flags |= XE_VMA_PINNED;
if (tile_mask) {
vma->tile_mask = tile_mask;
@@ -2382,6 +2385,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.read_only =
operation & XE_VM_BIND_FLAG_READONLY;
op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
+ op->map.pin = operation & XE_VM_BIND_FLAG_PIN;
}
break;
case XE_VM_BIND_OP_UNMAP:
@@ -2446,7 +2450,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
}
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
- u8 tile_mask, bool read_only, bool is_null)
+ u8 tile_mask, bool read_only, bool is_null,
+ bool pin)
{
struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
struct xe_vma *vma;
@@ -2462,7 +2467,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
}
vma = xe_vma_create(vm, bo, op->gem.offset,
op->va.addr, op->va.addr +
- op->va.range - 1, read_only, is_null,
+ op->va.range - 1, read_only, is_null, pin,
tile_mask);
if (bo)
xe_bo_unlock(bo, &ww);
@@ -2577,7 +2582,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vma = new_vma(vm, &op->base.map,
op->tile_mask, op->map.read_only,
- op->map.is_null);
+ op->map.is_null, op->map.pin);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto free_fence;
@@ -2602,10 +2607,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
bool is_null =
op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE;
+ bool pin =
+ op->base.remap.unmap->va->flags &
+ XE_VMA_PINNED;
vma = new_vma(vm, op->base.remap.prev,
op->tile_mask, read_only,
- is_null);
+ is_null, pin);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto free_fence;
@@ -2638,10 +2646,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
bool is_null =
op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE;
+ bool pin =
+ op->base.remap.unmap->va->flags &
+ XE_VMA_PINNED;
vma = new_vma(vm, op->base.remap.next,
op->tile_mask, read_only,
- is_null);
+ is_null, pin);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto free_fence;
@@ -3146,11 +3157,12 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
#define SUPPORTED_FLAGS \
(FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
- XE_VM_BIND_FLAG_NULL | 0xffff)
+ XE_VM_BIND_FLAG_NULL | XE_VM_BIND_FLAG_PIN | 0xffff)
#else
#define SUPPORTED_FLAGS \
(XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
- XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff)
+ XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | \
+ XE_VM_BIND_FLAG_PIN | 0xffff)
#endif
#define XE_64K_PAGE_MASK 0xffffull
@@ -3220,6 +3232,13 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
goto free_bind_ops;
}
+ /* TODO: Support OP_PREFETCH, OP_MAP */
+ if (XE_IOCTL_DBG(xe, (op & XE_VM_BIND_FLAG_PIN) &&
+ VM_BIND_OP(op) != XE_VM_BIND_OP_MAP_USERPTR)) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
@@ -360,6 +360,8 @@ struct xe_vma_op_map {
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
+ /** @pin: pin underlying memory */
+ bool pin;
};
/** struct xe_vma_op_remap - VMA remap operation */
@@ -631,6 +631,24 @@ struct drm_xe_vm_bind_op {
* intended to implement VK sparse bindings.
*/
#define XE_VM_BIND_FLAG_NULL (0x1 << 19)
+ /*
+ * When the PIN flag is set, the user requests that the underlying
+ * backing store of the vma be pinned: it remains resident while
+ * bound and its underlying physical memory will not change. For
+ * userptr VMAs this means that if the user performs an operation
+ * that changes the underlying pages of the CPU virtual space, the
+ * corresponding pinned GPU virtual space will not pick up the new
+ * memory unless an OP_UNMAP followed by an OP_MAP_USERPTR is
+ * performed.
+ * Pinned userptr memory is accounted in the same way as
+ * mlock(2), and if pinning fails one of the following error
+ * codes may be returned:
+ * -EINVAL: The memory region does not support pinning.
+ * -EPERM: The process is not permitted to pin.
+ * -ENOMEM: Pinning would exceed the allowed pinning limit.
+ * For userptr memory, CAP_IPC_LOCK bypasses the limit check.
+ */
+#define XE_VM_BIND_FLAG_PIN (0x1 << 20)
/** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */
__u32 op;
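
For illustration only, a minimal userspace sketch of how the new flag is
intended to be used. It is not part of the patch: the header path, the
bind variable and the omitted ioctl/submit plumbing are assumptions;
only the struct, operation and flag names come from the uAPI above. Per
the @op documentation, the operation goes in the low 16 bits and the
bind flags, including PIN, in the upper bits.

	#include <drm/xe_drm.h>	/* assumed install path of the xe uAPI header */

	struct drm_xe_vm_bind_op bind = { 0 };

	/* Userptr address, GPU VA and range setup omitted. */

	/* Request a pinned userptr binding: operation in the low 16 bits
	 * of @op, flags (including PIN) in the upper bits. */
	bind.op = XE_VM_BIND_OP_MAP_USERPTR | XE_VM_BIND_FLAG_PIN;

	/* If the CPU pages backing the userptr range later change, the
	 * pinned GPU mapping does not follow them; it has to be unmapped
	 * and re-mapped to pick up the new memory: */
	bind.op = XE_VM_BIND_OP_UNMAP;
	/* ... submit the unbind, then rebind ... */
	bind.op = XE_VM_BIND_OP_MAP_USERPTR | XE_VM_BIND_FLAG_PIN;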