@@ -16,6 +16,8 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP 0x04
#define NVIF_VMM_V0_HMM_MAP 0x05
#define NVIF_VMM_V0_HMM_UNMAP 0x06
+#define NVIF_VMM_V0_HMM_INIT 0x07
+#define NVIF_VMM_V0_HMM_FINI 0x08

struct nvif_vmm_page_v0 {
__u8 version;
@@ -78,4 +80,11 @@ struct nvif_vmm_hmm_unmap_v0 {
__u64 addr;
__u64 npages;
};
+
+struct nvif_vmm_hmm_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 start;
+ __u64 end;
+};
#endif
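
Both new methods share one argument layout. The seven pad bytes keep "start"
and "end" naturally aligned, so the structure is identical for 32-bit and
64-bit clients; a compile-time check along these lines (illustrative only,
not part of the patch) pins the ABI size:

    _Static_assert(sizeof(struct nvif_vmm_hmm_v0) == 24,
                   "nvif_vmm_hmm_v0 ABI size changed");
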
@@ -39,6 +39,8 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64);
+int nvif_vmm_hmm_init(struct nvif_vmm *vmm, u64 hstart, u64 hend);
+void nvif_vmm_hmm_fini(struct nvif_vmm *vmm, u64 hstart, u64 hend);
int nvif_vmm_hmm_map(struct nvif_vmm *vmm, u64 addr, u64 npages, u64 *pages);
int nvif_vmm_hmm_unmap(struct nvif_vmm *vmm, u64 addr, u64 npages);
#endif
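
The intended pairing, as a minimal sketch (the caller and its name are
hypothetical): carve [hstart, hend) out of the process address space for
conventional allocations, mirror the remainder through HMM, and drop the
mirror again on teardown. The wrappers below implement the two entry points.

    /* Hypothetical caller: mirror everything outside [hstart, hend). */
    static int
    example_enable_svm(struct nvif_vmm *vmm, u64 hstart, u64 hend)
    {
            int ret = nvif_vmm_hmm_init(vmm, hstart, hend);
            if (ret)
                    return ret;
            /* ... fault pages in with nvif_vmm_hmm_map() as needed ... */
            nvif_vmm_hmm_fini(vmm, hstart, hend);
            return 0;
    }
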
@@ -32,6 +32,65 @@ nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
sizeof(struct nvif_vmm_unmap_v0));
}

+int
+nvif_vmm_hmm_init(struct nvif_vmm *vmm, u64 hstart, u64 hend)
+{
+ struct nvif_vmm_hmm_v0 args;
+ int ret;
+
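+ /* Mirror everything below the carve-out, [PAGE_SIZE, hstart); the
+  * range starts at PAGE_SIZE so the zero page is never mirrored.
+  */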
+ if (hstart > PAGE_SIZE) {
+ args.version = 0;
+ args.start = PAGE_SIZE;
+ args.end = hstart;
+ ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_HMM_INIT,
+ &args, sizeof(args));
+ if (ret)
+ return ret;
+ }
+
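+ /* Mirror everything above the carve-out, [hend, TASK_SIZE). */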
+ args.version = 0;
+ args.start = hend;
+ args.end = TASK_SIZE;
+ ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_HMM_INIT,
+ &args, sizeof(args));
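+ /* If registering the upper range failed, unwind the lower range
+  * (when one was registered) so the VMM is left unchanged on error.
+  */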
+ if (ret && hstart > PAGE_SIZE) {
+ args.version = 0;
+ args.start = PAGE_SIZE;
+ args.end = hstart;
+ nvif_object_mthd(&vmm->object, NVIF_VMM_V0_HMM_FINI,
+ &args, sizeof(args));
+ }
+ return ret;
+}
+
+void
+nvif_vmm_hmm_fini(struct nvif_vmm *vmm, u64 hstart, u64 hend)
+{
+ struct nvif_vmm_hmm_v0 args;
+
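+ /* Tear down both ranges registered by nvif_vmm_hmm_init(). */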
+ if (hstart > PAGE_SIZE) {
+ args.version = 0;
+ args.start = PAGE_SIZE;
+ args.end = hstart;
+ nvif_object_mthd(&vmm->object, NVIF_VMM_V0_HMM_FINI,
+ &args, sizeof(args));
+ }
+
+ args.version = 0;
+ args.start = hend;
+ args.end = TASK_SIZE;
+ nvif_object_mthd(&vmm->object, NVIF_VMM_V0_HMM_FINI,
+ &args, sizeof(args));
+}
+
int
nvif_vmm_hmm_map(struct nvif_vmm *vmm, u64 addr, u64 npages, u64 *pages)
{
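
For a concrete picture of what nvif_vmm_hmm_init() emits, assume a
hypothetical carve-out of [1 TiB, 1.5 TiB):

    ret = nvif_vmm_hmm_init(vmm, 0x10000000000ULL, 0x18000000000ULL);
    /*
     * Issues HMM_INIT { start = PAGE_SIZE,     end = 0x10000000000 },
     * then   HMM_INIT { start = 0x18000000000, end = TASK_SIZE },
     * unwinding the first range with HMM_FINI if the second call fails.
     */

On the kernel side, the uvmm object gains handlers for the two methods:
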
@@ -274,6 +274,47 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0;
}

+static int
+nvkm_uvmm_mthd_hmm_init(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_hmm_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+
+ if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
+ return ret;
+
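+ /* The vma created for the HMM range remains tracked by the vmm;
+  * it is not handed back to the client.
+  */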
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_hmm_init(vmm, args->v0.start, args->v0.end, &vma);
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_hmm_fini(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_hmm_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ int ret = -ENOSYS;
+
+ if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
+ return ret;
+
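+ /* Teardown is unconditional; this method always reports success. */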
+ mutex_lock(&vmm->mutex);
+ nvkm_vmm_hmm_fini(vmm, args->v0.start, args->v0.end);
+ mutex_unlock(&vmm->mutex);
+ return 0;
+}
+
static int
nvkm_uvmm_mthd_hmm_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
@@ -321,6 +358,8 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
case NVIF_VMM_V0_PUT : return nvkm_uvmm_mthd_put (uvmm, argv, argc);
case NVIF_VMM_V0_MAP : return nvkm_uvmm_mthd_map (uvmm, argv, argc);
case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
+ case NVIF_VMM_V0_HMM_INIT : return nvkm_uvmm_mthd_hmm_init (uvmm, argv, argc);
+ case NVIF_VMM_V0_HMM_FINI : return nvkm_uvmm_mthd_hmm_fini (uvmm, argv, argc);
case NVIF_VMM_V0_HMM_MAP : return nvkm_uvmm_mthd_hmm_map (uvmm, argv, argc);
case NVIF_VMM_V0_HMM_UNMAP: return nvkm_uvmm_mthd_hmm_unmap(uvmm, argv, argc);
default: