@@ -8,9 +8,20 @@ config HSA_AMD
depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
imply AMD_IOMMU_V2 if X86_64
select HMM_MIRROR
- select ZONE_DEVICE
- select DEVICE_PRIVATE
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR
help
Enable this if you want to use HSA features on AMD GPU devices.
+
+config HSA_AMD_SVM
+ bool "Enable HMM-based shared virtual memory manager"
+ depends on HSA_AMD && DEVICE_PRIVATE
+ default y
+ select HMM_MIRROR
+ select MMU_NOTIFIER
+ help
+ Enable this to use unified memory and managed memory in HIP. This
+ memory manager supports two modes of operation: one based on
+ preemptions and one based on page faults. To enable page-fault-based
+ memory management on most GFXv9 GPUs, set the module parameter
+ amdgpu.noretry=0.
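
For context (not part of the diff): since HSA_AMD no longer selects DEVICE_PRIVATE, SVM support can now be absent at build time, so code in always-built files has to compile either way. The rest of the patch handles this with compile-time checks and inline stubs; a minimal sketch of the pattern, with a hypothetical helper name, is below. The chardev and header hunks that follow apply the same idea.

/* Illustrative only, assuming CONFIG_HSA_AMD_SVM as defined above:
 * IS_ENABLED() from <linux/kconfig.h> evaluates to 1 only when the
 * option is set, so callers can branch without extra #ifdef blocks.
 */
#include <linux/kconfig.h>
#include <linux/types.h>

static inline bool kfd_svm_enabled(void)	/* hypothetical helper */
{
	return IS_ENABLED(CONFIG_HSA_AMD_SVM);
}
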
@@ -54,9 +54,7 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_dbgdev.o \
$(AMDKFD_PATH)/kfd_dbgmgr.o \
$(AMDKFD_PATH)/kfd_smi_events.o \
- $(AMDKFD_PATH)/kfd_crat.o \
- $(AMDKFD_PATH)/kfd_svm.o \
- $(AMDKFD_PATH)/kfd_migrate.o
+ $(AMDKFD_PATH)/kfd_crat.o
ifneq ($(CONFIG_AMD_IOMMU_V2),)
AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
@@ -65,3 +63,8 @@ endif
ifneq ($(CONFIG_DEBUG_FS),)
AMDKFD_FILES += $(AMDKFD_PATH)/kfd_debugfs.o
endif
+
+ifneq ($(CONFIG_HSA_AMD_SVM),)
+AMDKFD_FILES += $(AMDKFD_PATH)/kfd_svm.o \
+ $(AMDKFD_PATH)/kfd_migrate.o
+endif
@@ -1768,6 +1768,7 @@ static int kfd_ioctl_set_xnack_mode(struct file *filep,
return r;
}
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_svm_args *args = data;
@@ -1793,6 +1794,12 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
return r;
}
+#else
+static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
+{
+ return -EPERM;
+}
+#endif
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
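
Not part of the diff, just an illustration of the userspace-visible effect: with SVM compiled out, the SVM entry stays in the ioctl table but always fails with EPERM, so a runtime can probe for SVM support. A rough sketch, assuming the AMDKFD_IOC_SVM ioctl and struct kfd_ioctl_svm_args from the kfd_ioctl.h UAPI header:

/* Probe sketch: EPERM from the SVM ioctl means SVM is unavailable on
 * this kernel (compiled out here; the built-in handler can also reject
 * unsupported configurations the same way).
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_svm_args args;
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));		/* zeroed no-op request */
	if (ioctl(fd, AMDKFD_IOC_SVM, &args) < 0 && errno == EPERM)
		printf("KFD SVM not available\n");

	close(fd);
	return 0;
}
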
@@ -25,6 +25,8 @@
#ifndef KFD_MIGRATE_H_
#define KFD_MIGRATE_H_
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+
#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -44,17 +46,20 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm);
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
-#if defined(CONFIG_DEVICE_PRIVATE)
int svm_migrate_init(struct amdgpu_device *adev);
void svm_migrate_fini(struct amdgpu_device *adev);
#else
+
static inline int svm_migrate_init(struct amdgpu_device *adev)
{
- DRM_WARN_ONCE("DEVICE_PRIVATE kernel config option is not enabled, "
- "add CONFIG_DEVICE_PRIVATE=y in config file to fix\n");
- return -ENODEV;
+ return 0;
+}
+static inline void svm_migrate_fini(struct amdgpu_device *adev)
+{
+ /* empty */
}
-static inline void svm_migrate_fini(struct amdgpu_device *adev) {}
-#endif
+
+#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
+
#endif /* KFD_MIGRATE_H_ */
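
Because kfd_migrate.o and kfd_svm.o are only built when CONFIG_HSA_AMD_SVM is set (per the Makefile hunk above), the !CONFIG_HSA_AMD_SVM stubs here deliberately return 0 and do nothing, unlike the old DEVICE_PRIVATE fallback that warned and returned -ENODEV. That lets device init and teardown paths call them unconditionally. A minimal sketch of that caller pattern (the function name is hypothetical, not taken from the driver):

/* Sketch only: with the stubs above, callers need no #ifdef of their own. */
#include "kfd_migrate.h"

static int kfd_device_init_example(struct amdgpu_device *adev)
{
	int r;

	r = svm_migrate_init(adev);	/* no-op returning 0 when SVM is off */
	if (r)
		return r;

	/* ... remaining device initialization ... */
	return 0;
}
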
@@ -25,6 +25,8 @@
#ifndef KFD_SVM_H_
#define KFD_SVM_H_
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+
#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -173,4 +175,32 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
void svm_range_free_dma_mappings(struct svm_range *prange);
void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm);
+#else
+
+struct kfd_process;
+
+static inline int svm_range_list_init(struct kfd_process *p)
+{
+ return 0;
+}
+static inline void svm_range_list_fini(struct kfd_process *p)
+{
+ /* empty */
+}
+
+static inline int svm_range_restore_pages(struct amdgpu_device *adev,
+ unsigned int pasid, uint64_t addr)
+{
+ return -EFAULT;
+}
+
+static inline int svm_range_schedule_evict_svm_bo(
+ struct amdgpu_amdkfd_fence *fence)
+{
+ WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled");
+ return -EINVAL;
+}
+
+#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
+
#endif /* KFD_SVM_H_ */
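
The kfd_svm.h stubs follow the same approach: with SVM compiled out, svm_range_restore_pages() reports -EFAULT so a retry fault is treated as an ordinary, unrecoverable GPU fault, and svm_range_schedule_evict_svm_bo() only warns because no SVM eviction fence should exist in that configuration. A simplified sketch of how the -EFAULT stub composes with a retry-fault handler (illustrative, not the actual amdgpu fault path):

/* Sketch only, assuming the declarations from kfd_svm.h above. */
#include <linux/types.h>
#include "kfd_svm.h"

static int gpu_retry_fault_example(struct amdgpu_device *adev,
				   unsigned int pasid, uint64_t addr)
{
	/* Ask SVM to migrate/map the pages backing the faulting address. */
	int r = svm_range_restore_pages(adev, pasid, addr);

	if (r)
		return r;	/* always -EFAULT when CONFIG_HSA_AMD_SVM=n */

	return 0;		/* pages restored; the GPU retries the access */
}
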