--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -501,7 +501,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1, 0);
if (ret)
goto fail;
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -581,7 +581,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1, 0);
if (ret)
goto fail;
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1521,7 +1521,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
check_speed_bin(&pdev->dev);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4, 0);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -819,7 +819,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a6xx_registers;
adreno_gpu->reg_offsets = a6xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1, 0);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -693,7 +693,8 @@ static int adreno_get_pwrlevels(struct device *dev,
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
- const struct adreno_gpu_funcs *funcs, int nr_rings)
+ const struct adreno_gpu_funcs *funcs, int nr_rings,
+ u32 mmu_features)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
@@ -712,6 +713,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.va_end = 0xffffffff;
adreno_gpu_config.nr_rings = nr_rings;
+ adreno_gpu_config.mmu_features = mmu_features;
adreno_get_pwrlevels(&pdev->dev, gpu);
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -228,7 +228,7 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
- int nr_rings);
+ int nr_rings, u32 mmu_features);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -798,7 +798,7 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
- uint64_t va_start, uint64_t va_end)
+ uint64_t va_start, uint64_t va_end, u32 mmu_features)
{
struct iommu_domain *iommu;
struct msm_gem_address_space *aspace;
@@ -826,6 +826,8 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return ERR_CAST(aspace);
}
+ msm_mmu_set_feature(aspace->mmu, mmu_features);
+
ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret) {
msm_gem_address_space_put(aspace);
@@ -909,7 +911,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
msm_devfreq_init(gpu);
gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
- config->va_start, config->va_end);
+ config->va_start, config->va_end, config->mmu_features);
if (gpu->aspace == NULL)
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -36,6 +36,7 @@ struct msm_gpu_config {
uint64_t va_start;
uint64_t va_end;
unsigned int nr_rings;
+ u32 mmu_features;
};
/* So far, with hardware that I've seen to date, we can have:
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -54,6 +54,7 @@ struct msm_mmu {
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags);
void *arg;
+ u32 features;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@@ -74,6 +75,16 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
+static inline void msm_mmu_set_feature(struct msm_mmu *mmu, u32 feature)
+{
+ mmu->features |= feature;
+}
+
+static inline bool msm_mmu_has_feature(struct msm_mmu *mmu, u32 feature)
+{
+ return (mmu->features & feature) ? true : false;
+}
+
/* DPU smmu driver initialize and cleanup functions */
int __init msm_smmu_driver_init(void);
void __exit msm_smmu_driver_cleanup(void);
Allow different Adreno targets to pass target-specific MMU features to
the generic layers. This will help conditionally configure certain
IOMMU features for certain Adreno targets. Also add a few simple
support functions for a bitmask of features that a specific MMU
implementation supports.

Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
---
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c   |  2 +-
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c   |  2 +-
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c   |  2 +-
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c   |  2 +-
 drivers/gpu/drm/msm/adreno/adreno_gpu.c |  4 +++-
 drivers/gpu/drm/msm/adreno/adreno_gpu.h |  2 +-
 drivers/gpu/drm/msm/msm_gpu.c           |  6 ++++--
 drivers/gpu/drm/msm/msm_gpu.h           |  1 +
 drivers/gpu/drm/msm/msm_mmu.h           | 11 +++++++++++
 9 files changed, 24 insertions(+), 8 deletions(-)
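
A note for reviewers on how the pieces connect: a target passes a
bitmask to adreno_gpu_init(), which stores it in struct msm_gpu_config;
msm_gpu_init() then hands it to msm_gpu_create_address_space(), which
applies it to the MMU with msm_mmu_set_feature(). A minimal sketch of a
target opting in, using MMU_FEATURE_EXAMPLE as a hypothetical flag
(this patch defines no flags yet, and every call site passes 0):

	/* Hypothetical flag, shown only to illustrate the plumbing;
	 * real flags would be defined in msm_mmu.h. */
	#define MMU_FEATURE_EXAMPLE BIT(0)

	/* In a target's init function, e.g. a6xx_gpu_init(): */
	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1,
			      MMU_FEATURE_EXAMPLE);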
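
On the implementation side, an MMU backend can then key optional
behavior off the bitmask via msm_mmu_has_feature(). Again a sketch with
the same hypothetical flag, not code from this series; the attach()
signature follows the funcs->attach(mmu, names, cnt) call shown in the
msm_gpu.c hunk above:

	static int example_mmu_attach(struct msm_mmu *mmu,
				      const char * const *names, int cnt)
	{
		if (msm_mmu_has_feature(mmu, MMU_FEATURE_EXAMPLE)) {
			/* configure the optional IOMMU behavior for
			 * targets that requested it */
		}

		return 0;
	}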