@@ -938,8 +938,8 @@ static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
kfree(bo);
}
-static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
- size_t size)
+static struct a6xx_gmu_bo *
+a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, size_t size, uint64_t iova)
{
struct a6xx_gmu_bo *bo;
int ret, count, i;
@@ -964,13 +964,13 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
goto err;
}
- bo->iova = gmu->uncached_iova_base;
+ bo->iova = iova ?: gmu->uncached_iova_base;
for (i = 0; i < count; i++) {
ret = iommu_map(gmu->domain,
bo->iova + (PAGE_SIZE * i),
page_to_phys(bo->pages[i]), PAGE_SIZE,
- IOMMU_READ | IOMMU_WRITE);
+ IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
@@ -990,7 +990,8 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
goto err;
/* Align future IOVA addresses on 1MB boundaries */
- gmu->uncached_iova_base += ALIGN(size, SZ_1M);
+ if (!iova)
+ gmu->uncached_iova_base += ALIGN(size, SZ_1M);
return bo;
@@ -1331,12 +1332,12 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_put_device;
/* Allocate memory for the HFI queues */
- gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K, 0);
if (IS_ERR(gmu->hfi))
goto err_memory;
/* Allocate memory for the GMU debug region */
- gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K, 0);
if (IS_ERR(gmu->debug))
goto err_memory;
Signed-off-by: Jonathan Marek <jonathan@marek.ca>
---
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
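As a usage sketch (not part of this patch): with the new third argument a caller can pin a GMU buffer at a fixed GMU-side IOVA, while passing 0 keeps the existing behaviour of carving from gmu->uncached_iova_base; the added IOMMU_PRIV flag additionally marks the mapping as privileged. The example_alloc() helper and the 0x60000000 address below are made up for illustration only and do not correspond to a real A6xx GMU mapping.

/* Sketch only, assuming the new a6xx_gmu_memory_alloc(gmu, size, iova) signature. */
static int example_alloc(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *fixed, *dynamic;

	/* Place this buffer at an exact IOVA (hypothetical address) */
	fixed = a6xx_gmu_memory_alloc(gmu, SZ_16K, 0x60000000);
	if (IS_ERR(fixed))
		return PTR_ERR(fixed);

	/* iova == 0 falls back to the uncached IOVA allocator, as before */
	dynamic = a6xx_gmu_memory_alloc(gmu, SZ_16K, 0);
	if (IS_ERR(dynamic))
		return PTR_ERR(dynamic);

	return 0;
}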