@@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu)
}

static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
struct sg_dma_page_iter dma_iter;
unsigned prot_bits = 0;

+ WARN_ON(off != 0);
+
if (prot & IOMMU_WRITE)
prot_bits |= 1;
if (prot & IOMMU_READ)
@@ -434,7 +434,7 @@ static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
vma = lookup_vma(obj, vm);

if (!vma) {
- vma = msm_gem_vma_new(vm, obj, range_start, range_end);
+ vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
} else {
GEM_WARN_ON(vma->va.addr < range_start);
GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
@@ -472,7 +472,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
if (IS_ERR(pages))
return PTR_ERR(pages);

- return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt);
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -140,9 +140,9 @@ struct msm_gem_vma {
struct drm_gpuva *
msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj,
- u64 range_start, u64 range_end);
+ u64 offset, u64 range_start, u64 range_end);
void msm_gem_vma_purge(struct drm_gpuva *vma);
-int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt, int size);
+int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt);
void msm_gem_vma_close(struct drm_gpuva *vma);

struct msm_gem_object {
@@ -38,8 +38,7 @@ void msm_gem_vma_purge(struct drm_gpuva *vma)
/* Map and pin vma: */
int
-msm_gem_vma_map(struct drm_gpuva *vma, int prot,
- struct sg_table *sgt, int size)
+msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
{
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
struct msm_gem_vm *vm = to_msm_vm(vma->vm);
@@ -62,8 +61,9 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot,
* Revisit this if we can come up with a scheme to pre-alloc pages
* for the pgtable in map/unmap ops.
*/
- ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt, size, prot);
-
+ ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt,
+ vma->gem.offset, vma->va.range,
+ prot);
if (ret) {
msm_vma->mapped = false;
}
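With the explicit size argument gone, the mapping window comes entirely from the drm_gpuva that msm_gem_vma_new() initialised: vma->gem.offset is where in the BO the mapping starts and vma->va.range is how many bytes to map. A worked illustration of the call above, with numbers of my own choosing (a VMA covering four pages of a BO, starting at its third page):

/* set up by msm_gem_vma_new(vm, obj, 2 * PAGE_SIZE, va, va + 4 * PAGE_SIZE),
 * so vma->gem.offset == 2 * PAGE_SIZE and vma->va.range == 4 * PAGE_SIZE,
 * and the call above expands to:
 */
ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt,
                          2 * PAGE_SIZE,   /* skip the first two pages of the BO */
                          4 * PAGE_SIZE,   /* map the next four pages */
                          prot);
/* the pre-patch behaviour is the special case offset == 0, range == obj->size */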
@@ -97,7 +97,7 @@ void msm_gem_vma_close(struct drm_gpuva *vma)
/* Create a new vma and allocate an iova for it */
struct drm_gpuva *
msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
- u64 range_start, u64 range_end)
+ u64 offset, u64 range_start, u64 range_end)
{
struct msm_gem_vm *vm = to_msm_vm(_vm);
struct drm_gpuvm_bo *vm_bo;
@@ -109,6 +109,7 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
return ERR_PTR(-ENOMEM);

if (vm->managed) {
+ BUG_ON(offset != 0);
spin_lock(&vm->mm_lock);
ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
obj->size, PAGE_SIZE, 0,
@@ -124,7 +125,7 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
GEM_WARN_ON((range_end - range_start) > obj->size);
- drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, 0);
+ drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
vma->mapped = false;

mutex_lock(&vm->vm_lock);
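The offset only matters for VMs where the caller picks the layout; for kernel-managed VMs (vm->managed) the allocator above still hands out whole-object ranges, which is presumably why a non-zero offset is treated as a bug there and why the a2xx GPUMMU and plain IOMMU backends only WARN on it. A hypothetical caller on a non-managed VM could now set up a partial mapping along these lines (gpu_addr and prot are placeholder names of mine, and the object is assumed to be pinned):

struct drm_gpuva *vma;

/* map only bytes [PAGE_SIZE, 3 * PAGE_SIZE) of the BO at gpu_addr */
vma = msm_gem_vma_new(vm, obj, PAGE_SIZE,                 /* offset into the BO */
                      gpu_addr, gpu_addr + 2 * PAGE_SIZE); /* iova range */
if (IS_ERR(vma))
        return PTR_ERR(vma);

return msm_gem_vma_map(vma, prot, to_msm_bo(obj)->sgt);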
@@ -113,7 +113,8 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
}

static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
@@ -125,6 +126,19 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);

+ if (!len)
+ break;
+
+ if (size <= off) {
+ off -= size;
+ continue;
+ }
+
+ phys += off;
+ size -= off;
+ size = min_t(size_t, size, len);
+ off = 0;
+
while (size) {
size_t pgsize, count, mapped = 0;
int ret;
@@ -140,6 +154,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
phys += mapped;
addr += mapped;
size -= mapped;
+ len -= mapped;

if (ret) {
msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
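The new prologue in the scatterlist walk is plain window arithmetic: whole entries that lie before off are skipped, the first entry that overlaps the window loses its leading off bytes, and everything is clamped to len, which then counts down as pages are mapped (the "if (!len) break" ends the walk once the window is exhausted). A self-contained userspace sketch of the same arithmetic over an array of segment sizes, for illustration only (not kernel code):

#include <stdio.h>
#include <stddef.h>

/* Print the pieces that make up a window of "len" bytes starting "off"
 * bytes into a buffer composed of "nsegs" segments, mirroring the
 * skip/trim/clamp logic added to msm_iommu_pagetable_map().
 */
static void map_window(const size_t *seg, int nsegs, size_t off, size_t len)
{
        for (int i = 0; i < nsegs; i++) {
                size_t size = seg[i];
                size_t skip;

                if (!len)
                        break;

                if (size <= off) {      /* segment lies entirely before the window */
                        off -= size;
                        continue;
                }

                skip = off;             /* leading bytes of this segment to skip */
                size -= skip;
                if (size > len)         /* don't run past the end of the window */
                        size = len;
                off = 0;

                printf("segment %d: map %zu bytes starting at byte %zu\n",
                       i, size, skip);
                len -= size;
        }
}

int main(void)
{
        size_t segs[] = { 4096, 8192, 4096 };

        /* an 8 KiB window starting 6 KiB into the 16 KiB buffer */
        map_window(segs, 3, 6144, 8192);
        return 0;
}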
@@ -359,11 +374,14 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;

+ WARN_ON(off != 0);
+
/* The arm-smmu driver expects the addresses to be sign extended */
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
@@ -12,7 +12,7 @@
struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- size_t len, int prot);
+ size_t off, size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
void (*resume_translation)(struct msm_mmu *mmu);
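Taken together, the new callback contract amounts to: map len bytes of the buffer described by sgt, starting off bytes into it, at iova (this wording is mine, not from the patch). Backends that cannot map at an offset are expected to only ever see off == 0, as the WARN_ON()s in the a2xx GPUMMU and plain IOMMU paths above assume. A minimal sketch of such a backend hook under that assumption (my_mmu_map_whole is a made-up name):

static int my_mmu_map_whole(struct msm_mmu *mmu, uint64_t iova,
                            struct sg_table *sgt, size_t off, size_t len,
                            int prot)
{
        /* partial mappings not supported by this backend */
        if (WARN_ON(off != 0))
                return -EINVAL;

        /* ... hand (iova, sgt, len, prot) to the hardware as before ... */
        return 0;
}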