@@ -322,8 +322,9 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
/* Unmap pages from GPU address space and free them */
for (i = 0; i < node->base.mem.size; i++) {
- iommu_unmap(imem->domain,
- (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
+ iommu_unmap_sync(imem->domain,
+ (r->offset + i) << imem->iommu_pgshift,
+ PAGE_SIZE);
dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(node->pages[i]);
@@ -458,14 +459,15 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
for (i = 0; i < npages; i++) {
u32 offset = (r->offset + i) << imem->iommu_pgshift;
- ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
- PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+ ret = iommu_map_sync(imem->domain, offset, node->dma_addrs[i],
+ PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
if (ret < 0) {
nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
while (i-- > 0) {
offset -= PAGE_SIZE;
- iommu_unmap(imem->domain, offset, PAGE_SIZE);
+ iommu_unmap_sync(imem->domain, offset,
+ PAGE_SIZE);
}
goto release_area;
}
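
(Note: below is a minimal sketch of the prototypes these call sites assume. It is an assumption for illustration only: the _sync variants are taken here to mirror the existing iommu_map()/iommu_unmap() prototypes, differing only in that they flush the IOTLB before returning, so the backing pages can safely be DMA-unmapped and freed right after the call. The real declarations belong to the IOMMU API change this conversion depends on.)

    /* Assumed prototypes, mirroring iommu_map()/iommu_unmap(); both are
     * expected to have flushed the IOTLB by the time they return.  The
     * call sites above pass the same arguments as before and only check
     * iommu_map_sync() for a negative return, as with iommu_map(). */
    int iommu_map_sync(struct iommu_domain *domain, unsigned long iova,
                       phys_addr_t paddr, size_t size, int prot);
    size_t iommu_unmap_sync(struct iommu_domain *domain, unsigned long iova,
                            size_t size);

The rollback loop in the second hunk (while (i-- > 0)) keeps the same shape as before: on a mapping failure it walks back over the pages already mapped in this area and unmaps them, now with the synchronous variant as well, before releasing the IOMMU area.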