@@ -45,20 +45,29 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 	}
 	mutex_lock(&hdev->mmu_lock);
+
 	rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
 	if (rc) {
 		dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
-		goto err_va_umap;
+		goto err_va_pool_free;
 	}
+
 	rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
+	if (rc)
+		goto err_mmu_unmap;
+
 	mutex_unlock(&hdev->mmu_lock);
 	cb->is_mmu_mapped = true;
-	return rc;
-err_va_umap:
+	return 0;
+
+err_mmu_unmap:
+	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
+err_va_pool_free:
 	mutex_unlock(&hdev->mmu_lock);
 	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
+
 	return rc;
 }
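
For readers outside the driver, the unwind ordering this hunk establishes can be sketched in isolation. The sketch below is a hypothetical userspace analogue, not the driver's code: do_map(), do_invalidate(), do_unmap() and free_va() are stand-ins for hl_mmu_map_contiguous(), hl_mmu_invalidate_cache(), hl_mmu_unmap_contiguous() and gen_pool_free(), and the hard-coded return values only simulate an invalidation failure.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the driver calls; return values only simulate outcomes. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_map(void)        { return 0; }      /* pretend hl_mmu_map_contiguous() succeeds */
static int do_invalidate(void) { return -EIO; }   /* pretend hl_mmu_invalidate_cache() fails  */
static void do_unmap(void)     { puts("unmapped"); }
static void free_va(void)      { puts("VA range freed"); }

/* Same unwind shape as the patched cb_map_mem(): each error label undoes
 * only the steps that already succeeded, and the lock is released exactly
 * once on every exit path. */
static int map_with_unwind(void)
{
	int rc;

	pthread_mutex_lock(&lock);

	rc = do_map();
	if (rc)
		goto err_free_va;	/* nothing mapped yet */

	rc = do_invalidate();
	if (rc)
		goto err_unmap;		/* mapping exists, undo it before freeing the VA */

	pthread_mutex_unlock(&lock);
	return 0;

err_unmap:
	do_unmap();
err_free_va:
	pthread_mutex_unlock(&lock);
	free_va();
	return rc;
}

int main(void)
{
	printf("rc=%d\n", map_with_unwind());
	return 0;
}

The point of the two labels is that a failure after the mapping succeeded must unmap before freeing the VA range, while a mapping failure only needs to free the VA range; in both cases the mutex is dropped before the free, matching the patched function.
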
@@ -679,7 +679,9 @@ int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
 	rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
 	if (rc)
-		dev_err_ratelimited(hdev->dev, "MMU cache invalidation failed\n");
+		dev_err_ratelimited(hdev->dev,
+			"%s cache invalidation failed, rc=%d\n",
+			flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);
 	return rc;
 }
@@ -692,7 +694,9 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
 	rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
 							asid, va, size);
 	if (rc)
-		dev_err_ratelimited(hdev->dev, "MMU cache range invalidation failed\n");
+		dev_err_ratelimited(hdev->dev,
+			"%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
+			flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", va, size, rc);
 	return rc;
 }
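
The two message changes share the same selection logic. Below is a minimal, self-contained illustration of how the flag picks the MMU name in the log line; the VM_TYPE_USERPTR value is a placeholder, not the driver's definition, and per the ternary above the userptr case selects the PMMU while everything else is reported as the HMMU.

#include <stdio.h>

/* Placeholder value; the real VM_TYPE_USERPTR comes from the driver headers. */
#define VM_TYPE_USERPTR 0x1

/* Mirrors the ternary used in the patch: userptr flags select the PMMU
 * string, anything else is reported as the HMMU. */
static const char *mmu_name(unsigned int flags)
{
	return flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU";
}

int main(void)
{
	unsigned long long va = 0x7f0000000000ULL;
	unsigned long long size = 4096;
	int rc = -5;

	printf("%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d\n",
	       mmu_name(VM_TYPE_USERPTR), va, size, rc);
	return 0;
}

Carrying the failing VA, size and return code in the ratelimited message makes the log entry correlatable with the operation that triggered it, instead of the previous generic "MMU cache invalidation failed" text.
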