[05/11] accel/ivpu: Unmap partially mapped BOs in case of errors

Message ID 20241017145817.121590-6-jacek.lawrynowicz@linux.intel.com
State New, archived
Series accel/ivpu: Changes for 6.13-rc5

Commit Message

Jacek Lawrynowicz Oct. 17, 2024, 2:58 p.m. UTC
From: Karol Wachowski <karol.wachowski@intel.com>

Ensure that all buffers that were only partially mapped through the
allocated scatter-gather table are unmapped from the MMU600 in case of
errors.

Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
---
 drivers/accel/ivpu/ivpu_mmu_context.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

Comments

Jeffrey Hugo Oct. 18, 2024, 10:01 p.m. UTC | #1
On 10/17/2024 8:58 AM, Jacek Lawrynowicz wrote:
> From: Karol Wachowski <karol.wachowski@intel.com>
> 
> Ensure that all buffers that were only partially mapped through the
> allocated scatter-gather table are unmapped from the MMU600 in case of
> errors.
> 
> Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
> Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
> Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>

Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>

Patch

diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 8992fe93b679a..697b57071d546 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -432,6 +432,7 @@ int
 ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 			 u64 vpu_addr, struct sg_table *sgt,  bool llc_coherent)
 {
+	size_t start_vpu_addr = vpu_addr;
 	struct scatterlist *sg;
 	int ret;
 	u64 prot;
@@ -462,7 +463,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
 		if (ret) {
 			ivpu_err(vdev, "Failed to map context pages\n");
-			goto err_unlock;
+			goto err_unmap_pages;
 		}
 		vpu_addr += size;
 	}
@@ -472,7 +473,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		if (ret) {
 			ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
 				 ctx->id, ret);
-			goto err_unlock;
+			goto err_unmap_pages;
 		}
 		ctx->is_cd_valid = true;
 	}
@@ -480,17 +481,19 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 	/* Ensure page table modifications are flushed from wc buffers to memory */
 	wmb();
 
-	mutex_unlock(&ctx->lock);
-
 	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
-	if (ret)
+	if (ret) {
 		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
-	return ret;
+		goto err_unmap_pages;
+	}
 
-err_unlock:
 	mutex_unlock(&ctx->lock);
-	return ret;
+	return 0;
 
+err_unmap_pages:
+	ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
+	mutex_unlock(&ctx->lock);
+	return ret;
 }
 
 void
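
The new err_unmap_pages label applies the usual kernel goto-unwind pattern: the starting VPU address is recorded before the mapping loop, and on any failure the range covered so far is unmapped before the lock is released (which is also why the mutex_unlock() now happens after the TLB invalidation, so the error path can unmap under ctx->lock). Below is a minimal standalone sketch of that pattern; map_range()/unmap_range() are hypothetical stand-ins for ivpu_mmu_context_map_pages()/ivpu_mmu_context_unmap_pages(), with a simulated failure to exercise the error path:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the ivpu MMU map/unmap helpers. */
static int map_range(uint64_t addr, size_t size)
{
	static int calls;

	if (++calls == 3)
		return -1; /* simulate a failure on the third chunk */
	printf("mapped   0x%llx..0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)(addr + size));
	return 0;
}

static void unmap_range(uint64_t addr, size_t size)
{
	printf("unmapped 0x%llx..0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)(addr + size));
}

/* Map chunks one by one; on error, unwind everything mapped so far. */
static int map_chunks(uint64_t vpu_addr, const size_t *sizes, int count)
{
	uint64_t start_vpu_addr = vpu_addr; /* remember where mapping began */
	int ret, i;

	for (i = 0; i < count; i++) {
		ret = map_range(vpu_addr, sizes[i]);
		if (ret)
			goto err_unmap;
		vpu_addr += sizes[i];
	}
	return 0;

err_unmap:
	/* vpu_addr - start_vpu_addr covers exactly the chunks mapped so far */
	unmap_range(start_vpu_addr, vpu_addr - start_vpu_addr);
	return ret;
}

int main(void)
{
	const size_t sizes[] = { 0x1000, 0x2000, 0x1000 };

	map_chunks(0x80000000ull, sizes, 3);
	return 0;
}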