
iommu/rockchip: Add flush_iotlb_all ops

Message ID 20250318152049.14781-1-detlev.casanova@collabora.com (mailing list archive)
State New
Series iommu/rockchip: Add flush_iotlb_all ops

Commit Message

Detlev Casanova March 18, 2025, 3:20 p.m. UTC
From: Jonas Karlman <jonas@kwiboo.se>

On some Rockchip cores (like the vdpu34x video decoder), the IOMMU device
is inside the device that uses it.

The IOMMU device can still be driven by the IOMMU driver, but when an
error occurs in the main device (e.g. a decoding error that resets the
decoder), the IOMMU device is also reset.
In such a situation, the IOMMU driver and the hardware are out of sync and
IOMMU errors start showing up.

To avoid that, add a flush_iotlb_all function that lets the main driver
(e.g. rkvdec) tell the IOMMU driver to write all of its cached mappings back
into the IOMMU hardware when such an error occurs.

Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
Signed-off-by: Detlev Casanova <detlev.casanova@collabora.com>
---
 drivers/iommu/rockchip-iommu.c | 45 ++++++++++++++++++++++++++++++----
 1 file changed, 40 insertions(+), 5 deletions(-)
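
For illustration only (not part of this patch): a minimal sketch of how a
consumer driver such as rkvdec could reach the new op through the generic
IOMMU API after a hardware reset. The rkvdec_iommu_recover() helper and the
way the struct device is obtained are assumptions.

#include <linux/iommu.h>

/*
 * Hypothetical caller in the decoder driver.  With this patch applied,
 * iommu_flush_iotlb_all() ends up in rk_iommu_flush_iotlb_all(), which
 * rewrites the DTE address and zaps the TLB on every attached IOMMU.
 */
static void rkvdec_iommu_recover(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		return;

	iommu_flush_iotlb_all(domain);
}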

Comments

Robin Murphy March 18, 2025, 6:40 p.m. UTC | #1
On 18/03/2025 3:20 pm, Detlev Casanova wrote:
> From: Jonas Karlman <jonas@kwiboo.se>
> 
> On some Rockchip cores (like the vdpu34x video decoder), the IOMMU device
> is inside the device that uses it.
> 
> The IOMMU device can still be driven by the IOMMU driver, but when an
> error occurs in the main device (e.g. a decoding error that resets the
> decoder), the IOMMU device is also reset.
> In such a situation, the IOMMU driver and the hardware are out of sync and
> IOMMU errors start showing up.
> 
> To avoid that, add a flush_iotlb_all function that lets the main driver
> (e.g. rkvdec) tell the IOMMU driver to write all of its cached mappings back
> into the IOMMU hardware when such an error occurs.

Eww, this is the exact opposite of what flush_iotlb_all represents, and 
I really don't like the idea of the public IOMMU API being abused for 
inter-driver communication. Please have some kind of proper reset 
notifier mechanism - in fact with runtime PM could you not already 
invoke a suspend/resume cycle via the device links? AFAICS it would also 
work to attach to a different domain then switch back again. Or at worst 
just export a public interface for the other driver to invoke 
rk_iommu_resume() directly. Just don't hide it in something completely 
inappropriate - I mean, consider if someone wants to implement 
IOMMU_CAP_DEFERRED_FLUSH support here in future...

Thanks,
Robin.

> Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
> Signed-off-by: Detlev Casanova <detlev.casanova@collabora.com>
> ---
>   drivers/iommu/rockchip-iommu.c | 45 ++++++++++++++++++++++++++++++----
>   1 file changed, 40 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
> index 323cc665c357..7086716cb8fc 100644
> --- a/drivers/iommu/rockchip-iommu.c
> +++ b/drivers/iommu/rockchip-iommu.c
> @@ -899,6 +899,40 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
>   	return unmap_size;
>   }
>   
> +static void rk_iommu_flush_iotlb_all(struct iommu_domain *domain)
> +{
> +	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
> +	struct list_head *pos;
> +	unsigned long flags;
> +	int i, ret;
> +
> +	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
> +	list_for_each(pos, &rk_domain->iommus) {
> +		struct rk_iommu *iommu = list_entry(pos, struct rk_iommu, node);
> +
> +		ret = pm_runtime_get_if_in_use(iommu->dev);
> +		if (!ret || WARN_ON_ONCE(ret < 0))
> +			continue;
> +
> +		if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
> +			continue;
> +
> +		rk_iommu_enable_stall(iommu);
> +		for (i = 0; i < iommu->num_mmu; i++) {
> +			rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
> +				rk_ops->mk_dtentries(rk_domain->dt_dma));
> +			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
> +			rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
> +		}
> +		rk_iommu_enable_paging(iommu);
> +		rk_iommu_disable_stall(iommu);
> +
> +		clk_bulk_disable(iommu->num_clocks, iommu->clocks);
> +		pm_runtime_put(iommu->dev);
> +	}
> +	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
> +}
> +
>   static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
>   {
>   	struct rk_iommudata *data = dev_iommu_priv_get(dev);
> @@ -1172,11 +1206,12 @@ static const struct iommu_ops rk_iommu_ops = {
>   	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
>   	.of_xlate = rk_iommu_of_xlate,
>   	.default_domain_ops = &(const struct iommu_domain_ops) {
> -		.attach_dev	= rk_iommu_attach_device,
> -		.map_pages	= rk_iommu_map,
> -		.unmap_pages	= rk_iommu_unmap,
> -		.iova_to_phys	= rk_iommu_iova_to_phys,
> -		.free		= rk_iommu_domain_free,
> +		.attach_dev		= rk_iommu_attach_device,
> +		.map_pages		= rk_iommu_map,
> +		.unmap_pages		= rk_iommu_unmap,
> +		.flush_iotlb_all	= rk_iommu_flush_iotlb_all,
> +		.iova_to_phys		= rk_iommu_iova_to_phys,
> +		.free			= rk_iommu_domain_free,
>   	}
>   };
>
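
For illustration only: a minimal sketch of the runtime-PM alternative Robin
suggests above, assuming the decoder's device link to the IOMMU carries
DL_FLAG_PM_RUNTIME and is the only thing keeping the IOMMU active, so that
cycling the decoder's runtime-PM state takes the IOMMU through
rk_iommu_suspend()/rk_iommu_resume() and reprograms the hardware. The
rkvdec_recover_after_reset() name is hypothetical.

#include <linux/pm_runtime.h>

/*
 * Hypothetical recovery path in the decoder driver: cycle runtime PM so
 * that the IOMMU supplier is suspended and resumed along with the
 * consumer, letting its resume hook restore the DTE address and masks.
 * This only helps if no other reference keeps the IOMMU powered.
 */
static int rkvdec_recover_after_reset(struct device *dev)
{
	int ret;

	ret = pm_runtime_put_sync_suspend(dev);
	if (ret < 0)
		return ret;

	return pm_runtime_resume_and_get(dev);
}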

Patch

diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 323cc665c357..7086716cb8fc 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -899,6 +899,40 @@  static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 	return unmap_size;
 }
 
+static void rk_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+	struct list_head *pos;
+	unsigned long flags;
+	int i, ret;
+
+	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+	list_for_each(pos, &rk_domain->iommus) {
+		struct rk_iommu *iommu = list_entry(pos, struct rk_iommu, node);
+
+		ret = pm_runtime_get_if_in_use(iommu->dev);
+		if (!ret || WARN_ON_ONCE(ret < 0))
+			continue;
+
+		if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
+			continue;
+
+		rk_iommu_enable_stall(iommu);
+		for (i = 0; i < iommu->num_mmu; i++) {
+			rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
+				rk_ops->mk_dtentries(rk_domain->dt_dma));
+			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+			rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+		}
+		rk_iommu_enable_paging(iommu);
+		rk_iommu_disable_stall(iommu);
+
+		clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+		pm_runtime_put(iommu->dev);
+	}
+	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+}
+
 static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
 {
 	struct rk_iommudata *data = dev_iommu_priv_get(dev);
@@ -1172,11 +1206,12 @@  static const struct iommu_ops rk_iommu_ops = {
 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
 	.of_xlate = rk_iommu_of_xlate,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
-		.attach_dev	= rk_iommu_attach_device,
-		.map_pages	= rk_iommu_map,
-		.unmap_pages	= rk_iommu_unmap,
-		.iova_to_phys	= rk_iommu_iova_to_phys,
-		.free		= rk_iommu_domain_free,
+		.attach_dev		= rk_iommu_attach_device,
+		.map_pages		= rk_iommu_map,
+		.unmap_pages		= rk_iommu_unmap,
+		.flush_iotlb_all	= rk_iommu_flush_iotlb_all,
+		.iova_to_phys		= rk_iommu_iova_to_phys,
+		.free			= rk_iommu_domain_free,
 	}
 };