@@ -143,6 +143,104 @@ static int nsmmu_init_context(struct arm_smmu_domain *smmu_domain)
return 0;
}
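+/* Convert an iommu_domain to the arm_smmu_domain that contains it. */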
+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct arm_smmu_domain, domain);
+}
+
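+/*
+ * Check a single SMMU instance for a global fault: report and clear
+ * the sGFSR if a fault is pending, otherwise return IRQ_NONE.
+ */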
+static irqreturn_t nsmmu_global_fault_inst(int irq,
+ struct arm_smmu_device *smmu,
+ int inst)
+{
+ u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+ void __iomem *gr0_base = nsmmu_page(smmu, inst, 0);
+
+ gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+ if (!gfsr)
+ return IRQ_NONE;
+
+ gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+ gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+ gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+ dev_err_ratelimited(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err_ratelimited(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+ writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+ return IRQ_HANDLED;
+}
+
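+/*
+ * The global fault interrupt line is shared across all SMMU instances,
+ * so poll each instance in turn until one reports a fault.
+ */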
+static irqreturn_t nsmmu_global_fault(int irq, void *dev)
+{
+ int inst;
+ irqreturn_t irq_ret = IRQ_NONE;
+ struct arm_smmu_device *smmu = dev;
+
+ for (inst = 0; inst < to_nvidia_smmu(smmu)->num_inst; inst++) {
+ irq_ret = nsmmu_global_fault_inst(irq, smmu, inst);
+ if (irq_ret == IRQ_HANDLED)
+ return irq_ret;
+ }
+
+ return irq_ret;
+}
+
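+/*
+ * Check one context bank of one SMMU instance for a context fault:
+ * report and clear the FSR if a fault is pending.
+ */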
+static irqreturn_t nsmmu_context_fault_bank(int irq,
+ struct arm_smmu_device *smmu,
+ int idx, int inst)
+{
+ u32 fsr, fsynr, cbfrsynra;
+ unsigned long iova;
+ void __iomem *gr1_base = nsmmu_page(smmu, inst, 1);
+ void __iomem *cb_base = nsmmu_page(smmu, inst, smmu->numpage + idx);
+
+ /*
+  * Read this instance's FSR directly rather than through
+  * arm_smmu_cb_read(), which only reflects instance 0.
+  */
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (!(fsr & FSR_FAULT))
+ return IRQ_NONE;
+
+ fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+ iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+ cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));
+
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ return IRQ_HANDLED;
+}
+
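+/*
+ * The "dev" cookie is the iommu_domain passed to devm_request_irq()
+ * by arm_smmu_init_domain_context().
+ */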
+static irqreturn_t nsmmu_context_fault(int irq, void *dev)
+{
+ int inst, idx;
+ irqreturn_t irq_ret = IRQ_NONE;
+ struct iommu_domain *domain = dev;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ for (inst = 0; inst < to_nvidia_smmu(smmu)->num_inst; inst++) {
+ /*
+  * The interrupt line is shared between all context faults.
+  * Check for faults across all contexts.
+  */
+ for (idx = 0; idx < smmu->num_context_banks; idx++) {
+ irq_ret = nsmmu_context_fault_bank(irq, smmu,
+ idx, inst);
+
+ if (irq_ret == IRQ_HANDLED)
+ return irq_ret;
+ }
+ }
+
+ return irq_ret;
+}
+
static const struct arm_smmu_impl nvidia_smmu_impl = {
.read_reg = nsmmu_read_reg,
.write_reg = nsmmu_write_reg,
@@ -150,6 +248,8 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
.write_reg64 = nsmmu_write_reg64,
.reset = nsmmu_reset,
.init_context = nsmmu_init_context,
+ .global_fault = nsmmu_global_fault,
+ .context_fault = nsmmu_context_fault,
};
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
@@ -635,6 +635,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ irqreturn_t (*context_fault)(int irq, void *dev);
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
@@ -797,7 +798,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* handler seeing a half-initialised domain state.
*/
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
- ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
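+ /* Use the implementation-specific context fault handler if one exists. */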
+ context_fault = (smmu->impl && smmu->impl->context_fault) ?
+ smmu->impl->context_fault : arm_smmu_context_fault;
+ ret = devm_request_irq(smmu->dev, irq, context_fault,
IRQF_SHARED, "arm-smmu-context-fault", domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
@@ -2008,6 +2011,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
+ irqreturn_t (*global_fault)(int irq, void *dev);
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@@ -2096,9 +2100,12 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->num_context_irqs = smmu->num_context_banks;
}
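+ /* Use the implementation-specific global fault handler if one exists. */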
+ global_fault = (smmu->impl && smmu->impl->global_fault) ?
+ smmu->impl->global_fault : arm_smmu_global_fault;
+
for (i = 0; i < smmu->num_global_irqs; ++i) {
err = devm_request_irq(smmu->dev, smmu->irqs[i],
- arm_smmu_global_fault,
+ global_fault,
IRQF_SHARED,
"arm-smmu global fault",
smmu);
@@ -17,6 +17,7 @@
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
+#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -337,6 +338,8 @@ struct arm_smmu_impl {
int (*cfg_probe)(struct arm_smmu_device *smmu);
int (*reset)(struct arm_smmu_device *smmu);
int (*init_context)(struct arm_smmu_domain *smmu_domain);
+ irqreturn_t (*global_fault)(int irq, void *dev);
+ irqreturn_t (*context_fault)(int irq, void *dev);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
Add global/context fault hooks to allow NVIDIA SMMU implementation handle faults across multiple SMMUs. Signed-off-by: Krishna Reddy <vdumpa@nvidia.com> --- drivers/iommu/arm-smmu-nvidia.c | 100 ++++++++++++++++++++++++++++++++++++++++ drivers/iommu/arm-smmu.c | 11 ++++- drivers/iommu/arm-smmu.h | 3 ++ 3 files changed, 112 insertions(+), 2 deletions(-)