--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -13,6 +13,7 @@ struct qcom_smmu {
struct arm_smmu_device smmu;
bool bypass_quirk;
u8 bypass_cbndx;
+ u32 stall_enabled;
};

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
@@ -23,12 +24,17 @@ static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
u32 reg)
{
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+
/*
* On the GPU device we want to process subsequent transactions after a
* fault to keep the GPU from hanging
*/
reg |= ARM_SMMU_SCTLR_HUPCF;

+ if (qsmmu->stall_enabled & BIT(idx))
+ reg |= ARM_SMMU_SCTLR_CFCFG;
+
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

@@ -48,6 +54,31 @@ static void qcom_adreno_smmu_get_fault_info(const void *cookie,
info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}

+static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
+
+ if (enabled)
+ qsmmu->stall_enabled |= BIT(cfg->cbndx);
+ else
+ qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
+}
+
+static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ u32 reg = 0;
+
+ if (terminate)
+ reg |= ARM_SMMU_RESUME_TERMINATE;
+
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
+}
+
#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
@@ -173,6 +204,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
+ priv->set_stall = qcom_adreno_smmu_set_stall;
+ priv->resume_translation = qcom_adreno_smmu_resume_translation;

return 0;
}
--- a/include/linux/adreno-smmu-priv.h
+++ b/include/linux/adreno-smmu-priv.h
@@ -45,6 +45,10 @@ struct adreno_smmu_fault_info {
* TTBR0 translation is enabled with the specified cfg
* @get_fault_info: Called by the GPU fault handler to get information about
* the fault
+ * @set_stall: Configure whether stall on fault (CFCFG) is enabled. Call
+ * before set_ttbr0_cfg(). If stalling on fault is enabled,
+ * the GPU driver must call resume_translation()
+ * @resume_translation: Resume translation after a fault
*
* The GPU driver (drm/msm) and adreno-smmu work together for controlling
* the GPU's SMMU instance. This is by necessity, as the GPU is directly
@@ -60,6 +65,8 @@ struct adreno_smmu_priv {
const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
+ void (*set_stall)(const void *cookie, bool enabled);
+ void (*resume_translation)(const void *cookie, bool terminate);
};

#endif /* __ADRENO_SMMU_PRIV_H */
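
For context, a rough consumer-side sketch of how a GPU driver such as drm/msm might drive the new ops. This is illustrative only: the gpu_enable_stall()/gpu_fault_worker() helpers and the should_terminate flag are hypothetical names; only the adreno_smmu_priv ops, the cookie, and struct adreno_smmu_fault_info come from this interface.

#include <linux/adreno-smmu-priv.h>

/*
 * Hypothetical consumer-side helpers, not part of this patch.
 */
static void gpu_enable_stall(struct adreno_smmu_priv *adreno_smmu)
{
	/*
	 * Opt in to stall-on-fault (CFCFG). Per the kerneldoc above, this
	 * must happen before set_ttbr0_cfg() installs the pagetable config.
	 */
	if (adreno_smmu->set_stall)
		adreno_smmu->set_stall(adreno_smmu->cookie, true);
}

static void gpu_fault_worker(struct adreno_smmu_priv *adreno_smmu,
			     bool should_terminate)
{
	struct adreno_smmu_fault_info info;

	/*
	 * While the faulting transaction is stalled, the context bank's
	 * fault registers are stable and can be snapshotted.
	 */
	if (adreno_smmu->get_fault_info)
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);

	/* ... log or crashdump info.far, info.fsr, info.contextidr ... */

	/*
	 * Unstall the context bank; terminate=true aborts the faulting
	 * transaction instead of retrying it.
	 */
	if (adreno_smmu->resume_translation)
		adreno_smmu->resume_translation(adreno_smmu->cookie,
						should_terminate);
}

The key contract is the ordering: set_stall() before set_ttbr0_cfg(), and a resume_translation() call after each stalled fault, since SCTLR.CFCFG leaves the client stalled until ARM_SMMU_CB_RESUME is written.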