@@ -253,6 +253,7 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
struct iommu_domain domain;
+ bool has_sys_cache; /* Pagetable walks may use the system cache */
};
struct arm_smmu_option_prop {
@@ -880,6 +881,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+ if (smmu_domain->has_sys_cache)
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_SYS_CACHE;
smmu_domain->smmu = smmu;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -1539,6 +1542,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
return 0;
+ case DOMAIN_ATTR_USE_SYS_CACHE:
+ *(int *)data = smmu_domain->has_sys_cache;
+ return 0;
default:
return -ENODEV;
}
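Since unsupported attributes fall through to -ENODEV here, a client can probe the new attribute safely before deciding how to map its buffers. A minimal sketch of such a check, assuming the existing iommu_domain_get_attr() API (the helper name is hypothetical):

#include <linux/iommu.h>

/* Hypothetical helper: does the domain use the system cache? */
static bool domain_uses_sys_cache(struct iommu_domain *domain)
{
	int val = 0;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_USE_SYS_CACHE, &val))
		return false;	/* attribute not supported by this driver */

	return val;
}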
@@ -1568,6 +1574,14 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
+ case DOMAIN_ATTR_USE_SYS_CACHE:
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+ if (*(int *)data)
+ smmu_domain->has_sys_cache = true;
+ break;
default:
ret = -ENODEV;
}
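The attribute only takes effect when it is set before the first attach: arm_smmu_init_domain_context() reads has_sys_cache while allocating the page tables, and set_attr returns -EPERM once smmu_domain->smmu is populated. A sketch of the expected client ordering, assuming the existing iommu_domain_set_attr()/iommu_attach_device() APIs (the helper is hypothetical):

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical client probe path: request the system cache, then attach. */
static int client_enable_sys_cache(struct iommu_domain *domain,
				   struct device *dev)
{
	int val = 1;
	int ret;

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_USE_SYS_CACHE, &val);
	if (ret)
		return ret;	/* unsupported, or domain already attached */

	return iommu_attach_device(domain, dev);
}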
@@ -169,9 +169,11 @@
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
#define ARM_LPAE_MAIR_ATTR_NC 0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
+#define ARM_LPAE_MAIR_ATTR_SYS_CACHE 0xf4
#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+#define ARM_LPAE_MAIR_ATTR_IDX_SYS_CACHE 3
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@@ -442,6 +444,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ else if (prot & IOMMU_SYS_CACHE)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_SYS_CACHE
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
} else {
pte = ARM_LPAE_PTE_HAP_FAULT;
if (prot & IOMMU_READ)
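With the new prot bit, stage-1 mappings select MAIR index 3 (outer write-back, inner non-cacheable), so a buffer can allocate in the system cache without being inner-cacheable. A mapping request from a client might then look like this sketch (domain, iova, paddr, size and ret are assumed locals):

	/* Hypothetical mapping routed via the system cache. */
	ret = iommu_map(domain, iova, paddr, size,
			IOMMU_READ | IOMMU_WRITE | IOMMU_SYS_CACHE);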
@@ -771,7 +777,8 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
u64 reg;
struct arm_lpae_io_pgtable *data;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_SYS_CACHE))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -779,9 +786,14 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
return NULL;
/* TCR */
- reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ if (cfg->quirks & IO_PGTABLE_QUIRK_SYS_CACHE) {
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT);
+ } else {
+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT);
+ }
+ reg |= (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
@@ -833,7 +845,9 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
(ARM_LPAE_MAIR_ATTR_WBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
(ARM_LPAE_MAIR_ATTR_DEVICE
- << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
+ (ARM_LPAE_MAIR_ATTR_SYS_CACHE
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_SYS_CACHE));
cfg->arm_lpae_s1_cfg.mair[0] = reg;
cfg->arm_lpae_s1_cfg.mair[1] = 0;
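For reference, with the extra index the stage-1 MAIR0 value works out as follows; this is just the arithmetic implied by the defines above, shown as a comment rather than new code:

/*
 * mair[0] = idx3 << 24 | idx2 << 16 | idx1 << 8 | idx0
 *         = 0xf4 << 24 | 0x04 << 16 | 0xff << 8 | 0x44
 *         = 0xf404ff44
 *
 * where 0xf4 is outer write-back read/write-allocate, inner non-cacheable.
 */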
@@ -71,12 +71,16 @@ struct io_pgtable_cfg {
* be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
* software-emulated IOMMU), such that pagetable updates need not
* be treated as explicit DMA data.
+ *
+ * IO_PGTABLE_QUIRK_SYS_CACHE: Override the TCR walk attributes so that
+ * page table walks allocate in the system cache rather than the inner caches.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
#define IO_PGTABLE_QUIRK_NO_DMA BIT(4)
+#define IO_PGTABLE_QUIRK_SYS_CACHE BIT(5)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
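Other io-pgtable users could opt into the same behaviour directly via the quirk. A minimal sketch assuming the ARM_64_LPAE_S1 format; everything except the quirks is illustrative, and a real user must supply TLB ops and the table-walker device:

#include <linux/sizes.h>
#include "io-pgtable.h"	/* <linux/io-pgtable.h> on newer trees */

/* Hypothetical direct io-pgtable user asking for system-cacheable walks. */
static struct io_pgtable_ops *example_alloc_pgtable(void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_NO_DMA |
				  IO_PGTABLE_QUIRK_SYS_CACHE,
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= NULL,	/* illustrative only */
		.iommu_dev	= NULL,	/* illustrative only */
	};

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
}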
@@ -41,6 +41,9 @@
* if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)
+/* Use the system cache (last-level cache), where the architecture provides one */
+#define IOMMU_SYS_CACHE (1 << 6)
+
struct iommu_ops;
struct iommu_group;
@@ -124,6 +127,7 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
+ DOMAIN_ATTR_USE_SYS_CACHE,
DOMAIN_ATTR_MAX,
};