@@ -48,6 +48,7 @@
#endif
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
+#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6)
#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
/*
@@ -403,6 +403,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
+#ifdef CONFIG_KVM
+void hyp_identity_mapping_add(pgd_t *, unsigned long, unsigned long);
+void hyp_identity_mapping_del(pgd_t *, unsigned long, unsigned long);
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
@@ -33,11 +33,16 @@ static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
flush_pmd_entry(pmd);
}
-void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void __identity_mapping_add(pgd_t *pgd, unsigned long addr,
+ unsigned long end, bool hyp_mapping)
{
unsigned long prot, next;
prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
+
+ if (hyp_mapping)
+ prot |= PMD_SECT_AP1;
+
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
prot |= PMD_BIT4;
@@ -47,6 +52,12 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
} while (addr = next, addr < end);
}
+void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+ __identity_mapping_add(pgd, addr, end, false);
+}
+
+
#ifdef CONFIG_SMP
static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end)
{
@@ -69,6 +80,40 @@ void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
}
#endif
+#ifdef CONFIG_KVM
+void hyp_identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+ __identity_mapping_add(pgd, addr, end, true);
+}
+
+static void hyp_idmap_del_pmd(pgd_t *pgd, unsigned long addr)
+{
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pgd, addr);
+ pmd_free(NULL, pmd);
+}
+
+/*
+ * This version actually frees the underlying pmds for all pgds in range and
+ * clears the pgds themselves afterwards.
+ */
+void hyp_identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ pgd_t *next_pgd;
+
+ do {
+ next = pgd_addr_end(addr, end);
+ next_pgd = pgd + pgd_index(addr);
+ if (!pgd_none_or_clear_bad(next_pgd)) {
+ hyp_idmap_del_pmd(next_pgd, addr);
+ pgd_clear(next_pgd);
+ }
+ } while (addr = next, addr < end);
+}
+#endif
+
/*
* In order to soft-boot, we need to insert a 1:1 mapping in place of
* the user-mode pages. This will then ensure that we have predictable