@@ -152,7 +152,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
-static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
+static inline void cpu_replace_ttbr1_phys(phys_addr_t pgd_phys, pgd_t *idmap)
{
typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -160,9 +160,10 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
unsigned long daif;

/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
- phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
+ phys_addr_t ttbr1 = phys_to_ttbr(pgd_phys);

- if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
+ if (system_supports_cnp() &&
+ !WARN_ON(pgd_phys != virt_to_phys(lm_alias(swapper_pg_dir)))) {
/*
* cpu_replace_ttbr1() is used when there's a boot CPU
* up (i.e. cpufeature framework is not up yet) and
@@ -189,6 +190,11 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
cpu_uninstall_idmap();
}

+static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
+{
+ cpu_replace_ttbr1_phys(virt_to_phys(pgdp), idmap);
+}
+
/*
* It would be nice to return ASIDs back to the allocator, but unfortunately
* that introduces a race with a generation rollover where we could erroneously
Provide cpu_replace_ttbr1_phys(), a version of cpu_replace_ttbr1() which
operates using a physical address rather than the virtual address of the
page tables.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
---
 arch/arm64/include/asm/mmu_context.h | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
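As a usage illustration (not part of the patch), the sketch below shows the
kind of caller this enables: one that only knows the physical address of a
prepared PGD, for example page tables handed over by earlier boot code, and
can now switch TTBR1_EL1 without a phys_to_virt()/virt_to_phys() round trip.
The function example_switch_to_new_pgd() and the variable
example_handover_pgd_phys are invented names for this sketch; idmap_pg_dir is
the existing arm64 idmap PGD that callers of cpu_replace_ttbr1() pass today.

#include <linux/init.h>
#include <linux/types.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

/*
 * Hypothetical: physical address of a fully initialised PGD prepared by
 * earlier boot code; only its physical address is known at this point.
 */
extern phys_addr_t example_handover_pgd_phys;

static void __init example_switch_to_new_pgd(void)
{
	/*
	 * With only the VA-based interface this would have needed a
	 * phys->virt conversion just so the helper could convert it back:
	 *
	 *	cpu_replace_ttbr1(phys_to_virt(example_handover_pgd_phys),
	 *			  idmap_pg_dir);
	 *
	 * The new helper takes the physical address directly.
	 */
	cpu_replace_ttbr1_phys(example_handover_pgd_phys, idmap_pg_dir);
}

Since cpu_replace_ttbr1() is reduced to a thin wrapper that converts its
virtual address argument with virt_to_phys(), existing callers keep their
current behaviour.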