@@ -45,6 +45,28 @@ static inline void flush_all_guests_tlb(void)
isb();
}
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
+{
+ asm volatile("dsb;" /* Ensure preceding are visible */
+ CMD_CP32(TLBIALLH)
+ "dsb;" /* Ensure completion of the TLB flush */
+ "isb;"
+ : : : "memory");
+}
+
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
+{
+ asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
+}
+
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
+{
+ asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
+}
+
#endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */
/*
* Local variables:
@@ -61,28 +61,6 @@ static inline void invalidate_icache_local(void)
isb(); /* Synchronize fetched instruction stream. */
}
-/* Flush all hypervisor mappings from the TLB of the local processor. */
-static inline void flush_xen_tlb_local(void)
-{
- asm volatile("dsb;" /* Ensure preceding are visible */
- CMD_CP32(TLBIALLH)
- "dsb;" /* Ensure completion of the TLB flush */
- "isb;"
- : : : "memory");
-}
-
-/* Flush TLB of local processor for address va. */
-static inline void __flush_xen_tlb_one_local(vaddr_t va)
-{
- asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
-}
-
-/* Flush TLB of all processors in the inner-shareable domain for address va. */
-static inline void __flush_xen_tlb_one(vaddr_t va)
-{
- asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
-}
-
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
@@ -45,6 +45,29 @@ static inline void flush_all_guests_tlb(void)
: : : "memory");
}
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
+{
+ asm volatile (
+ "dsb sy;" /* Ensure visibility of PTE writes */
+ "tlbi alle2;" /* Flush hypervisor TLB */
+ "dsb sy;" /* Ensure completion of TLB flush */
+ "isb;"
+ : : : "memory");
+}
+
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
+{
+ asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
+}
+
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
+{
+ asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
+}
+
#endif /* __ASM_ARM_ARM64_FLUSHTLB_H__ */
/*
* Local variables:
@@ -45,29 +45,6 @@ static inline void invalidate_icache_local(void)
isb();
}
-/* Flush all hypervisor mappings from the TLB of the local processor. */
-static inline void flush_xen_tlb_local(void)
-{
- asm volatile (
- "dsb sy;" /* Ensure visibility of PTE writes */
- "tlbi alle2;" /* Flush hypervisor TLB */
- "dsb sy;" /* Ensure completion of TLB flush */
- "isb;"
- : : : "memory");
-}
-
-/* Flush TLB of local processor for address va. */
-static inline void __flush_xen_tlb_one_local(vaddr_t va)
-{
- asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
-}
-
-/* Flush TLB of all processors in the inner-shareable domain for address va. */
-static inline void __flush_xen_tlb_one(vaddr_t va)
-{
- asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
-}
-
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
@@ -28,6 +28,44 @@ static inline void page_set_tlbflush_timestamp(struct page_info *page)
/* Flush specified CPUs' TLBs */
void flush_tlb_mask(const cpumask_t *mask);
+/*
+ * Flush a range of VA's hypervisor mappings from the TLB of the local
+ * processor.
+ */
+static inline void flush_xen_tlb_range_va_local(vaddr_t va,
+ unsigned long size)
+{
+ vaddr_t end = va + size;
+
+ dsb(sy); /* Ensure preceding are visible */
+ while ( va < end )
+ {
+ __flush_xen_tlb_one_local(va);
+ va += PAGE_SIZE;
+ }
+ dsb(sy); /* Ensure completion of the TLB flush */
+ isb();
+}
+
+/*
+ * Flush a range of VA's hypervisor mappings from the TLB of all
+ * processors in the inner-shareable domain.
+ */
+static inline void flush_xen_tlb_range_va(vaddr_t va,
+ unsigned long size)
+{
+ vaddr_t end = va + size;
+
+ dsb(sy); /* Ensure preceding are visible */
+ while ( va < end )
+ {
+ __flush_xen_tlb_one(va);
+ va += PAGE_SIZE;
+ }
+ dsb(sy); /* Ensure completion of the TLB flush */
+ isb();
+}
+
#endif /* __ASM_ARM_FLUSHTLB_H__ */
/*
* Local variables:
@@ -233,44 +233,6 @@ static inline int clean_and_invalidate_dcache_va_range
: : "r" (_p), "m" (*_p)); \
} while (0)
-/*
- * Flush a range of VA's hypervisor mappings from the TLB of the local
- * processor.
- */
-static inline void flush_xen_tlb_range_va_local(vaddr_t va,
- unsigned long size)
-{
- vaddr_t end = va + size;
-
- dsb(sy); /* Ensure preceding are visible */
- while ( va < end )
- {
- __flush_xen_tlb_one_local(va);
- va += PAGE_SIZE;
- }
- dsb(sy); /* Ensure completion of the TLB flush */
- isb();
-}
-
-/*
- * Flush a range of VA's hypervisor mappings from the TLB of all
- * processors in the inner-shareable domain.
- */
-static inline void flush_xen_tlb_range_va(vaddr_t va,
- unsigned long size)
-{
- vaddr_t end = va + size;
-
- dsb(sy); /* Ensure preceding are visible */
- while ( va < end )
- {
- __flush_xen_tlb_one(va);
- va += PAGE_SIZE;
- }
- dsb(sy); /* Ensure completion of the TLB flush */
- isb();
-}
-
/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn, bool sync_icache);
At the moment, TLB helpers are scattered across 2 headers: page.h (for Xen TLB helpers) and tlbflush.h (for guest TLB helpers). This patch gathers all of them in tlbflush.h. This will help to unify and update the logic of the helpers in follow-up patches. Signed-off-by: Julien Grall <julien.grall@arm.com> --- xen/include/asm-arm/arm32/flushtlb.h | 22 +++++++++++++++++++++ xen/include/asm-arm/arm32/page.h | 22 --------------------- xen/include/asm-arm/arm64/flushtlb.h | 23 ++++++++++++++++++++++ xen/include/asm-arm/arm64/page.h | 23 ---------------------- xen/include/asm-arm/flushtlb.h | 38 ++++++++++++++++++++++++++++++++++++ xen/include/asm-arm/page.h | 38 ------------------------------------ 6 files changed, 83 insertions(+), 83 deletions(-)