@@ -45,6 +45,28 @@ static inline void flush_all_guests_tlb(void)
isb();
}

+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
+{
+    asm volatile("dsb;"   /* Ensure preceding are visible */
+                 CMD_CP32(TLBIALLH)
+                 "dsb;"   /* Ensure completion of the TLB flush */
+                 "isb;"
+                 : : : "memory");
+}
+
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
+{
+    asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
+}
+
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
+{
+    asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
+}
+
#endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */
/*
* Local variables:
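Note: the arm32 helpers above are built from the CP15 accessor macros in cpregs.h. As an illustrative sketch only, assuming the usual ARMv7 encoding of TLBIALLH (p15, 4, c8, c7, 0, with the Rt value ignored), flush_xen_tlb_local() is roughly equivalent to the following; the name flush_xen_tlb_local_expanded is hypothetical:

    /* Illustrative expansion only; not part of the patch. */
    static inline void flush_xen_tlb_local_expanded(void)
    {
        unsigned long zero = 0;

        asm volatile("dsb;"                       /* Preceding writes visible */
                     "mcr p15, 4, %0, c8, c7, 0;" /* TLBIALLH; Rt is ignored */
                     "dsb;"                       /* TLB flush completed */
                     "isb;"
                     : : "r" (zero) : "memory");
    }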
@@ -61,28 +61,6 @@ static inline void invalidate_icache_local(void)
isb(); /* Synchronize fetched instruction stream. */
}

-/* Flush all hypervisor mappings from the TLB of the local processor. */
-static inline void flush_xen_tlb_local(void)
-{
-    asm volatile("dsb;"   /* Ensure preceding are visible */
-                 CMD_CP32(TLBIALLH)
-                 "dsb;"   /* Ensure completion of the TLB flush */
-                 "isb;"
-                 : : : "memory");
-}
-
-/* Flush TLB of local processor for address va. */
-static inline void __flush_xen_tlb_one_local(vaddr_t va)
-{
-    asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
-}
-
-/* Flush TLB of all processors in the inner-shareable domain for address va. */
-static inline void __flush_xen_tlb_one(vaddr_t va)
-{
-    asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
-}
-
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
@@ -45,6 +45,29 @@ static inline void flush_all_guests_tlb(void)
: : : "memory");
}

+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
+{
+    asm volatile (
+        "dsb sy;"     /* Ensure visibility of PTE writes */
+        "tlbi alle2;" /* Flush hypervisor TLB */
+        "dsb sy;"     /* Ensure completion of TLB flush */
+        "isb;"
+        : : : "memory");
+}
+
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
+{
+ asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
+}
+
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
+{
+ asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
+}
+
#endif /* __ASM_ARM_ARM64_FLUSHTLB_H__ */
/*
* Local variables:
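Note: on arm64 the VA operand is shifted right by PAGE_SHIFT because TLBI by VA takes the virtual page number (VA[55:12]) in the low bits of its register operand. The __-prefixed helpers issue only the TLBI itself; a caller is expected to bracket them with barriers, as the common range helpers added later in this patch do. A minimal sketch, with a hypothetical wrapper name:

    /* Sketch of a hypothetical synchronous wrapper; not part of the patch. */
    static inline void flush_xen_tlb_one_local_sync(vaddr_t va)
    {
        dsb(sy);                       /* Make the PTE writes visible first */
        __flush_xen_tlb_one_local(va); /* tlbi vae2 on the page containing va */
        dsb(sy);                       /* Wait for the TLB flush to complete */
        isb();                         /* Resynchronise the instruction stream */
    }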
@@ -45,29 +45,6 @@ static inline void invalidate_icache_local(void)
isb();
}

-/* Flush all hypervisor mappings from the TLB of the local processor. */
-static inline void flush_xen_tlb_local(void)
-{
-    asm volatile (
-        "dsb sy;"     /* Ensure visibility of PTE writes */
-        "tlbi alle2;" /* Flush hypervisor TLB */
-        "dsb sy;"     /* Ensure completion of TLB flush */
-        "isb;"
-        : : : "memory");
-}
-
-/* Flush TLB of local processor for address va. */
-static inline void __flush_xen_tlb_one_local(vaddr_t va)
-{
- asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
-}
-
-/* Flush TLB of all processors in the inner-shareable domain for address va. */
-static inline void __flush_xen_tlb_one(vaddr_t va)
-{
- asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
-}
-
/* Ask the MMU to translate a VA for us */
static inline uint64_t __va_to_par(vaddr_t va)
{
@@ -28,6 +28,44 @@ static inline void page_set_tlbflush_timestamp(struct page_info *page)
/* Flush specified CPUs' TLBs */
void flush_tlb_mask(const cpumask_t *mask);

+/*
+ * Flush a range of VA's hypervisor mappings from the TLB of the local
+ * processor.
+ */
+static inline void flush_xen_tlb_range_va_local(vaddr_t va,
+                                                unsigned long size)
+{
+    vaddr_t end = va + size;
+
+    dsb(sy); /* Ensure preceding are visible */
+    while ( va < end )
+    {
+        __flush_xen_tlb_one_local(va);
+        va += PAGE_SIZE;
+    }
+    dsb(sy); /* Ensure completion of the TLB flush */
+    isb();
+}
+
+/*
+ * Flush a range of VA's hypervisor mappings from the TLB of all
+ * processors in the inner-shareable domain.
+ */
+static inline void flush_xen_tlb_range_va(vaddr_t va,
+                                          unsigned long size)
+{
+    vaddr_t end = va + size;
+
+    dsb(sy); /* Ensure preceding are visible */
+    while ( va < end )
+    {
+        __flush_xen_tlb_one(va);
+        va += PAGE_SIZE;
+    }
+    dsb(sy); /* Ensure completion of the TLB flush */
+    isb();
+}
+
#endif /* __ASM_ARM_FLUSHTLB_H__ */
/*
* Local variables:
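Note: a hypothetical caller of the new range helper, for illustration only. va is assumed to be page-aligned, since the loop advances one PAGE_SIZE at a time:

    /* Hypothetical caller; not part of the patch. */
    static void update_xen_mapping(vaddr_t va, unsigned long size)
    {
        /* ... rewrite the hypervisor PTEs backing [va, va + size) ... */
        flush_xen_tlb_range_va_local(va, size); /* Drop stale local translations */
    }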
@@ -233,44 +233,6 @@ static inline int clean_and_invalidate_dcache_va_range
: : "r" (_p), "m" (*_p)); \
} while (0)

-/*
- * Flush a range of VA's hypervisor mappings from the TLB of the local
- * processor.
- */
-static inline void flush_xen_tlb_range_va_local(vaddr_t va,
-                                                unsigned long size)
-{
-    vaddr_t end = va + size;
-
-    dsb(sy); /* Ensure preceding are visible */
-    while ( va < end )
-    {
-        __flush_xen_tlb_one_local(va);
-        va += PAGE_SIZE;
-    }
-    dsb(sy); /* Ensure completion of the TLB flush */
-    isb();
-}
-
-/*
- * Flush a range of VA's hypervisor mappings from the TLB of all
- * processors in the inner-shareable domain.
- */
-static inline void flush_xen_tlb_range_va(vaddr_t va,
-                                          unsigned long size)
-{
-    vaddr_t end = va + size;
-
-    dsb(sy); /* Ensure preceding are visible */
-    while ( va < end )
-    {
-        __flush_xen_tlb_one(va);
-        va += PAGE_SIZE;
-    }
-    dsb(sy); /* Ensure completion of the TLB flush */
-    isb();
-}
-
/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn, bool sync_icache);
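Note: for completeness, a hypothetical example of the inner-shareable variant, which is the right choice when the modified mapping may be walked by every processor rather than just the local one:

    /* Hypothetical caller; not part of the patch. */
    static void remap_shared_region(vaddr_t va, unsigned long size)
    {
        /* ... update PTEs that every processor may walk ... */
        flush_xen_tlb_range_va(va, size); /* Flush the whole inner-shareable domain */
    }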