Message ID | 20190417175815.16905-3-julien.grall@arm.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | xen/arm: TLB flush helpers rework | expand |
Hello Julien, On 17.04.19 20:58, Julien Grall wrote: > The function flush_xen_text_tlb_local() has been misused and will result > to invalidate the instruction cache more than necessary. > > For instance, there are no need to invalidate the instruction cache if > we are setting SCTLR_EL2.WXN. > > There are effectively only one caller (i.e free_init_memory() would > who need to invalidate the instruction cache. > > So rather than keeping around the function flush_xen_text_tlb_local() > around, replace it with call to flush_xen_tlb_local() and explicitely > flush the cache when necessary. > > Signed-off-by: Julien Grall <julien.grall@arm.com> > --- > xen/arch/arm/mm.c | 17 ++++++++++++++--- > xen/include/asm-arm/arm32/page.h | 23 +++++++++-------------- > xen/include/asm-arm/arm64/page.h | 21 +++++---------------- > 3 files changed, 28 insertions(+), 33 deletions(-) > > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c > index 93ad118183..dfbe39c70a 100644 > --- a/xen/arch/arm/mm.c > +++ b/xen/arch/arm/mm.c > @@ -610,8 +610,12 @@ void __init remove_early_mappings(void) > static void xen_pt_enforce_wnx(void) > { > WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2); > - /* Flush everything after setting WXN bit. */ > - flush_xen_text_tlb_local(); > + /* > + * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized > + * before flushing the TLBs. I'm not sure about the comment above, it looks a bit confusing to me. As per my understanding flushing TLB is called to remove cached entries together with cached SCTLR_EL2.WXN. And isb before to tlb flush ensures that WXN is set prior to flush so that all new TLB entries will be fetched with this flag set. 
> + */ > + isb(); > + flush_xen_data_tlb_local(); > } > > extern void switch_ttbr(uint64_t ttbr); > @@ -1123,7 +1127,7 @@ static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg) > } > write_pte(xen_xenmap + i, pte); > } > - flush_xen_text_tlb_local(); > + flush_xen_data_tlb_local(); > } > > /* Release all __init and __initdata ranges to be reused */ > @@ -1136,6 +1140,13 @@ void free_init_memory(void) > uint32_t *p; > > set_pte_flags_on_range(__init_begin, len, mg_rw); > + > + /* > + * From now on, init will not be used for execution anymore, > + * so nuke the instruction cache to remove entries related to init. > + */ > + invalidate_icache_local(); > + > #ifdef CONFIG_ARM_32 > /* udf instruction i.e (see A8.8.247 in ARM DDI 0406C.c) */ > insn = 0xe7f000f0; > diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h > index ea4b312c70..40a77daa9d 100644 > --- a/xen/include/asm-arm/arm32/page.h > +++ b/xen/include/asm-arm/arm32/page.h > @@ -46,24 +46,19 @@ static inline void invalidate_icache(void) > } > > /* > - * Flush all hypervisor mappings from the TLB and branch predictor of > - * the local processor. > - * > - * This is needed after changing Xen code mappings. > - * > - * The caller needs to issue the necessary DSB and D-cache flushes > - * before calling flush_xen_text_tlb. > + * Invalidate all instruction caches on the local processor to PoU. > + * We also need to flush the branch predictor for ARMv7 as it may be > + * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b). 
> */ > -static inline void flush_xen_text_tlb_local(void) > +static inline void invalidate_icache_local(void) > { > asm volatile ( > - "isb;" /* Ensure synchronization with previous changes to text */ > - CMD_CP32(TLBIALLH) /* Flush hypervisor TLB */ > - CMD_CP32(ICIALLU) /* Flush I-cache */ > - CMD_CP32(BPIALL) /* Flush branch predictor */ > - "dsb;" /* Ensure completion of TLB+BP flush */ > - "isb;" > + CMD_CP32(ICIALLU) /* Flush I-cache. */ > + CMD_CP32(BPIALL) /* Flush branch predictor. */ > : : : "memory"); > + > + dsb(nsh); /* Ensure completion of the flush I-cache */ > + isb(); /* Synchronize fetched instruction stream. */ > } > > /* > diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h > index 23d778154d..6c36d0210f 100644 > --- a/xen/include/asm-arm/arm64/page.h > +++ b/xen/include/asm-arm/arm64/page.h > @@ -37,23 +37,12 @@ static inline void invalidate_icache(void) > isb(); > } > > -/* > - * Flush all hypervisor mappings from the TLB of the local processor. > - * > - * This is needed after changing Xen code mappings. > - * > - * The caller needs to issue the necessary DSB and D-cache flushes > - * before calling flush_xen_text_tlb. > - */ > -static inline void flush_xen_text_tlb_local(void) > +/* Invalidate all instruction caches on the local processor to PoU */ > +static inline void invalidate_icache_local(void) > { > - asm volatile ( > - "isb;" /* Ensure synchronization with previous changes to text */ > - "tlbi alle2;" /* Flush hypervisor TLB */ > - "ic iallu;" /* Flush I-cache */ > - "dsb sy;" /* Ensure completion of TLB flush */ > - "isb;" > - : : : "memory"); > + asm volatile ("ic iallu"); > + dsb(nsh); /* Ensure completion of the I-cache flush */ > + isb(); > } > > /* > With minor notes, Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>
On 25/04/2019 19:00, Andrii Anisov wrote: > Hello Julien, Hi, > On 17.04.19 20:58, Julien Grall wrote: >> The function flush_xen_text_tlb_local() has been misused and will result >> to invalidate the instruction cache more than necessary. >> >> For instance, there are no need to invalidate the instruction cache if >> we are setting SCTLR_EL2.WXN. >> >> There are effectively only one caller (i.e free_init_memory() would >> who need to invalidate the instruction cache. >> >> So rather than keeping around the function flush_xen_text_tlb_local() >> around, replace it with call to flush_xen_tlb_local() and explicitely >> flush the cache when necessary. >> >> Signed-off-by: Julien Grall <julien.grall@arm.com> >> --- >> xen/arch/arm/mm.c | 17 ++++++++++++++--- >> xen/include/asm-arm/arm32/page.h | 23 +++++++++-------------- >> xen/include/asm-arm/arm64/page.h | 21 +++++---------------- >> 3 files changed, 28 insertions(+), 33 deletions(-) >> >> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c >> index 93ad118183..dfbe39c70a 100644 >> --- a/xen/arch/arm/mm.c >> +++ b/xen/arch/arm/mm.c >> @@ -610,8 +610,12 @@ void __init remove_early_mappings(void) >> static void xen_pt_enforce_wnx(void) >> { >> WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2); >> - /* Flush everything after setting WXN bit. */ >> - flush_xen_text_tlb_local(); >> + /* >> + * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized >> + * before flushing the TLBs. > I'm not sure about the comment above, it looks a bit confusing to me. > As per my understanding flushing TLB is called to remove cached entries > together with cached SCTLR_EL2.WXN. > And isb before to tlb flush ensures that WXN is set prior to flush so > that all new TLB entries will be fetched > with this flag set. The understanding is correct. I am not entirely sure how I can improve the comment. To be honest, I am not entirely sure whether the isb() is necessary here. 
At worst, an entry of an existing mapping is not cached with the SCTLR_EL2.WXN set. This may only defer the problem if an entry is incorrectly configured before the flush. This is done early at boot (and will be done much earlier in [1]). So I am kind of tempted to drop the isb() here. [...] > With minor notes, > > Reviewed-by: Andrii Anisov <andrii_anisov@epam.com> > Thank you! Cheers, [1] https://lists.xenproject.org/archives/html/xen-devel/2019-04/msg01667.html
On 25.04.19 23:37, Julien Grall wrote: > The understanding is correct. I am not entirely sure how I can improve > the comment. I've re-read the comment again, and realized it actually fits. So let it be. > > To be honest, I am not entirely sure whether the isb() is necessary > here. At worst, an entry of an existing mappings is not cached with the > SCTLR_EL2.WXN set. This may only defer the problem is an entry is > incorrectly configured before the flush. This is done early at boot (and > will be done much earlier in [1]). Well, following this logic we may conclude that the TLB flush is not needed here at all. Because entries that already reached the TLB were configured before, and are believed valid. All the next ones will have WXN set. But in this case, an ISB is needed as an exact point from which all new TLB entries will have WXN. > So I am kind of tempted to drop the > isb() here.
Julien, Sorry, reply format is broken. It should be as following: On 26.04.19 16:50, Andrii Anisov wrote: >> To be honest, I am not entirely sure whether the isb() is necessary >> here. At worst, an entry of an existing mappings is not cached with the >> SCTLR_EL2.WXN set. This may only defer the problem is an entry is >> incorrectly configured before the flush. This is done early at boot (and >> will be done much earlier in [1]). Well, following this logic we may finish with TLB flush is not needed here at all. Because entries, already reached TLB, were configured before, and are believed valid. All the next will have WXN set. But in this case, ISB is needed as an exact point from which all new TLB entries will have WXN. >> So I am kind of tempted to drop the >> isb() here.
On 26/04/2019 15:15, Andrii Anisov wrote: > Julien, > > Sorry, reply format is broken. It should be as following: > > On 26.04.19 16:50, Andrii Anisov wrote: >>> To be honest, I am not entirely sure whether the isb() is necessary >>> here. At worst, an entry of an existing mappings is not cached with the >>> SCTLR_EL2.WXN set. This may only defer the problem is an entry is >>> incorrectly configured before the flush. This is done early at boot (and >>> will be done much earlier in [1]). > > > Well, following this logic we may finish with TLB flush is not needed here at > all. Because entries, already reached TLB, were configured before, and are > believed valid. All the next will have WXN set. > But in this case, ISB is needed as an exact point from which all new TLB entries > will have WXN. I am not sure to follow your point here. It wasn't clear to me whether the isb() between setting SCTLR_EL2.WXN and TLB flush is strictly necessary. The TLB flush contain an isb() so, it may has well be enough here. However, speaking with Mark Rutland, it would be best to keep the sequence suggest in this patch. So SCTLR_EL2.WXN is visible before the flush and the flush will ensure all current entries are removed. So none of the entries should have WXN cleared. So I will keep this patch as is. Cheers,
On 26.04.19 17:31, Julien Grall wrote: > However, speaking with Mark Rutland, it would be best to keep the sequence suggest in this patch. So SCTLR_EL2.WXN is visible before the flush and the flush will ensure all current entries are removed. So none of the entries should have WXN cleared. > > So I will keep this patch as is. And it is right from my point of view.
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index 93ad118183..dfbe39c70a 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -610,8 +610,12 @@ void __init remove_early_mappings(void) static void xen_pt_enforce_wnx(void) { WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2); - /* Flush everything after setting WXN bit. */ - flush_xen_text_tlb_local(); + /* + * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized + * before flushing the TLBs. + */ + isb(); + flush_xen_data_tlb_local(); } extern void switch_ttbr(uint64_t ttbr); @@ -1123,7 +1127,7 @@ static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg) } write_pte(xen_xenmap + i, pte); } - flush_xen_text_tlb_local(); + flush_xen_data_tlb_local(); } /* Release all __init and __initdata ranges to be reused */ @@ -1136,6 +1140,13 @@ void free_init_memory(void) uint32_t *p; set_pte_flags_on_range(__init_begin, len, mg_rw); + + /* + * From now on, init will not be used for execution anymore, + * so nuke the instruction cache to remove entries related to init. + */ + invalidate_icache_local(); + #ifdef CONFIG_ARM_32 /* udf instruction i.e (see A8.8.247 in ARM DDI 0406C.c) */ insn = 0xe7f000f0; diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h index ea4b312c70..40a77daa9d 100644 --- a/xen/include/asm-arm/arm32/page.h +++ b/xen/include/asm-arm/arm32/page.h @@ -46,24 +46,19 @@ static inline void invalidate_icache(void) } /* - * Flush all hypervisor mappings from the TLB and branch predictor of - * the local processor. - * - * This is needed after changing Xen code mappings. - * - * The caller needs to issue the necessary DSB and D-cache flushes - * before calling flush_xen_text_tlb. + * Invalidate all instruction caches on the local processor to PoU. + * We also need to flush the branch predictor for ARMv7 as it may be + * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b). 
*/ -static inline void flush_xen_text_tlb_local(void) +static inline void invalidate_icache_local(void) { asm volatile ( - "isb;" /* Ensure synchronization with previous changes to text */ - CMD_CP32(TLBIALLH) /* Flush hypervisor TLB */ - CMD_CP32(ICIALLU) /* Flush I-cache */ - CMD_CP32(BPIALL) /* Flush branch predictor */ - "dsb;" /* Ensure completion of TLB+BP flush */ - "isb;" + CMD_CP32(ICIALLU) /* Flush I-cache. */ + CMD_CP32(BPIALL) /* Flush branch predictor. */ : : : "memory"); + + dsb(nsh); /* Ensure completion of the flush I-cache */ + isb(); /* Synchronize fetched instruction stream. */ } /* diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h index 23d778154d..6c36d0210f 100644 --- a/xen/include/asm-arm/arm64/page.h +++ b/xen/include/asm-arm/arm64/page.h @@ -37,23 +37,12 @@ static inline void invalidate_icache(void) isb(); } -/* - * Flush all hypervisor mappings from the TLB of the local processor. - * - * This is needed after changing Xen code mappings. - * - * The caller needs to issue the necessary DSB and D-cache flushes - * before calling flush_xen_text_tlb. - */ -static inline void flush_xen_text_tlb_local(void) +/* Invalidate all instruction caches on the local processor to PoU */ +static inline void invalidate_icache_local(void) { - asm volatile ( - "isb;" /* Ensure synchronization with previous changes to text */ - "tlbi alle2;" /* Flush hypervisor TLB */ - "ic iallu;" /* Flush I-cache */ - "dsb sy;" /* Ensure completion of TLB flush */ - "isb;" - : : : "memory"); + asm volatile ("ic iallu"); + dsb(nsh); /* Ensure completion of the I-cache flush */ + isb(); } /*
The function flush_xen_text_tlb_local() has been misused and will result in invalidating the instruction cache more than necessary. For instance, there is no need to invalidate the instruction cache if we are setting SCTLR_EL2.WXN. There is effectively only one caller (i.e. free_init_memory()) that needs to invalidate the instruction cache. So rather than keeping the function flush_xen_text_tlb_local() around, replace it with a call to flush_xen_tlb_local() and explicitly flush the cache when necessary. Signed-off-by: Julien Grall <julien.grall@arm.com> --- xen/arch/arm/mm.c | 17 ++++++++++++++--- xen/include/asm-arm/arm32/page.h | 23 +++++++++-------------- xen/include/asm-arm/arm64/page.h | 21 +++++---------------- 3 files changed, 28 insertions(+), 33 deletions(-)