Message ID | 20170111144118.17062-3-cov@codeaurora.org (mailing list archive)
---|---
State | New, archived
Hi Christopher,

On Wed, Jan 11, 2017 at 09:41:16AM -0500, Christopher Covington wrote:
> This refactoring will allow an errata workaround that repeats tlbi dsb
> sequences to only change one location. This is not intended to change the
> generated assembly and comparison of before and after preprocessor output
> of arch/arm64/mm/mmu.c and vmlinux objdump shows no functional changes.
>
> Signed-off-by: Christopher Covington <cov@codeaurora.org>
> ---
>  arch/arm64/include/asm/tlbflush.h | 104 +++++++++++++++++++++++++-------------
>  1 file changed, 69 insertions(+), 35 deletions(-)
>
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index deab523..f28813c 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -25,22 +25,69 @@
>  #include <asm/cputype.h>
>
>  /*
> - * Raw TLBI operations.
> + * Raw TLBI, DSB operations
>   *
> - * Where necessary, use the __tlbi() macro to avoid asm()
> - * boilerplate. Drivers and most kernel code should use the TLB
> - * management routines in preference to the macro below.
> + * Where necessary, use __tlbi_*dsb() macros to avoid asm() boilerplate.
> + * Drivers and most kernel code should use the TLB management routines in
> + * preference to the macros below.
>   *
> - * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
> - * on whether a particular TLBI operation takes an argument or
> - * not. The macros handles invoking the asm with or without the
> - * register argument as appropriate.
> + * The __tlbi_dsb() macro handles invoking the asm without any register
> + * argument, with a single register argument, and with start (included)
> + * and end (excluded) range of register arguments. For example:
> + *
> + * __tlbi_dsb(op, attr)
> + *
> + *	tlbi op
> + *	dsb attr
> + *
> + * __tlbi_dsb(op, attr, addr)
> + *
> + *	mov %[addr], =addr
> + *	tlbi op, %[addr]
> + *	dsb attr
> + *
> + * __tlbi_range_dsb(op, attr, start, end)
> + *
> + *	mov %[arg], =start
> + *	mov %[end], =end
> + * for:
> + *	tlbi op, %[addr]
> + *	add %[addr], %[addr], #(1 << (PAGE_SHIFT - 12))
> + *	cmp %[addr], %[end]
> + *	b.ne for
> + *	dsb attr
>   */
> -#define __TLBI_0(op, arg)		asm ("tlbi " #op)
> -#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
> -#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)
>
> -#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
> +#define __TLBI_FOR_0(ig0, ig1, ig2)
> +#define __TLBI_INSTR_0(op, ig1, ig2)	"tlbi " #op
> +#define __TLBI_IO_0(ig0, ig1, ig2)	: :
> +
> +#define __TLBI_FOR_1(ig0, ig1, ig2)
> +#define __TLBI_INSTR_1(op, ig0, ig1)	"tlbi " #op ", %0"
> +#define __TLBI_IO_1(ig0, arg, ig1)	: : "r" (arg)
> +
> +#define __TLBI_FOR_2(ig0, start, ig1)	unsigned long addr;		\
> +					for (addr = start; addr < end;	\
> +					     addr += 1 << (PAGE_SHIFT - 12))
> +#define __TLBI_INSTR_2(op, ig0, ig1)	"tlbi " #op ", %0"
> +#define __TLBI_IO_2(ig0, ig1, ig2)	: : "r" (addr)
> +
> +#define __TLBI_FOR_N(op, a1, a2, n, ...)	__TLBI_FOR_##n(op, a1, a2)
> +#define __TLBI_INSTR_N(op, a1, a2, n, ...)	__TLBI_INSTR_##n(op, a1, a2)
> +#define __TLBI_IO_N(op, a1, a2, n, ...)		__TLBI_IO_##n(op, a1, a2)
> +
> +#define __TLBI_FOR(op, ...)	__TLBI_FOR_N(op, ##__VA_ARGS__, 2, 1, 0)
> +#define __TLBI_INSTR(op, ...)	__TLBI_INSTR_N(op, ##__VA_ARGS__, 2, 1, 0)
> +#define __TLBI_IO(op, ...)	__TLBI_IO_N(op, ##__VA_ARGS__, 2, 1, 0)
> +
> +#define __tlbi_asm_dsb(as, op, attr, ...)	do {			\
> +	__TLBI_FOR(op, ##__VA_ARGS__)					\
> +		asm (__TLBI_INSTR(op, ##__VA_ARGS__)			\
> +		     __TLBI_IO(op, ##__VA_ARGS__));			\
> +	asm volatile ( as "\ndsb " #attr "\n"				\
> +		       : : : "memory"); } while (0)
> +
> +#define __tlbi_dsb(...)		__tlbi_asm_dsb("", ##__VA_ARGS__)

I can't deny that this is cool, but ultimately it's completely unreadable.
What I was thinking you'd do would be make __tlbi expand to:

	tlbi
	dsb
	tlbi
	dsb

for Falkor, and:

	tlbi
	nop
	nop
	nop

for everybody else.

Wouldn't that localise this change sufficiently that you wouldn't need
to change all the callers and encode the looping in your cpp macros?

I realise you get an extra dsb in some places with that change, but I'd
like to see numbers for the impact of that on top of the workaround. If
it's an issue, then an alternative sequence would be:

	tlbi
	dsb
	tlbi

and you'd rely on the existing dsb to complete that.

Having said that, I don't understand how your current loop code works
when the workaround is applied. AFAICT, you end up emitting something
like:

	dsb ishst
	for i in 0 to n
		tlbi va+i
		dsb
	tlbi va+n
	dsb

which looks wrong to me. Am I misreading something here?

Will
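The shape Will describes can be patched into __tlbi itself with the kernel's ALTERNATIVE() instruction-patching macro, so only affected CPUs execute the extra sequence. The sketch below is illustrative only, not code from this thread: the capability and config names (ARM64_WORKAROUND_REPEAT_TLBI, CONFIG_QCOM_FALKOR_ERRATUM_1009) are placeholders, and the trailing dsb/nop pair is omitted, matching the lighter "tlbi; dsb; tlbi" variant that relies on the caller's existing dsb:

/*
 * Sketch of the __tlbi-level workaround, assuming a dedicated errata
 * capability bit and Kconfig option (placeholder names). Unaffected CPUs
 * see two nops in place of the repeated dsb; tlbi.
 */
#include <asm/alternative.h>

#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"				\
		   ALTERNATIVE("nop\n			nop",			\
			       "dsb ish\n		tlbi " #op,		\
			       ARM64_WORKAROUND_REPEAT_TLBI,			\
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)			\
			    : : )

#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"			\
		   ALTERNATIVE("nop\n			nop",			\
			       "dsb ish\n		tlbi " #op ", %0",	\
			       ARM64_WORKAROUND_REPEAT_TLBI,			\
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)			\
			    : : "r" (arg))

With this shape, none of the callers in tlbflush.h need to change and no looping has to be encoded in the preprocessor.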
Hi Will,

On 01/12/2017 11:58 AM, Will Deacon wrote:
> Hi Christopher,
>
> On Wed, Jan 11, 2017 at 09:41:16AM -0500, Christopher Covington wrote:
>> This refactoring will allow an errata workaround that repeats tlbi dsb
>> sequences to only change one location. This is not intended to change the
>> generated assembly and comparison of before and after preprocessor output
>> of arch/arm64/mm/mmu.c and vmlinux objdump shows no functional changes.
>>
>> Signed-off-by: Christopher Covington <cov@codeaurora.org>
>> ---
>>  arch/arm64/include/asm/tlbflush.h | 104 +++++++++++++++++++++++++-------------
>>  1 file changed, 69 insertions(+), 35 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
>> index deab523..f28813c 100644
>> --- a/arch/arm64/include/asm/tlbflush.h
>> +++ b/arch/arm64/include/asm/tlbflush.h
>> @@ -25,22 +25,69 @@
>>  #include <asm/cputype.h>
>>
>>  /*
>> - * Raw TLBI operations.
>> + * Raw TLBI, DSB operations
>>   *
>> - * Where necessary, use the __tlbi() macro to avoid asm()
>> - * boilerplate. Drivers and most kernel code should use the TLB
>> - * management routines in preference to the macro below.
>> + * Where necessary, use __tlbi_*dsb() macros to avoid asm() boilerplate.
>> + * Drivers and most kernel code should use the TLB management routines in
>> + * preference to the macros below.
>>   *
>> - * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
>> - * on whether a particular TLBI operation takes an argument or
>> - * not. The macros handles invoking the asm with or without the
>> - * register argument as appropriate.
>> + * The __tlbi_dsb() macro handles invoking the asm without any register
>> + * argument, with a single register argument, and with start (included)
>> + * and end (excluded) range of register arguments. For example:
>> + *
>> + * __tlbi_dsb(op, attr)
>> + *
>> + *	tlbi op
>> + *	dsb attr
>> + *
>> + * __tlbi_dsb(op, attr, addr)
>> + *
>> + *	mov %[addr], =addr
>> + *	tlbi op, %[addr]
>> + *	dsb attr
>> + *
>> + * __tlbi_range_dsb(op, attr, start, end)
>> + *
>> + *	mov %[arg], =start
>> + *	mov %[end], =end
>> + * for:
>> + *	tlbi op, %[addr]
>> + *	add %[addr], %[addr], #(1 << (PAGE_SHIFT - 12))
>> + *	cmp %[addr], %[end]
>> + *	b.ne for
>> + *	dsb attr
>>   */
>> -#define __TLBI_0(op, arg)		asm ("tlbi " #op)
>> -#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
>> -#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)
>>
>> -#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
>> +#define __TLBI_FOR_0(ig0, ig1, ig2)
>> +#define __TLBI_INSTR_0(op, ig1, ig2)	"tlbi " #op
>> +#define __TLBI_IO_0(ig0, ig1, ig2)	: :
>> +
>> +#define __TLBI_FOR_1(ig0, ig1, ig2)
>> +#define __TLBI_INSTR_1(op, ig0, ig1)	"tlbi " #op ", %0"
>> +#define __TLBI_IO_1(ig0, arg, ig1)	: : "r" (arg)
>> +
>> +#define __TLBI_FOR_2(ig0, start, ig1)	unsigned long addr;		\
>> +					for (addr = start; addr < end;	\
>> +					     addr += 1 << (PAGE_SHIFT - 12))
>> +#define __TLBI_INSTR_2(op, ig0, ig1)	"tlbi " #op ", %0"
>> +#define __TLBI_IO_2(ig0, ig1, ig2)	: : "r" (addr)
>> +
>> +#define __TLBI_FOR_N(op, a1, a2, n, ...)	__TLBI_FOR_##n(op, a1, a2)
>> +#define __TLBI_INSTR_N(op, a1, a2, n, ...)	__TLBI_INSTR_##n(op, a1, a2)
>> +#define __TLBI_IO_N(op, a1, a2, n, ...)		__TLBI_IO_##n(op, a1, a2)
>> +
>> +#define __TLBI_FOR(op, ...)	__TLBI_FOR_N(op, ##__VA_ARGS__, 2, 1, 0)
>> +#define __TLBI_INSTR(op, ...)	__TLBI_INSTR_N(op, ##__VA_ARGS__, 2, 1, 0)
>> +#define __TLBI_IO(op, ...)	__TLBI_IO_N(op, ##__VA_ARGS__, 2, 1, 0)
>> +
>> +#define __tlbi_asm_dsb(as, op, attr, ...)	do {			\
>> +	__TLBI_FOR(op, ##__VA_ARGS__)					\
>> +		asm (__TLBI_INSTR(op, ##__VA_ARGS__)			\
>> +		     __TLBI_IO(op, ##__VA_ARGS__));			\
>> +	asm volatile ( as "\ndsb " #attr "\n"				\
>> +		       : : : "memory"); } while (0)
>> +
>> +#define __tlbi_dsb(...)		__tlbi_asm_dsb("", ##__VA_ARGS__)
>
> I can't deny that this is cool, but ultimately it's completely unreadable.
> What I was thinking you'd do would be make __tlbi expand to:
>
> 	tlbi
> 	dsb
> 	tlbi
> 	dsb
>
> for Falkor, and:
>
> 	tlbi
> 	nop
> 	nop
> 	nop
>
> for everybody else.

Thanks for the suggestion. So would __tlbi take a dsb sharability argument in
your proposal? Or would it be communicated in some other fashion, maybe inferred
from the tlbi argument? Or would the workaround dsbs all be the worst/broadest
case?

> Wouldn't that localise this change sufficiently that you wouldn't need
> to change all the callers and encode the looping in your cpp macros?
>
> I realise you get an extra dsb in some places with that change, but I'd
> like to see numbers for the impact of that on top of the workaround. If
> it's an issue, then an alternative sequence would be:
>
> 	tlbi
> 	dsb
> 	tlbi
>
> and you'd rely on the existing dsb to complete that.
>
> Having said that, I don't understand how your current loop code works
> when the workaround is applied. AFAICT, you end up emitting something
> like:
>
> 	dsb ishst
> 	for i in 0 to n
> 		tlbi va+i
> 		dsb
> 	tlbi va+n
> 	dsb
>
> which looks wrong to me. Am I misreading something here?

You're right, I am off by 1 << (PAGE_SHIFT - 12) here. I would need to
increment, compare, not take the loop branch (regular for loop stuff),
then decrement (missing) and perform TLB invalidation again (present but
using incorrect value).

Thanks,
Cov
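To make the off-by-one concrete: when the for loop exits, the loop variable has already stepped one stride past the last address that was invalidated, so the repeated invalidate has to go back one stride. A standalone sketch, not code from the patch; the function name is made up and kernel context (PAGE_SHIFT, the dsb() macro) is assumed:

/*
 * Repeat only the *last* invalidate after the loop, i.e. addr - stride,
 * not addr. Assumes start < end, as in __flush_tlb_range().
 */
static inline void repeat_last_tlbi_range(unsigned long start,
					  unsigned long end)
{
	const unsigned long stride = 1UL << (PAGE_SHIFT - 12);
	unsigned long addr;

	for (addr = start; addr < end; addr += stride)
		asm volatile("tlbi vae1is, %0" : : "r" (addr) : "memory");
	dsb(ish);

	/* After the loop, addr is one stride past the last address issued. */
	asm volatile("tlbi vae1is, %0" : : "r" (addr - stride) : "memory");
	dsb(ish);
}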
On Fri, Jan 13, 2017 at 10:12:36AM -0500, Christopher Covington wrote:
> On 01/12/2017 11:58 AM, Will Deacon wrote:
> > On Wed, Jan 11, 2017 at 09:41:16AM -0500, Christopher Covington wrote:
> >> +#define __tlbi_asm_dsb(as, op, attr, ...)	do {			\
> >> +	__TLBI_FOR(op, ##__VA_ARGS__)					\
> >> +		asm (__TLBI_INSTR(op, ##__VA_ARGS__)			\
> >> +		     __TLBI_IO(op, ##__VA_ARGS__));			\
> >> +	asm volatile ( as "\ndsb " #attr "\n"				\
> >> +		       : : : "memory"); } while (0)
> >> +
> >> +#define __tlbi_dsb(...)		__tlbi_asm_dsb("", ##__VA_ARGS__)
> >
> > I can't deny that this is cool, but ultimately it's completely unreadable.
> > What I was thinking you'd do would be make __tlbi expand to:
> >
> > 	tlbi
> > 	dsb
> > 	tlbi
> > 	dsb
> >
> > for Falkor, and:
> >
> > 	tlbi
> > 	nop
> > 	nop
> > 	nop
> >
> > for everybody else.
>
> Thanks for the suggestion. So would __tlbi take a dsb sharability argument in
> your proposal? Or would it be communicated in some other fashion, maybe inferred
> from the tlbi argument? Or would the workaround dsbs all be the worst/broadest
> case?

I think always using inner-shareable should be ok. If you wanted to optimise
this, you'd want to avoid the workaround altogether for non-shareable
invalidation, but that's fairly rare and I doubt you'd be able to measure
the impact.

> > Wouldn't that localise this change sufficiently that you wouldn't need
> > to change all the callers and encode the looping in your cpp macros?
> >
> > I realise you get an extra dsb in some places with that change, but I'd
> > like to see numbers for the impact of that on top of the workaround. If
> > it's an issue, then an alternative sequence would be:
> >
> > 	tlbi
> > 	dsb
> > 	tlbi
> >
> > and you'd rely on the existing dsb to complete that.
> >
> > Having said that, I don't understand how your current loop code works
> > when the workaround is applied. AFAICT, you end up emitting something
> > like:
> >
> > 	dsb ishst
> > 	for i in 0 to n
> > 		tlbi va+i
> > 		dsb
> > 	tlbi va+n
> > 	dsb
> >
> > which looks wrong to me. Am I misreading something here?
>
> You're right, I am off by 1 << (PAGE_SHIFT - 12) here. I would need to
> increment, compare, not take the loop branch (regular for loop stuff),
> then decrement (missing) and perform TLB invalidation again (present but
> using incorrect value).

It also strikes me as odd that you only need one extra TLBI after the loop
has finished, as opposed to a tlbi; dsb; tlbi loop body (which is what you'd
get if you modified __tlbi as I suggest).

Is it sufficient to have one extra TLBI after the loop and, if so, is the
performance impact of my suggestion therefore unacceptable?

Will
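For comparison with the sketch above, the loop body you would get by repeating the invalidate inside __tlbi itself (Will's suggestion) looks roughly like the following. Again an illustrative sketch rather than code from the series, with a made-up function name and kernel context assumed:

/*
 * Per-TLBI repeat: every iteration issues tlbi; dsb; tlbi, and the final
 * dsb(ish) that callers already execute completes the last invalidate.
 */
static inline void repeat_each_tlbi_range(unsigned long start,
					  unsigned long end)
{
	const unsigned long stride = 1UL << (PAGE_SHIFT - 12);
	unsigned long addr;

	for (addr = start; addr < end; addr += stride) {
		asm volatile("tlbi vae1is, %0" : : "r" (addr) : "memory");
		asm volatile("dsb ish" : : : "memory");
		asm volatile("tlbi vae1is, %0" : : "r" (addr) : "memory");
	}
	dsb(ish);
}

This is the trade-off Will is asking about: one extra TLBI in total versus an extra dsb and TLBI for every page in the range.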
Hi Will,

On 01/13/2017 11:12 AM, Will Deacon wrote:
> On Fri, Jan 13, 2017 at 10:12:36AM -0500, Christopher Covington wrote:
>> On 01/12/2017 11:58 AM, Will Deacon wrote:
>>> On Wed, Jan 11, 2017 at 09:41:16AM -0500, Christopher Covington wrote:
>>>> +#define __tlbi_asm_dsb(as, op, attr, ...)	do {			\
>>>> +	__TLBI_FOR(op, ##__VA_ARGS__)					\
>>>> +		asm (__TLBI_INSTR(op, ##__VA_ARGS__)			\
>>>> +		     __TLBI_IO(op, ##__VA_ARGS__));			\
>>>> +	asm volatile ( as "\ndsb " #attr "\n"				\
>>>> +		       : : : "memory"); } while (0)
>>>> +
>>>> +#define __tlbi_dsb(...)		__tlbi_asm_dsb("", ##__VA_ARGS__)
>>>
>>> I can't deny that this is cool, but ultimately it's completely unreadable.
>>> What I was thinking you'd do would be make __tlbi expand to:
>>>
>>> 	tlbi
>>> 	dsb
>>> 	tlbi
>>> 	dsb
>>>
>>> for Falkor, and:
>>>
>>> 	tlbi
>>> 	nop
>>> 	nop
>>> 	nop
>>>
>>> for everybody else.

I've implemented this (minus the last dsb / nop) in the next revision.

>> Thanks for the suggestion. So would __tlbi take a dsb sharability argument in
>> your proposal? Or would it be communicated in some other fashion, maybe inferred
>> from the tlbi argument? Or would the workaround dsbs all be the worst/broadest
>> case?
>
> I think always using inner-shareable should be ok. If you wanted to optimise
> this, you'd want to avoid the workaround altogether for non-shareable
> invalidation, but that's fairly rare and I doubt you'd be able to measure
> the impact.

I did not originally notice that Shanker's original workaround implementation
unnecessarily applies the workaround to non-shareable invalidations. They're
not affected by the erratum. But as you say, it's simpler to modify __tlbi
for all cases. I'm not currently worried about that performance impact.

>>> Wouldn't that localise this change sufficiently that you wouldn't need
>>> to change all the callers and encode the looping in your cpp macros?
>>>
>>> I realise you get an extra dsb in some places with that change, but I'd
>>> like to see numbers for the impact of that on top of the workaround. If
>>> it's an issue, then an alternative sequence would be:
>>>
>>> 	tlbi
>>> 	dsb
>>> 	tlbi
>>>
>>> and you'd rely on the existing dsb to complete that.
>>>
>>> Having said that, I don't understand how your current loop code works
>>> when the workaround is applied. AFAICT, you end up emitting something
>>> like:
>>>
>>> 	dsb ishst
>>> 	for i in 0 to n
>>> 		tlbi va+i
>>> 		dsb
>>> 	tlbi va+n
>>> 	dsb
>>>
>>> which looks wrong to me. Am I misreading something here?
>>
>> You're right, I am off by 1 << (PAGE_SHIFT - 12) here. I would need to
>> increment, compare, not take the loop branch (regular for loop stuff),
>> then decrement (missing) and perform TLB invalidation again (present but
>> using incorrect value).
>
> It also strikes me as odd that you only need one extra TLBI after the loop
> has finished, as opposed to a tlbi; dsb; tlbi loop body (which is what you'd
> get if you modified __tlbi as I suggest).
>
> Is it sufficient to have one extra TLBI after the loop and, if so, is the
> performance impact of my suggestion therefore unacceptable?

One is sufficient according to the errata documentation. I've described that
aspect in the commit message of the next revision. I've suggested colleagues
follow up regarding performance. But reliable functionality comes first.

Thanks,
Cov
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index deab523..f28813c 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -25,22 +25,69 @@
 #include <asm/cputype.h>
 
 /*
- * Raw TLBI operations.
+ * Raw TLBI, DSB operations
  *
- * Where necessary, use the __tlbi() macro to avoid asm()
- * boilerplate. Drivers and most kernel code should use the TLB
- * management routines in preference to the macro below.
+ * Where necessary, use __tlbi_*dsb() macros to avoid asm() boilerplate.
+ * Drivers and most kernel code should use the TLB management routines in
+ * preference to the macros below.
  *
- * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
- * on whether a particular TLBI operation takes an argument or
- * not. The macros handles invoking the asm with or without the
- * register argument as appropriate.
+ * The __tlbi_dsb() macro handles invoking the asm without any register
+ * argument, with a single register argument, and with start (included)
+ * and end (excluded) range of register arguments. For example:
+ *
+ * __tlbi_dsb(op, attr)
+ *
+ *	tlbi op
+ *	dsb attr
+ *
+ * __tlbi_dsb(op, attr, addr)
+ *
+ *	mov %[addr], =addr
+ *	tlbi op, %[addr]
+ *	dsb attr
+ *
+ * __tlbi_range_dsb(op, attr, start, end)
+ *
+ *	mov %[arg], =start
+ *	mov %[end], =end
+ * for:
+ *	tlbi op, %[addr]
+ *	add %[addr], %[addr], #(1 << (PAGE_SHIFT - 12))
+ *	cmp %[addr], %[end]
+ *	b.ne for
+ *	dsb attr
  */
-#define __TLBI_0(op, arg)		asm ("tlbi " #op)
-#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
-#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)
 
-#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
+#define __TLBI_FOR_0(ig0, ig1, ig2)
+#define __TLBI_INSTR_0(op, ig1, ig2)	"tlbi " #op
+#define __TLBI_IO_0(ig0, ig1, ig2)	: :
+
+#define __TLBI_FOR_1(ig0, ig1, ig2)
+#define __TLBI_INSTR_1(op, ig0, ig1)	"tlbi " #op ", %0"
+#define __TLBI_IO_1(ig0, arg, ig1)	: : "r" (arg)
+
+#define __TLBI_FOR_2(ig0, start, ig1)	unsigned long addr;		\
+					for (addr = start; addr < end;	\
+					     addr += 1 << (PAGE_SHIFT - 12))
+#define __TLBI_INSTR_2(op, ig0, ig1)	"tlbi " #op ", %0"
+#define __TLBI_IO_2(ig0, ig1, ig2)	: : "r" (addr)
+
+#define __TLBI_FOR_N(op, a1, a2, n, ...)	__TLBI_FOR_##n(op, a1, a2)
+#define __TLBI_INSTR_N(op, a1, a2, n, ...)	__TLBI_INSTR_##n(op, a1, a2)
+#define __TLBI_IO_N(op, a1, a2, n, ...)		__TLBI_IO_##n(op, a1, a2)
+
+#define __TLBI_FOR(op, ...)	__TLBI_FOR_N(op, ##__VA_ARGS__, 2, 1, 0)
+#define __TLBI_INSTR(op, ...)	__TLBI_INSTR_N(op, ##__VA_ARGS__, 2, 1, 0)
+#define __TLBI_IO(op, ...)	__TLBI_IO_N(op, ##__VA_ARGS__, 2, 1, 0)
+
+#define __tlbi_asm_dsb(as, op, attr, ...)	do {			\
+	__TLBI_FOR(op, ##__VA_ARGS__)					\
+		asm (__TLBI_INSTR(op, ##__VA_ARGS__)			\
+		     __TLBI_IO(op, ##__VA_ARGS__));			\
+	asm volatile ( as "\ndsb " #attr "\n"				\
+		       : : : "memory"); } while (0)
+
+#define __tlbi_dsb(...)		__tlbi_asm_dsb("", ##__VA_ARGS__)
 
 /*
  * TLB Management
@@ -84,16 +131,14 @@
 static inline void local_flush_tlb_all(void)
 {
 	dsb(nshst);
-	__tlbi(vmalle1);
-	dsb(nsh);
+	__tlbi_dsb(vmalle1, nsh);
 	isb();
 }
 
 static inline void flush_tlb_all(void)
 {
 	dsb(ishst);
-	__tlbi(vmalle1is);
-	dsb(ish);
+	__tlbi_dsb(vmalle1is, ish);
 	isb();
 }
 
@@ -102,8 +147,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	unsigned long asid = ASID(mm) << 48;
 
 	dsb(ishst);
-	__tlbi(aside1is, asid);
-	dsb(ish);
+	__tlbi_dsb(aside1is, ish, asid);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -112,8 +156,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
-	__tlbi(vale1is, addr);
-	dsb(ish);
+	__tlbi_dsb(vale1is, ish, addr);
 }
 
 /*
@@ -127,7 +170,6 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     bool last_level)
 {
 	unsigned long asid = ASID(vma->vm_mm) << 48;
-	unsigned long addr;
 
 	if ((end - start) > MAX_TLB_RANGE) {
 		flush_tlb_mm(vma->vm_mm);
@@ -138,13 +180,10 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	end = asid | (end >> 12);
 
 	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
-		if (last_level)
-			__tlbi(vale1is, addr);
-		else
-			__tlbi(vae1is, addr);
-	}
-	dsb(ish);
+	if (last_level)
+		__tlbi_dsb(vale1is, ish, start, end);
+	else
+		__tlbi_dsb(vae1is, ish, start, end);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
@@ -155,8 +194,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
-
 	if ((end - start) > MAX_TLB_RANGE) {
 		flush_tlb_all();
 		return;
 	}
@@ -166,9 +203,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 	end >>= 12;
 
 	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		__tlbi(vaae1is, addr);
-	dsb(ish);
+	__tlbi_dsb(vaae1is, ish, start, end);
 	isb();
 }
 
@@ -181,8 +216,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 {
 	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
-	__tlbi(vae1is, addr);
-	dsb(ish);
+	__tlbi_dsb(vae1is, ish, addr);
 }
 
 #endif
This refactoring will allow an errata workaround that repeats tlbi dsb
sequences to only change one location. This is not intended to change the
generated assembly and comparison of before and after preprocessor output
of arch/arm64/mm/mmu.c and vmlinux objdump shows no functional changes.

Signed-off-by: Christopher Covington <cov@codeaurora.org>
---
 arch/arm64/include/asm/tlbflush.h | 104 +++++++++++++++++++++++++-------------
 1 file changed, 69 insertions(+), 35 deletions(-)