Message ID: 20210218040512.709186-12-yury.norov@gmail.com (mailing list archive)
State: New, archived
Series: lib/find_bit: fast path for small bitmaps
On Wed, Feb 17, 2021 at 08:05:09PM -0800, Yury Norov wrote:
> Similarly to bitmap functions, find_next_*_bit() users will benefit
> if we'll handle a case of bitmaps that fit into a single word. In the
> very best case, the compiler may replace a function call with a few
> instructions.
>
> This is the quite typical find_next_bit() user:
>
> unsigned int cpumask_next(int n, const struct cpumask *srcp)
> {
>         /* -1 is a legal arg here. */
>         if (n != -1)
>                 cpumask_check(n);
>         return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
> }
> EXPORT_SYMBOL(cpumask_next);
>
> On ARM64 if CONFIG_FAST_PATH is disabled it generates:

Can you share bloat-o-meter numbers for a specific module and/or vmlinux.o?

> 0000000000000000 <cpumask_next>:
>    0:   a9bf7bfd        stp     x29, x30, [sp, #-16]!
>    4:   11000402        add     w2, w0, #0x1
>    8:   aa0103e0        mov     x0, x1
>    c:   d2800401        mov     x1, #0x40                  // #64
>   10:   910003fd        mov     x29, sp
>   14:   93407c42        sxtw    x2, w2
>   18:   94000000        bl      0 <find_next_bit>
>   1c:   a8c17bfd        ldp     x29, x30, [sp], #16
>   20:   d65f03c0        ret
>   24:   d503201f        nop
>
> If CONFIG_FAST_PATH is enabled:
>
> 0000000000000140 <cpumask_next>:
>  140:   11000400        add     w0, w0, #0x1
>  144:   93407c00        sxtw    x0, w0
>  148:   f100fc1f        cmp     x0, #0x3f
>  14c:   54000168        b.hi    178 <cpumask_next+0x38>  // b.pmore
>  150:   f9400023        ldr     x3, [x1]
>  154:   92800001        mov     x1, #0xffffffffffffffff    // #-1
>  158:   9ac02020        lsl     x0, x1, x0
>  15c:   52800802        mov     w2, #0x40                  // #64
>  160:   8a030001        and     x1, x0, x3
>  164:   dac00020        rbit    x0, x1
>  168:   f100003f        cmp     x1, #0x0
>  16c:   dac01000        clz     x0, x0
>  170:   1a800040        csel    w0, w2, w0, eq  // eq = none
>  174:   d65f03c0        ret
>  178:   52800800        mov     w0, #0x40                  // #64
>  17c:   d65f03c0        ret
>
> find_next_bit() call is replaced with 6 instructions. (And I suspect
> we can improve the GENMASK() for better code generation.) find_next_bit()
> itself is 41 instructions.

...

> +        if (SMALL_CONST(size - 1)) {
> +                unsigned long val;
> +
> +                if (unlikely(offset >= size))
> +                        return size;
> +                val = *addr & GENMASK(size - 1, offset);

Yeah, GENMASK() is really meant for constant values or cases like (x, 0).
I think something along the lines of what BITMAP_FIRST/LAST_WORD_MASK
already does will give better results here.

> +                return val ? __ffs(val) : size;
> +        }
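For illustration, here is a minimal sketch of the variant being suggested, assuming the single-word mask helpers from include/linux/bitmap.h are usable here. find_next_bit_small() is a hypothetical name, and whether this actually beats GENMASK() would still need to be confirmed with the bloat-o-meter numbers requested above. For 0 < size <= BITS_PER_LONG, the two shifted-~0UL masks select exactly the same bits as GENMASK(size - 1, offset):

/*
 * Illustrative sketch only -- not part of the patch.  Same semantics as
 * the GENMASK()-based fast path, but the mask is composed from the
 * single-word helpers bitmap.h already defines:
 *
 *   #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
 *   #define BITMAP_LAST_WORD_MASK(nbits)  (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
 *
 * Each half is a single shift of ~0UL, which typically compiles to one
 * lsl/lsr instruction even when the shift amount is not a constant.
 */
static inline unsigned long find_next_bit_small(const unsigned long *addr,
                                                unsigned long size,
                                                unsigned long offset)
{
        unsigned long val;

        if (unlikely(offset >= size))
                return size;

        val = *addr & BITMAP_FIRST_WORD_MASK(offset) & BITMAP_LAST_WORD_MASK(size);
        return val ? __ffs(val) : size;
}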
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 7ad70dab8e93..8bd7a33a889d 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -20,6 +20,16 @@ static inline
 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                             unsigned long offset)
 {
+        if (SMALL_CONST(size - 1)) {
+                unsigned long val;
+
+                if (unlikely(offset >= size))
+                        return size;
+
+                val = *addr & GENMASK(size - 1, offset);
+                return val ? __ffs(val) : size;
+        }
+
         return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
 }
 #endif
@@ -40,6 +50,16 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
                 const unsigned long *addr2, unsigned long size,
                 unsigned long offset)
 {
+        if (SMALL_CONST(size - 1)) {
+                unsigned long val;
+
+                if (unlikely(offset >= size))
+                        return size;
+
+                val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+                return val ? __ffs(val) : size;
+        }
+
         return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
 }
 #endif
@@ -58,6 +78,16 @@ static inline
 unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                  unsigned long offset)
 {
+        if (SMALL_CONST(size - 1)) {
+                unsigned long val;
+
+                if (unlikely(offset >= size))
+                        return size;
+
+                val = *addr | ~GENMASK(size - 1, offset);
+                return val == ~0UL ? size : ffz(val);
+        }
+
         return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
 }
 #endif
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 21305f6cea0b..18ebcf639d7f 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -5,6 +5,7 @@
 #include <asm-generic/bitops/find.h>
 #include <asm/types.h>
 #include <asm/byteorder.h>
+#include <linux/swab.h>
 
 #if defined(__LITTLE_ENDIAN)
 
@@ -37,6 +38,16 @@ static inline unsigned long find_next_zero_bit_le(const void *addr,
                 unsigned long size, unsigned long offset)
 {
+        if (SMALL_CONST(size)) {
+                unsigned long val = *(const unsigned long *)addr;
+
+                if (unlikely(offset >= size))
+                        return size;
+
+                val = swab(val) | ~GENMASK(size - 1, offset);
+                return val == ~0UL ? size : ffz(val);
+        }
+
         return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
 }
 #endif
@@ -46,6 +57,16 @@ static inline unsigned long find_next_bit_le(const void *addr,
                 unsigned long size, unsigned long offset)
 {
+        if (SMALL_CONST(size)) {
+                unsigned long val = *(const unsigned long *)addr;
+
+                if (unlikely(offset >= size))
+                        return size;
+
+                val = swab(val) & GENMASK(size - 1, offset);
+                return val ? __ffs(val) : size;
+        }
+
         return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
 }
 #endif
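One thing that is easy to miss when reading this patch in isolation: SMALL_CONST() is not defined here, and is presumably introduced earlier in the series. Based only on how it is used above, its definition is assumed to be a compile-time-only check along these lines (an assumption, not a quote from the series):

/*
 * Assumed definition, for illustration only -- the real macro lives
 * elsewhere in the series.  The expression is false unless `n' is a
 * compile-time constant below BITS_PER_LONG, so for variable-sized
 * bitmaps the compiler drops the fast path entirely and the
 * out-of-line _find_next_bit() call is left as before.
 */
#define SMALL_CONST(n) (__builtin_constant_p(n) && (unsigned long)(n) < BITS_PER_LONG)

With a guard like that, cpumask_next() qualifies whenever nr_cpumask_bits is a compile-time constant (i.e. NR_CPUS on !CONFIG_CPUMASK_OFFSTACK builds), which is what produces the 6-instruction sequence shown in the commit message below.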
As with the bitmap functions, find_next_*_bit() users will benefit if we
handle the case of bitmaps that fit into a single word. In the best case,
the compiler can replace a function call with a few instructions.

This is a quite typical find_next_bit() user:

unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
        /* -1 is a legal arg here. */
        if (n != -1)
                cpumask_check(n);
        return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);

On ARM64, if CONFIG_FAST_PATH is disabled, it generates:

0000000000000000 <cpumask_next>:
   0:   a9bf7bfd        stp     x29, x30, [sp, #-16]!
   4:   11000402        add     w2, w0, #0x1
   8:   aa0103e0        mov     x0, x1
   c:   d2800401        mov     x1, #0x40                  // #64
  10:   910003fd        mov     x29, sp
  14:   93407c42        sxtw    x2, w2
  18:   94000000        bl      0 <find_next_bit>
  1c:   a8c17bfd        ldp     x29, x30, [sp], #16
  20:   d65f03c0        ret
  24:   d503201f        nop

If CONFIG_FAST_PATH is enabled:

0000000000000140 <cpumask_next>:
 140:   11000400        add     w0, w0, #0x1
 144:   93407c00        sxtw    x0, w0
 148:   f100fc1f        cmp     x0, #0x3f
 14c:   54000168        b.hi    178 <cpumask_next+0x38>  // b.pmore
 150:   f9400023        ldr     x3, [x1]
 154:   92800001        mov     x1, #0xffffffffffffffff    // #-1
 158:   9ac02020        lsl     x0, x1, x0
 15c:   52800802        mov     w2, #0x40                  // #64
 160:   8a030001        and     x1, x0, x3
 164:   dac00020        rbit    x0, x1
 168:   f100003f        cmp     x1, #0x0
 16c:   dac01000        clz     x0, x0
 170:   1a800040        csel    w0, w2, w0, eq  // eq = none
 174:   d65f03c0        ret
 178:   52800800        mov     w0, #0x40                  // #64
 17c:   d65f03c0        ret

The find_next_bit() call is replaced with 6 instructions. (And I suspect
GENMASK() can be improved for better code generation.) find_next_bit()
itself is 41 instructions.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
---
 include/asm-generic/bitops/find.h | 30 ++++++++++++++++++++++++++++++
 include/asm-generic/bitops/le.h   | 21 +++++++++++++++++++++
 2 files changed, 51 insertions(+)
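As a quick sanity check of the word-level logic (purely illustrative, not part of the patch), the GENMASK-and-__ffs expression can be compared against a bit-by-bit reference search in user space. GENMASK() and __ffs() are open-coded below with compiler builtins, since the kernel headers are not available there:

/* Throwaway user-space check of the single-word fast-path semantics. */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define GENMASK(h, l)   (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

/* The fast path from the patch, with __ffs() spelled as __builtin_ctzl(). */
static unsigned long fast(unsigned long word, unsigned long size,
                          unsigned long offset)
{
        unsigned long val;

        if (offset >= size)
                return size;

        val = word & GENMASK(size - 1, offset);
        return val ? (unsigned long)__builtin_ctzl(val) : size;
}

/* Reference implementation: scan the word bit by bit. */
static unsigned long ref(unsigned long word, unsigned long size,
                         unsigned long offset)
{
        for (unsigned long i = offset; i < size; i++)
                if (word & (1UL << i))
                        return i;
        return size;
}

int main(void)
{
        unsigned long word = 0xdeadbeefcafe0000UL;

        for (unsigned long size = 1; size <= BITS_PER_LONG; size++)
                for (unsigned long offset = 0; offset <= size; offset++)
                        assert(fast(word, size, offset) == ref(word, size, offset));

        puts("single-word fast path matches bit-by-bit reference");
        return 0;
}

It exercises every (size, offset) pair for one sample word, including the offset == size boundary, which is the same edge case the unlikely(offset >= size) guard handles in the patch.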