diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -157,24 +157,6 @@ extern int _test_and_set_bit(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
extern int _test_and_change_bit(int nr, volatile unsigned long * p);
-/*
- * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
- */
-unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
-unsigned long _find_next_zero_bit_le(const unsigned long *p,
- unsigned long size, unsigned long offset);
-unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
-unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
-
-/*
- * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
- */
-unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
-unsigned long _find_next_zero_bit_be(const unsigned long *p,
- unsigned long size, unsigned long offset);
-unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
-unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
-
#ifndef CONFIG_SMP
/*
* The __* form of bitops are non-atomic and may be reordered.
@@ -195,26 +177,6 @@ unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsi
#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p)
-#ifndef __ARMEB__
-/*
- * These are the little endian, atomic definitions.
- */
-#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
-#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
-#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
-#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off)
-
-#else
-/*
- * These are the big endian, atomic definitions.
- */
-#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
-#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
-#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
-#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off)
-
-#endif
-
#if __LINUX_ARM_ARCH__ < 5
#include <asm-generic/bitops/__fls.h>
@@ -237,35 +199,10 @@ unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsi
#endif
#include <asm-generic/bitops/ffz.h>
-
#include <asm-generic/bitops/fls64.h>
-
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
-
-#ifdef __ARMEB__
-
-static inline int find_first_zero_bit_le(const void *p, unsigned size)
-{
- return _find_first_zero_bit_le(p, size);
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline int find_next_zero_bit_le(const void *p, int size, int offset)
-{
- return _find_next_zero_bit_le(p, size, offset);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline int find_next_bit_le(const void *p, int size, int offset)
-{
- return _find_next_bit_le(p, size, offset);
-}
-#define find_next_bit_le find_next_bit_le
-
-#endif
-
#include <asm-generic/bitops/le.h>
/*
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -150,17 +150,6 @@ EXPORT_SYMBOL(_clear_bit);
EXPORT_SYMBOL(_test_and_clear_bit);
EXPORT_SYMBOL(_change_bit);
EXPORT_SYMBOL(_test_and_change_bit);
-EXPORT_SYMBOL(_find_first_zero_bit_le);
-EXPORT_SYMBOL(_find_next_zero_bit_le);
-EXPORT_SYMBOL(_find_first_bit_le);
-EXPORT_SYMBOL(_find_next_bit_le);
-
-#ifdef __ARMEB__
-EXPORT_SYMBOL(_find_first_zero_bit_be);
-EXPORT_SYMBOL(_find_next_zero_bit_be);
-EXPORT_SYMBOL(_find_first_bit_be);
-EXPORT_SYMBOL(_find_next_bit_be);
-#endif
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(__gnu_mcount_nc);
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -7,7 +7,7 @@
lib-y := changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
- delay.o delay-loop.o findbit.o memchr.o memcpy.o \
+ delay.o delay-loop.o memchr.o memcpy.o \
memmove.o memset.o setbit.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
deleted file mode 100644
--- a/arch/arm/lib/findbit.S
+++ /dev/null
@@ -1,193 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/arch/arm/lib/findbit.S
- *
- * Copyright (C) 1995-2000 Russell King
- *
- * 16th March 2001 - John Ripley <jripley@sonicblue.com>
- * Fixed so that "size" is an exclusive not an inclusive quantity.
- * All users of these functions expect exclusive sizes, and may
- * also call with zero size.
- * Reworked by rmk.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
- .text
-
-/*
- * Purpose : Find a 'zero' bit
- * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
- */
-ENTRY(_find_first_zero_bit_le)
- teq r1, #0
- beq 3f
- mov r2, #0
-1:
- ARM( ldrb r3, [r0, r2, lsr #3] )
- THUMB( lsr r3, r2, #3 )
- THUMB( ldrb r3, [r0, r3] )
- eors r3, r3, #0xff @ invert bits
- bne .L_found @ any now set - found zero bit
- add r2, r2, #8 @ next bit pointer
-2: cmp r2, r1 @ any more?
- blo 1b
-3: mov r0, r1 @ no free bits
- ret lr
-ENDPROC(_find_first_zero_bit_le)
-
-/*
- * Purpose : Find next 'zero' bit
- * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
- */
-ENTRY(_find_next_zero_bit_le)
- cmp r2, r1
- bhs 3b
- ands ip, r2, #7
- beq 1b @ If new byte, goto old routine
- ARM( ldrb r3, [r0, r2, lsr #3] )
- THUMB( lsr r3, r2, #3 )
- THUMB( ldrb r3, [r0, r3] )
- eor r3, r3, #0xff @ now looking for a 1 bit
- movs r3, r3, lsr ip @ shift off unused bits
- bne .L_found
- orr r2, r2, #7 @ if zero, then no bits here
- add r2, r2, #1 @ align bit pointer
- b 2b @ loop for next bit
-ENDPROC(_find_next_zero_bit_le)
-
-/*
- * Purpose : Find a 'one' bit
- * Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
- */
-ENTRY(_find_first_bit_le)
- teq r1, #0
- beq 3f
- mov r2, #0
-1:
- ARM( ldrb r3, [r0, r2, lsr #3] )
- THUMB( lsr r3, r2, #3 )
- THUMB( ldrb r3, [r0, r3] )
- movs r3, r3
- bne .L_found @ any now set - found zero bit
- add r2, r2, #8 @ next bit pointer
-2: cmp r2, r1 @ any more?
- blo 1b
-3: mov r0, r1 @ no free bits
- ret lr
-ENDPROC(_find_first_bit_le)
-
-/*
- * Purpose : Find next 'one' bit
- * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
- */
-ENTRY(_find_next_bit_le)
- cmp r2, r1
- bhs 3b
- ands ip, r2, #7
- beq 1b @ If new byte, goto old routine
- ARM( ldrb r3, [r0, r2, lsr #3] )
- THUMB( lsr r3, r2, #3 )
- THUMB( ldrb r3, [r0, r3] )
- movs r3, r3, lsr ip @ shift off unused bits
- bne .L_found
- orr r2, r2, #7 @ if zero, then no bits here
- add r2, r2, #1 @ align bit pointer
- b 2b @ loop for next bit
-ENDPROC(_find_next_bit_le)
-
-#ifdef __ARMEB__
-
-ENTRY(_find_first_zero_bit_be)
- teq r1, #0
- beq 3f
- mov r2, #0
-1: eor r3, r2, #0x18 @ big endian byte ordering
- ARM( ldrb r3, [r0, r3, lsr #3] )
- THUMB( lsr r3, #3 )
- THUMB( ldrb r3, [r0, r3] )
- eors r3, r3, #0xff @ invert bits
- bne .L_found @ any now set - found zero bit
- add r2, r2, #8 @ next bit pointer
-2: cmp r2, r1 @ any more?
- blo 1b
-3: mov r0, r1 @ no free bits
- ret lr
-ENDPROC(_find_first_zero_bit_be)
-
-ENTRY(_find_next_zero_bit_be)
- cmp r2, r1
- bhs 3b
- ands ip, r2, #7
- beq 1b @ If new byte, goto old routine
- eor r3, r2, #0x18 @ big endian byte ordering
- ARM( ldrb r3, [r0, r3, lsr #3] )
- THUMB( lsr r3, #3 )
- THUMB( ldrb r3, [r0, r3] )
- eor r3, r3, #0xff @ now looking for a 1 bit
- movs r3, r3, lsr ip @ shift off unused bits
- bne .L_found
- orr r2, r2, #7 @ if zero, then no bits here
- add r2, r2, #1 @ align bit pointer
- b 2b @ loop for next bit
-ENDPROC(_find_next_zero_bit_be)
-
-ENTRY(_find_first_bit_be)
- teq r1, #0
- beq 3f
- mov r2, #0
-1: eor r3, r2, #0x18 @ big endian byte ordering
- ARM( ldrb r3, [r0, r3, lsr #3] )
- THUMB( lsr r3, #3 )
- THUMB( ldrb r3, [r0, r3] )
- movs r3, r3
- bne .L_found @ any now set - found zero bit
- add r2, r2, #8 @ next bit pointer
-2: cmp r2, r1 @ any more?
- blo 1b
-3: mov r0, r1 @ no free bits
- ret lr
-ENDPROC(_find_first_bit_be)
-
-ENTRY(_find_next_bit_be)
- cmp r2, r1
- bhs 3b
- ands ip, r2, #7
- beq 1b @ If new byte, goto old routine
- eor r3, r2, #0x18 @ big endian byte ordering
- ARM( ldrb r3, [r0, r3, lsr #3] )
- THUMB( lsr r3, #3 )
- THUMB( ldrb r3, [r0, r3] )
- movs r3, r3, lsr ip @ shift off unused bits
- bne .L_found
- orr r2, r2, #7 @ if zero, then no bits here
- add r2, r2, #1 @ align bit pointer
- b 2b @ loop for next bit
-ENDPROC(_find_next_bit_be)
-
-#endif
-
-/*
- * One or more bits in the LSB of r3 are assumed to be set.
- */
-.L_found:
-#if __LINUX_ARM_ARCH__ >= 5
- rsb r0, r3, #0
- and r3, r3, r0
- clz r3, r3
- rsb r3, r3, #31
- add r0, r2, r3
-#else
- tst r3, #0x0f
- addeq r2, r2, #4
- movne r3, r3, lsl #4
- tst r3, #0x30
- addeq r2, r2, #2
- movne r3, r3, lsl #2
- tst r3, #0x40
- addeq r2, r2, #1
- mov r0, r2
-#endif
- cmp r1, r0 @ Clamp to maxbit
- movlo r0, r1
- ret lr
-
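For readers unfamiliar with the removed fixup: .L_found receives the bit
offset of the just-loaded byte in r2 and the byte value (with at least one
bit set) in r3, isolates the lowest set bit with a negate-and-AND, and
turns a clz of that single bit into a bit index. Here is an illustrative
user-space C sketch of the ARMv5+ path, using GCC's __builtin_clz in place
of the clz instruction; it is not kernel code, and it omits the final
clamp to maxbit that the asm performs before returning:

#include <stdio.h>
#include <assert.h>

/*
 * Sketch of the .L_found sequence: r2 = bit offset of the current byte,
 * r3 = byte value with at least one bit set.
 */
static unsigned int found_bit(unsigned int r2, unsigned int r3)
{
	assert(r3 != 0);
	r3 &= -r3;				/* rsb + and: isolate lowest set bit */
	return r2 + (31 - __builtin_clz(r3));	/* clz + rsb + add: bit index */
}

int main(void)
{
	/* byte at bit offset 16 is 0x24 (0b00100100): lowest set bit is 2 */
	printf("%u\n", found_bit(16, 0x24));	/* prints 18 */
	return 0;
}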
Generic code works on par with the arch code or better, according to my
testing, and with the recent improvements merged in v6.1 it should be
even faster.

ARM already uses many generic find_bit() functions - those that it
doesn't implement itself. So we are talking about migrating a subset of
the API; most of the find_bit() family has only a generic implementation
on ARM. This patch switches ARM to the generic find_bit() entirely.

CC: Guenter Roeck <linux@roeck-us.net>
CC: Dennis Zhou <dennis@kernel.org>
CC: Russell King <linux@armlinux.org.uk>
CC: Catalin Marinas <catalin.marinas@arm.com>
CC: linux-arm-kernel@lists.infradead.org
Reported-by: Guenter Roeck <linux@roeck-us.net>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Yury Norov <yury.norov@gmail.com>
---
 arch/arm/include/asm/bitops.h |  63 -----------
 arch/arm/kernel/armksyms.c    |  11 --
 arch/arm/lib/Makefile         |   2 +-
 arch/arm/lib/findbit.S        | 193 ----------------------------------
 4 files changed, 1 insertion(+), 268 deletions(-)
 delete mode 100644 arch/arm/lib/findbit.S
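To illustrate what callers get from the generic code, here is a minimal
user-space approximation of the find_next_bit() contract - return the
index of the first set bit at or after @offset, or @size if there is
none. The function name my_find_next_bit is hypothetical, and
__builtin_ctzl stands in for the kernel's __ffs(); this is a sketch of
the semantics, not the kernel's implementation in lib/find_bit.c:

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

static unsigned long my_find_next_bit(const unsigned long *addr,
				      unsigned long size, unsigned long offset)
{
	unsigned long idx, word;

	if (offset >= size)
		return size;

	idx = offset / BITS_PER_LONG;
	/* mask off the bits below @offset in the first word */
	word = addr[idx] & (~0UL << (offset % BITS_PER_LONG));

	while (!word) {
		if (++idx * BITS_PER_LONG >= size)
			return size;
		word = addr[idx];
	}

	idx = idx * BITS_PER_LONG + __builtin_ctzl(word);
	return idx < size ? idx : size;	/* clamp, as the removed asm also did */
}

int main(void)
{
	unsigned long map[2] = { 0, 1UL << 5 };

	/* first set bit is bit 5 of the second word: prints BITS_PER_LONG + 5 */
	printf("%lu\n", my_find_next_bit(map, 2 * BITS_PER_LONG, 0));
	return 0;
}

Semantically, the find_first_*() variants are just the offset == 0 case,
which is why one generic family can replace all of the removed routines.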