@@ -158,7 +158,7 @@ static inline int fls(unsigned int x)
#define arch_ffs(x) ((x) ? 1 + __builtin_ctz(x) : 0)
-#define ffsl(x) ({ unsigned long __t = (x); flsl(ISOLATE_LSB(__t)); })
+#define arch_ffsl(x) ((x) ? 1 + __builtin_ctzl(x) : 0)
/**
* find_first_set_bit - find the first set bit in @word
@@ -174,7 +174,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
#define flsl(x) generic_flsl(x)
#define fls(x) generic_flsl(x)
#define arch_ffs(x) ((x) ? 1 + __builtin_ctz(x) : 0)
-#define ffsl(x) ({ unsigned long t_ = (x); flsl(t_ & -t_); })
+#define arch_ffsl(x) ((x) ? 1 + __builtin_ctzl(x) : 0)
/**
* hweightN - returns the hamming weight of a N-bit word
@@ -413,23 +413,6 @@ static inline unsigned int find_first_set_bit(unsigned long word)
return (unsigned int)word;
}
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as the libc and compiler builtin ffs routines.
- */
-static inline int ffsl(unsigned long x)
-{
- long r;
-
- asm ( "bsf %1,%0\n\t"
- "jnz 1f\n\t"
- "mov $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
- return (int)r+1;
-}
-
static always_inline unsigned int arch_ffs(unsigned int x)
{
unsigned int r;
@@ -458,6 +441,24 @@ static always_inline unsigned int arch_ffs(unsigned int x)
}
#define arch_ffs arch_ffs
+static always_inline unsigned int arch_ffsl(unsigned long x)
+{
+ unsigned int r;
+
+ /* See arch_ffs() for safety discussions. */
+ if ( __builtin_constant_p(x > 0) && x > 0 )
+ asm ( "bsf %[val], %q[res]"
+ : [res] "=r" (r)
+ : [val] "rm" (x) );
+ else
+ asm ( "bsf %[val], %q[res]"
+ : [res] "=r" (r)
+ : [val] "rm" (x), "[res]" (-1) );
+
+ return r + 1;
+}
+#define arch_ffsl arch_ffsl
+
/**
* fls - find last bit set
* @x: the word to search
@@ -11,6 +11,19 @@ static void __init test_ffs(void)
CHECK(ffs, 7, 1);
CHECK(ffs, 6, 2);
CHECK(ffs, 0x80000000U, 32);
+
+ /* unsigned int ffsl(unsigned long) */
+ CHECK(ffsl, 0, 0);
+ CHECK(ffsl, 1, 1);
+ CHECK(ffsl, 3, 1);
+ CHECK(ffsl, 7, 1);
+ CHECK(ffsl, 6, 2);
+
+ CHECK(ffsl, 1UL << (BITS_PER_LONG - 1), BITS_PER_LONG);
+#if BITS_PER_LONG > 32
+ CHECK(ffsl, 1UL << 32, 33);
+ CHECK(ffsl, 1UL << 63, 64);
+#endif
}
static void __init __constructor test_bitops(void)
@@ -48,6 +48,18 @@ static always_inline __pure unsigned int ffs(unsigned int x)
#endif
}
+static always_inline __pure unsigned int ffsl(unsigned long x)
+{
+ if ( __builtin_constant_p(x) )
+ return __builtin_ffsl(x);
+
+#ifdef arch_ffsl
+ return arch_ffsl(x);
+#else
+ return generic_ffsl(x);
+#endif
+}
+
/* --------------------- Please tidy below here --------------------- */
#ifndef find_next_bit
Just like ffs() in the previous changes. Express the upper bound of the testing in terms of BITS_PER_LONG as it varies between architectures. Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> --- CC: Jan Beulich <JBeulich@suse.com> CC: Roger Pau Monné <roger.pau@citrix.com> CC: Wei Liu <wl@xen.org> CC: Stefano Stabellini <sstabellini@kernel.org> CC: Julien Grall <julien@xen.org> CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com> CC: Bertrand Marquis <bertrand.marquis@arm.com> CC: Michal Orzel <michal.orzel@amd.com> CC: Oleksii Kurochko <oleksii.kurochko@gmail.com> CC: Shawn Anastasio <sanastasio@raptorengineering.com> CC: consulting@bugseng.com <consulting@bugseng.com> CC: Simone Ballarin <simone.ballarin@bugseng.com> CC: Federico Serafini <federico.serafini@bugseng.com> CC: Nicola Vetrini <nicola.vetrini@bugseng.com> v2: * Swap to #if BITS_PER_LONG > 32 to avoid a compile error on arm32 * Changes to mirror ffs() v2. --- xen/arch/arm/include/asm/bitops.h | 2 +- xen/arch/ppc/include/asm/bitops.h | 2 +- xen/arch/x86/include/asm/bitops.h | 35 ++++++++++++++++--------------- xen/common/bitops.c | 13 ++++++++++++ xen/include/xen/bitops.h | 12 +++++++++++ 5 files changed, 45 insertions(+), 19 deletions(-)