Message ID | fe10f7bf2332c868deb88bb59967fa1880b1b34f.1710970229.git.msakai@redhat.com (mailing list archive) |
---|---|
State | Superseded, archived |
Delegated to: | Mike Snitzer |
Series | dm vdo: use kernel byteswapping routines instead of GCC ones |
On Wed, Mar 20, 2024 at 05:44:05PM -0400, Matthew Sakai wrote:
> From: Ken Raeburn <raeburn@redhat.com>
>
> Reported-by: Guenter Roeck <linux@roeck-us.net>
> Signed-off-by: Ken Raeburn <raeburn@redhat.com>
> Signed-off-by: Matthew Sakai <msakai@redhat.com>

Tested-by: Guenter Roeck <linux@roeck-us.net>

> ---
>  drivers/md/dm-vdo/murmurhash3.c | 18 ++++--------------
>  1 file changed, 4 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
> index 00c9b9c05001..124026cb676a 100644
> --- a/drivers/md/dm-vdo/murmurhash3.c
> +++ b/drivers/md/dm-vdo/murmurhash3.c
> @@ -8,6 +8,8 @@
>
>  #include "murmurhash3.h"
>
> +#include <asm/byteorder.h>
> +
>  static inline u64 rotl64(u64 x, s8 r)
>  {
>  	return (x << r) | (x >> (64 - r));
> @@ -16,24 +18,12 @@ static inline u64 rotl64(u64 x, s8 r)
>  #define ROTL64(x, y) rotl64(x, y)
>  static __always_inline u64 getblock64(const u64 *p, int i)
>  {
> -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> -	return p[i];
> -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> -	return __builtin_bswap64(p[i]);
> -#else
> -#error "can't figure out byte order"
> -#endif
> +	return le64_to_cpup(&p[i]);
>  }
>
>  static __always_inline void putblock64(u64 *p, int i, u64 value)
>  {
> -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> -	p[i] = value;
> -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> -	p[i] = __builtin_bswap64(value);
> -#else
> -#error "can't figure out byte order"
> -#endif
> +	p[i] = cpu_to_le64(value);
>  }
>
>  /* Finalization mix - force all bits of a hash block to avalanche */
> --
> 2.42.0
>
On Wed, Mar 20, 2024 at 05:44:05PM -0400, Matthew Sakai wrote:
> static __always_inline u64 getblock64(const u64 *p, int i)
> {
> -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> -	return p[i];
> -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> -	return __builtin_bswap64(p[i]);
> -#else
> -#error "can't figure out byte order"
> -#endif
> +	return le64_to_cpup(&p[i]);
> }
>
> static __always_inline void putblock64(u64 *p, int i, u64 value)
> {
> -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> -	p[i] = value;
> -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> -	p[i] = __builtin_bswap64(value);
> -#else
> -#error "can't figure out byte order"
> -#endif
> +	p[i] = cpu_to_le64(value);
> }

This is very broken.  What you're actually looking for is get_unaligned_le64()
and put_unaligned_le64().  And they should be folded directly into the caller.

- Eric
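[Editor's note: for context, a minimal sketch of the kind of change Eric is suggesting. Eric's objection is presumably that the input pointer may not be 64-bit aligned; get_unaligned_le64()/put_unaligned_le64() handle both the little-endian conversion and unaligned access, so the getblock64()/putblock64() helpers can be dropped and the accesses done directly in the caller. The function and variable names below are illustrative assumptions, not the actual dm-vdo code or the eventual v2 patch.]

/*
 * Illustrative sketch only: load/store 64-bit little-endian words
 * directly in the caller, alignment-safe on every architecture.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

static void example_mix_block(const void *data, size_t i, void *out)
{
	const u8 *blocks = data;

	/* Read block i as two little-endian 64-bit words. */
	u64 k1 = get_unaligned_le64(blocks + i * 16);
	u64 k2 = get_unaligned_le64(blocks + i * 16 + 8);

	/* ... hashing rounds would go here ... */

	/* Write results back as little-endian: value first, pointer second. */
	put_unaligned_le64(k1, out);
	put_unaligned_le64(k2, (u8 *)out + 8);
}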
On 3/20/24 23:52, Eric Biggers wrote:
> On Wed, Mar 20, 2024 at 05:44:05PM -0400, Matthew Sakai wrote:
>> static __always_inline u64 getblock64(const u64 *p, int i)
>> {
>> -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
>> -	return p[i];
>> -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
>> -	return __builtin_bswap64(p[i]);
>> -#else
>> -#error "can't figure out byte order"
>> -#endif
>> +	return le64_to_cpup(&p[i]);
>> }
>>
>> static __always_inline void putblock64(u64 *p, int i, u64 value)
>> {
>> -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
>> -	p[i] = value;
>> -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
>> -	p[i] = __builtin_bswap64(value);
>> -#else
>> -#error "can't figure out byte order"
>> -#endif
>> +	p[i] = cpu_to_le64(value);
>> }
>
> This is very broken. What you're actually looking for is get_unaligned_le64()
> and put_unaligned_le64(). And they should be folded directly into the caller.
>
> - Eric
>

Thanks for the suggestion. We'll work on a v2 to include this.

Matt
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
index 00c9b9c05001..124026cb676a 100644
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -8,6 +8,8 @@
 
 #include "murmurhash3.h"
 
+#include <asm/byteorder.h>
+
 static inline u64 rotl64(u64 x, s8 r)
 {
 	return (x << r) | (x >> (64 - r));
@@ -16,24 +18,12 @@ static inline u64 rotl64(u64 x, s8 r)
 #define ROTL64(x, y) rotl64(x, y)
 static __always_inline u64 getblock64(const u64 *p, int i)
 {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	return p[i];
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	return __builtin_bswap64(p[i]);
-#else
-#error "can't figure out byte order"
-#endif
+	return le64_to_cpup(&p[i]);
 }
 
 static __always_inline void putblock64(u64 *p, int i, u64 value)
 {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	p[i] = value;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	p[i] = __builtin_bswap64(value);
-#else
-#error "can't figure out byte order"
-#endif
+	p[i] = cpu_to_le64(value);
 }
 
 /* Finalization mix - force all bits of a hash block to avalanche */