Message ID | 20230911-optimize_checksum-v4-1-77cc2ad9e9d7@rivosinc.com |
---|---|
State | Superseded |
Series | riscv: Add fine-tuned checksum functions |
Charlie Jenkins wrote:
> Provide checksum algorithms that have been designed to leverage riscv
> instructions such as rotate. In 64-bit, they can take advantage of the
> larger registers to avoid some overflow checking.
>
> Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> ---
>  arch/riscv/include/asm/checksum.h | 95 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 95 insertions(+)
>
> [...]
>
> +#ifdef CONFIG_32BIT
> +typedef unsigned int csum_t;
> +#else
> +typedef unsigned long csum_t;
> +#endif

Hi Charlie,

Isn't unsigned long already 32-bit on 32-bit RISC-V, so why is this #ifdef
needed?

> [...]
>
> +no_zbb:
> +#ifndef CONFIG_32BIT
> +        csum += (csum >> 32) | (csum << 32);
> +        csum >>= 32;

The indentation seems off here.

/Emil
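Emil's question rests on the standard RISC-V Linux ABIs: rv32 is ILP32, where unsigned long is 4 bytes, and rv64 is LP64, where it is 8 bytes, so unsigned long already tracks the register width on both and the #ifdef selects the same type either way. A minimal userspace sketch (not part of the patch; it assumes native rv32 and rv64 toolchains) that makes the widths visible:

```c
/* Prints the widths the csum_t #ifdef is trying to select between.
 * An ILP32 (riscv32) build prints 4 and 4; an LP64 (riscv64) build
 * prints 8 and 4.
 */
#include <stdio.h>

int main(void)
{
        printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
        printf("sizeof(unsigned int)  = %zu\n", sizeof(unsigned int));
        return 0;
}
```

A sizeof(long) of 8 under qemu-system-riscv32 therefore suggests the test program was in fact built for a 64-bit target; a true ILP32 build cannot produce it.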
On Tue, Sep 12, 2023 at 03:24:29AM -0700, Emil Renner Berthing wrote:
> Charlie Jenkins wrote:
> > [...]
> >
> > +#ifdef CONFIG_32BIT
> > +typedef unsigned int csum_t;
> > +#else
> > +typedef unsigned long csum_t;
> > +#endif
>
> Hi Charlie,
>
> Isn't unsigned long already 32-bit on 32-bit RISC-V, so why is this #ifdef
> needed?

Oh, I wasn't sure so I ran sizeof(long) in qemu-system-riscv32 and it
gave me 8 so assumed a long was 8 bytes. Do you think it would make what
is going on more clear if I use u32 and u64 or would you recommend just
using long?
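The 64-bit Zbb sequence quoted in the patch is terse, so here is a C restatement of what it computes, one statement per instruction, checked against the patch's plain (no_zbb) fold path. This is a sketch for illustration only; zbb_fold64, fold_ref, and the sample header words are invented for the harness and are not part of the patch:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t ror64(uint64_t x, unsigned int n) { return (x >> n) | (x << (64 - n)); }
static uint32_t ror32(uint32_t x, unsigned int n) { return (x >> n) | (x << (32 - n)); }
/* RV64 "W" instructions operate on the low 32 bits and sign-extend */
static uint64_t sext32(uint32_t x) { return (uint64_t)(int64_t)(int32_t)x; }

static uint16_t zbb_fold64(uint64_t csum)
{
        uint64_t t;

        t = ror64(csum, 32);                      /* rori  t, c, 32 */
        csum = t + csum;                          /* add   c, t, c  */
        csum >>= 32;                              /* srli  c, c, 32: end-around 64->32 fold */
        t = ~csum;                                /* not   t, c     */
        csum = sext32(ror32((uint32_t)csum, 16)); /* roriw c, c, 16 */
        csum = sext32((uint32_t)(t - csum));      /* subw  c, t, c  */
        return (uint16_t)(csum >> 16);            /* return csum >> 16 */
}

/* Reference: the patch's no_zbb path, then the classic fold-and-complement */
static uint16_t fold_ref(uint64_t csum)
{
        csum += (csum >> 32) | (csum << 32);
        csum >>= 32;
        csum = (csum & 0xffff) + (csum >> 16);
        csum = (csum & 0xffff) + (csum >> 16);
        return (uint16_t)~csum;
}

int main(void)
{
        /* arbitrary contents of a 20-byte (ihl = 5) IPv4 header */
        const uint32_t hdr[5] = { 0x45000054, 0x1c464000, 0x40011b27,
                                  0x0a00020f, 0x0a000201 };
        uint64_t csum = 0;

        for (int i = 0; i < 5; i++)
                csum += hdr[i];
        assert(zbb_fold64(csum) == fold_ref(csum));
        printf("csum = 0x%04x\n", zbb_fold64(csum));
        return 0;
}
```

The casts through int32_t model roriw/subw sign-extending their 32-bit results; the final shift then extracts bits 16-31, which is exactly csum_fold() applied to the folded 32-bit sum.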
Charlie Jenkins wrote:
> On Tue, Sep 12, 2023 at 03:24:29AM -0700, Emil Renner Berthing wrote:
> > Isn't unsigned long already 32-bit on 32-bit RISC-V, so why is this #ifdef
> > needed?
>
> Oh, I wasn't sure so I ran sizeof(long) in qemu-system-riscv32 and it
> gave me 8 so assumed a long was 8 bytes. Do you think it would make what
> is going on more clear if I use u32 and u64 or would you recommend just
> using long?

Yeah, it doesn't seem like csum_t is used anywhere else, so I'd just use
unsigned long if all you want is a register-sized unsigned value. It'll
be more familiar and easier to read for most people.
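The register width is also why the accumulation loop in the patch differs between the two configs: a 64-bit accumulator has 32 bits of headroom, so carries can be deferred to one end-around fold after the loop, while a 32-bit accumulator has to fold every carry back in as it happens. A sketch of the two patterns in plain C (function names are illustrative, not kernel code):

```c
#include <stdint.h>

/* 64-bit pattern: headroom in the accumulator defers all carry handling
 * to a single end-around fold after the loop. */
static uint32_t sum_words_64(const uint32_t *p, unsigned int n)
{
        uint64_t csum = 0;

        while (n--)
                csum += *p++;   /* cannot overflow for any realistic n */

        /* fold the high half back into the low half, keeping the carry */
        csum += (csum >> 32) | (csum << 32);
        return (uint32_t)(csum >> 32);
}

/* 32-bit pattern: each addition may wrap, so the carry is folded back in
 * immediately; (csum < x) is 1 exactly when the addition wrapped. */
static uint32_t sum_words_32(const uint32_t *p, unsigned int n)
{
        uint32_t csum = 0;

        while (n--) {
                uint32_t x = *p++;

                csum += x;
                csum += csum < x;
        }
        return csum;
}
```

Either result is then reduced to the final 16-bit checksum by csum_fold().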
```diff
diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
new file mode 100644
index 000000000000..0d7fc8275a5e
--- /dev/null
+++ b/arch/riscv/include/asm/checksum.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IP checksum routines
+ *
+ * Copyright (C) 2023 Rivos Inc.
+ */
+#ifndef __ASM_RISCV_CHECKSUM_H
+#define __ASM_RISCV_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_32BIT
+typedef unsigned int csum_t;
+#else
+typedef unsigned long csum_t;
+#endif
+
+/*
+ * Fold a partial checksum without adding pseudo headers
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+        return (~sum - ror32(sum, 16)) >> 16;
+}
+
+#define csum_fold csum_fold
+
+/*
+ * Quickly compute an IP checksum with the assumption that IPv4 headers will
+ * always be in multiples of 32-bits, and have an ihl of at least 5.
+ * @ihl is the number of 32 bit segments and must be greater than or equal to 5.
+ * @iph is assumed to be word aligned.
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+        csum_t csum = 0;
+        int pos = 0;
+
+        do {
+                csum += ((const unsigned int *)iph)[pos];
+                if (IS_ENABLED(CONFIG_32BIT))
+                        csum += csum < ((const unsigned int *)iph)[pos];
+        } while (++pos < ihl);
+
+        /*
+         * ZBB only saves three instructions on 32-bit and five on 64-bit so not
+         * worth checking if supported without Alternatives.
+         */
+        if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+            IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+                csum_t fold_temp;
+
+                asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+                                              RISCV_ISA_EXT_ZBB, 1)
+                                  :
+                                  :
+                                  :
+                                  : no_zbb);
+
+                if (IS_ENABLED(CONFIG_32BIT)) {
+                        asm(".option push                               \n\
+                        .option arch,+zbb                               \n\
+                                not     %[fold_temp], %[csum]           \n\
+                                rori    %[csum], %[csum], 16            \n\
+                                sub     %[csum], %[fold_temp], %[csum]  \n\
+                        .option pop"
+                        : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
+                } else {
+                        asm(".option push                               \n\
+                        .option arch,+zbb                               \n\
+                                rori    %[fold_temp], %[csum], 32       \n\
+                                add     %[csum], %[fold_temp], %[csum]  \n\
+                                srli    %[csum], %[csum], 32            \n\
+                                not     %[fold_temp], %[csum]           \n\
+                                roriw   %[csum], %[csum], 16            \n\
+                                subw    %[csum], %[fold_temp], %[csum]  \n\
+                        .option pop"
+                        : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
+                }
+                return csum >> 16;
+        }
+no_zbb:
+#ifndef CONFIG_32BIT
+        csum += (csum >> 32) | (csum << 32);
+        csum >>= 32;
+#endif
+        return csum_fold((__force __wsum)csum);
+}
+
+#define ip_fast_csum ip_fast_csum
+
+#include <asm-generic/checksum.h>
+
+#endif // __ASM_RISCV_CHECKSUM_H
```
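One subtle piece of the patch is csum_fold(): `(~sum - ror32(sum, 16)) >> 16` produces the same 16-bit result as the traditional fold-twice-then-complement, but with a single subtraction of the rotated sum. A self-contained harness (not from the patch) that checks the identity across a spread of inputs covering both carry cases:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t x, unsigned int n)
{
        return (x >> n) | (x << (32 - n));
}

/* The patch's fold: complement, rotate, subtract, shift */
static uint16_t fold_fast(uint32_t sum)
{
        return (uint16_t)((~sum - ror32(sum, 16)) >> 16);
}

/* Traditional fold: add the halves twice, then complement */
static uint16_t fold_ref(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        /* step 0x10001 hits sums whose half-words do and don't carry */
        for (uint64_t s = 0; s <= 0xffffffffu; s += 0x10001)
                assert(fold_fast((uint32_t)s) == fold_ref((uint32_t)s));
        puts("folds agree");
        return 0;
}
```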
Provide checksum algorithms that have been designed to leverage riscv
instructions such as rotate. In 64-bit, they can take advantage of the
larger registers to avoid some overflow checking.

Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
---
 arch/riscv/include/asm/checksum.h | 95 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)