@@ -10,6 +10,10 @@
#include <linux/in6.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_RISCV_ISA_V
+#include <riscv_vector.h>
+#endif
+
#ifdef CONFIG_32BIT
typedef unsigned int csum_t;
#else
@@ -42,6 +46,77 @@ static inline __sum16 csum_fold(__wsum sum)
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
+#ifdef CONFIG_RISCV_ISA_V
+ if (!has_vector())
+ goto no_vector;
+
+ vuint64m1_t prev_buffer;
+ vuint32m1_t curr_buffer;
+ unsigned int vl;
+
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ csum_t high_result, low_result;
+
+ kernel_vector_begin();
+ asm(".option push \n\
+ .option arch, +v \n\
+ vsetivli x0, 1, e64, m1, ta, ma \n\
+ vmv.v.i %[prev_buffer], 0 \n\
+ 1: \n\
+ vsetvli %[vl], %[ihl], e32, m1, ta, ma \n\
+ vle32.v %[curr_buffer], (%[iph]) \n\
+ vwredsumu.vs %[prev_buffer], %[curr_buffer], %[prev_buffer] \n\
+ sub %[ihl], %[ihl], %[vl] \n\
+ slli %[vl], %[vl], 2 \n\
+ add %[iph], %[vl], %[iph] \n\
+ # If not all of iph could fit into vector reg, do another sum \n\
+ bne %[ihl], zero, 1b \n\
+ vsetivli x0, 1, e64, m1, ta, ma \n\
+ vmv.x.s %[low_result], %[prev_buffer] \n\
+ addi %[vl], x0, 32 \n\
+ vsrl.vx %[prev_buffer], %[prev_buffer], %[vl] \n\
+ vmv.x.s %[high_result], %[prev_buffer] \n\
+ .option pop"
+ : [vl] "=&r" (vl), [prev_buffer] "=&vd" (prev_buffer),
+ [curr_buffer] "=&vd" (curr_buffer),
+ [high_result] "=&r" (high_result),
+ [low_result] "=&r" (low_result),
+ [iph] "+r" (iph), [ihl] "+r" (ihl)
+ : : "memory");
+ kernel_vector_end();
+
+ high_result += low_result;
+ high_result += high_result < low_result;
+
+ return csum_fold((__force __wsum)high_result);
+ } else {
+ csum_t result;
+
+ kernel_vector_begin();
+ asm(".option push \n\
+ .option arch, +v \n\
+ vsetivli x0, 1, e64, m1, ta, ma \n\
+ vmv.v.i %[prev_buffer], 0 \n\
+ 1: \n\
+ # Set up the 32-bit sum of iph \n\
+ vsetvli %[vl], %[ihl], e32, m1, ta, ma \n\
+ vle32.v %[curr_buffer], (%[iph]) \n\
+ # Sum each 32-bit segment of iph that can fit into a vector reg \n\
+ vwredsumu.vs %[prev_buffer], %[curr_buffer], %[prev_buffer] \n\
+ subw %[ihl], %[ihl], %[vl] \n\
+ slli %[vl], %[vl], 2 \n\
+ add %[iph], %[vl], %[iph] \n\
+ # If not all of iph could fit into vector reg, do another sum \n\
+ bne %[ihl], zero, 1b \n\
+ vsetivli x0, 1, e64, m1, ta, ma \n\
+ vmv.x.s %[result], %[prev_buffer] \n\
+ .option pop"
+ : [vl] "=&r" (vl), [prev_buffer] "=&vd" (prev_buffer),
+ [curr_buffer] "=&vd" (curr_buffer),
+ [result] "=&r" (result),
+ [iph] "+r" (iph), [ihl] "+r" (ihl)
+ : : "memory");
+ kernel_vector_end();
+
+ /* Fold the 64-bit sum down to 32 bits, keeping the end-around carry */
+ result += (result >> 32) | (result << 32);
+ return csum_fold((__force __wsum)(result >> 32));
+ }
+no_vector:;
+#endif /* CONFIG_RISCV_ISA_V */
+
csum_t csum = 0;
int pos = 0;
The vector code is written in assembly rather than with the GCC vector intrinsics because the intrinsics did not generate optimal code. The intrinsic types are still used so that the inline assembly can let the compiler select the vector registers. However, this code cannot be merged yet: vector intrinsics are currently unusable in the kernel, because vector support has to be enabled directly from assembly.

Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
---
 arch/riscv/include/asm/checksum.h | 75 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)
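As an aside, the register-selection trick described above is easiest to try outside the kernel. Below is a minimal user-space sketch, not the patch itself: it assumes an RV64 Linux machine with the V extension and a toolchain new enough to accept the RVV intrinsic types as inline-assembly operands (recent GCC or Clang, built with -march=rv64gcv). The helper names ip_csum_vector() and fold64(), the "vr" constraint choice, and the sample buffer are illustrative assumptions, not part of the patch.

/*
 * Stand-alone sketch, not the kernel code above.  Assumes an RV64 system
 * with the V extension and a compiler that accepts RVV intrinsic types as
 * inline-assembly operands (build with e.g. -march=rv64gcv).
 */
#include <stdint.h>
#include <stdio.h>
#include <riscv_vector.h>

/* Fold a 64-bit sum of 32-bit words down to a 16-bit Internet checksum. */
static uint16_t fold64(uint64_t sum)
{
	/* 64 -> 32 bits: add the rotated halves, end-around carry included. */
	sum += (sum >> 32) | (sum << 32);
	uint32_t folded = sum >> 32;

	/* 32 -> 16 bits with end-around carry, then take the complement. */
	folded = (folded & 0xffff) + (folded >> 16);
	folded = (folded & 0xffff) + (folded >> 16);
	return (uint16_t)~folded;
}

/*
 * Sum 'words' 32-bit words starting at 'buf'.  Declaring the buffers with
 * vuint32m1_t/vuint64m1_t and the "vr" constraint lets the compiler pick
 * the vector registers instead of hard-coding v0..v31 in the template.
 */
static uint16_t ip_csum_vector(const uint32_t *buf, unsigned long words)
{
	vuint64m1_t acc;
	vuint32m1_t chunk;
	unsigned long vl;
	uint64_t sum;

	asm("vsetivli	zero, 1, e64, m1, ta, ma\n\t"
	    "vmv.v.i	%[acc], 0\n\t"			/* acc[0] = 0 */
	    "1:\n\t"
	    "vsetvli	%[vl], %[words], e32, m1, ta, ma\n\t"
	    "vle32.v	%[chunk], (%[buf])\n\t"
	    "vwredsumu.vs	%[acc], %[chunk], %[acc]\n\t"
	    "sub	%[words], %[words], %[vl]\n\t"
	    "slli	%[vl], %[vl], 2\n\t"
	    "add	%[buf], %[buf], %[vl]\n\t"
	    "bnez	%[words], 1b\n\t"
	    "vsetivli	zero, 1, e64, m1, ta, ma\n\t"
	    "vmv.x.s	%[sum], %[acc]"
	    : [acc] "=&vr" (acc), [chunk] "=&vr" (chunk),
	      [vl] "=&r" (vl), [sum] "=&r" (sum),
	      [buf] "+r" (buf), [words] "+r" (words)
	    :
	    : "memory");

	return fold64(sum);
}

int main(void)
{
	/* Arbitrary 20-byte buffer standing in for an IPv4 header. */
	uint32_t hdr[5] = { 0x00450045, 0x00400001, 0x0611ff00,
			    0x0100a8c0, 0xc800a8c0 };

	printf("checksum: 0x%04x\n", ip_csum_vector(hdr, 5));
	return 0;
}

Because the compiler assigns the vector registers through the operand constraints, the template never names v0..v31 directly and needs no vector-register clobber list, which is exactly why the patch keeps the intrinsic types even though the arithmetic itself is hand-written assembly.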