@@ -73,41 +73,28 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
buff += 8;
}
if (len & 7) {
+ unsigned long trail;
#ifdef CONFIG_DCACHE_WORD_ACCESS
unsigned int shift = (8 - (len & 7)) * 8;
- unsigned long trail;
trail = (load_unaligned_zeropad(buff) << shift) >> shift;
-
- asm("addq %[trail],%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [trail] "r" (trail));
#else
+ trail = 0;
if (len & 4) {
- asm("addq %[val],%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [val] "r" ((u64)*(u32 *)buff)
- : "memory");
+ trail += *(u32 *)buff;
buff += 4;
}
if (len & 2) {
- asm("addq %[val],%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [val] "r" ((u64)*(u16 *)buff)
- : "memory");
+ trail += *(u16 *)buff;
buff += 2;
}
- if (len & 1) {
- asm("addq %[val],%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [val] "r" ((u64)*(u8 *)buff)
- : "memory");
- }
+ if (len & 1)
+ trail += *(u8 *)buff;
#endif
+ asm("addq %[trail],%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [trail] "r" (trail));
}
result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
return (__force __wsum)result;
If load_unaligned_zeropad() can't be used (um builds) then just add
together the final bytes and do a single 'adc' to add to the 64bit sum.

Signed-off-by: David Laight <david.laight@aculab.com>
---
It is a shame that this code is needed at all.
I doubt um would ever fault just reading the 32bit value.

 arch/x86/lib/csum-partial_64.c | 33 ++++++++++-----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)
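
For reference, a rough userspace sketch of what the new fallback path computes,
assuming a little-endian machine such as x86; the fold_trail() name and the
memcpy() loads are illustrative only and not part of the patch. The at most
32-bit, 16-bit and 8-bit trailing fragments are summed into a plain 64-bit
value first (their total stays well below 2^33), so one add-with-carry is
enough to fold them into the running sum:

	#include <stdint.h>
	#include <string.h>

	static uint64_t fold_trail(uint64_t sum, const unsigned char *buff, int len)
	{
		uint64_t trail = 0;

		if (len & 4) {
			uint32_t v;
			memcpy(&v, buff, 4);	/* native load, as *(u32 *)buff does on x86 */
			trail += v;
			buff += 4;
		}
		if (len & 2) {
			uint16_t v;
			memcpy(&v, buff, 2);
			trail += v;
			buff += 2;
		}
		if (len & 1)
			trail += *buff;

		/* C equivalent of "addq trail,sum; adcq $0,sum":
		 * a carry out of bit 63 wraps back into bit 0. */
		sum += trail;
		if (sum < trail)
			sum += 1;
		return sum;
	}

With the fragments accumulated this way, both branches of the #ifdef end in the
same single addq/adcq pair on temp64, which is why the trail declaration moves
outside the CONFIG_DCACHE_WORD_ACCESS conditional in the patch.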