tree:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
head:   d31c3c683ee668ba5d87c0730610442fd672525f
commit: d31c3c683ee668ba5d87c0730610442fd672525f [1/1] x86/csum: Rewrite/optimize csum_partial()
config: um-x86_64_defconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/commit/?id=d31c3c683ee668ba5d87c0730610442fd672525f
        git remote add tip https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
        git fetch --no-tags tip x86/core
        git checkout d31c3c683ee668ba5d87c0730610442fd672525f
        # save the attached .config to linux build tree
        make W=1 ARCH=um SUBARCH=x86_64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot

All errors (new ones prefixed by >>):

   arch/x86/um/../lib/csum-partial_64.c: In function 'csum_partial':
>> arch/x86/um/../lib/csum-partial_64.c:98:12: error: implicit declaration of function 'load_unaligned_zeropad' [-Werror=implicit-function-declaration]
      98 |   trail = (load_unaligned_zeropad(buff) << shift) >> shift;
         |           ^~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors


vim +/load_unaligned_zeropad +98 arch/x86/um/../lib/csum-partial_64.c

    23	
    24	/*
    25	 * Do a checksum on an arbitrary memory area.
    26	 * Returns a 32bit checksum.
    27	 *
    28	 * This isn't as time critical as it used to be because many NICs
    29	 * do hardware checksumming these days.
    30	 *
    31	 * Still, with CHECKSUM_COMPLETE this is called to compute
    32	 * checksums on IPv6 headers (40 bytes) and other small parts.
    33	 * it's best to have buff aligned on a 64-bit boundary
    34	 */
    35	__wsum csum_partial(const void *buff, int len, __wsum sum)
    36	{
    37		u64 temp64 = (__force u64)sum;
    38		unsigned odd, result;
    39	
    40		odd = 1 & (unsigned long) buff;
    41		if (unlikely(odd)) {
    42			if (unlikely(len == 0))
    43				return sum;
    44			temp64 += (*(unsigned char *)buff << 8);
    45			len--;
    46			buff++;
    47		}
    48	
    49		while (unlikely(len >= 64)) {
    50			asm("addq 0*8(%[src]),%[res]\n\t"
    51			    "adcq 1*8(%[src]),%[res]\n\t"
    52			    "adcq 2*8(%[src]),%[res]\n\t"
    53			    "adcq 3*8(%[src]),%[res]\n\t"
    54			    "adcq 4*8(%[src]),%[res]\n\t"
    55			    "adcq 5*8(%[src]),%[res]\n\t"
    56			    "adcq 6*8(%[src]),%[res]\n\t"
    57			    "adcq 7*8(%[src]),%[res]\n\t"
    58			    "adcq $0,%[res]"
    59			    : [res] "+r" (temp64)
    60			    : [src] "r" (buff)
    61			    : "memory");
    62			buff += 64;
    63			len -= 64;
    64		}
    65	
    66		if (len & 32) {
    67			asm("addq 0*8(%[src]),%[res]\n\t"
    68			    "adcq 1*8(%[src]),%[res]\n\t"
    69			    "adcq 2*8(%[src]),%[res]\n\t"
    70			    "adcq 3*8(%[src]),%[res]\n\t"
    71			    "adcq $0,%[res]"
    72			    : [res] "+r" (temp64)
    73			    : [src] "r" (buff)
    74			    : "memory");
    75			buff += 32;
    76		}
    77		if (len & 16) {
    78			asm("addq 0*8(%[src]),%[res]\n\t"
    79			    "adcq 1*8(%[src]),%[res]\n\t"
    80			    "adcq $0,%[res]"
    81			    : [res] "+r" (temp64)
    82			    : [src] "r" (buff)
    83			    : "memory");
    84			buff += 16;
    85		}
    86		if (len & 8) {
    87			asm("addq 0*8(%[src]),%[res]\n\t"
    88			    "adcq $0,%[res]"
    89			    : [res] "+r" (temp64)
    90			    : [src] "r" (buff)
    91			    : "memory");
    92			buff += 8;
    93		}
    94		if (len & 7) {
    95			unsigned int shift = (8 - (len & 7)) * 8;
    96			unsigned long trail;
    97	
  > 98			trail = (load_unaligned_zeropad(buff) << shift) >> shift;
    99	
   100			asm("addq %[trail],%[res]\n\t"
   101			    "adcq $0,%[res]"
   102			    : [res] "+r" (temp64)
   103			    : [trail] "r" (trail));
   104		}
   105		result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
   106		if (unlikely(odd)) {
   107			result = from32to16(result);
   108			result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
   109		}
   110		return (__force __wsum)result;
   111	}
   112	EXPORT_SYMBOL(csum_partial);
   113	
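For reference, the failing statement is the tail handling for the final 1..7 bytes: it uses load_unaligned_zeropad() to read one full word starting at buff and then shifts away the bytes beyond len, but that helper is not declared in this ARCH=um build. Below is a minimal sketch of an equivalent tail computation that avoids the helper entirely, reusing the variables from the listing above; it only illustrates what the masked load computes on little-endian x86-64 and is not the fix applied upstream:

	if (len & 7) {
		unsigned long trail = 0;
		unsigned int i;

		/*
		 * Build the zero-padded trailing word one byte at a time,
		 * without reading past the end of the buffer.  On little-
		 * endian x86-64 this equals
		 * (load_unaligned_zeropad(buff) << shift) >> shift.
		 */
		for (i = 0; i < (len & 7); i++)
			trail |= (unsigned long)((const unsigned char *)buff)[i] << (8 * i);

		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [trail] "r" (trail));
	}

The byte-at-a-time loop is slower than the single word load, which is presumably why the optimized version prefers load_unaligned_zeropad() when it is available.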
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org