// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86_64/lib/csum-partial.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>
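
/*
 * Fold a 32-bit value into 16 bits with end-around carry: add the high
 * halfword into the low halfword, then fold the resulting carry back in.
 */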
static inline unsigned short from32to16(unsigned a)
{
	unsigned short b = a >> 16;
	asm("addw %w2,%w0\n\t"
	    "adcw $0,%w0\n"
	    : "=r" (b)
	    : "0" (b), "r" (a));
	return b;
}

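/*
 * Fold the 64-bit accumulator down to a 32-bit __wsum.  If the buffer
 * started on an odd address the bytes were summed rotated by 8 bits,
 * so byte-swap the folded result before returning it.
 */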
static inline __wsum csum_tail(u64 temp64, int odd)
{
	unsigned int result;

	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
	if (unlikely(odd)) {
		result = from32to16(result);
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
	}
	return (__force __wsum)result;
}

/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32-bit checksum.
 *
 * This isn't as time critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 temp64 = (__force u64)sum;
	unsigned odd;

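	/*
	 * Handle an odd start address: rotate the incoming sum by 8 bits,
	 * add the first byte into the high half of a 16-bit word, and let
	 * csum_tail() byte-swap the result back at the end.  The remaining
	 * data is then 16-bit aligned.
	 */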
	odd = 1 & (unsigned long) buff;
	if (unlikely(odd)) {
		if (unlikely(len == 0))
			return sum;
		temp64 = ror32((__force u32)sum, 8);
		temp64 += (*(unsigned char *)buff << 8);
		len--;
		buff++;
	}

	/*
	 * len == 40 is the hot case due to IPv6 headers, but annotating it likely()
	 * has a noticeable negative effect on codegen for all other cases with
	 * minimal performance benefit here.
	 */
	if (len == 40) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq 4*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[40])buff));
		return csum_tail(temp64, odd);
	}

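	/*
	 * Bulk of the data: sum 64 bytes per iteration with two independent
	 * add/adc chains, counting carries out of each chain in a separate
	 * register and folding them back in after the loop.
	 */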
	if (unlikely(len >= 64)) {
		/*
		 * Extra accumulators for better ILP in the loop.
		 */
		u64 tmp_accum, tmp_carries;

		asm("xorl %k[tmp_accum],%k[tmp_accum]\n\t"
		    "xorl %k[tmp_carries],%k[tmp_carries]\n\t"
		    "subl $64, %[len]\n\t"
		    "1:\n\t"
		    "addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcl $0,%k[tmp_carries]\n\t"
		    "addq 4*8(%[src]),%[tmp_accum]\n\t"
		    "adcq 5*8(%[src]),%[tmp_accum]\n\t"
		    "adcq 6*8(%[src]),%[tmp_accum]\n\t"
		    "adcq 7*8(%[src]),%[tmp_accum]\n\t"
		    "adcl $0,%k[tmp_carries]\n\t"
		    "addq $64, %[src]\n\t"
		    "subl $64, %[len]\n\t"
		    "jge 1b\n\t"
		    "addq %[tmp_accum],%[res]\n\t"
		    "adcq %[tmp_carries],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [tmp_accum] "=&r"(tmp_accum),
		      [tmp_carries] "=&r"(tmp_carries), [res] "+r"(temp64),
		      [len] "+r"(len), [src] "+r"(buff)
		    : "m"(*(const char *)buff));
	}

	if (len & 32) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[32])buff));
		buff += 32;
	}
	if (len & 16) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[16])buff));
		buff += 16;
	}
	if (len & 8) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [src] "r"(buff), "m"(*(const char(*)[8])buff));
		buff += 8;
	}
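	/*
	 * Up to 7 trailing bytes remain.  load_unaligned_zeropad() safely
	 * reads a full 8-byte word even if that crosses into an unmapped
	 * page; the shift pair then clears the bytes beyond the buffer.
	 */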
	if (len & 7) {
		unsigned int shift = (-len << 3) & 63;
		unsigned long trail;

		trail = (load_unaligned_zeropad(buff) << shift) >> shift;

		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r"(temp64)
		    : [trail] "r"(trail));
	}
	return csum_tail(temp64, odd);
}
EXPORT_SYMBOL(csum_partial);

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in ip.c.
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);