/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * SSE-2 implementation of RAID-6 syndrome functions
 *
 */

#include <linux/raid/pq.h>
#include "x86.h"	/* boot_cpu_has(), kernel_fpu_begin()/kernel_fpu_end() */

static const struct raid6_sse_constants {
	u64 x1d[2];
} raid6_sse_constants __attribute__((aligned(16))) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};

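/*
 * The repeated 0x1d bytes implement the reduction step of a bytewise
 * multiply-by-2 in GF(2^8): after shifting each byte left by one bit,
 * any byte whose high bit was set must be XORed with 0x1d, the low
 * byte of the RAID-6 generator polynomial x^8+x^4+x^3+x^2+1 (0x11d).
 * A rough scalar equivalent of the SIMD sequence used below, for one
 * byte (gf_mul2 is illustrative only, not part of this file):
 *
 *	static inline u8 gf_mul2(u8 v)
 *	{
 *		return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
 *	}
 *
 * pcmpgtb against an all-zero register yields 0xff in each byte whose
 * signed value is negative, i.e. has the high bit set; pand turns that
 * into the 0x1d correction mask, branch-free, 16 bytes at a time.
 */
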
static int raid6_have_sse2(void)
{
	/* Not really boot_cpu but "all_cpus" */
	return boot_cpu_has(X86_FEATURE_MMX) &&
		boot_cpu_has(X86_FEATURE_FXSR) &&
		boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2);
}

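/*
 * Each gen_syndrome routine below computes, for every byte lane:
 *
 *	P = D_z0 ^ D_(z0-1) ^ ... ^ D_0			(XOR parity)
 *	Q = 2^z0 * D_z0 ^ ... ^ 2*D_1 ^ D_0		(in GF(2^8))
 *
 * Q is evaluated by Horner's rule: multiply the running value by 2
 * and XOR in the next disk's data at each step.  In rough scalar C
 * (names illustrative only):
 *
 *	q = 0;
 *	for (z = z0; z >= 0; z--)
 *		q = gf_mul2(q) ^ data[z];
 */
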
/*
 * Plain SSE2 implementation
 */
static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("movdqa %xmm2,%xmm4");	/* Q[0] */
		asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
		for ( z = z0-2 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5"); /* 0xff where Q has the high bit set */
			asm volatile("paddb %xmm4,%xmm4");   /* Q <<= 1, bytewise */
			asm volatile("pand %xmm0,%xmm5");    /* 0x1d reduction mask */
			asm volatile("pxor %xmm5,%xmm4");    /* Q = 2*Q in GF(2^8) */
			asm volatile("pxor %xmm5,%xmm5");    /* re-zero the temp */
			asm volatile("pxor %xmm6,%xmm2");    /* P ^= data */
			asm volatile("pxor %xmm6,%xmm4");    /* Q ^= data */
			asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
		}
		/* Final round for the data already loaded into xmm6 */
		asm volatile("pcmpgtb %xmm4,%xmm5");
		asm volatile("paddb %xmm4,%xmm4");
		asm volatile("pand %xmm0,%xmm5");
		asm volatile("pxor %xmm5,%xmm4");
		asm volatile("pxor %xmm5,%xmm5");
		asm volatile("pxor %xmm6,%xmm2");
		asm volatile("pxor %xmm6,%xmm4");

		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x1 = {
	raid6_sse21_gen_syndrome,
	raid6_have_sse2,
	"sse2x1",
	1			/* Has cache hints */
};

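/*
 * At init time the raid6 library calls ->valid() on each registered
 * implementation, benchmarks the usable ones, and selects the fastest;
 * the final field flags routines with special performance attributes
 * (here, the use of non-temporal cache hints).
 */
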
/*
 * Unrolled-by-2 SSE2 implementation
 */
static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */
	asm volatile("pxor %xmm7,%xmm7");	/* Zero temp */

	/* We uniformly assume a single prefetch covers at least 32 bytes */
	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d]));    /* P[0] */
		asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
		asm volatile("movdqa %xmm2,%xmm4");	/* Q[0] */
		asm volatile("movdqa %xmm3,%xmm6");	/* Q[1] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			/* Multiply both Q chains by 2 in GF(2^8) */
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			/* Fold in the next disk's data, then re-zero the temps */
			asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x2 = {
	raid6_sse22_gen_syndrome,
	raid6_have_sse2,
	"sse2x2",
	1			/* Has cache hints */
};

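/*
 * The 2x unrolling above interleaves two independent P/Q dependency
 * chains (xmm2/xmm4 and xmm3/xmm6), so the GF(2^8) multiply of one
 * chain can overlap the loads and XORs of the other on superscalar
 * CPUs.  The 4x version below extends this to four chains.
 */
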
#ifdef CONFIG_X86_64

/*
 * Unrolled-by-4 SSE2 implementation; needs the additional xmm8-xmm15
 * registers that are only available in 64-bit mode.
 */
static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm2,%xmm2");	/* P[0] */
	asm volatile("pxor %xmm3,%xmm3");	/* P[1] */
	asm volatile("pxor %xmm4,%xmm4");	/* Q[0] */
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */
	asm volatile("pxor %xmm6,%xmm6");	/* Q[1] */
	asm volatile("pxor %xmm7,%xmm7");	/* Zero temp */
	asm volatile("pxor %xmm10,%xmm10");	/* P[2] */
	asm volatile("pxor %xmm11,%xmm11");	/* P[3] */
	asm volatile("pxor %xmm12,%xmm12");	/* Q[2] */
	asm volatile("pxor %xmm13,%xmm13");	/* Zero temp */
	asm volatile("pxor %xmm14,%xmm14");	/* Q[3] */
	asm volatile("pxor %xmm15,%xmm15");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 64 ) {
		for ( z = z0 ; z >= 0 ; z-- ) {
			/* The second prefetch seems to improve performance... */
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("pxor %xmm3,%xmm3");
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("pxor %xmm10,%xmm10");
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %xmm11,%xmm11");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("pxor %xmm6,%xmm6");
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("pxor %xmm12,%xmm12");
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
		asm volatile("pxor %xmm14,%xmm14");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x4 = {
	raid6_sse24_gen_syndrome,
	raid6_have_sse2,
	"sse2x4",
	1			/* Has cache hints */
};

#endif /* CONFIG_X86_64 */