/* OpenBLAS — kernel/mips/cgemv_t_msa.c (commit: "fix build error") */
1 /*******************************************************************************
2 Copyright (c) 2016, The OpenBLAS Project
3 All rights reserved.
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions are
6 met:
7 1. Redistributions of source code must retain the above copyright
8 notice, this list of conditions and the following disclaimer.
9 2. Redistributions in binary form must reproduce the above copyright
10 notice, this list of conditions and the following disclaimer in
11 the documentation and/or other materials provided with the
12 distribution.
13 3. Neither the name of the OpenBLAS project nor the names of
14 its contributors may be used to endorse or promote products
15 derived from this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
25 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *******************************************************************************/
27
28 #include "common.h"
29 #include "macros_msa.h"
30
31 #undef OP0
32 #undef OP1
33 #undef OP2
34
35 #if ( !defined(CONJ) && !defined(XCONJ) ) || ( defined(CONJ) && defined(XCONJ) )
36     #define OP0  -=
37     #define OP1  +=
38     #define OP2  +=
39 #else
40     #define OP0  +=
41     #define OP1  +=
42     #define OP2  -=
43 #endif
44
45 #define CGEMV_T_8x4()                        \
46     LD_SP4(pa0 + k, 4, t0, t1, t2, t3);      \
47     LD_SP4(pa1 + k, 4, t4, t5, t6, t7);      \
48     LD_SP4(pa2 + k, 4, t8, t9, t10, t11);    \
49     LD_SP4(pa3 + k, 4, t12, t13, t14, t15);  \
50                                              \
51     PCKEVOD_W2_SP(t1, t0, src0r, src0i);     \
52     PCKEVOD_W2_SP(t3, t2, src1r, src1i);     \
53     PCKEVOD_W2_SP(t5, t4, src2r, src2i);     \
54     PCKEVOD_W2_SP(t7, t6, src3r, src3i);     \
55     PCKEVOD_W2_SP(t9, t8, src4r, src4i);     \
56     PCKEVOD_W2_SP(t11, t10, src5r, src5i);   \
57     PCKEVOD_W2_SP(t13, t12, src6r, src6i);   \
58     PCKEVOD_W2_SP(t15, t14, src7r, src7i);   \
59                                              \
60     tp0r += src0r * x0r;                     \
61     tp0r += src1r * x1r;                     \
62     tp0r OP0 src0i * x0i;                    \
63     tp0r OP0 src1i * x1i;                    \
64                                              \
65     tp1r += src2r * x0r;                     \
66     tp1r += src3r * x1r;                     \
67     tp1r OP0 src2i * x0i;                    \
68     tp1r OP0 src3i * x1i;                    \
69                                              \
70     tp2r += src4r * x0r;                     \
71     tp2r += src5r * x1r;                     \
72     tp2r OP0 src4i * x0i;                    \
73     tp2r OP0 src5i * x1i;                    \
74                                              \
75     tp3r += src6r * x0r;                     \
76     tp3r += src7r * x1r;                     \
77     tp3r OP0 src6i * x0i;                    \
78     tp3r OP0 src7i * x1i;                    \
79                                              \
80     tp0i OP1 src0r * x0i;                    \
81     tp0i OP1 src1r * x1i;                    \
82     tp0i OP2 src0i * x0r;                    \
83     tp0i OP2 src1i * x1r;                    \
84                                              \
85     tp1i OP1 src2r * x0i;                    \
86     tp1i OP1 src3r * x1i;                    \
87     tp1i OP2 src2i * x0r;                    \
88     tp1i OP2 src3i * x1r;                    \
89                                              \
90     tp2i OP1 src4r * x0i;                    \
91     tp2i OP1 src5r * x1i;                    \
92     tp2i OP2 src4i * x0r;                    \
93     tp2i OP2 src5i * x1r;                    \
94                                              \
95     tp3i OP1 src6r * x0i;                    \
96     tp3i OP1 src7r * x1i;                    \
97     tp3i OP2 src6i * x0r;                    \
98     tp3i OP2 src7i * x1r;                    \
99
100 #define CGEMV_T_8x2()                     \
101     LD_SP4(pa0 + k, 4, t0, t1, t2, t3);   \
102     LD_SP4(pa1 + k, 4, t4, t5, t6, t7);   \
103                                           \
104     PCKEVOD_W2_SP(t1, t0, src0r, src0i);  \
105     PCKEVOD_W2_SP(t3, t2, src1r, src1i);  \
106     PCKEVOD_W2_SP(t5, t4, src2r, src2i);  \
107     PCKEVOD_W2_SP(t7, t6, src3r, src3i);  \
108                                           \
109     tp0r += src0r * x0r;                  \
110     tp0r += src1r * x1r;                  \
111     tp0r OP0 src0i * x0i;                 \
112     tp0r OP0 src1i * x1i;                 \
113                                           \
114     tp1r += src2r * x0r;                  \
115     tp1r += src3r * x1r;                  \
116     tp1r OP0 src2i * x0i;                 \
117     tp1r OP0 src3i * x1i;                 \
118                                           \
119     tp0i OP1 src0r * x0i;                 \
120     tp0i OP1 src1r * x1i;                 \
121     tp0i OP2 src0i * x0r;                 \
122     tp0i OP2 src1i * x1r;                 \
123                                           \
124     tp1i OP1 src2r * x0i;                 \
125     tp1i OP1 src3r * x1i;                 \
126     tp1i OP2 src2i * x0r;                 \
127     tp1i OP2 src3i * x1r;                 \
128
129 #define CGEMV_T_8x1()                     \
130     LD_SP4(pa0 + k, 4, t0, t1, t2, t3);   \
131                                           \
132     PCKEVOD_W2_SP(t1, t0, src0r, src0i);  \
133     PCKEVOD_W2_SP(t3, t2, src1r, src1i);  \
134                                           \
135     tp0r += src0r * x0r;                  \
136     tp0r += src1r * x1r;                  \
137     tp0r OP0 src0i * x0i;                 \
138     tp0r OP0 src1i * x1i;                 \
139                                           \
140     tp0i OP1 src0r * x0i;                 \
141     tp0i OP1 src1r * x1i;                 \
142     tp0i OP2 src0i * x0r;                 \
143     tp0i OP2 src1i * x1r;                 \
144
145 #define CGEMV_T_4x4()                       \
146     LD_SP2(pa0 + k, 4, t0, t1);             \
147     LD_SP2(pa1 + k, 4, t4, t5);             \
148     LD_SP2(pa2 + k, 4, t8, t9);             \
149     LD_SP2(pa3 + k, 4, t12, t13);           \
150                                             \
151     PCKEVOD_W2_SP(t1, t0, src0r, src0i);    \
152     PCKEVOD_W2_SP(t5, t4, src2r, src2i);    \
153     PCKEVOD_W2_SP(t9, t8, src4r, src4i);    \
154     PCKEVOD_W2_SP(t13, t12, src6r, src6i);  \
155                                             \
156     tp0r += src0r * x0r;                    \
157     tp0r OP0 src0i * x0i;                   \
158                                             \
159     tp1r += src2r * x0r;                    \
160     tp1r OP0 src2i * x0i;                   \
161                                             \
162     tp2r += src4r * x0r;                    \
163     tp2r OP0 src4i * x0i;                   \
164                                             \
165     tp3r += src6r * x0r;                    \
166     tp3r OP0 src6i * x0i;                   \
167                                             \
168     tp0i OP1 src0r * x0i;                   \
169     tp0i OP2 src0i * x0r;                   \
170                                             \
171     tp1i OP1 src2r * x0i;                   \
172     tp1i OP2 src2i * x0r;                   \
173                                             \
174     tp2i OP1 src4r * x0i;                   \
175     tp2i OP2 src4i * x0r;                   \
176                                             \
177     tp3i OP1 src6r * x0i;                   \
178     tp3i OP2 src6i * x0r;                   \
179
180 #define CGEMV_T_4x2()                     \
181     LD_SP2(pa0 + k, 4, t0, t1);           \
182     LD_SP2(pa1 + k, 4, t4, t5);           \
183                                           \
184     PCKEVOD_W2_SP(t1, t0, src0r, src0i);  \
185     PCKEVOD_W2_SP(t5, t4, src2r, src2i);  \
186                                           \
187     tp0r += src0r * x0r;                  \
188     tp0r OP0 src0i * x0i;                 \
189                                           \
190     tp1r += src2r * x0r;                  \
191     tp1r OP0 src2i * x0i;                 \
192                                           \
193     tp0i OP1 src0r * x0i;                 \
194     tp0i OP2 src0i * x0r;                 \
195                                           \
196     tp1i OP1 src2r * x0i;                 \
197     tp1i OP2 src2i * x0r;                 \
198
199 #define CGEMV_T_4x1()                     \
200     LD_SP2(pa0 + k, 4, t0, t1);           \
201                                           \
202     PCKEVOD_W2_SP(t1, t0, src0r, src0i);  \
203                                           \
204     tp0r += src0r * x0r;                  \
205     tp0r OP0 src0i * x0i;                 \
206                                           \
207     tp0i OP1 src0r * x0i;                 \
208     tp0i OP2 src0i * x0r;                 \
209
210 #define CGEMV_T_1x4()                           \
211     temp0r  += pa0[k + 0] * x[0 * inc_x2];      \
212     temp0r OP0 pa0[k + 1] * x[0 * inc_x2 + 1];  \
213     temp1r  += pa1[k + 0] * x[0 * inc_x2];      \
214     temp1r OP0 pa1[k + 1] * x[0 * inc_x2 + 1];  \
215     temp2r  += pa2[k + 0] * x[0 * inc_x2];      \
216     temp2r OP0 pa2[k + 1] * x[0 * inc_x2 + 1];  \
217     temp3r  += pa3[k + 0] * x[0 * inc_x2];      \
218     temp3r OP0 pa3[k + 1] * x[0 * inc_x2 + 1];  \
219                                                 \
220     temp0i OP1 pa0[k + 0] * x[0 * inc_x2 + 1];  \
221     temp0i OP2 pa0[k + 1] * x[0 * inc_x2];      \
222     temp1i OP1 pa1[k + 0] * x[0 * inc_x2 + 1];  \
223     temp1i OP2 pa1[k + 1] * x[0 * inc_x2];      \
224     temp2i OP1 pa2[k + 0] * x[0 * inc_x2 + 1];  \
225     temp2i OP2 pa2[k + 1] * x[0 * inc_x2];      \
226     temp3i OP1 pa3[k + 0] * x[0 * inc_x2 + 1];  \
227     temp3i OP2 pa3[k + 1] * x[0 * inc_x2];      \
228
229 #define CGEMV_T_1x2()                           \
230     temp0r  += pa0[k + 0] * x[0 * inc_x2];      \
231     temp0r OP0 pa0[k + 1] * x[0 * inc_x2 + 1];  \
232     temp1r  += pa1[k + 0] * x[0 * inc_x2];      \
233     temp1r OP0 pa1[k + 1] * x[0 * inc_x2 + 1];  \
234                                                 \
235     temp0i OP1 pa0[k + 0] * x[0 * inc_x2 + 1];  \
236     temp0i OP2 pa0[k + 1] * x[0 * inc_x2];      \
237     temp1i OP1 pa1[k + 0] * x[0 * inc_x2 + 1];  \
238     temp1i OP2 pa1[k + 1] * x[0 * inc_x2];      \
239
240 #define CGEMV_T_1x1()                           \
241     temp0r  += pa0[k + 0] * x[0 * inc_x2];      \
242     temp0r OP0 pa0[k + 1] * x[0 * inc_x2 + 1];  \
243                                                 \
244     temp0i OP1 pa0[k + 0] * x[0 * inc_x2 + 1];  \
245     temp0i OP2 pa0[k + 1] * x[0 * inc_x2];      \
246
247 #define CSCALE_STORE_Y4_GP()    \
248     res0r = y[0 * inc_y2];      \
249     res1r = y[1 * inc_y2];      \
250     res2r = y[2 * inc_y2];      \
251     res3r = y[3 * inc_y2];      \
252                                 \
253     res0i = y[0 * inc_y2 + 1];  \
254     res1i = y[1 * inc_y2 + 1];  \
255     res2i = y[2 * inc_y2 + 1];  \
256     res3i = y[3 * inc_y2 + 1];  \
257                                 \
258     res0r  += alphar * temp0r;  \
259     res0r OP0 alphai * temp0i;  \
260     res1r  += alphar * temp1r;  \
261     res1r OP0 alphai * temp1i;  \
262     res2r  += alphar * temp2r;  \
263     res2r OP0 alphai * temp2i;  \
264     res3r  += alphar * temp3r;  \
265     res3r OP0 alphai * temp3i;  \
266                                 \
267     res0i OP1 alphar * temp0i;  \
268     res0i OP2 alphai * temp0r;  \
269     res1i OP1 alphar * temp1i;  \
270     res1i OP2 alphai * temp1r;  \
271     res2i OP1 alphar * temp2i;  \
272     res2i OP2 alphai * temp2r;  \
273     res3i OP1 alphar * temp3i;  \
274     res3i OP2 alphai * temp3r;  \
275                                 \
276     y[0 * inc_y2] = res0r;      \
277     y[1 * inc_y2] = res1r;      \
278     y[2 * inc_y2] = res2r;      \
279     y[3 * inc_y2] = res3r;      \
280                                 \
281     y[0 * inc_y2 + 1] = res0i;  \
282     y[1 * inc_y2 + 1] = res1i;  \
283     y[2 * inc_y2 + 1] = res2i;  \
284     y[3 * inc_y2 + 1] = res3i;  \
285
286 #define CSCALE_STORE_Y2_GP()    \
287     res0r = y[0 * inc_y2];      \
288     res1r = y[1 * inc_y2];      \
289                                 \
290     res0i = y[0 * inc_y2 + 1];  \
291     res1i = y[1 * inc_y2 + 1];  \
292                                 \
293     res0r  += alphar * temp0r;  \
294     res0r OP0 alphai * temp0i;  \
295     res1r  += alphar * temp1r;  \
296     res1r OP0 alphai * temp1i;  \
297                                 \
298     res0i OP1 alphar * temp0i;  \
299     res0i OP2 alphai * temp0r;  \
300     res1i OP1 alphar * temp1i;  \
301     res1i OP2 alphai * temp1r;  \
302                                 \
303     y[0 * inc_y2] = res0r;      \
304     y[1 * inc_y2] = res1r;      \
305                                 \
306     y[0 * inc_y2 + 1] = res0i;  \
307     y[1 * inc_y2 + 1] = res1i;  \
308
309
310 #define CSCALE_STORE_Y1_GP()    \
311     res0r = y[0 * inc_y2];      \
312     res0i = y[0 * inc_y2 + 1];  \
313                                 \
314     res0r  += alphar * temp0r;  \
315     res0r OP0 alphai * temp0i;  \
316                                 \
317     res0i OP1 alphar * temp0i;  \
318     res0i OP2 alphai * temp0r;  \
319                                 \
320     y[0 * inc_y2] = res0r;      \
321     y[0 * inc_y2 + 1] = res0i;  \
322
323 #define CLOAD_X8_VECTOR()             \
324     LD_SP4(x, 4, x0, x1, x2, x3);     \
325     PCKEVOD_W2_SP(x1, x0, x0r, x0i);  \
326     PCKEVOD_W2_SP(x3, x2, x1r, x1i);  \
327
328 #define CLOAD_X4_VECTOR()             \
329     LD_SP2(x, 4, x0, x1);             \
330     PCKEVOD_W2_SP(x1, x0, x0r, x0i);  \
331
332 #define CLOAD_X8_GP()                                                                \
333     x0r = (v4f32) __msa_insert_w((v4i32) tp0r, 0, *((int *) (x + 0 * inc_x2)));      \
334     x0r = (v4f32) __msa_insert_w((v4i32) x0r,  1, *((int *) (x + 1 * inc_x2)));      \
335     x0r = (v4f32) __msa_insert_w((v4i32) x0r,  2, *((int *) (x + 2 * inc_x2)));      \
336     x0r = (v4f32) __msa_insert_w((v4i32) x0r,  3, *((int *) (x + 3 * inc_x2)));      \
337     x1r = (v4f32) __msa_insert_w((v4i32) tp0r, 0, *((int *) (x + 4 * inc_x2)));      \
338     x1r = (v4f32) __msa_insert_w((v4i32) x1r,  1, *((int *) (x + 5 * inc_x2)));      \
339     x1r = (v4f32) __msa_insert_w((v4i32) x1r,  2, *((int *) (x + 6 * inc_x2)));      \
340     x1r = (v4f32) __msa_insert_w((v4i32) x1r,  3, *((int *) (x + 7 * inc_x2)));      \
341     x0i = (v4f32) __msa_insert_w((v4i32) tp0r, 0, *((int *) (x + 0 * inc_x2 + 1)));  \
342     x0i = (v4f32) __msa_insert_w((v4i32) x0i,  1, *((int *) (x + 1 * inc_x2 + 1)));  \
343     x0i = (v4f32) __msa_insert_w((v4i32) x0i,  2, *((int *) (x + 2 * inc_x2 + 1)));  \
344     x0i = (v4f32) __msa_insert_w((v4i32) x0i,  3, *((int *) (x + 3 * inc_x2 + 1)));  \
345     x1i = (v4f32) __msa_insert_w((v4i32) tp0r, 0, *((int *) (x + 4 * inc_x2 + 1)));  \
346     x1i = (v4f32) __msa_insert_w((v4i32) x1i,  1, *((int *) (x + 5 * inc_x2 + 1)));  \
347     x1i = (v4f32) __msa_insert_w((v4i32) x1i,  2, *((int *) (x + 6 * inc_x2 + 1)));  \
348     x1i = (v4f32) __msa_insert_w((v4i32) x1i,  3, *((int *) (x + 7 * inc_x2 + 1)));  \
349
350 #define CLOAD_X4_GP()                                                                \
351     x0r = (v4f32) __msa_insert_w((v4i32) tp0r, 0, *((int *) (x + 0 * inc_x2)));      \
352     x0r = (v4f32) __msa_insert_w((v4i32) x0r,  1, *((int *) (x + 1 * inc_x2)));      \
353     x0r = (v4f32) __msa_insert_w((v4i32) x0r,  2, *((int *) (x + 2 * inc_x2)));      \
354     x0r = (v4f32) __msa_insert_w((v4i32) x0r,  3, *((int *) (x + 3 * inc_x2)));      \
355     x0i = (v4f32) __msa_insert_w((v4i32) tp0r, 0, *((int *) (x + 0 * inc_x2 + 1)));  \
356     x0i = (v4f32) __msa_insert_w((v4i32) x0i,  1, *((int *) (x + 1 * inc_x2 + 1)));  \
357     x0i = (v4f32) __msa_insert_w((v4i32) x0i,  2, *((int *) (x + 2 * inc_x2 + 1)));  \
358     x0i = (v4f32) __msa_insert_w((v4i32) x0i,  3, *((int *) (x + 3 * inc_x2 + 1)));  \
359
360 #define CGEMV_T_MSA()                                \
361     for (j = (n >> 2); j--;)                         \
362     {                                                \
363         tp0r = tp1r = tp2r = tp3r = zero;            \
364         tp0i = tp1i = tp2i = tp3i = zero;            \
365                                                      \
366         k = 0;                                       \
367         k_pref = pref_offset;                        \
368         x = srcx_org;                                \
369                                                      \
370         for (i = (m >> 3); i--;)                     \
371         {                                            \
372             PREFETCH(pa0 + k_pref + 16 + 0);         \
373             PREFETCH(pa0 + k_pref + 16 + 8);         \
374             PREFETCH(pa1 + k_pref + 16 + 0);         \
375             PREFETCH(pa1 + k_pref + 16 + 8);         \
376             PREFETCH(pa2 + k_pref + 16 + 0);         \
377             PREFETCH(pa2 + k_pref + 16 + 8);         \
378             PREFETCH(pa3 + k_pref + 16 + 0);         \
379             PREFETCH(pa3 + k_pref + 16 + 8);         \
380                                                      \
381             CLOAD_X8()                               \
382             CGEMV_T_8x4();                           \
383                                                      \
384             k += 2 * 8;                              \
385             k_pref += 2 * 8;                         \
386             x += inc_x2 * 8;                         \
387         }                                            \
388                                                      \
389         if (m & 4)                                   \
390         {                                            \
391             CLOAD_X4();                              \
392                                                      \
393             CGEMV_T_4x4();                           \
394                                                      \
395             k += 2 * 4;                              \
396             x += inc_x2 * 4;                         \
397         }                                            \
398                                                      \
399         TRANSPOSE4x4_SP_SP(tp0r, tp1r, tp2r, tp3r,   \
400                            tp0r, tp1r, tp2r, tp3r);  \
401         TRANSPOSE4x4_SP_SP(tp0i, tp1i, tp2i, tp3i,   \
402                            tp0i, tp1i, tp2i, tp3i);  \
403                                                      \
404         tp0r += tp1r;                                \
405         tp0r += tp2r;                                \
406         tp0r += tp3r;                                \
407         tp0i += tp1i;                                \
408         tp0i += tp2i;                                \
409         tp0i += tp3i;                                \
410                                                      \
411         temp0r = tp0r[0];                            \
412         temp1r = tp0r[1];                            \
413         temp2r = tp0r[2];                            \
414         temp3r = tp0r[3];                            \
415         temp0i = tp0i[0];                            \
416         temp1i = tp0i[1];                            \
417         temp2i = tp0i[2];                            \
418         temp3i = tp0i[3];                            \
419                                                      \
420         for (i = (m & 3); i--;)                      \
421         {                                            \
422             CGEMV_T_1x4();                           \
423                                                      \
424             k += 2;                                  \
425             x += inc_x2;                             \
426         }                                            \
427                                                      \
428         CSCALE_STORE_Y4_GP();                        \
429                                                      \
430         pa0 += 4 * lda2;                             \
431         pa1 += 4 * lda2;                             \
432         pa2 += 4 * lda2;                             \
433         pa3 += 4 * lda2;                             \
434         y += 4 * inc_y2;                             \
435     }                                                \
436                                                      \
437     if (n & 2)                                       \
438     {                                                \
439         tp0r = tp1r = zero;                          \
440         tp0i = tp1i = zero;                          \
441                                                      \
442         k = 0;                                       \
443         x = srcx_org;                                \
444                                                      \
445         for (i = (m >> 3); i--;)                     \
446         {                                            \
447             CLOAD_X8();                              \
448                                                      \
449             CGEMV_T_8x2();                           \
450                                                      \
451             k += 2 * 8;                              \
452             x += inc_x2 * 8;                         \
453         }                                            \
454                                                      \
455         if (m & 4)                                   \
456         {                                            \
457             CLOAD_X4();                              \
458                                                      \
459             CGEMV_T_4x2();                           \
460                                                      \
461             k += 2 * 4;                              \
462             x += inc_x2 * 4;                         \
463         }                                            \
464                                                      \
465         TRANSPOSE4x4_SP_SP(tp0r, tp1r, tp0i, tp1i,   \
466                            tp0r, tp1r, tp0i, tp1i);  \
467                                                      \
468         tp0r += tp1r;                                \
469         tp0r += tp0i;                                \
470         tp0r += tp1i;                                \
471                                                      \
472         temp0r = tp0r[0];                            \
473         temp1r = tp0r[1];                            \
474         temp0i = tp0r[2];                            \
475         temp1i = tp0r[3];                            \
476                                                      \
477         for (i = (m & 3); i--;)                      \
478         {                                            \
479             CGEMV_T_1x2();                           \
480                                                      \
481             k += 2;                                  \
482             x += inc_x2;                             \
483         }                                            \
484                                                      \
485         CSCALE_STORE_Y2_GP();                        \
486                                                      \
487         pa0 += 2 * lda2;                             \
488         pa1 += 2 * lda2;                             \
489         y += 2 * inc_y2;                             \
490     }                                                \
491                                                      \
492     if (n & 1)                                       \
493     {                                                \
494         tp0r = zero;                                 \
495         tp0i = zero;                                 \
496                                                      \
497         k = 0;                                       \
498         x = srcx_org;                                \
499                                                      \
500         for (i = (m >> 3); i--;)                     \
501         {                                            \
502             CLOAD_X8();                              \
503                                                      \
504             CGEMV_T_8x1();                           \
505                                                      \
506             k += 2 * 8;                              \
507             x += inc_x2 * 8;                         \
508         }                                            \
509                                                      \
510         if (m & 4)                                   \
511         {                                            \
512             CLOAD_X4();                              \
513                                                      \
514             CGEMV_T_4x1();                           \
515                                                      \
516             k += 2 * 4;                              \
517             x += inc_x2 * 4;                         \
518         }                                            \
519                                                      \
520         ILVRL_W2_SP(tp0i, tp0r, t0, t1);             \
521                                                      \
522         t0 += t1;                                    \
523                                                      \
524         temp0r = t0[0] + t0[2];                      \
525         temp0i = t0[1] + t0[3];                      \
526                                                      \
527         for (i = (m & 3); i--;)                      \
528         {                                            \
529             CGEMV_T_1x1();                           \
530                                                      \
531             k += 2;                                  \
532             x += inc_x2;                             \
533         }                                            \
534                                                      \
535         CSCALE_STORE_Y1_GP();                        \
536                                                      \
537         pa0 += lda2;                                 \
538         y += inc_y2;                                 \
539     }                                                \
540
541 int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alphar, FLOAT alphai,
542           FLOAT *A, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y,
543           BLASLONG inc_y, FLOAT *buffer)
544 {
545     BLASLONG i, j, k, k_pref, pref_offset;
546     FLOAT *pa0, *pa1, *pa2, *pa3;
547     FLOAT *srcx_org = x;
548     FLOAT temp0r, temp0i, temp2r, temp2i, temp1r, temp1i, temp3r, temp3i;
549     FLOAT res0r, res0i, res2r, res2i, res1r, res1i, res3r, res3i;
550     BLASLONG inc_x2, inc_y2, lda2;
551     v4f32 zero = {0};
552     v4f32 x0, x1, x2, x3, x0r, x1r, x0i, x1i;
553     v4f32 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
554     v4f32 src0r, src1r, src2r, src3r, src4r, src5r, src6r, src7r;
555     v4f32 src0i, src1i, src2i, src3i, src4i, src5i, src6i, src7i;
556     v4f32 tp0r, tp1r, tp2r, tp3r, tp0i, tp1i, tp2i, tp3i;
557
558     lda2 = 2 * lda;
559
560     pref_offset = (uintptr_t)A & (L1_DATA_LINESIZE - 1);
561     pref_offset = L1_DATA_LINESIZE - pref_offset;
562     pref_offset = pref_offset / sizeof(FLOAT);
563
564     pa0 = A;
565     pa1 = A + lda2;
566     pa2 = A + 2 * lda2;
567     pa3 = A + 3 * lda2;
568
569     inc_x2 = 2 * inc_x;
570     inc_y2 = 2 * inc_y;
571
572     if (2 == inc_x2)
573     {
574         #define CLOAD_X8  CLOAD_X8_VECTOR
575         #define CLOAD_X4  CLOAD_X4_VECTOR
576
577         CGEMV_T_MSA();
578
579         #undef CLOAD_X8
580         #undef CLOAD_X4
581     }
582     else
583     {
584         #define CLOAD_X8  CLOAD_X8_GP
585         #define CLOAD_X4  CLOAD_X4_GP
586
587         CGEMV_T_MSA();
588
589         #undef CLOAD_X8
590         #undef CLOAD_X4
591     }
592
593     return(0);
594 }
595
596 #undef OP0
597 #undef OP1
598 #undef OP2