1 /*******************************************************************************
2 Copyright (c) 2016, The OpenBLAS Project
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions are met:
7 1. Redistributions of source code must retain the above copyright
8 notice, this list of conditions and the following disclaimer.
9 2. Redistributions in binary form must reproduce the above copyright
10 notice, this list of conditions and the following disclaimer in
11 the documentation and/or other materials provided with the distribution.
13 3. Neither the name of the OpenBLAS project nor the names of
14 its contributors may be used to endorse or promote products
15 derived from this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
25 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *******************************************************************************/
30 #include "macros_msa.h"
/* Vectorized fabsf(): clear the sign bit of each of the four packed
 * floats in 'in' by ANDing its bit pattern with 0x7FFFFFFF ('and_vec',
 * a local v4i32 in the enclosing function).  The argument is fully
 * parenthesized so the cast applies to the whole expression, not just
 * its first operand (cast binds tighter than '+', '&', etc.). */
#define AND_VEC_W(in) ((v4f32) ((v4i32) (in) & and_vec))
34 FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
37 FLOAT data0, data1, sumf = 0.0;
38 v4f32 src0, src1, src2, src3, src4, src5, src6, src7;
39 v4f32 src8, src9, src10, src11, src12, src13, src14, src15;
40 v4f32 sum_abs0 = {0, 0, 0, 0};
41 v4f32 sum_abs1 = {0, 0, 0, 0};
42 v4f32 sum_abs2 = {0, 0, 0, 0};
43 v4f32 sum_abs3 = {0, 0, 0, 0};
44 v4f32 zero_v = {0, 0, 0, 0};
45 v4i32 and_vec = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
47 if (n <= 0 || inc_x <= 0) return (sumf);
56 pref_offset = (BLASLONG)x & (L1_DATA_LINESIZE - 1);
59 pref_offset = L1_DATA_LINESIZE - pref_offset;
60 pref_offset = pref_offset / sizeof(FLOAT);
62 x_pref = x + pref_offset + 128 + 32;
64 LD_SP8_INC(x, 4, src0, src1, src2, src3, src4, src5, src6, src7);
65 for (i = 0; i < (n >> 6) - 1; i++)
67 PREF_OFFSET(x_pref, 0);
68 PREF_OFFSET(x_pref, 32);
69 PREF_OFFSET(x_pref, 64);
70 PREF_OFFSET(x_pref, 96);
71 PREF_OFFSET(x_pref, 128);
72 PREF_OFFSET(x_pref, 160);
73 PREF_OFFSET(x_pref, 192);
74 PREF_OFFSET(x_pref, 224);
77 LD_SP8_INC(x, 4, src8, src9, src10, src11, src12, src13, src14, src15);
79 sum_abs0 += AND_VEC_W(src0);
80 sum_abs1 += AND_VEC_W(src1);
81 sum_abs2 += AND_VEC_W(src2);
82 sum_abs3 += AND_VEC_W(src3);
83 sum_abs0 += AND_VEC_W(src4);
84 sum_abs1 += AND_VEC_W(src5);
85 sum_abs2 += AND_VEC_W(src6);
86 sum_abs3 += AND_VEC_W(src7);
88 LD_SP8_INC(x, 4, src0, src1, src2, src3, src4, src5, src6, src7);
90 sum_abs0 += AND_VEC_W(src8);
91 sum_abs1 += AND_VEC_W(src9);
92 sum_abs2 += AND_VEC_W(src10);
93 sum_abs3 += AND_VEC_W(src11);
94 sum_abs0 += AND_VEC_W(src12);
95 sum_abs1 += AND_VEC_W(src13);
96 sum_abs2 += AND_VEC_W(src14);
97 sum_abs3 += AND_VEC_W(src15);
100 LD_SP8_INC(x, 4, src8, src9, src10, src11, src12, src13, src14, src15);
102 sum_abs0 += AND_VEC_W(src0);
103 sum_abs1 += AND_VEC_W(src1);
104 sum_abs2 += AND_VEC_W(src2);
105 sum_abs3 += AND_VEC_W(src3);
106 sum_abs0 += AND_VEC_W(src4);
107 sum_abs1 += AND_VEC_W(src5);
108 sum_abs2 += AND_VEC_W(src6);
109 sum_abs3 += AND_VEC_W(src7);
110 sum_abs0 += AND_VEC_W(src8);
111 sum_abs1 += AND_VEC_W(src9);
112 sum_abs2 += AND_VEC_W(src10);
113 sum_abs3 += AND_VEC_W(src11);
114 sum_abs0 += AND_VEC_W(src12);
115 sum_abs1 += AND_VEC_W(src13);
116 sum_abs2 += AND_VEC_W(src14);
117 sum_abs3 += AND_VEC_W(src15);
124 LD_SP8_INC(x, 4, src0, src1, src2, src3, src4, src5, src6, src7);
126 sum_abs0 += AND_VEC_W(src0);
127 sum_abs1 += AND_VEC_W(src1);
128 sum_abs2 += AND_VEC_W(src2);
129 sum_abs3 += AND_VEC_W(src3);
130 sum_abs0 += AND_VEC_W(src4);
131 sum_abs1 += AND_VEC_W(src5);
132 sum_abs2 += AND_VEC_W(src6);
133 sum_abs3 += AND_VEC_W(src7);
138 LD_SP4_INC(x, 4, src0, src1, src2, src3);
140 sum_abs0 += AND_VEC_W(src0);
141 sum_abs1 += AND_VEC_W(src1);
142 sum_abs2 += AND_VEC_W(src2);
143 sum_abs3 += AND_VEC_W(src3);
148 LD_SP2_INC(x, 4, src0, src1);
150 sum_abs0 += AND_VEC_W(src0);
151 sum_abs1 += AND_VEC_W(src1);
156 src0 = LD_SP(x); x += 4;
158 sum_abs0 += AND_VEC_W(src0);
164 sumf += fabsf(*(x + 1));
174 sum_abs0 += sum_abs1 + sum_abs2 + sum_abs3;
183 for (i = (n >> 4); i--;)
185 src0 = (v4f32) __msa_insert_w((v4i32) zero_v, 0, *((int *) x));
187 src0 = (v4f32) __msa_insert_w((v4i32) src0, 1, *((int *) x));
189 src0 = (v4f32) __msa_insert_w((v4i32) src0, 2, *((int *) x));
191 src0 = (v4f32) __msa_insert_w((v4i32) src0, 3, *((int *) x));
193 src1 = (v4f32) __msa_insert_w((v4i32) zero_v, 0, *((int *) x));
195 src1 = (v4f32) __msa_insert_w((v4i32) src1, 1, *((int *) x));
197 src1 = (v4f32) __msa_insert_w((v4i32) src1, 2, *((int *) x));
199 src1 = (v4f32) __msa_insert_w((v4i32) src1, 3, *((int *) x));
201 src2 = (v4f32) __msa_insert_w((v4i32) zero_v, 0, *((int *) x));
203 src2 = (v4f32) __msa_insert_w((v4i32) src2, 1, *((int *) x));
205 src2 = (v4f32) __msa_insert_w((v4i32) src2, 2, *((int *) x));
207 src2 = (v4f32) __msa_insert_w((v4i32) src2, 3, *((int *) x));
209 src3 = (v4f32) __msa_insert_w((v4i32) zero_v, 0, *((int *) x));
211 src3 = (v4f32) __msa_insert_w((v4i32) src3, 1, *((int *) x));
213 src3 = (v4f32) __msa_insert_w((v4i32) src3, 2, *((int *) x));
215 src3 = (v4f32) __msa_insert_w((v4i32) src3, 3, *((int *) x));
218 sum_abs0 += AND_VEC_W(src0);
219 sum_abs1 += AND_VEC_W(src1);
220 sum_abs2 += AND_VEC_W(src2);
221 sum_abs3 += AND_VEC_W(src3);
228 src0 = (v4f32) __msa_insert_w((v4i32) zero_v, 0, *((int *) x));
230 src0 = (v4f32) __msa_insert_w((v4i32) src0, 1, *((int *) x));
232 src0 = (v4f32) __msa_insert_w((v4i32) src0, 2, *((int *) x));
234 src0 = (v4f32) __msa_insert_w((v4i32) src0, 3, *((int *) x));
236 src1 = (v4f32) __msa_insert_w((v4i32) zero_v, 0, *((int *) x));
238 src1 = (v4f32) __msa_insert_w((v4i32) src1, 1, *((int *) x));
240 src1 = (v4f32) __msa_insert_w((v4i32) src1, 2, *((int *) x));
242 src1 = (v4f32) __msa_insert_w((v4i32) src1, 3, *((int *) x));
245 sum_abs0 += AND_VEC_W(src0);
246 sum_abs1 += AND_VEC_W(src1);
251 src0 = (v4f32) __msa_insert_w((v4i32) zero_v, 0, *((int *) x));
253 src0 = (v4f32) __msa_insert_w((v4i32) src0, 1, *((int *) x));
255 src0 = (v4f32) __msa_insert_w((v4i32) src0, 2, *((int *) x));
257 src0 = (v4f32) __msa_insert_w((v4i32) src0, 3, *((int *) x));
260 sum_abs0 += AND_VEC_W(src0);
265 data0 = fabsf(*x); x += inc_x;
266 data1 = fabsf(*x); x += inc_x;
278 sum_abs0 += sum_abs1 + sum_abs2 + sum_abs3;