/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#ifdef __aarch64__

#include <arm_neon.h>

#include "../asmlib.hpp" // ASM_PREFETCH

template<>
template<typename T>
inline void TransformImpl<8, 1, false, 4, 4>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
    uint32_t *outptr = (uint32_t *)out;
    const uint32_t *inptr = (const uint32_t *)in;

    /* Rows beyond ymax are redirected at this zeroed scratch row. */
    uint32_t zerobuff[8] = { 0 };
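
    /* The output interleaves blocks of 8 rows element-wise along K: for
     * rows A..H, the first 16 output words are
     *   A0 B0 C0 D0 E0 F0 G0 H0 A1 B1 C1 D1 E1 F1 G1 H1
     * (the scalar tail loop below spells out the same ordering).
     */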
    for (int y=y0; y<ymax; y+=8) {
        const uint32_t *inptr0 = inptr + y * ldin + k0;
        const uint32_t *inptr1 = inptr0 + ldin;
        const uint32_t *inptr2 = inptr1 + ldin;
        const uint32_t *inptr3 = inptr2 + ldin;
        const uint32_t *inptr4 = inptr3 + ldin;
        const uint32_t *inptr5 = inptr4 + ldin;
        const uint32_t *inptr6 = inptr5 + ldin;
        const uint32_t *inptr7 = inptr6 + ldin;

        int x = (kmax - k0);
        for (;x>7;x-=8) {
            /* Cope with ragged cases by copying from a buffer of zeroes instead */
            if ((y + 7) >= ymax) {
                switch ((y + 7) - ymax) {
                    /* Everything falls through in here */
                    case 6:
                        inptr1 = zerobuff;
                    case 5:
                        inptr2 = zerobuff;
                    case 4:
                        inptr3 = zerobuff;
                    case 3:
                        inptr4 = zerobuff;
                    case 2:
                        inptr5 = zerobuff;
                    case 1:
                        inptr6 = zerobuff;
                    case 0:
                        inptr7 = zerobuff;
                    default:
                        break;
                }
            }

            __asm __volatile (
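                // Two-level ZIP tree: the first level of ZIP1/ZIP2 pairs each
                // row with the row two lanes away, the second level merges
                // those pairs, turning per-row vectors into groups holding
                // one element from each of the 8 rows.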
                // Load up 8 elements (2 vectors) from each of 8 sources.
                "LDP q0, q1, [%[inptr0]], #32\n" // q0=A0A1A2A3
                "LDP q2, q3, [%[inptr1]], #32\n" // q2=B0B1B2B3
                "LDP q4, q5, [%[inptr2]], #32\n" // q4=C0C1C2C3
                "ZIP1 v16.4s, v0.4s, v4.4s\n" // q16=A0C0A1C1
                ASM_PREFETCH("[%[inptr0], #128]")
                "LDP q6, q7, [%[inptr3]], #32\n" // q6=D0D1D2D3
                "ZIP1 v17.4s, v2.4s, v6.4s\n" // q17=B0D0B1D1
                "LDP q8, q9, [%[inptr4]], #32\n"
                "LDP q10, q11, [%[inptr5]], #32\n"
                "LDP q12, q13, [%[inptr6]], #32\n"
                "ZIP1 v18.4s, v8.4s, v12.4s\n"
                ASM_PREFETCH("[%[inptr1], #128]")
                "LDP q14, q15, [%[inptr7]], #32\n"
                "ZIP1 v19.4s, v10.4s, v14.4s\n"
100 "ZIP1 v20.4s, v16.4s, v17.4s\n" // q20=A0B0C0D0
101 ASM_PREFETCH("[%[inptr2], #128]")
102 "ZIP1 v21.4s, v18.4s, v19.4s\n"
103 "ZIP2 v22.4s, v16.4s, v17.4s\n"
104 "ZIP2 v23.4s, v18.4s, v19.4s\n"
106 "ZIP2 v16.4s, v0.4s, v4.4s\n"
107 ASM_PREFETCH("[%[inptr3], #128]")
108 "ZIP2 v17.4s, v2.4s, v6.4s\n"
109 "STP q20, q21, [%[outptr]], #32\n" // Write back the first element of each source
111 "ZIP2 v18.4s, v8.4s, v12.4s\n"
112 "ZIP2 v19.4s, v10.4s, v14.4s\n"
113 "STP q22, q23, [%[outptr]], #32\n" // Write back the second element of each source
115 "ZIP1 v20.4s, v16.4s, v17.4s\n"
116 ASM_PREFETCH("[%[inptr4], #128]")
117 "ZIP1 v21.4s, v18.4s, v19.4s\n"
118 "ZIP2 v22.4s, v16.4s, v17.4s\n"
119 "ZIP2 v23.4s, v18.4s, v19.4s\n"
121 "ZIP1 v16.4s, v1.4s, v5.4s\n"
122 ASM_PREFETCH("[%[inptr5], #128]")
123 "ZIP1 v17.4s, v3.4s, v7.4s\n"
124 "STP q20, q21, [%[outptr]], #32\n" // Third element
126 "ZIP1 v18.4s, v9.4s, v13.4s\n"
127 "ZIP1 v19.4s, v11.4s, v15.4s\n"
128 "STP q22, q23, [%[outptr]], #32\n" // Fourth element
130 "ZIP1 v20.4s, v16.4s, v17.4s\n"
131 "ZIP1 v21.4s, v18.4s, v19.4s\n"
132 "ZIP2 v22.4s, v16.4s, v17.4s\n"
133 ASM_PREFETCH("[%[inptr6], #128]")
134 "ZIP2 v23.4s, v18.4s, v19.4s\n"
136 "ZIP2 v16.4s, v1.4s, v5.4s\n"
137 "ZIP2 v17.4s, v3.4s, v7.4s\n"
138 "STP q20, q21, [%[outptr]], #32\n" // Fifth element
140 "ZIP2 v18.4s, v9.4s, v13.4s\n"
141 ASM_PREFETCH("[%[inptr7], #128]")
142 "ZIP2 v19.4s, v11.4s, v15.4s\n"
143 "STP q22, q23, [%[outptr]], #32\n" // Sixth element
145 "ZIP1 v20.4s, v16.4s, v17.4s\n"
146 "ZIP1 v21.4s, v18.4s, v19.4s\n"
147 "STP q20, q21, [%[outptr]], #32\n" // Seventh element
149 "ZIP2 v22.4s, v16.4s, v17.4s\n"
150 "ZIP2 v23.4s, v18.4s, v19.4s\n"
151 "STP q22, q23, [%[outptr]], #32\n" // Eighth element
                : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
                  [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7), [outptr] "+r" (outptr)
                :
                : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12",
                  "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "memory" // stores through outptr
            );
        }

        /* Tail: fewer than 8 elements remain along K. Re-point any padded
         * rows at zerobuff, since the loop above advanced those pointers
         * past the end of the zero buffer (and rows beyond ymax must never
         * be read). */
        if ((y + 7) >= ymax) {
            switch ((y + 7) - ymax) {
                /* Everything falls through in here */
                case 6:
                    inptr1 = zerobuff;
                case 5:
                    inptr2 = zerobuff;
                case 4:
                    inptr3 = zerobuff;
                case 3:
                    inptr4 = zerobuff;
                case 2:
                    inptr5 = zerobuff;
                case 1:
                    inptr6 = zerobuff;
                case 0:
                    inptr7 = zerobuff;
                default:
                    break;
            }
        }

        for (;x>0;x--) {
            *outptr++ = *inptr0++;
            *outptr++ = *inptr1++;
            *outptr++ = *inptr2++;
            *outptr++ = *inptr3++;
            *outptr++ = *inptr4++;
            *outptr++ = *inptr5++;
            *outptr++ = *inptr6++;
            *outptr++ = *inptr7++;
        }
    }
}

#endif // __aarch64__