/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "libyuv/row.h"

#include "libyuv/basic_types.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

#if !defined(LIBYUV_DISABLE_MIPS) && \
    defined(__mips_dsp) && (__mips_dsp_rev >= 2) && \
    (_MIPS_SIM == _MIPS_SIM_ABI32)
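
// Transpose an 8-row strip: reads 8 source rows of |width| bytes and writes
// |width| destination rows of 8 bytes each, one byte gathered per row. A
// word-store path is used when dst and dst_stride are 4-byte aligned; an
// swr/swl path handles unaligned destinations.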
void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride,
                             uint8* dst, int dst_stride,
                             int width) {
   __asm__ __volatile__ (
      ".set push                                         \n"
      ".set noreorder                                    \n"
      "sll              $t2, %[src_stride], 0x1          \n" // src_stride x 2
      "sll              $t4, %[src_stride], 0x2          \n" // src_stride x 4
      "sll              $t9, %[src_stride], 0x3          \n" // src_stride x 8
      "addu             $t3, $t2, %[src_stride]          \n"
      "addu             $t5, $t4, %[src_stride]          \n"
      "addu             $t6, $t2, $t4                    \n"
      "andi             $t0, %[dst], 0x3                 \n"
      "andi             $t1, %[dst_stride], 0x3          \n"
      "or               $t0, $t0, $t1                    \n"
      "bnez             $t0, 11f                         \n"
      " subu            $t7, $t9, %[src_stride]          \n"
      // dst + dst_stride word aligned
    "1:                                                  \n"
43 "lbu $t0, 0(%[src]) \n"
44 "lbux $t1, %[src_stride](%[src]) \n"
45 "lbux $t8, $t2(%[src]) \n"
46 "lbux $t9, $t3(%[src]) \n"
51 "precr.qb.ph $s0, $t8, $t0 \n"
52 "lbux $t0, $t4(%[src]) \n"
53 "lbux $t1, $t5(%[src]) \n"
54 "lbux $t8, $t6(%[src]) \n"
55 "lbux $t9, $t7(%[src]) \n"
60 "precr.qb.ph $s1, $t8, $t0 \n"
61 "sw $s0, 0(%[dst]) \n"
62 "addiu %[width], -1 \n"
64 "sw $s1, 4(%[dst]) \n"
65 "bnez %[width], 1b \n"
66 " addu %[dst], %[dst], %[dst_stride] \n"
68 //dst + dst_stride unaligned
70 "lbu $t0, 0(%[src]) \n"
71 "lbux $t1, %[src_stride](%[src]) \n"
72 "lbux $t8, $t2(%[src]) \n"
73 "lbux $t9, $t3(%[src]) \n"
78 "precr.qb.ph $s0, $t8, $t0 \n"
79 "lbux $t0, $t4(%[src]) \n"
80 "lbux $t1, $t5(%[src]) \n"
81 "lbux $t8, $t6(%[src]) \n"
82 "lbux $t9, $t7(%[src]) \n"
87 "precr.qb.ph $s1, $t8, $t0 \n"
88 "swr $s0, 0(%[dst]) \n"
89 "swl $s0, 3(%[dst]) \n"
90 "addiu %[width], -1 \n"
92 "swr $s1, 4(%[dst]) \n"
93 "swl $s1, 7(%[dst]) \n"
94 "bnez %[width], 11b \n"
95 "addu %[dst], %[dst], %[dst_stride] \n"
   "2:                                                   \n"
      ".set pop                                          \n"
      :[src] "+r" (src),
       [dst] "+r" (dst),
       [width] "+r" (width)
      :[src_stride] "r" (src_stride),
       [dst_stride] "r" (dst_stride)
      : "t0", "t1", "t2", "t3", "t4", "t5",
        "t6", "t7", "t8", "t9",
        "s0", "s1"
  );
}

void TransposeWx8_FAST_MIPS_DSPR2(const uint8* src, int src_stride,
                                  uint8* dst, int dst_stride,
                                  int width) {
   __asm__ __volatile__ (
      ".set noat                                         \n"
      ".set push                                         \n"
      ".set noreorder                                    \n"
      "beqz             %[width], 2f                     \n"
      " sll             $t2, %[src_stride], 0x1          \n" // src_stride x 2
      "sll              $t4, %[src_stride], 0x2          \n" // src_stride x 4
      "sll              $t9, %[src_stride], 0x3          \n" // src_stride x 8
      "addu             $t3, $t2, %[src_stride]          \n"
      "addu             $t5, $t4, %[src_stride]          \n"
      "addu             $t6, $t2, $t4                    \n"

      "srl              $AT, %[width], 0x2               \n"
      "andi             $t0, %[dst], 0x3                 \n"
      "andi             $t1, %[dst_stride], 0x3          \n"
      "or               $t0, $t0, $t1                    \n"
      "bnez             $t0, 11f                         \n"
      " subu            $t7, $t9, %[src_stride]          \n"
      // dst + dst_stride word aligned
    "1:                                                  \n"
132 "lw $t0, 0(%[src]) \n"
133 "lwx $t1, %[src_stride](%[src]) \n"
134 "lwx $t8, $t2(%[src]) \n"
135 "lwx $t9, $t3(%[src]) \n"
137 // t0 = | 30 | 20 | 10 | 00 |
138 // t1 = | 31 | 21 | 11 | 01 |
139 // t8 = | 32 | 22 | 12 | 02 |
140 // t9 = | 33 | 23 | 13 | 03 |
142 "precr.qb.ph $s0, $t1, $t0 \n"
143 "precr.qb.ph $s1, $t9, $t8 \n"
144 "precrq.qb.ph $s2, $t1, $t0 \n"
145 "precrq.qb.ph $s3, $t9, $t8 \n"
147 // s0 = | 21 | 01 | 20 | 00 |
148 // s1 = | 23 | 03 | 22 | 02 |
149 // s2 = | 31 | 11 | 30 | 10 |
150 // s3 = | 33 | 13 | 32 | 12 |
152 "precr.qb.ph $s4, $s1, $s0 \n"
153 "precrq.qb.ph $s5, $s1, $s0 \n"
154 "precr.qb.ph $s6, $s3, $s2 \n"
155 "precrq.qb.ph $s7, $s3, $s2 \n"
157 // s4 = | 03 | 02 | 01 | 00 |
158 // s5 = | 23 | 22 | 21 | 20 |
159 // s6 = | 13 | 12 | 11 | 10 |
160 // s7 = | 33 | 32 | 31 | 30 |
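
      // The two pack stages above amount to a 4x4 byte transpose:
      // precr.qb.ph keeps the even-numbered bytes of its operands and
      // precrq.qb.ph the odd-numbered ones, leaving the four transposed
      // rows of this half-block in s4..s7.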
162 "lwx $t0, $t4(%[src]) \n"
163 "lwx $t1, $t5(%[src]) \n"
164 "lwx $t8, $t6(%[src]) \n"
165 "lwx $t9, $t7(%[src]) \n"
167 // t0 = | 34 | 24 | 14 | 04 |
168 // t1 = | 35 | 25 | 15 | 05 |
169 // t8 = | 36 | 26 | 16 | 06 |
170 // t9 = | 37 | 27 | 17 | 07 |
172 "precr.qb.ph $s0, $t1, $t0 \n"
173 "precr.qb.ph $s1, $t9, $t8 \n"
174 "precrq.qb.ph $s2, $t1, $t0 \n"
175 "precrq.qb.ph $s3, $t9, $t8 \n"
177 // s0 = | 25 | 05 | 24 | 04 |
178 // s1 = | 27 | 07 | 26 | 06 |
179 // s2 = | 35 | 15 | 34 | 14 |
180 // s3 = | 37 | 17 | 36 | 16 |
182 "precr.qb.ph $t0, $s1, $s0 \n"
183 "precrq.qb.ph $t1, $s1, $s0 \n"
184 "precr.qb.ph $t8, $s3, $s2 \n"
185 "precrq.qb.ph $t9, $s3, $s2 \n"
187 // t0 = | 07 | 06 | 05 | 04 |
188 // t1 = | 27 | 26 | 25 | 24 |
189 // t8 = | 17 | 16 | 15 | 14 |
190 // t9 = | 37 | 36 | 35 | 34 |
192 "addu $s0, %[dst], %[dst_stride] \n"
193 "addu $s1, $s0, %[dst_stride] \n"
194 "addu $s2, $s1, %[dst_stride] \n"
196 "sw $s4, 0(%[dst]) \n"
197 "sw $t0, 4(%[dst]) \n"
209 " addu %[dst], $s2, %[dst_stride] \n"
      // dst + dst_stride unaligned
   "11:                                                  \n"
213 "lw $t0, 0(%[src]) \n"
214 "lwx $t1, %[src_stride](%[src]) \n"
215 "lwx $t8, $t2(%[src]) \n"
216 "lwx $t9, $t3(%[src]) \n"
218 // t0 = | 30 | 20 | 10 | 00 |
219 // t1 = | 31 | 21 | 11 | 01 |
220 // t8 = | 32 | 22 | 12 | 02 |
221 // t9 = | 33 | 23 | 13 | 03 |
223 "precr.qb.ph $s0, $t1, $t0 \n"
224 "precr.qb.ph $s1, $t9, $t8 \n"
225 "precrq.qb.ph $s2, $t1, $t0 \n"
226 "precrq.qb.ph $s3, $t9, $t8 \n"
228 // s0 = | 21 | 01 | 20 | 00 |
229 // s1 = | 23 | 03 | 22 | 02 |
230 // s2 = | 31 | 11 | 30 | 10 |
231 // s3 = | 33 | 13 | 32 | 12 |
233 "precr.qb.ph $s4, $s1, $s0 \n"
234 "precrq.qb.ph $s5, $s1, $s0 \n"
235 "precr.qb.ph $s6, $s3, $s2 \n"
236 "precrq.qb.ph $s7, $s3, $s2 \n"
238 // s4 = | 03 | 02 | 01 | 00 |
239 // s5 = | 23 | 22 | 21 | 20 |
240 // s6 = | 13 | 12 | 11 | 10 |
241 // s7 = | 33 | 32 | 31 | 30 |
243 "lwx $t0, $t4(%[src]) \n"
244 "lwx $t1, $t5(%[src]) \n"
245 "lwx $t8, $t6(%[src]) \n"
246 "lwx $t9, $t7(%[src]) \n"
248 // t0 = | 34 | 24 | 14 | 04 |
249 // t1 = | 35 | 25 | 15 | 05 |
250 // t8 = | 36 | 26 | 16 | 06 |
251 // t9 = | 37 | 27 | 17 | 07 |
253 "precr.qb.ph $s0, $t1, $t0 \n"
254 "precr.qb.ph $s1, $t9, $t8 \n"
255 "precrq.qb.ph $s2, $t1, $t0 \n"
256 "precrq.qb.ph $s3, $t9, $t8 \n"
258 // s0 = | 25 | 05 | 24 | 04 |
259 // s1 = | 27 | 07 | 26 | 06 |
260 // s2 = | 35 | 15 | 34 | 14 |
261 // s3 = | 37 | 17 | 36 | 16 |
263 "precr.qb.ph $t0, $s1, $s0 \n"
264 "precrq.qb.ph $t1, $s1, $s0 \n"
265 "precr.qb.ph $t8, $s3, $s2 \n"
266 "precrq.qb.ph $t9, $s3, $s2 \n"
268 // t0 = | 07 | 06 | 05 | 04 |
269 // t1 = | 27 | 26 | 25 | 24 |
270 // t8 = | 17 | 16 | 15 | 14 |
271 // t9 = | 37 | 36 | 35 | 34 |
273 "addu $s0, %[dst], %[dst_stride] \n"
274 "addu $s1, $s0, %[dst_stride] \n"
275 "addu $s2, $s1, %[dst_stride] \n"
277 "swr $s4, 0(%[dst]) \n"
278 "swl $s4, 3(%[dst]) \n"
279 "swr $t0, 4(%[dst]) \n"
280 "swl $t0, 7(%[dst]) \n"
298 " addu %[dst], $s2, %[dst_stride] \n"
   "2:                                                   \n"
      ".set pop                                          \n"
      ".set at                                           \n"
      :[src] "+r" (src),
       [dst] "+r" (dst),
       [width] "+r" (width)
      :[src_stride] "r" (src_stride),
       [dst_stride] "r" (dst_stride)
      : "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9",
        "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7"
  );
}

void TransposeUVWx8_MIPS_DSPR2(const uint8* src, int src_stride,
                               uint8* dst_a, int dst_stride_a,
                               uint8* dst_b, int dst_stride_b,
                               int width) {
   __asm__ __volatile__ (
      ".set push                                         \n"
      ".set noreorder                                    \n"
      "beqz             %[width], 2f                     \n"
      " sll             $t2, %[src_stride], 0x1          \n" // src_stride x 2
      "sll              $t4, %[src_stride], 0x2          \n" // src_stride x 4
      "sll              $t9, %[src_stride], 0x3          \n" // src_stride x 8
      "addu             $t3, $t2, %[src_stride]          \n"
      "addu             $t5, $t4, %[src_stride]          \n"
      "addu             $t6, $t2, $t4                    \n"
      "subu             $t7, $t9, %[src_stride]          \n"
      "srl              $t1, %[width], 1                 \n"

      // check word alignment for dst_a, dst_b, dst_stride_a and dst_stride_b
      "andi             $t0, %[dst_a], 0x3               \n"
      "andi             $t8, %[dst_b], 0x3               \n"
      "or               $t0, $t0, $t8                    \n"
      "andi             $t8, %[dst_stride_a], 0x3        \n"
      "andi             $s5, %[dst_stride_b], 0x3        \n"
      "or               $t8, $t8, $s5                    \n"
      "or               $t0, $t0, $t8                    \n"
      "bnez             $t0, 11f                         \n"
      " nop                                              \n"

      // dst + dst_stride word aligned (both, a & b dst addresses)
    "1:                                                  \n"
341 "lw $t0, 0(%[src]) \n" // |B0|A0|b0|a0|
342 "lwx $t8, %[src_stride](%[src]) \n" // |B1|A1|b1|a1|
343 "addu $s5, %[dst_a], %[dst_stride_a] \n"
344 "lwx $t9, $t2(%[src]) \n" // |B2|A2|b2|a2|
345 "lwx $s0, $t3(%[src]) \n" // |B3|A3|b3|a3|
346 "addu $s6, %[dst_b], %[dst_stride_b] \n"
348 "precrq.ph.w $s1, $t8, $t0 \n" // |B1|A1|B0|A0|
349 "precrq.ph.w $s2, $s0, $t9 \n" // |B3|A3|B2|A2|
350 "precr.qb.ph $s3, $s2, $s1 \n" // |A3|A2|A1|A0|
351 "precrq.qb.ph $s4, $s2, $s1 \n" // |B3|B2|B1|B0|
353 "sll $t0, $t0, 16 \n"
354 "packrl.ph $s1, $t8, $t0 \n" // |b1|a1|b0|a0|
355 "sll $t9, $t9, 16 \n"
356 "packrl.ph $s2, $s0, $t9 \n" // |b3|a3|b2|a2|
361 "precr.qb.ph $s3, $s2, $s1 \n" // |a3|a2|a1|a0|
362 "precrq.qb.ph $s4, $s2, $s1 \n" // |b3|b2|b1|b0|
364 "lwx $t0, $t4(%[src]) \n" // |B4|A4|b4|a4|
365 "lwx $t8, $t5(%[src]) \n" // |B5|A5|b5|a5|
366 "lwx $t9, $t6(%[src]) \n" // |B6|A6|b6|a6|
367 "lwx $s0, $t7(%[src]) \n" // |B7|A7|b7|a7|
368 "sw $s3, 0(%[dst_a]) \n"
369 "sw $s4, 0(%[dst_b]) \n"
371 "precrq.ph.w $s1, $t8, $t0 \n" // |B5|A5|B4|A4|
372 "precrq.ph.w $s2, $s0, $t9 \n" // |B6|A6|B7|A7|
373 "precr.qb.ph $s3, $s2, $s1 \n" // |A7|A6|A5|A4|
374 "precrq.qb.ph $s4, $s2, $s1 \n" // |B7|B6|B5|B4|
376 "sll $t0, $t0, 16 \n"
377 "packrl.ph $s1, $t8, $t0 \n" // |b5|a5|b4|a4|
378 "sll $t9, $t9, 16 \n"
379 "packrl.ph $s2, $s0, $t9 \n" // |b7|a7|b6|a6|
383 "precr.qb.ph $s3, $s2, $s1 \n" // |a7|a6|a5|a4|
384 "precrq.qb.ph $s4, $s2, $s1 \n" // |b7|b6|b5|b4|
388 "sll $t0, %[dst_stride_a], 1 \n"
389 "sll $t8, %[dst_stride_b], 1 \n"
390 "sw $s3, 4(%[dst_a]) \n"
391 "sw $s4, 4(%[dst_b]) \n"
392 "addu %[dst_a], %[dst_a], $t0 \n"
394 " addu %[dst_b], %[dst_b], $t8 \n"
398 // dst_a or dst_b or dst_stride_a or dst_stride_b not word aligned
400 "lw $t0, 0(%[src]) \n" // |B0|A0|b0|a0|
401 "lwx $t8, %[src_stride](%[src]) \n" // |B1|A1|b1|a1|
402 "addu $s5, %[dst_a], %[dst_stride_a] \n"
403 "lwx $t9, $t2(%[src]) \n" // |B2|A2|b2|a2|
404 "lwx $s0, $t3(%[src]) \n" // |B3|A3|b3|a3|
405 "addu $s6, %[dst_b], %[dst_stride_b] \n"
407 "precrq.ph.w $s1, $t8, $t0 \n" // |B1|A1|B0|A0|
408 "precrq.ph.w $s2, $s0, $t9 \n" // |B3|A3|B2|A2|
409 "precr.qb.ph $s3, $s2, $s1 \n" // |A3|A2|A1|A0|
410 "precrq.qb.ph $s4, $s2, $s1 \n" // |B3|B2|B1|B0|
412 "sll $t0, $t0, 16 \n"
413 "packrl.ph $s1, $t8, $t0 \n" // |b1|a1|b0|a0|
414 "sll $t9, $t9, 16 \n"
415 "packrl.ph $s2, $s0, $t9 \n" // |b3|a3|b2|a2|
422 "precr.qb.ph $s3, $s2, $s1 \n" // |a3|a2|a1|a0|
423 "precrq.qb.ph $s4, $s2, $s1 \n" // |b3|b2|b1|b0|
425 "lwx $t0, $t4(%[src]) \n" // |B4|A4|b4|a4|
426 "lwx $t8, $t5(%[src]) \n" // |B5|A5|b5|a5|
427 "lwx $t9, $t6(%[src]) \n" // |B6|A6|b6|a6|
428 "lwx $s0, $t7(%[src]) \n" // |B7|A7|b7|a7|
429 "swr $s3, 0(%[dst_a]) \n"
430 "swl $s3, 3(%[dst_a]) \n"
431 "swr $s4, 0(%[dst_b]) \n"
432 "swl $s4, 3(%[dst_b]) \n"
434 "precrq.ph.w $s1, $t8, $t0 \n" // |B5|A5|B4|A4|
435 "precrq.ph.w $s2, $s0, $t9 \n" // |B6|A6|B7|A7|
436 "precr.qb.ph $s3, $s2, $s1 \n" // |A7|A6|A5|A4|
437 "precrq.qb.ph $s4, $s2, $s1 \n" // |B7|B6|B5|B4|
439 "sll $t0, $t0, 16 \n"
440 "packrl.ph $s1, $t8, $t0 \n" // |b5|a5|b4|a4|
441 "sll $t9, $t9, 16 \n"
442 "packrl.ph $s2, $s0, $t9 \n" // |b7|a7|b6|a6|
449 "precr.qb.ph $s3, $s2, $s1 \n" // |a7|a6|a5|a4|
450 "precrq.qb.ph $s4, $s2, $s1 \n" // |b7|b6|b5|b4|
454 "sll $t0, %[dst_stride_a], 1 \n"
455 "sll $t8, %[dst_stride_b], 1 \n"
456 "swr $s3, 4(%[dst_a]) \n"
457 "swl $s3, 7(%[dst_a]) \n"
458 "swr $s4, 4(%[dst_b]) \n"
459 "swl $s4, 7(%[dst_b]) \n"
460 "addu %[dst_a], %[dst_a], $t0 \n"
462 " addu %[dst_b], %[dst_b], $t8 \n"
       [dst_a] "+r" (dst_a),
       [dst_b] "+r" (dst_b),
       [width] "+r" (width),
       [src_stride] "+r" (src_stride)
      :[dst_stride_a] "r" (dst_stride_a),
       [dst_stride_b] "r" (dst_stride_b)
      : "t0", "t1", "t2", "t3", "t4", "t5",
        "t6", "t7", "t8", "t9",
        "s0", "s1", "s2", "s3",
        "s4", "s5", "s6"
  );
}

#endif  // defined(__mips_dsp) && (__mips_dsp_rev >= 2)

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif
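
// Illustrative scalar equivalent of the Wx8 transpose above, kept as a
// comment (a sketch for reference; the helper name is hypothetical, not a
// libyuv API). Destination row i collects byte i of each of the 8 source
// rows:
//
//   static void TransposeWx8_C_sketch(const uint8* src, int src_stride,
//                                     uint8* dst, int dst_stride,
//                                     int width) {
//     for (int i = 0; i < width; ++i) {   // one dst row per src column
//       for (int j = 0; j < 8; ++j) {     // 8 src rows -> 8 dst bytes
//         dst[i * dst_stride + j] = src[i + j * src_stride];
//       }
//     }
//   }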