/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include "./vpx_config.h"

static INLINE void vp8_mbloop_filter_neon(
        uint8x16_t qblimit,  // mblimit
        uint8x16_t qlimit,   // limit
        uint8x16_t qthresh,  // thresh
        uint8x16_t q3,       // p3
        uint8x16_t q4,       // p2
        uint8x16_t q5,       // p1
        uint8x16_t q6,       // p0
        uint8x16_t q7,       // q0
        uint8x16_t q8,       // q1
        uint8x16_t q9,       // q2
        uint8x16_t q10,      // q3
        uint8x16_t *q4r,     // p2 result
        uint8x16_t *q5r,     // p1 result
        uint8x16_t *q6r,     // p0 result
        uint8x16_t *q7r,     // q0 result
        uint8x16_t *q8r,     // q1 result
        uint8x16_t *q9r) {   // q2 result
    uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8;
    int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16;
    int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8;
    uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16;
    int8x16_t q0s8, q12s8, q14s8, q15s8;
    int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29;

    // filter mask: |p3-p2|, |p2-p1|, |p1-p0|, |q1-q0|, |q2-q1|, |q3-q2|
    q11u8 = vabdq_u8(q3, q4);
    q12u8 = vabdq_u8(q4, q5);
    q13u8 = vabdq_u8(q5, q6);
    q14u8 = vabdq_u8(q8, q7);
    q1u8 = vabdq_u8(q9, q8);
    q0u8 = vabdq_u8(q10, q9);

    q11u8 = vmaxq_u8(q11u8, q12u8);
    q12u8 = vmaxq_u8(q13u8, q14u8);
    q1u8 = vmaxq_u8(q1u8, q0u8);
    q15u8 = vmaxq_u8(q11u8, q12u8);

    q12u8 = vabdq_u8(q6, q7);

    // vp8_hevmask() function: high edge variance
    q13u8 = vcgtq_u8(q13u8, qthresh);
    q14u8 = vcgtq_u8(q14u8, qthresh);
    q15u8 = vmaxq_u8(q15u8, q1u8);

    q15u8 = vcgeq_u8(qlimit, q15u8);

    q1u8 = vabdq_u8(q5, q8);
    q12u8 = vqaddq_u8(q12u8, q12u8);

    // vp8_filter() function
    // convert to signed
    q0u8 = vdupq_n_u8(0x80);
    q9 = veorq_u8(q9, q0u8);
    q8 = veorq_u8(q8, q0u8);
    q7 = veorq_u8(q7, q0u8);
    q6 = veorq_u8(q6, q0u8);
    q5 = veorq_u8(q5, q0u8);
    q4 = veorq_u8(q4, q0u8);

    q1u8 = vshrq_n_u8(q1u8, 1);
    q12u8 = vqaddq_u8(q12u8, q1u8);

    q14u8 = vorrq_u8(q13u8, q14u8);
    q12u8 = vcgeq_u8(qblimit, q12u8);

    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
                     vget_low_s8(vreinterpretq_s8_u8(q6)));
    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
                      vget_high_s8(vreinterpretq_s8_u8(q6)));

    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
                     vreinterpretq_s8_u8(q8));

    q11s16 = vdupq_n_s16(3);
    q2s16 = vmulq_s16(q2s16, q11s16);
    q13s16 = vmulq_s16(q13s16, q11s16);

    q15u8 = vandq_u8(q15u8, q12u8);

    q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
    q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8));

    q12u8 = vdupq_n_u8(3);
    q11u8 = vdupq_n_u8(4);

    // vp8_filter = clamp(vp8_filter + 3 * (qs0 - ps0))
    d2 = vqmovn_s16(q2s16);
    d3 = vqmovn_s16(q13s16);
    q1s8 = vcombine_s8(d2, d3);
    q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8));
    q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8));

    q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8));
    q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8));
    q2s8 = vshrq_n_s8(q2s8, 3);
    q13s8 = vshrq_n_s8(q13s8, 3);

    q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8);
    q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8);

    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));

    // 63 is the rounding term for the >> 7 below; taps 9, 18 and 27
    // scale the filter value for the p2/q2, p1/q1 and p0/q0 updates
    q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63);
    d5 = vdup_n_s8(9);
    d4 = vdup_n_s8(18);

    q0s16 = vmlal_s8(vreinterpretq_s16_u16(q0u16), vget_low_s8(q1s8), d5);
    q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5);
    d5 = vdup_n_s8(27);
    q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8), d4);
    q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4);
    q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8), d5);
    q15s16 = vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5);

    d0 = vqshrn_n_s16(q0s16, 7);
    d1 = vqshrn_n_s16(q11s16, 7);
    d24 = vqshrn_n_s16(q12s16, 7);
    d25 = vqshrn_n_s16(q13s16, 7);
    d28 = vqshrn_n_s16(q14s16, 7);
    d29 = vqshrn_n_s16(q15s16, 7);

    q0s8 = vcombine_s8(d0, d1);
    q12s8 = vcombine_s8(d24, d25);
    q14s8 = vcombine_s8(d28, d29);

    q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8);
    q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);
    q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8);
    q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
    q15s8 = vqsubq_s8(q7s8, q14s8);
    q14s8 = vqaddq_s8(q6s8, q14s8);

    // convert back to unsigned
    q1u8 = vdupq_n_u8(0x80);
    *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8);
    *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8);
    *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8);
    *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8);
    *q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8);
    *q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8);
    return;
}

void vp8_mbloop_filter_horizontal_edge_y_neon(
        unsigned char *src,
        int pitch,
        unsigned char blimit,
        unsigned char limit,
        unsigned char thresh) {
    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
    uint8x16_t q5, q6, q7, q8, q9, q10;

    qblimit = vdupq_n_u8(blimit);
    qlimit = vdupq_n_u8(limit);
    qthresh = vdupq_n_u8(thresh);
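
    /* Load the eight rows p3..q3 around the horizontal edge. A minimal
     * sketch, assuming src enters pointing at the q0 row (the first row
     * below the edge), as the scalar filter expects. */
    src -= (pitch << 2);

    q3 = vld1q_u8(src);
    src += pitch;
    q4 = vld1q_u8(src);
    src += pitch;
    q5 = vld1q_u8(src);
    src += pitch;
    q6 = vld1q_u8(src);
    src += pitch;
    q7 = vld1q_u8(src);
    src += pitch;
    q8 = vld1q_u8(src);
    src += pitch;
    q9 = vld1q_u8(src);
    src += pitch;
    q10 = vld1q_u8(src);
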
    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
                           q5, q6, q7, q8, q9, q10,
                           &q4, &q5, &q6, &q7, &q8, &q9);
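
    /* Store the six filtered rows p2..q2. Same sketch assumptions: after
     * the loads above, src points at the q3 row, six rows past p2. */
    src -= (pitch * 6);
    vst1q_u8(src, q4);
    src += pitch;
    vst1q_u8(src, q5);
    src += pitch;
    vst1q_u8(src, q6);
    src += pitch;
    vst1q_u8(src, q7);
    src += pitch;
    vst1q_u8(src, q8);
    src += pitch;
    vst1q_u8(src, q9);
    return;
}
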
void vp8_mbloop_filter_horizontal_edge_uv_neon(
        unsigned char *u,
        int pitch,
        unsigned char blimit,
        unsigned char limit,
        unsigned char thresh,
        unsigned char *v) {
    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
    uint8x16_t q5, q6, q7, q8, q9, q10;
    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
    uint8x8_t d15, d16, d17, d18, d19, d20, d21;

    qblimit = vdupq_n_u8(blimit);
    qlimit = vdupq_n_u8(limit);
    qthresh = vdupq_n_u8(thresh);
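
    /* Load eight 8-pixel rows from each chroma plane (a sketch, assuming
     * u and v point at their q0 rows; the low halves of each q register
     * carry U and the high halves carry V, matching the combines below). */
    u -= (pitch << 2);
    v -= (pitch << 2);

    d6 = vld1_u8(u);
    u += pitch;
    d7 = vld1_u8(v);
    v += pitch;
    d8 = vld1_u8(u);
    u += pitch;
    d9 = vld1_u8(v);
    v += pitch;
    d10 = vld1_u8(u);
    u += pitch;
    d11 = vld1_u8(v);
    v += pitch;
    d12 = vld1_u8(u);
    u += pitch;
    d13 = vld1_u8(v);
    v += pitch;
    d14 = vld1_u8(u);
    u += pitch;
    d15 = vld1_u8(v);
    v += pitch;
    d16 = vld1_u8(u);
    u += pitch;
    d17 = vld1_u8(v);
    v += pitch;
    d18 = vld1_u8(u);
    u += pitch;
    d19 = vld1_u8(v);
    v += pitch;
    d20 = vld1_u8(u);
    d21 = vld1_u8(v);
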
    q3 = vcombine_u8(d6, d7);
    q4 = vcombine_u8(d8, d9);
    q5 = vcombine_u8(d10, d11);
    q6 = vcombine_u8(d12, d13);
    q7 = vcombine_u8(d14, d15);
    q8 = vcombine_u8(d16, d17);
    q9 = vcombine_u8(d18, d19);
    q10 = vcombine_u8(d20, d21);

    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
                           q5, q6, q7, q8, q9, q10,
                           &q4, &q5, &q6, &q7, &q8, &q9);

    /* Write the six filtered rows back to each plane (rewind by six rows;
     * a sketch assumption matching the loads above). */
    u -= (pitch * 6);
    v -= (pitch * 6);

    vst1_u8(u, vget_low_u8(q4));
    u += pitch;
    vst1_u8(v, vget_high_u8(q4));
    v += pitch;
    vst1_u8(u, vget_low_u8(q5));
    u += pitch;
    vst1_u8(v, vget_high_u8(q5));
    v += pitch;
    vst1_u8(u, vget_low_u8(q6));
    u += pitch;
    vst1_u8(v, vget_high_u8(q6));
    v += pitch;
    vst1_u8(u, vget_low_u8(q7));
    u += pitch;
    vst1_u8(v, vget_high_u8(q7));
    v += pitch;
    vst1_u8(u, vget_low_u8(q8));
    u += pitch;
    vst1_u8(v, vget_high_u8(q8));
    v += pitch;
    vst1_u8(u, vget_low_u8(q9));
    vst1_u8(v, vget_high_u8(q9));
    return;
}

void vp8_mbloop_filter_vertical_edge_y_neon(
        unsigned char *src,
        int pitch,
        unsigned char blimit,
        unsigned char limit,
        unsigned char thresh) {
    unsigned char *s1, *s2;
    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
    uint8x16_t q5, q6, q7, q8, q9, q10;
    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;

    qblimit = vdupq_n_u8(blimit);
    qlimit = vdupq_n_u8(limit);
    qthresh = vdupq_n_u8(thresh);
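
    /* Gather sixteen 8-pixel rows spanning the vertical edge: four pixels
     * either side of it, the upper eight rows through s1 and the lower
     * eight through s2. A sketch, assuming src points at the edge column. */
    s1 = src - 4;
    s2 = s1 + 8 * pitch;
    d6 = vld1_u8(s1);
    s1 += pitch;
    d7 = vld1_u8(s2);
    s2 += pitch;
    d8 = vld1_u8(s1);
    s1 += pitch;
    d9 = vld1_u8(s2);
    s2 += pitch;
    d10 = vld1_u8(s1);
    s1 += pitch;
    d11 = vld1_u8(s2);
    s2 += pitch;
    d12 = vld1_u8(s1);
    s1 += pitch;
    d13 = vld1_u8(s2);
    s2 += pitch;
    d14 = vld1_u8(s1);
    s1 += pitch;
    d15 = vld1_u8(s2);
    s2 += pitch;
    d16 = vld1_u8(s1);
    s1 += pitch;
    d17 = vld1_u8(s2);
    s2 += pitch;
    d18 = vld1_u8(s1);
    s1 += pitch;
    d19 = vld1_u8(s2);
    s2 += pitch;
    d20 = vld1_u8(s1);
    d21 = vld1_u8(s2);
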
    q3 = vcombine_u8(d6, d7);
    q4 = vcombine_u8(d8, d9);
    q5 = vcombine_u8(d10, d11);
    q6 = vcombine_u8(d12, d13);
    q7 = vcombine_u8(d14, d15);
    q8 = vcombine_u8(d16, d17);
    q9 = vcombine_u8(d18, d19);
    q10 = vcombine_u8(d20, d21);

    // transpose the block: trn at 32-, 16- and then 8-bit granularity
    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                       vreinterpretq_u16_u32(q2tmp2.val[0]));
    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                       vreinterpretq_u16_u32(q2tmp3.val[0]));
    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                       vreinterpretq_u16_u32(q2tmp2.val[1]));
    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                       vreinterpretq_u16_u32(q2tmp3.val[1]));

    q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                      vreinterpretq_u8_u16(q2tmp5.val[0]));
    q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                      vreinterpretq_u8_u16(q2tmp5.val[1]));
    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                       vreinterpretq_u8_u16(q2tmp7.val[0]));
    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                       vreinterpretq_u8_u16(q2tmp7.val[1]));

    q3 = q2tmp8.val[0];
    q4 = q2tmp8.val[1];
    q5 = q2tmp9.val[0];
    q6 = q2tmp9.val[1];
    q7 = q2tmp10.val[0];
    q8 = q2tmp10.val[1];
    q9 = q2tmp11.val[0];
    q10 = q2tmp11.val[1];

    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
                           q5, q6, q7, q8, q9, q10,
                           &q4, &q5, &q6, &q7, &q8, &q9);

    // transpose back to rows
    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                       vreinterpretq_u16_u32(q2tmp2.val[0]));
    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                       vreinterpretq_u16_u32(q2tmp3.val[0]));
    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                       vreinterpretq_u16_u32(q2tmp2.val[1]));
    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                       vreinterpretq_u16_u32(q2tmp3.val[1]));

    q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                      vreinterpretq_u8_u16(q2tmp5.val[0]));
    q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                      vreinterpretq_u8_u16(q2tmp5.val[1]));
    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                       vreinterpretq_u8_u16(q2tmp7.val[0]));
    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                       vreinterpretq_u8_u16(q2tmp7.val[1]));

    q3 = q2tmp8.val[0];
    q4 = q2tmp8.val[1];
    q5 = q2tmp9.val[0];
    q6 = q2tmp9.val[1];
    q7 = q2tmp10.val[0];
    q8 = q2tmp10.val[1];
    q9 = q2tmp11.val[0];
    q10 = q2tmp11.val[1];

    /* Scatter the sixteen filtered rows back (rewind both pointers by
     * seven rows; a sketch assumption matching the loads above). */
    s1 -= 7 * pitch;
    s2 -= 7 * pitch;

    vst1_u8(s1, vget_low_u8(q3));
    s1 += pitch;
    vst1_u8(s2, vget_high_u8(q3));
    s2 += pitch;
    vst1_u8(s1, vget_low_u8(q4));
    s1 += pitch;
    vst1_u8(s2, vget_high_u8(q4));
    s2 += pitch;
    vst1_u8(s1, vget_low_u8(q5));
    s1 += pitch;
    vst1_u8(s2, vget_high_u8(q5));
    s2 += pitch;
    vst1_u8(s1, vget_low_u8(q6));
    s1 += pitch;
    vst1_u8(s2, vget_high_u8(q6));
    s2 += pitch;
    vst1_u8(s1, vget_low_u8(q7));
    s1 += pitch;
    vst1_u8(s2, vget_high_u8(q7));
    s2 += pitch;
    vst1_u8(s1, vget_low_u8(q8));
    s1 += pitch;
    vst1_u8(s2, vget_high_u8(q8));
    s2 += pitch;
    vst1_u8(s1, vget_low_u8(q9));
    s1 += pitch;
    vst1_u8(s2, vget_high_u8(q9));
    s2 += pitch;
    vst1_u8(s1, vget_low_u8(q10));
    vst1_u8(s2, vget_high_u8(q10));
    return;
}

void vp8_mbloop_filter_vertical_edge_uv_neon(
        unsigned char *u,
        int pitch,
        unsigned char blimit,
        unsigned char limit,
        unsigned char thresh,
        unsigned char *v) {
    unsigned char *us, *ud;
    unsigned char *vs, *vd;
    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
    uint8x16_t q5, q6, q7, q8, q9, q10;
    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;

    qblimit = vdupq_n_u8(blimit);
    qlimit = vdupq_n_u8(limit);
    qthresh = vdupq_n_u8(thresh);
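
    /* Load eight 8-pixel rows from each plane across the vertical edge
     * (a sketch, assuming u and v point at the edge column of their
     * planes; U rows fill the even d registers, V rows the odd ones). */
    us = u - 4;
    d6 = vld1_u8(us);
    us += pitch;
    d8 = vld1_u8(us);
    us += pitch;
    d10 = vld1_u8(us);
    us += pitch;
    d12 = vld1_u8(us);
    us += pitch;
    d14 = vld1_u8(us);
    us += pitch;
    d16 = vld1_u8(us);
    us += pitch;
    d18 = vld1_u8(us);
    us += pitch;
    d20 = vld1_u8(us);

    vs = v - 4;
    d7 = vld1_u8(vs);
    vs += pitch;
    d9 = vld1_u8(vs);
    vs += pitch;
    d11 = vld1_u8(vs);
    vs += pitch;
    d13 = vld1_u8(vs);
    vs += pitch;
    d15 = vld1_u8(vs);
    vs += pitch;
    d17 = vld1_u8(vs);
    vs += pitch;
    d19 = vld1_u8(vs);
    vs += pitch;
    d21 = vld1_u8(vs);
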
    q3 = vcombine_u8(d6, d7);
    q4 = vcombine_u8(d8, d9);
    q5 = vcombine_u8(d10, d11);
    q6 = vcombine_u8(d12, d13);
    q7 = vcombine_u8(d14, d15);
    q8 = vcombine_u8(d16, d17);
    q9 = vcombine_u8(d18, d19);
    q10 = vcombine_u8(d20, d21);

    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                       vreinterpretq_u16_u32(q2tmp2.val[0]));
    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                       vreinterpretq_u16_u32(q2tmp3.val[0]));
    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                       vreinterpretq_u16_u32(q2tmp2.val[1]));
    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                       vreinterpretq_u16_u32(q2tmp3.val[1]));

    q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                      vreinterpretq_u8_u16(q2tmp5.val[0]));
    q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                      vreinterpretq_u8_u16(q2tmp5.val[1]));
    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                       vreinterpretq_u8_u16(q2tmp7.val[0]));
    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                       vreinterpretq_u8_u16(q2tmp7.val[1]));

    q3 = q2tmp8.val[0];
    q4 = q2tmp8.val[1];
    q5 = q2tmp9.val[0];
    q6 = q2tmp9.val[1];
    q7 = q2tmp10.val[0];
    q8 = q2tmp10.val[1];
    q9 = q2tmp11.val[0];
    q10 = q2tmp11.val[1];

    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
                           q5, q6, q7, q8, q9, q10,
                           &q4, &q5, &q6, &q7, &q8, &q9);

    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));

    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
                       vreinterpretq_u16_u32(q2tmp2.val[0]));
    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
                       vreinterpretq_u16_u32(q2tmp3.val[0]));
    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
                       vreinterpretq_u16_u32(q2tmp2.val[1]));
    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
                       vreinterpretq_u16_u32(q2tmp3.val[1]));

    q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
                      vreinterpretq_u8_u16(q2tmp5.val[0]));
    q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
                      vreinterpretq_u8_u16(q2tmp5.val[1]));
    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
                       vreinterpretq_u8_u16(q2tmp7.val[0]));
    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
                       vreinterpretq_u8_u16(q2tmp7.val[1]));

    q3 = q2tmp8.val[0];
    q4 = q2tmp8.val[1];
    q5 = q2tmp9.val[0];
    q6 = q2tmp9.val[1];
    q7 = q2tmp10.val[0];
    q8 = q2tmp10.val[1];
    q9 = q2tmp11.val[0];
    q10 = q2tmp11.val[1];

    ud = u - 4;
    vst1_u8(ud, vget_low_u8(q3));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q4));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q5));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q6));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q7));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q8));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q9));
    ud += pitch;
    vst1_u8(ud, vget_low_u8(q10));

    vd = v - 4;
    vst1_u8(vd, vget_high_u8(q3));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q4));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q5));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q6));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q7));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q8));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q9));
    vd += pitch;
    vst1_u8(vd, vget_high_u8(q10));
    return;
}