/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <arm_neon.h>

#include "./vpx_config.h"
14 #if (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7))
15 static INLINE void write_2x8(unsigned char *dst, int pitch,
16 const uint8x8x2_t result,
17 const uint8x8x2_t result2) {
18 vst2_lane_u8(dst, result, 0);
20 vst2_lane_u8(dst, result, 1);
22 vst2_lane_u8(dst, result, 2);
24 vst2_lane_u8(dst, result, 3);
26 vst2_lane_u8(dst, result, 4);
28 vst2_lane_u8(dst, result, 5);
30 vst2_lane_u8(dst, result, 6);
32 vst2_lane_u8(dst, result, 7);
35 vst2_lane_u8(dst, result2, 0);
37 vst2_lane_u8(dst, result2, 1);
39 vst2_lane_u8(dst, result2, 2);
41 vst2_lane_u8(dst, result2, 3);
43 vst2_lane_u8(dst, result2, 4);
45 vst2_lane_u8(dst, result2, 5);
47 vst2_lane_u8(dst, result2, 6);
49 vst2_lane_u8(dst, result2, 7);
52 static INLINE void write_2x4(unsigned char *dst, int pitch,
53 const uint8x8x2_t result) {
56 00 01 02 03 | 04 05 06 07
57 10 11 12 13 | 14 15 16 17
60 00 10 02 12 | 04 14 06 16
61 01 11 03 13 | 05 15 07 17
63 const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0],
65 const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]);
66 const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]);
67 vst1_lane_u16((uint16_t *)dst, x_0_4, 0);
69 vst1_lane_u16((uint16_t *)dst, x_1_5, 0);
71 vst1_lane_u16((uint16_t *)dst, x_0_4, 1);
73 vst1_lane_u16((uint16_t *)dst, x_1_5, 1);
75 vst1_lane_u16((uint16_t *)dst, x_0_4, 2);
77 vst1_lane_u16((uint16_t *)dst, x_1_5, 2);
79 vst1_lane_u16((uint16_t *)dst, x_0_4, 3);
81 vst1_lane_u16((uint16_t *)dst, x_1_5, 3);
84 static INLINE void write_2x8(unsigned char *dst, int pitch,
85 const uint8x8x2_t result,
86 const uint8x8x2_t result2) {
87 write_2x4(dst, pitch, result);
89 write_2x4(dst, pitch, result2);
94 #if (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7))
96 uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
97 x = vld4_lane_u8(src, x, 0);
99 x = vld4_lane_u8(src, x, 1);
101 x = vld4_lane_u8(src, x, 2);
103 x = vld4_lane_u8(src, x, 3);
105 x = vld4_lane_u8(src, x, 4);
107 x = vld4_lane_u8(src, x, 5);
109 x = vld4_lane_u8(src, x, 6);
111 x = vld4_lane_u8(src, x, 7);
116 uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
117 const uint8x8_t a = vld1_u8(src);
118 const uint8x8_t b = vld1_u8(src + pitch * 1);
119 const uint8x8_t c = vld1_u8(src + pitch * 2);
120 const uint8x8_t d = vld1_u8(src + pitch * 3);
121 const uint8x8_t e = vld1_u8(src + pitch * 4);
122 const uint8x8_t f = vld1_u8(src + pitch * 5);
123 const uint8x8_t g = vld1_u8(src + pitch * 6);
124 const uint8x8_t h = vld1_u8(src + pitch * 7);
125 const uint32x2x2_t r04_u32 = vtrn_u32(vreinterpret_u32_u8(a),
126 vreinterpret_u32_u8(e));
127 const uint32x2x2_t r15_u32 = vtrn_u32(vreinterpret_u32_u8(b),
128 vreinterpret_u32_u8(f));
129 const uint32x2x2_t r26_u32 = vtrn_u32(vreinterpret_u32_u8(c),
130 vreinterpret_u32_u8(g));
131 const uint32x2x2_t r37_u32 = vtrn_u32(vreinterpret_u32_u8(d),
132 vreinterpret_u32_u8(h));
133 const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
134 vreinterpret_u16_u32(r26_u32.val[0]));
135 const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
136 vreinterpret_u16_u32(r37_u32.val[0]));
137 const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
138 vreinterpret_u8_u16(r13_u16.val[0]));
139 const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
140 vreinterpret_u8_u16(r13_u16.val[1]));
143 00 01 02 03 | 40 41 42 43
144 10 11 12 13 | 50 51 52 53
145 20 21 22 23 | 60 61 62 63
146 30 31 32 33 | 70 71 72 73
149 00 01 20 21 | 40 41 60 61
150 02 03 22 23 | 42 43 62 63
151 10 11 30 31 | 50 51 70 71
152 12 13 32 33 | 52 52 72 73
154 00 01 20 21 | 40 41 60 61
155 10 11 30 31 | 50 51 70 71
156 02 03 22 23 | 42 43 62 63
157 12 13 32 33 | 52 52 72 73
160 00 10 20 30 | 40 50 60 70
161 01 11 21 31 | 41 51 61 71
162 02 12 22 32 | 42 52 62 72
163 03 13 23 33 | 43 53 63 73
165 x.val[0] = r01_u8.val[0];
166 x.val[1] = r01_u8.val[1];
167 x.val[2] = r23_u8.val[0];
168 x.val[3] = r23_u8.val[1];
174 static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
177 const unsigned char *blimit) {
179 uint8x16_t qblimit, q0u8;
180 uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
181 int16x8_t q2s16, q13s16, q11s16;
182 int8x8_t d28s8, d29s8;
183 int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
184 uint8x8x4_t d0u8x4; // d6, d7, d8, d9
185 uint8x8x4_t d1u8x4; // d10, d11, d12, d13
186 uint8x8x2_t d2u8x2; // d12, d13
187 uint8x8x2_t d3u8x2; // d14, d15
189 qblimit = vdupq_n_u8(*blimit);
192 d0u8x4 = read_4x8(src1, p, d0u8x4);
194 d1u8x4 = read_4x8(src1, p, d1u8x4);
196 q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]); // d6 d10
197 q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]); // d8 d12
198 q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]); // d7 d11
199 q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]); // d9 d13
201 q15u8 = vabdq_u8(q5u8, q4u8);
202 q14u8 = vabdq_u8(q3u8, q6u8);
204 q15u8 = vqaddq_u8(q15u8, q15u8);
205 q14u8 = vshrq_n_u8(q14u8, 1);
206 q0u8 = vdupq_n_u8(0x80);
207 q11s16 = vdupq_n_s16(3);
208 q15u8 = vqaddq_u8(q15u8, q14u8);
210 q3u8 = veorq_u8(q3u8, q0u8);
211 q4u8 = veorq_u8(q4u8, q0u8);
212 q5u8 = veorq_u8(q5u8, q0u8);
213 q6u8 = veorq_u8(q6u8, q0u8);
215 q15u8 = vcgeq_u8(qblimit, q15u8);
217 q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
218 vget_low_s8(vreinterpretq_s8_u8(q5u8)));
219 q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
220 vget_high_s8(vreinterpretq_s8_u8(q5u8)));
222 q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8),
223 vreinterpretq_s8_u8(q6u8));
225 q2s16 = vmulq_s16(q2s16, q11s16);
226 q13s16 = vmulq_s16(q13s16, q11s16);
228 q11u8 = vdupq_n_u8(3);
229 q12u8 = vdupq_n_u8(4);
231 q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
232 q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));
234 d28s8 = vqmovn_s16(q2s16);
235 d29s8 = vqmovn_s16(q13s16);
236 q14s8 = vcombine_s8(d28s8, d29s8);
238 q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));
240 q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
241 q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
242 q2s8 = vshrq_n_s8(q2s8, 3);
243 q14s8 = vshrq_n_s8(q3s8, 3);
245 q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
246 q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);
248 q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
249 q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
251 d2u8x2.val[0] = vget_low_u8(q6u8); // d12
252 d2u8x2.val[1] = vget_low_u8(q7u8); // d14
253 d3u8x2.val[0] = vget_high_u8(q6u8); // d13
254 d3u8x2.val[1] = vget_high_u8(q7u8); // d15
257 write_2x8(src1, p, d2u8x2, d3u8x2);
/* Simple vertical loop filter for the three interior block edges of a
 * macroblock (columns 4, 8 and 12 relative to y_ptr). */
void vp8_loop_filter_bvs_neon(
        unsigned char *y_ptr,
        int y_stride,
        const unsigned char *blimit) {
    y_ptr += 4;
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    y_ptr += 4;
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    y_ptr += 4;
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    return;
}
/* Simple vertical loop filter for the left macroblock edge (column 0). */
void vp8_loop_filter_mbvs_neon(
        unsigned char *y_ptr,
        int y_stride,
        const unsigned char *blimit) {
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    return;
}