Upstream version 9.38.198.0
[platform/framework/web/crosswalk.git] / src / third_party / libvpx / source / libvpx / vp8 / encoder / arm / neon / denoising_neon.c
/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
10
11 #include <arm_neon.h>
12
13 #include "vp8/encoder/denoising.h"
14 #include "vpx_mem/vpx_mem.h"
15 #include "./vp8_rtcd.h"
16
/*
 * The filter function was modified to reduce the computational complexity.
 *
 * Step 1:
 *  Instead of applying tap coefficients for each pixel, we calculated the
 *  pixel adjustments vs. pixel diff value ahead of time.
 *     adjustment = filtered_value - current_raw
 *                = (filter_coefficient * diff + 128) >> 8
 *  where
 *     filter_coefficient = (255 << 8) / (256 + ((abs_diff * 330) >> 3));
 *     filter_coefficient += filter_coefficient /
 *                           (3 + motion_magnitude_adjustment);
 *     filter_coefficient is clamped to 0 ~ 255.
 *
 * Step 2:
 *  The adjustment vs. diff curve becomes flat very quick when diff increases.
 *  This allowed us to use only several levels to approximate the curve without
 *  changing the filtering algorithm too much.
 *  The adjustments were further corrected by checking the motion magnitude.
 *  The levels used are:
 *      diff          level       adjustment w/o       adjustment w/
 *                               motion correction    motion correction
 *      [-255, -16]     3              -6                   -7
 *      [-15, -8]       2              -4                   -5
 *      [-7, -4]        1              -3                   -4
 *      [-3, 3]         0              diff                 diff
 *      [4, 7]          1               3                    4
 *      [8, 15]         2               4                    5
 *      [16, 255]       3               6                    7
 */
47
/* Denoise one 16x16 Y-plane macroblock.
 *
 * mc_running_avg_y / mc_running_avg_y_stride: motion-compensated running
 *     average of previous frames (read-only input).
 * running_avg_y / running_avg_y_stride: destination for the filtered block.
 * sig / sig_stride: current raw source block; on FILTER_BLOCK it is
 *     overwritten with the filtered result via vp8_copy_mem16x16.
 * motion_magnitude: motion estimate used to pick adjustment strength.
 * increase_denoising: nonzero selects the more aggressive thresholds.
 *
 * Returns FILTER_BLOCK when denoising was applied, or COPY_BLOCK when the
 * total adjustment exceeded the threshold (even after the weaker second
 * pass) and the caller should copy the source block unfiltered.
 */
int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
                             int mc_running_avg_y_stride,
                             unsigned char *running_avg_y,
                             int running_avg_y_stride,
                             unsigned char *sig, int sig_stride,
                             unsigned int motion_magnitude,
                             int increase_denoising) {
    /* If motion_magnitude is small, making the denoiser more aggressive by
     * increasing the adjustment for each level, level1 adjustment is
     * increased, the deltas stay the same.
     */
    int shift_inc  = (increase_denoising &&
        motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
    const uint8x16_t v_level1_adjustment = vmovq_n_u8(
        (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
    const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
    const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
    const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
    const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
    const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
    int64x2_t v_sum_diff_total = vdupq_n_s64(0);

    /* Go over lines. */
    int r;
    for (r = 0; r < 16; ++r) {
        /* Load inputs. */
        const uint8x16_t v_sig = vld1q_u8(sig);
        const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

        /* Calculate absolute difference and sign masks. */
        const uint8x16_t v_abs_diff      = vabdq_u8(v_sig, v_mc_running_avg_y);
        const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
        const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);

        /* Figure out which level that put us in. */
        const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold,
                                                  v_abs_diff);
        const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold,
                                                  v_abs_diff);
        const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold,
                                                  v_abs_diff);

        /* Calculate absolute adjustments for level 1, 2 and 3. */
        const uint8x16_t v_level2_adjustment = vandq_u8(v_level2_mask,
                                                        v_delta_level_1_and_2);
        const uint8x16_t v_level3_adjustment = vandq_u8(v_level3_mask,
                                                        v_delta_level_2_and_3);
        const uint8x16_t v_level1and2_adjustment = vaddq_u8(v_level1_adjustment,
            v_level2_adjustment);
        const uint8x16_t v_level1and2and3_adjustment = vaddq_u8(
            v_level1and2_adjustment, v_level3_adjustment);

        /* Figure adjustment absolute value by selecting between the absolute
         * difference if in level0 or the value for level 1, 2 and 3.
         */
        const uint8x16_t v_abs_adjustment = vbslq_u8(v_level1_mask,
            v_level1and2and3_adjustment, v_abs_diff);

        /* Calculate positive and negative adjustments. Apply them to the signal
         * and accumulate them. Adjustments are less than eight and the maximum
         * sum of them (7 * 16) can fit in a signed char.
         */
        const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask,
                                                     v_abs_adjustment);
        const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask,
                                                     v_abs_adjustment);

        uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
        v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);

        /* Store results. */
        vst1q_u8(running_avg_y, v_running_avg_y);

        /* Sum all the accumulators to have the sum of all pixel differences
         * for this macroblock.
         */
        {
            const int8x16_t v_sum_diff =
                vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
                          vreinterpretq_s8_u8(v_neg_adjustment));

            const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);

            const int32x4_t fedc_ba98_7654_3210 =
                vpaddlq_s16(fe_dc_ba_98_76_54_32_10);

            const int64x2_t fedcba98_76543210 =
                vpaddlq_s32(fedc_ba98_7654_3210);

            v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
        }

        /* Update pointers for next iteration. */
        sig += sig_stride;
        mc_running_avg_y += mc_running_avg_y_stride;
        running_avg_y += running_avg_y_stride;
    }

    /* Too much adjustments => copy block. */
    {
        int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                                      vget_low_s64(v_sum_diff_total));
        int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
        int sum_diff_thresh = SUM_DIFF_THRESHOLD;

        if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
        if (sum_diff > sum_diff_thresh) {
          // Before returning to copy the block (i.e., apply no denoising),
          // check if we can still apply some (weaker) temporal filtering to
          // this block, that would otherwise not be denoised at all. Simplest
          // is to apply an additional adjustment to running_avg_y to bring it
          // closer to sig. The adjustment is capped by a maximum delta, and
          // chosen such that in most cases the resulting sum_diff will be
          // within the acceptable range given by sum_diff_thresh.

          // The delta is set by the excess of absolute pixel diff over the
          // threshold.
          int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
          // Only apply the adjustment for max delta up to 3.
          if (delta < 4) {
            const uint8x16_t k_delta = vmovq_n_u8(delta);
            sig -= sig_stride * 16;
            mc_running_avg_y -= mc_running_avg_y_stride * 16;
            running_avg_y -= running_avg_y_stride * 16;
            for (r = 0; r < 16; ++r) {
              uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
              const uint8x16_t v_sig = vld1q_u8(sig);
              const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

              /* Calculate absolute difference and sign masks. */
              const uint8x16_t v_abs_diff      = vabdq_u8(v_sig,
                                                          v_mc_running_avg_y);
              const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig,
                                                          v_mc_running_avg_y);
              const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig,
                                                          v_mc_running_avg_y);
              // Clamp absolute difference to delta to get the adjustment.
              const uint8x16_t v_abs_adjustment =
                  vminq_u8(v_abs_diff, (k_delta));

              const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask,
                                                           v_abs_adjustment);
              const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask,
                                                           v_abs_adjustment);

              /* Note the reversed sign vs. the first pass: pull the already
               * filtered result back towards the source signal. */
              v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
              v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);

              /* Store results. */
              vst1q_u8(running_avg_y, v_running_avg_y);

              {
                  const int8x16_t v_sum_diff =
                      vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
                                vreinterpretq_s8_u8(v_pos_adjustment));

                  const int16x8_t fe_dc_ba_98_76_54_32_10 =
                      vpaddlq_s8(v_sum_diff);
                  const int32x4_t fedc_ba98_7654_3210 =
                      vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
                  const int64x2_t fedcba98_76543210 =
                      vpaddlq_s32(fedc_ba98_7654_3210);

                  v_sum_diff_total = vqaddq_s64(v_sum_diff_total,
                                                fedcba98_76543210);
              }
              /* Update pointers for next iteration. */
              sig += sig_stride;
              mc_running_avg_y += mc_running_avg_y_stride;
              running_avg_y += running_avg_y_stride;
            }
            {
              // Update the sum of all pixel differences of this MB.
              x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                            vget_low_s64(v_sum_diff_total));
              sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);

              if (sum_diff > sum_diff_thresh) {
                return COPY_BLOCK;
              }
            }
          } else {
            return COPY_BLOCK;
          }
        }
    }

    /* Tell above level that block was filtered. */
    running_avg_y -= running_avg_y_stride * 16;
    sig -= sig_stride * 16;

    vp8_copy_mem16x16(running_avg_y, running_avg_y_stride, sig, sig_stride);

    return FILTER_BLOCK;
}
243
/* Denoise one 8x8 chroma (U or V) block.
 *
 * Same contract as vp8_denoiser_filter_neon but for an 8x8 block:
 * mc_running_avg is the motion-compensated running average, running_avg
 * receives the filtered result, and sig is the raw source block which is
 * overwritten (via vp8_copy_mem8x8) on FILTER_BLOCK.
 *
 * Additionally skips denoising entirely (returns COPY_BLOCK) when the
 * block's pixel sum is close to the mid-level 128 * 8 * 8, to avoid
 * disturbing near-average color signal.
 */
int vp8_denoiser_filter_uv_neon(unsigned char *mc_running_avg,
                             int mc_running_avg_stride,
                             unsigned char *running_avg,
                             int running_avg_stride,
                             unsigned char *sig, int sig_stride,
                             unsigned int motion_magnitude,
                             int increase_denoising) {
    /* If motion_magnitude is small, making the denoiser more aggressive by
     * increasing the adjustment for each level, level1 adjustment is
     * increased, the deltas stay the same.
     */
    int shift_inc  = (increase_denoising &&
        motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 1 : 0;
    const uint8x16_t v_level1_adjustment = vmovq_n_u8(
        (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 4 + shift_inc : 3);

    const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
    const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
    const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
    const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
    const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
    int64x2_t v_sum_diff_total = vdupq_n_s64(0);
    int r;

    {
      uint16x4_t v_sum_block = vdup_n_u16(0);

      // Avoid denoising color signal if its close to average level.
      for (r = 0; r < 8; ++r) {
        const uint8x8_t v_sig = vld1_u8(sig);
        const uint16x4_t _76_54_32_10 = vpaddl_u8(v_sig);
        v_sum_block = vqadd_u16(v_sum_block, _76_54_32_10);
        sig += sig_stride;
      }
      sig -= sig_stride * 8;
      {
        const uint32x2_t _7654_3210 = vpaddl_u16(v_sum_block);
        const uint64x1_t _76543210 = vpaddl_u32(_7654_3210);
        const int sum_block =
            vget_lane_s32(vreinterpret_s32_u64(_76543210), 0);
        if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) {
          return COPY_BLOCK;
        }
      }
    }

    /* Go over lines: two 8-pixel rows are packed into one 16-lane vector,
     * so 4 iterations cover all 8 rows. */
    for (r = 0; r < 4; ++r) {
        /* Load inputs. */
        const uint8x8_t v_sig_lo = vld1_u8(sig);
        const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]);
        const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);
        const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg);
        const uint8x8_t v_mc_running_avg_hi =
            vld1_u8(&mc_running_avg[mc_running_avg_stride]);
        const uint8x16_t v_mc_running_avg =
            vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);
        /* Calculate absolute difference and sign masks. */
        const uint8x16_t v_abs_diff      = vabdq_u8(v_sig, v_mc_running_avg);
        const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg);
        const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg);

        /* Figure out which level that put us in. */
        const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold,
                                                  v_abs_diff);
        const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold,
                                                  v_abs_diff);
        const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold,
                                                  v_abs_diff);

        /* Calculate absolute adjustments for level 1, 2 and 3. */
        const uint8x16_t v_level2_adjustment = vandq_u8(v_level2_mask,
                                                        v_delta_level_1_and_2);
        const uint8x16_t v_level3_adjustment = vandq_u8(v_level3_mask,
                                                        v_delta_level_2_and_3);
        const uint8x16_t v_level1and2_adjustment = vaddq_u8(v_level1_adjustment,
            v_level2_adjustment);
        const uint8x16_t v_level1and2and3_adjustment = vaddq_u8(
            v_level1and2_adjustment, v_level3_adjustment);

        /* Figure adjustment absolute value by selecting between the absolute
         * difference if in level0 or the value for level 1, 2 and 3.
         */
        const uint8x16_t v_abs_adjustment = vbslq_u8(v_level1_mask,
            v_level1and2and3_adjustment, v_abs_diff);

        /* Calculate positive and negative adjustments. Apply them to the signal
         * and accumulate them. Adjustments are less than eight and the maximum
         * sum of them (7 * 16) can fit in a signed char.
         */
        const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask,
                                                     v_abs_adjustment);
        const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask,
                                                     v_abs_adjustment);

        uint8x16_t v_running_avg = vqaddq_u8(v_sig, v_pos_adjustment);
        v_running_avg = vqsubq_u8(v_running_avg, v_neg_adjustment);

        /* Store results. */
        vst1_u8(running_avg, vget_low_u8(v_running_avg));
        vst1_u8(&running_avg[running_avg_stride], vget_high_u8(v_running_avg));

        /* Sum all the accumulators to have the sum of all pixel differences
         * for this macroblock.
         */
        {
            const int8x16_t v_sum_diff =
                vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
                          vreinterpretq_s8_u8(v_neg_adjustment));

            const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);

            const int32x4_t fedc_ba98_7654_3210 =
                vpaddlq_s16(fe_dc_ba_98_76_54_32_10);

            const int64x2_t fedcba98_76543210 =
                vpaddlq_s32(fedc_ba98_7654_3210);

            v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
        }

        /* Update pointers for next iteration. */
        sig += sig_stride * 2;
        mc_running_avg += mc_running_avg_stride * 2;
        running_avg += running_avg_stride * 2;
    }


    /* Too much adjustments => copy block. */
    {
        int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                                      vget_low_s64(v_sum_diff_total));
        int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
        int sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
        if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV;
        if (sum_diff > sum_diff_thresh) {
          // Before returning to copy the block (i.e., apply no denoising),
          // check if we can still apply some (weaker) temporal filtering to
          // this block, that would otherwise not be denoised at all. Simplest
          // is to apply an additional adjustment to running_avg_y to bring it
          // closer to sig. The adjustment is capped by a maximum delta, and
          // chosen such that in most cases the resulting sum_diff will be
          // within the acceptable range given by sum_diff_thresh.

          // The delta is set by the excess of absolute pixel diff over the
          // threshold.
          int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
          // Only apply the adjustment for max delta up to 3.
          if (delta < 4) {
            const uint8x16_t k_delta = vmovq_n_u8(delta);
            sig -= sig_stride * 8;
            mc_running_avg -= mc_running_avg_stride * 8;
            running_avg -= running_avg_stride * 8;
            for (r = 0; r < 4; ++r) {
              const uint8x8_t v_sig_lo = vld1_u8(sig);
              const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]);
              const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);
              const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg);
              const uint8x8_t v_mc_running_avg_hi =
                  vld1_u8(&mc_running_avg[mc_running_avg_stride]);
              const uint8x16_t v_mc_running_avg =
                  vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);
              /* Calculate absolute difference and sign masks. */
              const uint8x16_t v_abs_diff      = vabdq_u8(v_sig,
                                                          v_mc_running_avg);
              const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig,
                                                          v_mc_running_avg);
              const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig,
                                                          v_mc_running_avg);
              // Clamp absolute difference to delta to get the adjustment.
              const uint8x16_t v_abs_adjustment =
                  vminq_u8(v_abs_diff, (k_delta));

              const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask,
                                                           v_abs_adjustment);
              const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask,
                                                           v_abs_adjustment);
              const uint8x8_t v_running_avg_lo = vld1_u8(running_avg);
              const uint8x8_t v_running_avg_hi =
                  vld1_u8(&running_avg[running_avg_stride]);
              uint8x16_t v_running_avg =
                  vcombine_u8(v_running_avg_lo, v_running_avg_hi);

              /* Note the reversed sign vs. the first pass: pull the already
               * filtered result back towards the source signal. */
              v_running_avg = vqsubq_u8(v_running_avg, v_pos_adjustment);
              v_running_avg = vqaddq_u8(v_running_avg, v_neg_adjustment);

              /* Store results. */
              vst1_u8(running_avg, vget_low_u8(v_running_avg));
              vst1_u8(&running_avg[running_avg_stride],
                      vget_high_u8(v_running_avg));

              {
                  const int8x16_t v_sum_diff =
                      vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
                                vreinterpretq_s8_u8(v_pos_adjustment));

                  const int16x8_t fe_dc_ba_98_76_54_32_10 =
                      vpaddlq_s8(v_sum_diff);
                  const int32x4_t fedc_ba98_7654_3210 =
                      vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
                  const int64x2_t fedcba98_76543210 =
                      vpaddlq_s32(fedc_ba98_7654_3210);

                  v_sum_diff_total = vqaddq_s64(v_sum_diff_total,
                                                fedcba98_76543210);
              }
              /* Update pointers for next iteration. */
              sig += sig_stride * 2;
              mc_running_avg += mc_running_avg_stride * 2;
              running_avg += running_avg_stride * 2;
            }
            {
              // Update the sum of all pixel differences of this MB.
              x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                            vget_low_s64(v_sum_diff_total));
              sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);

              if (sum_diff > sum_diff_thresh) {
                return COPY_BLOCK;
              }
            }
          } else {
            return COPY_BLOCK;
          }
        }
    }

    /* Tell above level that block was filtered. */
    running_avg -= running_avg_stride * 8;
    sig -= sig_stride * 8;

    vp8_copy_mem8x8(running_avg, running_avg_stride, sig, sig_stride);

    return FILTER_BLOCK;
}