Revert "VP8 for ARMv8 by using NEON intrinsics 08"
author Johann <johannkoenig@google.com>
Tue, 6 May 2014 19:25:33 +0000 (12:25 -0700)
committer Johann <johannkoenig@google.com>
Tue, 6 May 2014 20:20:24 +0000 (13:20 -0700)
This reverts commit a5d79f43b963ced59b462206faf3b7857bdeff7b

There is an issue with gcc 4.6 in the Android NDK:
loopfilter_neon.c: In function 'vp8_loop_filter_vertical_edge_y_neon':
loopfilter_neon.c:394:1: error: insn does not satisfy its constraints:

Change-Id: I2b8c6ee3fa595c152ac3a5c08dd79bd9770c7b52

vp8/common/arm/neon/loopfilter_neon.asm [new file with mode: 0644]
vp8/common/arm/neon/loopfilter_neon.c [deleted file]
vp8/vp8_common.mk

diff --git a/vp8/common/arm/neon/loopfilter_neon.asm b/vp8/common/arm/neon/loopfilter_neon.asm
new file mode 100644 (file)
index 0000000..c4f09c7
--- /dev/null
@@ -0,0 +1,409 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_loop_filter_horizontal_edge_y_neon|
+    EXPORT  |vp8_loop_filter_horizontal_edge_uv_neon|
+    EXPORT  |vp8_loop_filter_vertical_edge_y_neon|
+    EXPORT  |vp8_loop_filter_vertical_edge_uv_neon|
+    ARM
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; r0    unsigned char *src
+; r1    int pitch
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh,
+|vp8_loop_filter_horizontal_edge_y_neon| PROC
+    push        {lr}
+    vpush       {d8-d15}
+
+    vdup.u8     q0, r2                     ; duplicate blimit
+    vdup.u8     q1, r3                     ; duplicate limit
+    sub         r2, r0, r1, lsl #2         ; move src pointer down by 4 lines
+    ldr         r3, [sp, #68]              ; load thresh
+    add         r12, r2, r1
+    add         r1, r1, r1
+
+    vdup.u8     q2, r3                     ; duplicate thresh
+
+    vld1.u8     {q3}, [r2@128], r1              ; p3
+    vld1.u8     {q4}, [r12@128], r1             ; p2
+    vld1.u8     {q5}, [r2@128], r1              ; p1
+    vld1.u8     {q6}, [r12@128], r1             ; p0
+    vld1.u8     {q7}, [r2@128], r1              ; q0
+    vld1.u8     {q8}, [r12@128], r1             ; q1
+    vld1.u8     {q9}, [r2@128]                  ; q2
+    vld1.u8     {q10}, [r12@128]                ; q3
+
+    sub         r2, r2, r1, lsl #1
+    sub         r12, r12, r1, lsl #1
+
+    bl          vp8_loop_filter_neon
+
+    vst1.u8     {q5}, [r2@128], r1              ; store op1
+    vst1.u8     {q6}, [r12@128], r1             ; store op0
+    vst1.u8     {q7}, [r2@128], r1              ; store oq0
+    vst1.u8     {q8}, [r12@128], r1             ; store oq1
+
+    vpop        {d8-d15}
+    pop         {pc}
+    ENDP        ; |vp8_loop_filter_horizontal_edge_y_neon|
+
+
+; r0    unsigned char *u,
+; r1    int pitch,
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh,
+; sp+4  unsigned char *v
+|vp8_loop_filter_horizontal_edge_uv_neon| PROC
+    push        {lr}
+    vpush       {d8-d15}
+
+    vdup.u8     q0, r2                      ; duplicate blimit
+    vdup.u8     q1, r3                      ; duplicate limit
+    ldr         r12, [sp, #68]              ; load thresh
+    ldr         r2, [sp, #72]               ; load v ptr
+    vdup.u8     q2, r12                     ; duplicate thresh
+
+    sub         r3, r0, r1, lsl #2          ; move u pointer down by 4 lines
+    sub         r12, r2, r1, lsl #2         ; move v pointer down by 4 lines
+
+    vld1.u8     {d6}, [r3@64], r1              ; p3
+    vld1.u8     {d7}, [r12@64], r1             ; p3
+    vld1.u8     {d8}, [r3@64], r1              ; p2
+    vld1.u8     {d9}, [r12@64], r1             ; p2
+    vld1.u8     {d10}, [r3@64], r1             ; p1
+    vld1.u8     {d11}, [r12@64], r1            ; p1
+    vld1.u8     {d12}, [r3@64], r1             ; p0
+    vld1.u8     {d13}, [r12@64], r1            ; p0
+    vld1.u8     {d14}, [r3@64], r1             ; q0
+    vld1.u8     {d15}, [r12@64], r1            ; q0
+    vld1.u8     {d16}, [r3@64], r1             ; q1
+    vld1.u8     {d17}, [r12@64], r1            ; q1
+    vld1.u8     {d18}, [r3@64], r1             ; q2
+    vld1.u8     {d19}, [r12@64], r1            ; q2
+    vld1.u8     {d20}, [r3@64]                 ; q3
+    vld1.u8     {d21}, [r12@64]                ; q3
+
+    bl          vp8_loop_filter_neon
+
+    sub         r0, r0, r1, lsl #1
+    sub         r2, r2, r1, lsl #1
+
+    vst1.u8     {d10}, [r0@64], r1             ; store u op1
+    vst1.u8     {d11}, [r2@64], r1             ; store v op1
+    vst1.u8     {d12}, [r0@64], r1             ; store u op0
+    vst1.u8     {d13}, [r2@64], r1             ; store v op0
+    vst1.u8     {d14}, [r0@64], r1             ; store u oq0
+    vst1.u8     {d15}, [r2@64], r1             ; store v oq0
+    vst1.u8     {d16}, [r0@64]                 ; store u oq1
+    vst1.u8     {d17}, [r2@64]                 ; store v oq1
+
+    vpop        {d8-d15}
+    pop         {pc}
+    ENDP        ; |vp8_loop_filter_horizontal_edge_uv_neon|
+
+; void vp8_loop_filter_vertical_edge_y_neon(unsigned char *src, int pitch,
+;                                           const signed char *flimit,
+;                                           const signed char *limit,
+;                                           const signed char *thresh,
+;                                           int count)
+; r0    unsigned char *src
+; r1    int pitch
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh,
+
+|vp8_loop_filter_vertical_edge_y_neon| PROC
+    push        {lr}
+    vpush       {d8-d15}
+
+    vdup.u8     q0, r2                     ; duplicate blimit
+    vdup.u8     q1, r3                     ; duplicate limit
+    sub         r2, r0, #4                 ; src ptr down by 4 columns
+    add         r1, r1, r1
+    ldr         r3, [sp, #68]              ; load thresh
+    add         r12, r2, r1, asr #1
+
+    vld1.u8     {d6}, [r2], r1
+    vld1.u8     {d8}, [r12], r1
+    vld1.u8     {d10}, [r2], r1
+    vld1.u8     {d12}, [r12], r1
+    vld1.u8     {d14}, [r2], r1
+    vld1.u8     {d16}, [r12], r1
+    vld1.u8     {d18}, [r2], r1
+    vld1.u8     {d20}, [r12], r1
+
+    vld1.u8     {d7}, [r2], r1              ; load second 8-line src data
+    vld1.u8     {d9}, [r12], r1
+    vld1.u8     {d11}, [r2], r1
+    vld1.u8     {d13}, [r12], r1
+    vld1.u8     {d15}, [r2], r1
+    vld1.u8     {d17}, [r12], r1
+    vld1.u8     {d19}, [r2]
+    vld1.u8     {d21}, [r12]
+
+    ;transpose to 8x16 matrix
+    vtrn.32     q3, q7
+    vtrn.32     q4, q8
+    vtrn.32     q5, q9
+    vtrn.32     q6, q10
+
+    vdup.u8     q2, r3                     ; duplicate thresh
+
+    vtrn.16     q3, q5
+    vtrn.16     q4, q6
+    vtrn.16     q7, q9
+    vtrn.16     q8, q10
+
+    vtrn.8      q3, q4
+    vtrn.8      q5, q6
+    vtrn.8      q7, q8
+    vtrn.8      q9, q10
+
+    bl          vp8_loop_filter_neon
+
+    vswp        d12, d11
+    vswp        d16, d13
+
+    sub         r0, r0, #2                 ; dst ptr
+
+    vswp        d14, d12
+    vswp        d16, d15
+
+    add         r12, r0, r1, asr #1
+
+    ;store op1, op0, oq0, oq1
+    vst4.8      {d10[0], d11[0], d12[0], d13[0]}, [r0], r1
+    vst4.8      {d10[1], d11[1], d12[1], d13[1]}, [r12], r1
+    vst4.8      {d10[2], d11[2], d12[2], d13[2]}, [r0], r1
+    vst4.8      {d10[3], d11[3], d12[3], d13[3]}, [r12], r1
+    vst4.8      {d10[4], d11[4], d12[4], d13[4]}, [r0], r1
+    vst4.8      {d10[5], d11[5], d12[5], d13[5]}, [r12], r1
+    vst4.8      {d10[6], d11[6], d12[6], d13[6]}, [r0], r1
+    vst4.8      {d10[7], d11[7], d12[7], d13[7]}, [r12], r1
+
+    vst4.8      {d14[0], d15[0], d16[0], d17[0]}, [r0], r1
+    vst4.8      {d14[1], d15[1], d16[1], d17[1]}, [r12], r1
+    vst4.8      {d14[2], d15[2], d16[2], d17[2]}, [r0], r1
+    vst4.8      {d14[3], d15[3], d16[3], d17[3]}, [r12], r1
+    vst4.8      {d14[4], d15[4], d16[4], d17[4]}, [r0], r1
+    vst4.8      {d14[5], d15[5], d16[5], d17[5]}, [r12], r1
+    vst4.8      {d14[6], d15[6], d16[6], d17[6]}, [r0]
+    vst4.8      {d14[7], d15[7], d16[7], d17[7]}, [r12]
+
+    vpop        {d8-d15}
+    pop         {pc}
+    ENDP        ; |vp8_loop_filter_vertical_edge_y_neon|
+
+; void vp8_loop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch
+;                                            const signed char *flimit,
+;                                            const signed char *limit,
+;                                            const signed char *thresh,
+;                                            unsigned char *v)
+; r0    unsigned char *u,
+; r1    int pitch,
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh,
+; sp+4  unsigned char *v
+|vp8_loop_filter_vertical_edge_uv_neon| PROC
+    push        {lr}
+    vpush       {d8-d15}
+
+    vdup.u8     q0, r2                      ; duplicate blimit
+    sub         r12, r0, #4                 ; move u pointer down by 4 columns
+    ldr         r2, [sp, #72]               ; load v ptr
+    vdup.u8     q1, r3                      ; duplicate limit
+    sub         r3, r2, #4                  ; move v pointer down by 4 columns
+
+    vld1.u8     {d6}, [r12], r1             ;load u data
+    vld1.u8     {d7}, [r3], r1              ;load v data
+    vld1.u8     {d8}, [r12], r1
+    vld1.u8     {d9}, [r3], r1
+    vld1.u8     {d10}, [r12], r1
+    vld1.u8     {d11}, [r3], r1
+    vld1.u8     {d12}, [r12], r1
+    vld1.u8     {d13}, [r3], r1
+    vld1.u8     {d14}, [r12], r1
+    vld1.u8     {d15}, [r3], r1
+    vld1.u8     {d16}, [r12], r1
+    vld1.u8     {d17}, [r3], r1
+    vld1.u8     {d18}, [r12], r1
+    vld1.u8     {d19}, [r3], r1
+    vld1.u8     {d20}, [r12]
+    vld1.u8     {d21}, [r3]
+
+    ldr        r12, [sp, #68]              ; load thresh
+
+    ;transpose to 8x16 matrix
+    vtrn.32     q3, q7
+    vtrn.32     q4, q8
+    vtrn.32     q5, q9
+    vtrn.32     q6, q10
+
+    vdup.u8     q2, r12                     ; duplicate thresh
+
+    vtrn.16     q3, q5
+    vtrn.16     q4, q6
+    vtrn.16     q7, q9
+    vtrn.16     q8, q10
+
+    vtrn.8      q3, q4
+    vtrn.8      q5, q6
+    vtrn.8      q7, q8
+    vtrn.8      q9, q10
+
+    bl          vp8_loop_filter_neon
+
+    vswp        d12, d11
+    vswp        d16, d13
+    vswp        d14, d12
+    vswp        d16, d15
+
+    sub         r0, r0, #2
+    sub         r2, r2, #2
+
+    ;store op1, op0, oq0, oq1
+    vst4.8      {d10[0], d11[0], d12[0], d13[0]}, [r0], r1
+    vst4.8      {d14[0], d15[0], d16[0], d17[0]}, [r2], r1
+    vst4.8      {d10[1], d11[1], d12[1], d13[1]}, [r0], r1
+    vst4.8      {d14[1], d15[1], d16[1], d17[1]}, [r2], r1
+    vst4.8      {d10[2], d11[2], d12[2], d13[2]}, [r0], r1
+    vst4.8      {d14[2], d15[2], d16[2], d17[2]}, [r2], r1
+    vst4.8      {d10[3], d11[3], d12[3], d13[3]}, [r0], r1
+    vst4.8      {d14[3], d15[3], d16[3], d17[3]}, [r2], r1
+    vst4.8      {d10[4], d11[4], d12[4], d13[4]}, [r0], r1
+    vst4.8      {d14[4], d15[4], d16[4], d17[4]}, [r2], r1
+    vst4.8      {d10[5], d11[5], d12[5], d13[5]}, [r0], r1
+    vst4.8      {d14[5], d15[5], d16[5], d17[5]}, [r2], r1
+    vst4.8      {d10[6], d11[6], d12[6], d13[6]}, [r0], r1
+    vst4.8      {d14[6], d15[6], d16[6], d17[6]}, [r2], r1
+    vst4.8      {d10[7], d11[7], d12[7], d13[7]}, [r0]
+    vst4.8      {d14[7], d15[7], d16[7], d17[7]}, [r2]
+
+    vpop        {d8-d15}
+    pop         {pc}
+    ENDP        ; |vp8_loop_filter_vertical_edge_uv_neon|
+
+; void vp8_loop_filter_neon();
+; This is a helper function for the loopfilters. The invidual functions do the
+; necessary load, transpose (if necessary) and store.
+
+; r0-r3 PRESERVE
+; q0    flimit
+; q1    limit
+; q2    thresh
+; q3    p3
+; q4    p2
+; q5    p1
+; q6    p0
+; q7    q0
+; q8    q1
+; q9    q2
+; q10   q3
+|vp8_loop_filter_neon| PROC
+
+    ; vp8_filter_mask
+    vabd.u8     q11, q3, q4                 ; abs(p3 - p2)
+    vabd.u8     q12, q4, q5                 ; abs(p2 - p1)
+    vabd.u8     q13, q5, q6                 ; abs(p1 - p0)
+    vabd.u8     q14, q8, q7                 ; abs(q1 - q0)
+    vabd.u8     q3, q9, q8                  ; abs(q2 - q1)
+    vabd.u8     q4, q10, q9                 ; abs(q3 - q2)
+
+    vmax.u8     q11, q11, q12
+    vmax.u8     q12, q13, q14
+    vmax.u8     q3, q3, q4
+    vmax.u8     q15, q11, q12
+
+    vabd.u8     q9, q6, q7                  ; abs(p0 - q0)
+
+    ; vp8_hevmask
+    vcgt.u8     q13, q13, q2                ; (abs(p1 - p0) > thresh)*-1
+    vcgt.u8     q14, q14, q2                ; (abs(q1 - q0) > thresh)*-1
+    vmax.u8     q15, q15, q3
+
+    vmov.u8     q10, #0x80                   ; 0x80
+
+    vabd.u8     q2, q5, q8                  ; a = abs(p1 - q1)
+    vqadd.u8    q9, q9, q9                  ; b = abs(p0 - q0) * 2
+
+    vcge.u8     q15, q1, q15
+
+    ; vp8_filter() function
+    ; convert to signed
+    veor        q7, q7, q10                 ; qs0
+    vshr.u8     q2, q2, #1                  ; a = a / 2
+    veor        q6, q6, q10                 ; ps0
+
+    veor        q5, q5, q10                 ; ps1
+    vqadd.u8    q9, q9, q2                  ; a = b + a
+
+    veor        q8, q8, q10                 ; qs1
+
+    vmov.u8     q10, #3                     ; #3
+
+    vsubl.s8    q2, d14, d12                ; ( qs0 - ps0)
+    vsubl.s8    q11, d15, d13
+
+    vcge.u8     q9, q0, q9                  ; (a > flimit * 2 + limit) * -1
+
+    vmovl.u8    q4, d20
+
+    vqsub.s8    q1, q5, q8                  ; vp8_filter = clamp(ps1-qs1)
+    vorr        q14, q13, q14               ; vp8_hevmask
+
+    vmul.i16    q2, q2, q4                  ; 3 * ( qs0 - ps0)
+    vmul.i16    q11, q11, q4
+
+    vand        q1, q1, q14                 ; vp8_filter &= hev
+    vand        q15, q15, q9                ; vp8_filter_mask
+
+    vaddw.s8    q2, q2, d2
+    vaddw.s8    q11, q11, d3
+
+    vmov.u8     q9, #4                      ; #4
+
+    ; vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
+    vqmovn.s16  d2, q2
+    vqmovn.s16  d3, q11
+    vand        q1, q1, q15                 ; vp8_filter &= mask
+
+    vqadd.s8    q2, q1, q10                 ; Filter2 = clamp(vp8_filter+3)
+    vqadd.s8    q1, q1, q9                  ; Filter1 = clamp(vp8_filter+4)
+    vshr.s8     q2, q2, #3                  ; Filter2 >>= 3
+    vshr.s8     q1, q1, #3                  ; Filter1 >>= 3
+
+
+    vqadd.s8    q11, q6, q2                 ; u = clamp(ps0 + Filter2)
+    vqsub.s8    q10, q7, q1                 ; u = clamp(qs0 - Filter1)
+
+    ; outer tap adjustments: ++vp8_filter >> 1
+    vrshr.s8    q1, q1, #1
+    vbic        q1, q1, q14                 ; vp8_filter &= ~hev
+    vmov.u8     q0, #0x80                   ; 0x80
+    vqadd.s8    q13, q5, q1                 ; u = clamp(ps1 + vp8_filter)
+    vqsub.s8    q12, q8, q1                 ; u = clamp(qs1 - vp8_filter)
+
+    veor        q6, q11, q0                 ; *op0 = u^0x80
+    veor        q7, q10, q0                 ; *oq0 = u^0x80
+    veor        q5, q13, q0                 ; *op1 = u^0x80
+    veor        q8, q12, q0                 ; *oq1 = u^0x80
+
+    bx          lr
+    ENDP        ; |vp8_loop_filter_horizontal_edge_y_neon|
+
+;-----------------
+
+    END
diff --git a/vp8/common/arm/neon/loopfilter_neon.c b/vp8/common/arm/neon/loopfilter_neon.c
deleted file mode 100644 (file)
index be77e67..0000000
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-
-static INLINE void vp8_loop_filter_neon(
-        uint8x16_t qblimit,  // flimit
-        uint8x16_t qlimit,   // limit
-        uint8x16_t qthresh,  // thresh
-        uint8x16_t q3,       // p3
-        uint8x16_t q4,       // p2
-        uint8x16_t q5,       // p1
-        uint8x16_t q6,       // p0
-        uint8x16_t q7,       // q0
-        uint8x16_t q8,       // q1
-        uint8x16_t q9,       // q2
-        uint8x16_t q10,      // q3
-        uint8x16_t *q5r,     // p1
-        uint8x16_t *q6r,     // p0
-        uint8x16_t *q7r,     // q0
-        uint8x16_t *q8r) {   // q1
-    uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8;
-    int16x8_t q2s16, q11s16;
-    uint16x8_t q4u16;
-    int8x16_t q1s8, q2s8, q10s8, q11s8, q12s8, q13s8;
-    int8x8_t d2s8, d3s8;
-
-    q11u8 = vabdq_u8(q3, q4);
-    q12u8 = vabdq_u8(q4, q5);
-    q13u8 = vabdq_u8(q5, q6);
-    q14u8 = vabdq_u8(q8, q7);
-    q3    = vabdq_u8(q9, q8);
-    q4    = vabdq_u8(q10, q9);
-
-    q11u8 = vmaxq_u8(q11u8, q12u8);
-    q12u8 = vmaxq_u8(q13u8, q14u8);
-    q3    = vmaxq_u8(q3, q4);
-    q15u8 = vmaxq_u8(q11u8, q12u8);
-
-    q9 = vabdq_u8(q6, q7);
-
-    // vp8_hevmask
-    q13u8 = vcgtq_u8(q13u8, qthresh);
-    q14u8 = vcgtq_u8(q14u8, qthresh);
-    q15u8 = vmaxq_u8(q15u8, q3);
-
-    q2u8 = vabdq_u8(q5, q8);
-    q9 = vqaddq_u8(q9, q9);
-
-    q15u8 = vcgeq_u8(qlimit, q15u8);
-
-    // vp8_filter() function
-    // convert to signed
-    q10 = vdupq_n_u8(0x80);
-    q8 = veorq_u8(q8, q10);
-    q7 = veorq_u8(q7, q10);
-    q6 = veorq_u8(q6, q10);
-    q5 = veorq_u8(q5, q10);
-
-    q2u8 = vshrq_n_u8(q2u8, 1);
-    q9 = vqaddq_u8(q9, q2u8);
-
-    q10 = vdupq_n_u8(3);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
-                     vget_low_s8(vreinterpretq_s8_u8(q6)));
-    q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
-                      vget_high_s8(vreinterpretq_s8_u8(q6)));
-
-    q9 = vcgeq_u8(qblimit, q9);
-
-    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
-                    vreinterpretq_s8_u8(q8));
-
-    q14u8 = vorrq_u8(q13u8, q14u8);
-
-    q4u16 = vmovl_u8(vget_low_u8(q10));
-    q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16));
-    q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16));
-
-    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8);
-    q15u8 = vandq_u8(q15u8, q9);
-
-    q1s8 = vreinterpretq_s8_u8(q1u8);
-    q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
-    q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8));
-
-    q9 = vdupq_n_u8(4);
-    // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
-    d2s8 = vqmovn_s16(q2s16);
-    d3s8 = vqmovn_s16(q11s16);
-    q1s8 = vcombine_s8(d2s8, d3s8);
-    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8);
-    q1s8 = vreinterpretq_s8_u8(q1u8);
-
-    q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q10));
-    q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q1s8 = vshrq_n_s8(q1s8, 3);
-
-    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8);
-    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
-
-    q1s8 = vrshrq_n_s8(q1s8, 1);
-    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
-
-    q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
-    q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8);
-
-    q0u8 = vdupq_n_u8(0x80);
-    *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8);
-    *q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
-    *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
-    *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8);
-    return;
-}
-
-void vp8_loop_filter_horizontal_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-    src -= (pitch << 2);
-
-    q3 = vld1q_u8(src);
-    src += pitch;
-    q4 = vld1q_u8(src);
-    src += pitch;
-    q5 = vld1q_u8(src);
-    src += pitch;
-    q6 = vld1q_u8(src);
-    src += pitch;
-    q7 = vld1q_u8(src);
-    src += pitch;
-    q8 = vld1q_u8(src);
-    src += pitch;
-    q9 = vld1q_u8(src);
-    src += pitch;
-    q10 = vld1q_u8(src);
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    src -= (pitch * 5);
-    vst1q_u8(src, q5);
-    src += pitch;
-    vst1q_u8(src, q6);
-    src += pitch;
-    vst1q_u8(src, q7);
-    src += pitch;
-    vst1q_u8(src, q8);
-    return;
-}
-
-void vp8_loop_filter_horizontal_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    u -= (pitch << 2);
-    v -= (pitch << 2);
-
-    d6  = vld1_u8(u);
-    u += pitch;
-    d7  = vld1_u8(v);
-    v += pitch;
-    d8  = vld1_u8(u);
-    u += pitch;
-    d9  = vld1_u8(v);
-    v += pitch;
-    d10 = vld1_u8(u);
-    u += pitch;
-    d11 = vld1_u8(v);
-    v += pitch;
-    d12 = vld1_u8(u);
-    u += pitch;
-    d13 = vld1_u8(v);
-    v += pitch;
-    d14 = vld1_u8(u);
-    u += pitch;
-    d15 = vld1_u8(v);
-    v += pitch;
-    d16 = vld1_u8(u);
-    u += pitch;
-    d17 = vld1_u8(v);
-    v += pitch;
-    d18 = vld1_u8(u);
-    u += pitch;
-    d19 = vld1_u8(v);
-    v += pitch;
-    d20 = vld1_u8(u);
-    d21 = vld1_u8(v);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    u -= (pitch * 5);
-    vst1_u8(u, vget_low_u8(q5));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q6));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q7));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q8));
-
-    v -= (pitch * 5);
-    vst1_u8(v, vget_high_u8(q5));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q6));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q7));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q8));
-    return;
-}
-
-void vp8_loop_filter_vertical_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    unsigned char *s, *d;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-    uint8x8x4_t q4ResultH, q4ResultL;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    s = src - 4;
-    d6  = vld1_u8(s);
-    s += pitch;
-    d8  = vld1_u8(s);
-    s += pitch;
-    d10 = vld1_u8(s);
-    s += pitch;
-    d12 = vld1_u8(s);
-    s += pitch;
-    d14 = vld1_u8(s);
-    s += pitch;
-    d16 = vld1_u8(s);
-    s += pitch;
-    d18 = vld1_u8(s);
-    s += pitch;
-    d20 = vld1_u8(s);
-    s += pitch;
-    d7  = vld1_u8(s);
-    s += pitch;
-    d9  = vld1_u8(s);
-    s += pitch;
-    d11 = vld1_u8(s);
-    s += pitch;
-    d13 = vld1_u8(s);
-    s += pitch;
-    d15 = vld1_u8(s);
-    s += pitch;
-    d17 = vld1_u8(s);
-    s += pitch;
-    d19 = vld1_u8(s);
-    s += pitch;
-    d21 = vld1_u8(s);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    q4ResultL.val[0] = vget_low_u8(q5);   // d10
-    q4ResultL.val[1] = vget_low_u8(q6);   // d12
-    q4ResultL.val[2] = vget_low_u8(q7);   // d14
-    q4ResultL.val[3] = vget_low_u8(q8);   // d16
-    q4ResultH.val[0] = vget_high_u8(q5);  // d11
-    q4ResultH.val[1] = vget_high_u8(q6);  // d13
-    q4ResultH.val[2] = vget_high_u8(q7);  // d15
-    q4ResultH.val[3] = vget_high_u8(q8);  // d17
-
-    d = src - 2;
-    vst4_lane_u8(d, q4ResultL, 0);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultL, 1);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultL, 2);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultL, 3);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultL, 4);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultL, 5);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultL, 6);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultL, 7);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultH, 0);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultH, 1);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultH, 2);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultH, 3);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultH, 4);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultH, 5);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultH, 6);
-    d += pitch;
-    vst4_lane_u8(d, q4ResultH, 7);
-    return;
-}
-
-void vp8_loop_filter_vertical_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    unsigned char *us, *ud;
-    unsigned char *vs, *vd;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-    uint8x8x4_t q4ResultH, q4ResultL;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    us = u - 4;
-    d6 = vld1_u8(us);
-    us += pitch;
-    d8 = vld1_u8(us);
-    us += pitch;
-    d10 = vld1_u8(us);
-    us += pitch;
-    d12 = vld1_u8(us);
-    us += pitch;
-    d14 = vld1_u8(us);
-    us += pitch;
-    d16 = vld1_u8(us);
-    us += pitch;
-    d18 = vld1_u8(us);
-    us += pitch;
-    d20 = vld1_u8(us);
-
-    vs = v - 4;
-    d7 = vld1_u8(vs);
-    vs += pitch;
-    d9 = vld1_u8(vs);
-    vs += pitch;
-    d11 = vld1_u8(vs);
-    vs += pitch;
-    d13 = vld1_u8(vs);
-    vs += pitch;
-    d15 = vld1_u8(vs);
-    vs += pitch;
-    d17 = vld1_u8(vs);
-    vs += pitch;
-    d19 = vld1_u8(vs);
-    vs += pitch;
-    d21 = vld1_u8(vs);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    q4ResultL.val[0] = vget_low_u8(q5);   // d10
-    q4ResultL.val[1] = vget_low_u8(q6);   // d12
-    q4ResultL.val[2] = vget_low_u8(q7);   // d14
-    q4ResultL.val[3] = vget_low_u8(q8);   // d16
-    ud = u - 2;
-    vst4_lane_u8(ud, q4ResultL, 0);
-    ud += pitch;
-    vst4_lane_u8(ud, q4ResultL, 1);
-    ud += pitch;
-    vst4_lane_u8(ud, q4ResultL, 2);
-    ud += pitch;
-    vst4_lane_u8(ud, q4ResultL, 3);
-    ud += pitch;
-    vst4_lane_u8(ud, q4ResultL, 4);
-    ud += pitch;
-    vst4_lane_u8(ud, q4ResultL, 5);
-    ud += pitch;
-    vst4_lane_u8(ud, q4ResultL, 6);
-    ud += pitch;
-    vst4_lane_u8(ud, q4ResultL, 7);
-
-    q4ResultH.val[0] = vget_high_u8(q5);  // d11
-    q4ResultH.val[1] = vget_high_u8(q6);  // d13
-    q4ResultH.val[2] = vget_high_u8(q7);  // d15
-    q4ResultH.val[3] = vget_high_u8(q8);  // d17
-    vd = v - 2;
-    vst4_lane_u8(vd, q4ResultH, 0);
-    vd += pitch;
-    vst4_lane_u8(vd, q4ResultH, 1);
-    vd += pitch;
-    vst4_lane_u8(vd, q4ResultH, 2);
-    vd += pitch;
-    vst4_lane_u8(vd, q4ResultH, 3);
-    vd += pitch;
-    vst4_lane_u8(vd, q4ResultH, 4);
-    vd += pitch;
-    vst4_lane_u8(vd, q4ResultH, 5);
-    vd += pitch;
-    vst4_lane_u8(vd, q4ResultH, 6);
-    vd += pitch;
-    vst4_lane_u8(vd, q4ResultH, 7);
-    return;
-}
diff --git a/vp8/vp8_common.mk b/vp8/vp8_common.mk
index 52e35f2..177e67e 100644 (file)
@@ -159,6 +159,7 @@ VP8_COMMON_SRCS-$(HAVE_MEDIA)  += common/arm/armv6/vp8_variance_halfpixvar16x16_
 VP8_COMMON_SRCS-$(HAVE_MEDIA)  += common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6$(ASM)
 
 # common (neon)
+VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/loopfilter_neon$(ASM)
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/buildintrapredictorsmby_neon$(ASM)
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/idct_blk_neon.c
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/vp8_subpixelvariance8x8_neon$(ASM)
@@ -173,7 +174,6 @@ VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/dequant_idct_neon.c
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/dequantizeb_neon.c
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/idct_dequant_full_2x_neon.c
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/iwalsh_neon.c
-VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/loopfilter_neon.c
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/loopfiltersimplehorizontaledge_neon.c
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/loopfiltersimpleverticaledge_neon.c
 VP8_COMMON_SRCS-$(HAVE_NEON)  += common/arm/neon/mbloopfilter_neon.c