// -----------------------------------------------------------------------------
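+// The d117 predictor builds the block from two base rows: row 0 holds the
+// two-tap average of adjacent above samples, row 1 the three-tap average of
+// the same samples, and every later row is the row two above it shifted right
+// by one, with a new three-tap average of left-column samples entering at
+// column 0. As an illustrative sketch (assuming the AVG2/AVG3 rounding used
+// by the scalar predictors in vpx_dsp/intrapred.c), the output is:
+//
+//   #define AVG2(a, b) (((a) + (b) + 1) >> 1)
+//   #define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
+//
+//   for (c = 0; c < bs; c++) dst[c] = AVG2(above[c - 1], above[c]);
+//   dst[stride] = AVG3(left[0], above[-1], above[0]);
+//   for (c = 1; c < bs; c++)
+//     dst[stride + c] = AVG3(above[c - 2], above[c - 1], above[c]);
+//   dst[2 * stride] = AVG3(above[-1], left[0], left[1]);
+//   for (r = 3; r < bs; r++)
+//     dst[r * stride] = AVG3(left[r - 3], left[r - 2], left[r - 1]);
+//   for (r = 2; r < bs; r++)
+//     for (c = 1; c < bs; c++)
+//       dst[r * stride + c] = dst[(r - 2) * stride + c - 1];
+//
+// vrhadd (rounding halving add) implements AVG2 directly, and
+// vrhadd(vhadd(a, c), b) rounds identically to AVG3(a, b, c), so each row
+// below is formed from one or two halving adds.
+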
+void vpx_highbd_d117_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ uint16x4_t az, a0, l0az, l0, l1, azl0, col0, col0_even, col0_odd, d0, d1;
+ (void)bd;
+
+ az = vld1_u16(above - 1);
+ a0 = vld1_u16(above + 0);
+ // [ left[0], above[-1], above[0], above[1] ]
+ l0az = vext_u16(vld1_dup_u16(left), az, 3);
+
+ l0 = vld1_u16(left + 0);
+ l1 = vld1_u16(left + 1);
+ // [ above[-1], left[0], left[1], left[2] ]
+ azl0 = vext_u16(vld1_dup_u16(above - 1), l0, 3);
+
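+ // d0[i] = AVG2(above[i - 1], above[i])
+ // d1[0] = AVG3(left[0], above[-1], above[0])
+ // d1[i] = AVG3(above[i - 2], above[i - 1], above[i]) for i > 0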
+ d0 = vrhadd_u16(az, a0);
+ d1 = vrhadd_u16(vhadd_u16(l0az, a0), az);
+
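+ // col0[0] = AVG3(above[-1], left[0], left[1]) is column 0 of row 2 and
+ // col0[1] = AVG3(left[0], left[1], left[2]) is column 0 of row 3; broadcast
+ // each so that vext can shift it in below.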
+ col0 = vrhadd_u16(vhadd_u16(azl0, l1), l0);
+ col0_even = vdup_lane_u16(col0, 0);
+ col0_odd = vdup_lane_u16(col0, 1);
+
+ vst1_u16(dst + 0 * stride, d0);
+ vst1_u16(dst + 1 * stride, d1);
+ vst1_u16(dst + 2 * stride, vext_u16(col0_even, d0, 3));
+ vst1_u16(dst + 3 * stride, vext_u16(col0_odd, d1, 3));
+}
+
+void vpx_highbd_d117_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ uint16x8_t az, a0, l0az, l0, l1, azl0, col0, col0_even, col0_odd, d0, d1;
+ (void)bd;
+
+ az = vld1q_u16(above - 1);
+ a0 = vld1q_u16(above + 0);
+ // [ left[0], above[-1], ..., above[5] ]
+ l0az = vextq_u16(vld1q_dup_u16(left), az, 7);
+
+ l0 = vld1q_u16(left + 0);
+ l1 = vld1q_u16(left + 1);
+ // [ above[-1], left[0], ..., left[6] ]
+ azl0 = vextq_u16(vld1q_dup_u16(above - 1), l0, 7);
+
+ // d0[0] = AVG2(above[-1], above[0])
+ // ...
+ // d0[7] = AVG2(above[6], above[7])
+ d0 = vrhaddq_u16(az, a0);
+
+ // d1[0] = AVG3(left[0], above[-1], above[0])
+ // d1[1] = AVG3(above[-1], above[0], above[1])
+ // ...
+ // d1[7] = AVG3(above[5], above[6], above[7])
+ d1 = vrhaddq_u16(vhaddq_u16(l0az, a0), az);
+
+ // The ext instruction shifts elements in from the end of the vector rather
+ // than the start, so reverse the vector to put the elements to be shifted in
+ // at the end:
+ // col0[7] = AVG3(above[-1], left[0], left[1])
+ // col0[6] = AVG3(left[0], left[1], left[2])
+ // ...
+ // col0[0] = AVG3(left[6], left[7], left[8])
+ col0 = vrhaddq_u16(vhaddq_u16(azl0, l1), l0);
+ col0 = vrev64q_u16(vextq_u16(col0, col0, 4));
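+ // (The ext by 4 swaps the two 64-bit halves, so vrev64q_u16, which only
+ // reverses within each half, completes a full 8-lane reversal.)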
+
+ // We don't care about the first parameter to this uzp since we only ever use
+ // the high three elements; col0 is passed again simply because it is already
+ // available:
+ // col0_even = [ x, x, x, x, x, col0[3], col0[5], col0[7] ]
+ // col0_odd = [ x, x, x, x, x, col0[2], col0[4], col0[6] ]
+ col0_even = vuzpq_u16(col0, col0).val[1];
+ col0_odd = vuzpq_u16(col0, col0).val[0];
+
+ // Incrementally shift more elements from col0 into d0/1:
+ // stride=0 [ d0[0], d0[1], d0[2], d0[3], d0[4], d0[5], d0[6], d0[7] ]
+ // stride=1 [ d1[0], d1[1], d1[2], d1[3], d1[4], d1[5], d1[6], d1[7] ]
+ // stride=2 [ col0[7], d0[0], d0[1], d0[2], d0[3], d0[4], d0[5], d0[6] ]
+ // stride=3 [ col0[6], d1[0], d1[1], d1[2], d1[3], d1[4], d1[5], d1[6] ]
+ // stride=4 [ col0[5], col0[7], d0[0], d0[1], d0[2], d0[3], d0[4], d0[5] ]
+ // stride=5 [ col0[4], col0[6], d1[0], d1[1], d1[2], d1[3], d1[4], d1[5] ]
+ // stride=6 [ col0[3], col0[5], col0[7], d0[0], d0[1], d0[2], d0[3], d0[4] ]
+ // stride=7 [ col0[2], col0[4], col0[6], d1[0], d1[1], d1[2], d1[3], d1[4] ]
+ vst1q_u16(dst + 0 * stride, d0);
+ vst1q_u16(dst + 1 * stride, d1);
+ vst1q_u16(dst + 2 * stride, vextq_u16(col0_even, d0, 7));
+ vst1q_u16(dst + 3 * stride, vextq_u16(col0_odd, d1, 7));
+ vst1q_u16(dst + 4 * stride, vextq_u16(col0_even, d0, 6));
+ vst1q_u16(dst + 5 * stride, vextq_u16(col0_odd, d1, 6));
+ vst1q_u16(dst + 6 * stride, vextq_u16(col0_even, d0, 5));
+ vst1q_u16(dst + 7 * stride, vextq_u16(col0_odd, d1, 5));
+}
+
+void vpx_highbd_d117_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ uint16x8_t az, a0, a6, a7, a8, l0az, l0, l1, l7, l8, l9, azl0, col0_lo,
+ col0_hi, col0_even, col0_odd, d0_lo, d0_hi, d1_lo, d1_hi;
+ (void)bd;
+
+ az = vld1q_u16(above - 1);
+ a0 = vld1q_u16(above + 0);
+ a6 = vld1q_u16(above + 6);
+ a7 = vld1q_u16(above + 7);
+ a8 = vld1q_u16(above + 8);
+ // [ left[0], above[-1], ..., above[5] ]
+ l0az = vextq_u16(vld1q_dup_u16(left), az, 7);
+
+ l0 = vld1q_u16(left + 0);
+ l1 = vld1q_u16(left + 1);
+ l7 = vld1q_u16(left + 7);
+ l8 = vld1q_u16(left + 8);
+ l9 = vld1q_u16(left + 9);
+ // [ above[-1], left[0], ..., left[6] ]
+ azl0 = vextq_u16(vld1q_dup_u16(above - 1), l0, 7);
+
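+ // As in the 8x8 case, but split into low/high halves:
+ // d0[i] = AVG2(above[i - 1], above[i])
+ // d1[0] = AVG3(left[0], above[-1], above[0])
+ // d1[i] = AVG3(above[i - 2], above[i - 1], above[i]) for i > 0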
+ d0_lo = vrhaddq_u16(az, a0);
+ d0_hi = vrhaddq_u16(a7, a8);
+ d1_lo = vrhaddq_u16(vhaddq_u16(l0az, a0), az);
+ d1_hi = vrhaddq_u16(vhaddq_u16(a6, a8), a7);
+
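+ // Left-column AVG3 values, low then high half:
+ // col0_lo[0] = AVG3(above[-1], left[0], left[1])
+ // col0_lo[i] = AVG3(left[i - 1], left[i], left[i + 1]) for i > 0
+ // col0_hi[i] = AVG3(left[i + 7], left[i + 8], left[i + 9])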
+ col0_lo = vrhaddq_u16(vhaddq_u16(azl0, l1), l0);
+ col0_hi = vrhaddq_u16(vhaddq_u16(l7, l9), l8);
+
+ // Reverse within each vector, then swap the operand order in the uzp to
+ // complete the reversal across all 16 elements.
+ col0_lo = vrev64q_u16(vextq_u16(col0_lo, col0_lo, 4));
+ col0_hi = vrev64q_u16(vextq_u16(col0_hi, col0_hi, 4));
+ col0_even = vuzpq_u16(col0_hi, col0_lo).val[1];
+ col0_odd = vuzpq_u16(col0_hi, col0_lo).val[0];
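+ // After the reversal the value needed next at column 0 sits in the top lane
+ // of each vector:
+ // col0_even = [ ..., AVG3(left[1], left[2], left[3]),
+ //               AVG3(above[-1], left[0], left[1]) ]
+ // col0_odd = [ ..., AVG3(left[2], left[3], left[4]),
+ //              AVG3(left[0], left[1], left[2]) ]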
+
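+ // Each pair of rows from row 2 onwards is the pair two rows above shifted
+ // right by one, with the next col0_even/col0_odd lane entering at column 0.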
+ vst1q_u16(dst + 0 * stride + 0, d0_lo);
+ vst1q_u16(dst + 0 * stride + 8, d0_hi);
+ vst1q_u16(dst + 1 * stride + 0, d1_lo);
+ vst1q_u16(dst + 1 * stride + 8, d1_hi);
+
+ vst1q_u16(dst + 2 * stride + 0, vextq_u16(col0_even, d0_lo, 7));
+ vst1q_u16(dst + 2 * stride + 8, vextq_u16(d0_lo, d0_hi, 7));
+ vst1q_u16(dst + 3 * stride + 0, vextq_u16(col0_odd, d1_lo, 7));
+ vst1q_u16(dst + 3 * stride + 8, vextq_u16(d1_lo, d1_hi, 7));
+
+ vst1q_u16(dst + 4 * stride + 0, vextq_u16(col0_even, d0_lo, 6));
+ vst1q_u16(dst + 4 * stride + 8, vextq_u16(d0_lo, d0_hi, 6));
+ vst1q_u16(dst + 5 * stride + 0, vextq_u16(col0_odd, d1_lo, 6));
+ vst1q_u16(dst + 5 * stride + 8, vextq_u16(d1_lo, d1_hi, 6));
+
+ vst1q_u16(dst + 6 * stride + 0, vextq_u16(col0_even, d0_lo, 5));
+ vst1q_u16(dst + 6 * stride + 8, vextq_u16(d0_lo, d0_hi, 5));
+ vst1q_u16(dst + 7 * stride + 0, vextq_u16(col0_odd, d1_lo, 5));
+ vst1q_u16(dst + 7 * stride + 8, vextq_u16(d1_lo, d1_hi, 5));
+
+ vst1q_u16(dst + 8 * stride + 0, vextq_u16(col0_even, d0_lo, 4));
+ vst1q_u16(dst + 8 * stride + 8, vextq_u16(d0_lo, d0_hi, 4));
+ vst1q_u16(dst + 9 * stride + 0, vextq_u16(col0_odd, d1_lo, 4));
+ vst1q_u16(dst + 9 * stride + 8, vextq_u16(d1_lo, d1_hi, 4));
+
+ vst1q_u16(dst + 10 * stride + 0, vextq_u16(col0_even, d0_lo, 3));
+ vst1q_u16(dst + 10 * stride + 8, vextq_u16(d0_lo, d0_hi, 3));
+ vst1q_u16(dst + 11 * stride + 0, vextq_u16(col0_odd, d1_lo, 3));
+ vst1q_u16(dst + 11 * stride + 8, vextq_u16(d1_lo, d1_hi, 3));
+
+ vst1q_u16(dst + 12 * stride + 0, vextq_u16(col0_even, d0_lo, 2));
+ vst1q_u16(dst + 12 * stride + 8, vextq_u16(d0_lo, d0_hi, 2));
+ vst1q_u16(dst + 13 * stride + 0, vextq_u16(col0_odd, d1_lo, 2));
+ vst1q_u16(dst + 13 * stride + 8, vextq_u16(d1_lo, d1_hi, 2));
+
+ vst1q_u16(dst + 14 * stride + 0, vextq_u16(col0_even, d0_lo, 1));
+ vst1q_u16(dst + 14 * stride + 8, vextq_u16(d0_lo, d0_hi, 1));
+ vst1q_u16(dst + 15 * stride + 0, vextq_u16(col0_odd, d1_lo, 1));
+ vst1q_u16(dst + 15 * stride + 8, vextq_u16(d1_lo, d1_hi, 1));
+}
+
+void vpx_highbd_d117_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ uint16x8_t az, a0, a6, a7, a8, a14, a15, a16, a22, a23, a24, l0az, l0, l1, l7,
+ l8, l9, l15, l16, l17, l23, l24, l25, azl0, d0[4], d1[4], col0[4],
+ col0_even[2], col0_odd[2];
+ (void)bd;
+
+ az = vld1q_u16(above - 1);
+ a0 = vld1q_u16(above + 0);
+ a6 = vld1q_u16(above + 6);
+ a7 = vld1q_u16(above + 7);
+ a8 = vld1q_u16(above + 8);
+ a14 = vld1q_u16(above + 14);
+ a15 = vld1q_u16(above + 15);
+ a16 = vld1q_u16(above + 16);
+ a22 = vld1q_u16(above + 22);
+ a23 = vld1q_u16(above + 23);
+ a24 = vld1q_u16(above + 24);
+ // [ left[0], above[-1], ..., above[5] ]
+ l0az = vextq_u16(vld1q_dup_u16(left), az, 7);
+
+ l0 = vld1q_u16(left + 0);
+ l1 = vld1q_u16(left + 1);
+ l7 = vld1q_u16(left + 7);
+ l8 = vld1q_u16(left + 8);
+ l9 = vld1q_u16(left + 9);
+ l15 = vld1q_u16(left + 15);
+ l16 = vld1q_u16(left + 16);
+ l17 = vld1q_u16(left + 17);
+ l23 = vld1q_u16(left + 23);
+ l24 = vld1q_u16(left + 24);
+ l25 = vld1q_u16(left + 25);
+ // [ above[-1], left[0], ..., left[6] ]
+ azl0 = vextq_u16(vld1q_dup_u16(above - 1), l0, 7);
+
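+ // Top two rows in four-vector form:
+ // d0[j][i] = AVG2(above[8 * j + i - 1], above[8 * j + i])
+ // d1[j][i] = AVG3(above[8 * j + i - 2], above[8 * j + i - 1],
+ //                 above[8 * j + i])
+ // (d1[0][0] uses left[0] as its first tap.)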
+ d0[0] = vrhaddq_u16(az, a0);
+ d0[1] = vrhaddq_u16(a7, a8);
+ d0[2] = vrhaddq_u16(a15, a16);
+ d0[3] = vrhaddq_u16(a23, a24);
+ d1[0] = vrhaddq_u16(vhaddq_u16(l0az, a0), az);
+ d1[1] = vrhaddq_u16(vhaddq_u16(a6, a8), a7);
+ d1[2] = vrhaddq_u16(vhaddq_u16(a14, a16), a15);
+ d1[3] = vrhaddq_u16(vhaddq_u16(a22, a24), a23);
+
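+ // Left-column AVG3 values:
+ // col0[0][0] = AVG3(above[-1], left[0], left[1]); otherwise
+ // col0[j][i] = AVG3(left[8 * j + i - 1], left[8 * j + i],
+ //                   left[8 * j + i + 1])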
+ col0[0] = vrhaddq_u16(vhaddq_u16(azl0, l1), l0);
+ col0[1] = vrhaddq_u16(vhaddq_u16(l7, l9), l8);
+ col0[2] = vrhaddq_u16(vhaddq_u16(l15, l17), l16);
+ col0[3] = vrhaddq_u16(vhaddq_u16(l23, l25), l24);
+
+ // Reverse within each vector, then swap the array indices in both the uzp
+ // operands and the col0_{even,odd} assignments to complete the reversal
+ // across all 32 elements.
+ col0[0] = vrev64q_u16(vextq_u16(col0[0], col0[0], 4));
+ col0[1] = vrev64q_u16(vextq_u16(col0[1], col0[1], 4));
+ col0[2] = vrev64q_u16(vextq_u16(col0[2], col0[2], 4));
+ col0[3] = vrev64q_u16(vextq_u16(col0[3], col0[3], 4));
+
+ col0_even[1] = vuzpq_u16(col0[1], col0[0]).val[1];
+ col0_even[0] = vuzpq_u16(col0[3], col0[2]).val[1];
+ col0_odd[1] = vuzpq_u16(col0[1], col0[0]).val[0];
+ col0_odd[0] = vuzpq_u16(col0[3], col0[2]).val[0];
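+ // col0_even[1]/col0_odd[1] end with the values needed first (column 0 of
+ // rows 2 and 3); col0_even[0]/col0_odd[0] hold the values for rows 18
+ // onwards:
+ // col0_even[1] = [ ..., AVG3(left[1], left[2], left[3]),
+ //                  AVG3(above[-1], left[0], left[1]) ]
+ // col0_odd[1] = [ ..., AVG3(left[2], left[3], left[4]),
+ //                 AVG3(left[0], left[1], left[2]) ]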
+
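+ // As in the smaller block sizes, each pair of rows from row 2 onwards is the
+ // pair two rows above shifted right by one, with the next col0_even/col0_odd
+ // lane entering at column 0.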
+ vst1q_u16(dst + 0 * stride + 0, d0[0]);
+ vst1q_u16(dst + 0 * stride + 8, d0[1]);
+ vst1q_u16(dst + 0 * stride + 16, d0[2]);
+ vst1q_u16(dst + 0 * stride + 24, d0[3]);
+ vst1q_u16(dst + 1 * stride + 0, d1[0]);
+ vst1q_u16(dst + 1 * stride + 8, d1[1]);
+ vst1q_u16(dst + 1 * stride + 16, d1[2]);
+ vst1q_u16(dst + 1 * stride + 24, d1[3]);
+
+ vst1q_u16(dst + 2 * stride + 0, vextq_u16(col0_even[1], d0[0], 7));
+ vst1q_u16(dst + 2 * stride + 8, vextq_u16(d0[0], d0[1], 7));
+ vst1q_u16(dst + 2 * stride + 16, vextq_u16(d0[1], d0[2], 7));
+ vst1q_u16(dst + 2 * stride + 24, vextq_u16(d0[2], d0[3], 7));
+ vst1q_u16(dst + 3 * stride + 0, vextq_u16(col0_odd[1], d1[0], 7));
+ vst1q_u16(dst + 3 * stride + 8, vextq_u16(d1[0], d1[1], 7));
+ vst1q_u16(dst + 3 * stride + 16, vextq_u16(d1[1], d1[2], 7));
+ vst1q_u16(dst + 3 * stride + 24, vextq_u16(d1[2], d1[3], 7));
+
+ vst1q_u16(dst + 4 * stride + 0, vextq_u16(col0_even[1], d0[0], 6));
+ vst1q_u16(dst + 4 * stride + 8, vextq_u16(d0[0], d0[1], 6));
+ vst1q_u16(dst + 4 * stride + 16, vextq_u16(d0[1], d0[2], 6));
+ vst1q_u16(dst + 4 * stride + 24, vextq_u16(d0[2], d0[3], 6));
+ vst1q_u16(dst + 5 * stride + 0, vextq_u16(col0_odd[1], d1[0], 6));
+ vst1q_u16(dst + 5 * stride + 8, vextq_u16(d1[0], d1[1], 6));
+ vst1q_u16(dst + 5 * stride + 16, vextq_u16(d1[1], d1[2], 6));
+ vst1q_u16(dst + 5 * stride + 24, vextq_u16(d1[2], d1[3], 6));
+
+ vst1q_u16(dst + 6 * stride + 0, vextq_u16(col0_even[1], d0[0], 5));
+ vst1q_u16(dst + 6 * stride + 8, vextq_u16(d0[0], d0[1], 5));
+ vst1q_u16(dst + 6 * stride + 16, vextq_u16(d0[1], d0[2], 5));
+ vst1q_u16(dst + 6 * stride + 24, vextq_u16(d0[2], d0[3], 5));
+ vst1q_u16(dst + 7 * stride + 0, vextq_u16(col0_odd[1], d1[0], 5));
+ vst1q_u16(dst + 7 * stride + 8, vextq_u16(d1[0], d1[1], 5));
+ vst1q_u16(dst + 7 * stride + 16, vextq_u16(d1[1], d1[2], 5));
+ vst1q_u16(dst + 7 * stride + 24, vextq_u16(d1[2], d1[3], 5));
+
+ vst1q_u16(dst + 8 * stride + 0, vextq_u16(col0_even[1], d0[0], 4));
+ vst1q_u16(dst + 8 * stride + 8, vextq_u16(d0[0], d0[1], 4));
+ vst1q_u16(dst + 8 * stride + 16, vextq_u16(d0[1], d0[2], 4));
+ vst1q_u16(dst + 8 * stride + 24, vextq_u16(d0[2], d0[3], 4));
+ vst1q_u16(dst + 9 * stride + 0, vextq_u16(col0_odd[1], d1[0], 4));
+ vst1q_u16(dst + 9 * stride + 8, vextq_u16(d1[0], d1[1], 4));
+ vst1q_u16(dst + 9 * stride + 16, vextq_u16(d1[1], d1[2], 4));
+ vst1q_u16(dst + 9 * stride + 24, vextq_u16(d1[2], d1[3], 4));
+
+ vst1q_u16(dst + 10 * stride + 0, vextq_u16(col0_even[1], d0[0], 3));
+ vst1q_u16(dst + 10 * stride + 8, vextq_u16(d0[0], d0[1], 3));
+ vst1q_u16(dst + 10 * stride + 16, vextq_u16(d0[1], d0[2], 3));
+ vst1q_u16(dst + 10 * stride + 24, vextq_u16(d0[2], d0[3], 3));
+ vst1q_u16(dst + 11 * stride + 0, vextq_u16(col0_odd[1], d1[0], 3));
+ vst1q_u16(dst + 11 * stride + 8, vextq_u16(d1[0], d1[1], 3));
+ vst1q_u16(dst + 11 * stride + 16, vextq_u16(d1[1], d1[2], 3));
+ vst1q_u16(dst + 11 * stride + 24, vextq_u16(d1[2], d1[3], 3));
+
+ vst1q_u16(dst + 12 * stride + 0, vextq_u16(col0_even[1], d0[0], 2));
+ vst1q_u16(dst + 12 * stride + 8, vextq_u16(d0[0], d0[1], 2));
+ vst1q_u16(dst + 12 * stride + 16, vextq_u16(d0[1], d0[2], 2));
+ vst1q_u16(dst + 12 * stride + 24, vextq_u16(d0[2], d0[3], 2));
+ vst1q_u16(dst + 13 * stride + 0, vextq_u16(col0_odd[1], d1[0], 2));
+ vst1q_u16(dst + 13 * stride + 8, vextq_u16(d1[0], d1[1], 2));
+ vst1q_u16(dst + 13 * stride + 16, vextq_u16(d1[1], d1[2], 2));
+ vst1q_u16(dst + 13 * stride + 24, vextq_u16(d1[2], d1[3], 2));
+
+ vst1q_u16(dst + 14 * stride + 0, vextq_u16(col0_even[1], d0[0], 1));
+ vst1q_u16(dst + 14 * stride + 8, vextq_u16(d0[0], d0[1], 1));
+ vst1q_u16(dst + 14 * stride + 16, vextq_u16(d0[1], d0[2], 1));
+ vst1q_u16(dst + 14 * stride + 24, vextq_u16(d0[2], d0[3], 1));
+ vst1q_u16(dst + 15 * stride + 0, vextq_u16(col0_odd[1], d1[0], 1));
+ vst1q_u16(dst + 15 * stride + 8, vextq_u16(d1[0], d1[1], 1));
+ vst1q_u16(dst + 15 * stride + 16, vextq_u16(d1[1], d1[2], 1));
+ vst1q_u16(dst + 15 * stride + 24, vextq_u16(d1[2], d1[3], 1));
+
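+ // Row 16 is row 14 shifted right by one, which realigns everything on vector
+ // boundaries: col0_even[1]/col0_odd[1] and d0/d1 can be stored directly
+ // without vext.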
+ vst1q_u16(dst + 16 * stride + 0, col0_even[1]);
+ vst1q_u16(dst + 16 * stride + 8, d0[0]);
+ vst1q_u16(dst + 16 * stride + 16, d0[1]);
+ vst1q_u16(dst + 16 * stride + 24, d0[2]);
+ vst1q_u16(dst + 17 * stride + 0, col0_odd[1]);
+ vst1q_u16(dst + 17 * stride + 8, d1[0]);
+ vst1q_u16(dst + 17 * stride + 16, d1[1]);
+ vst1q_u16(dst + 17 * stride + 24, d1[2]);
+
+ vst1q_u16(dst + 18 * stride + 0, vextq_u16(col0_even[0], col0_even[1], 7));
+ vst1q_u16(dst + 18 * stride + 8, vextq_u16(col0_even[1], d0[0], 7));
+ vst1q_u16(dst + 18 * stride + 16, vextq_u16(d0[0], d0[1], 7));
+ vst1q_u16(dst + 18 * stride + 24, vextq_u16(d0[1], d0[2], 7));
+ vst1q_u16(dst + 19 * stride + 0, vextq_u16(col0_odd[0], col0_odd[1], 7));
+ vst1q_u16(dst + 19 * stride + 8, vextq_u16(col0_odd[1], d1[0], 7));
+ vst1q_u16(dst + 19 * stride + 16, vextq_u16(d1[0], d1[1], 7));
+ vst1q_u16(dst + 19 * stride + 24, vextq_u16(d1[1], d1[2], 7));
+
+ vst1q_u16(dst + 20 * stride + 0, vextq_u16(col0_even[0], col0_even[1], 6));
+ vst1q_u16(dst + 20 * stride + 8, vextq_u16(col0_even[1], d0[0], 6));
+ vst1q_u16(dst + 20 * stride + 16, vextq_u16(d0[0], d0[1], 6));
+ vst1q_u16(dst + 20 * stride + 24, vextq_u16(d0[1], d0[2], 6));
+ vst1q_u16(dst + 21 * stride + 0, vextq_u16(col0_odd[0], col0_odd[1], 6));
+ vst1q_u16(dst + 21 * stride + 8, vextq_u16(col0_odd[1], d1[0], 6));
+ vst1q_u16(dst + 21 * stride + 16, vextq_u16(d1[0], d1[1], 6));
+ vst1q_u16(dst + 21 * stride + 24, vextq_u16(d1[1], d1[2], 6));
+
+ vst1q_u16(dst + 22 * stride + 0, vextq_u16(col0_even[0], col0_even[1], 5));
+ vst1q_u16(dst + 22 * stride + 8, vextq_u16(col0_even[1], d0[0], 5));
+ vst1q_u16(dst + 22 * stride + 16, vextq_u16(d0[0], d0[1], 5));
+ vst1q_u16(dst + 22 * stride + 24, vextq_u16(d0[1], d0[2], 5));
+ vst1q_u16(dst + 23 * stride + 0, vextq_u16(col0_odd[0], col0_odd[1], 5));
+ vst1q_u16(dst + 23 * stride + 8, vextq_u16(col0_odd[1], d1[0], 5));
+ vst1q_u16(dst + 23 * stride + 16, vextq_u16(d1[0], d1[1], 5));
+ vst1q_u16(dst + 23 * stride + 24, vextq_u16(d1[1], d1[2], 5));
+
+ vst1q_u16(dst + 24 * stride + 0, vextq_u16(col0_even[0], col0_even[1], 4));
+ vst1q_u16(dst + 24 * stride + 8, vextq_u16(col0_even[1], d0[0], 4));
+ vst1q_u16(dst + 24 * stride + 16, vextq_u16(d0[0], d0[1], 4));
+ vst1q_u16(dst + 24 * stride + 24, vextq_u16(d0[1], d0[2], 4));
+ vst1q_u16(dst + 25 * stride + 0, vextq_u16(col0_odd[0], col0_odd[1], 4));
+ vst1q_u16(dst + 25 * stride + 8, vextq_u16(col0_odd[1], d1[0], 4));
+ vst1q_u16(dst + 25 * stride + 16, vextq_u16(d1[0], d1[1], 4));
+ vst1q_u16(dst + 25 * stride + 24, vextq_u16(d1[1], d1[2], 4));
+
+ vst1q_u16(dst + 26 * stride + 0, vextq_u16(col0_even[0], col0_even[1], 3));
+ vst1q_u16(dst + 26 * stride + 8, vextq_u16(col0_even[1], d0[0], 3));
+ vst1q_u16(dst + 26 * stride + 16, vextq_u16(d0[0], d0[1], 3));
+ vst1q_u16(dst + 26 * stride + 24, vextq_u16(d0[1], d0[2], 3));
+ vst1q_u16(dst + 27 * stride + 0, vextq_u16(col0_odd[0], col0_odd[1], 3));
+ vst1q_u16(dst + 27 * stride + 8, vextq_u16(col0_odd[1], d1[0], 3));
+ vst1q_u16(dst + 27 * stride + 16, vextq_u16(d1[0], d1[1], 3));
+ vst1q_u16(dst + 27 * stride + 24, vextq_u16(d1[1], d1[2], 3));
+
+ vst1q_u16(dst + 28 * stride + 0, vextq_u16(col0_even[0], col0_even[1], 2));
+ vst1q_u16(dst + 28 * stride + 8, vextq_u16(col0_even[1], d0[0], 2));
+ vst1q_u16(dst + 28 * stride + 16, vextq_u16(d0[0], d0[1], 2));
+ vst1q_u16(dst + 28 * stride + 24, vextq_u16(d0[1], d0[2], 2));
+ vst1q_u16(dst + 29 * stride + 0, vextq_u16(col0_odd[0], col0_odd[1], 2));
+ vst1q_u16(dst + 29 * stride + 8, vextq_u16(col0_odd[1], d1[0], 2));
+ vst1q_u16(dst + 29 * stride + 16, vextq_u16(d1[0], d1[1], 2));
+ vst1q_u16(dst + 29 * stride + 24, vextq_u16(d1[1], d1[2], 2));
+
+ vst1q_u16(dst + 30 * stride + 0, vextq_u16(col0_even[0], col0_even[1], 1));
+ vst1q_u16(dst + 30 * stride + 8, vextq_u16(col0_even[1], d0[0], 1));
+ vst1q_u16(dst + 30 * stride + 16, vextq_u16(d0[0], d0[1], 1));
+ vst1q_u16(dst + 30 * stride + 24, vextq_u16(d0[1], d0[2], 1));
+ vst1q_u16(dst + 31 * stride + 0, vextq_u16(col0_odd[0], col0_odd[1], 1));
+ vst1q_u16(dst + 31 * stride + 8, vextq_u16(col0_odd[1], d1[0], 1));
+ vst1q_u16(dst + 31 * stride + 16, vextq_u16(d1[0], d1[1], 1));
+ vst1q_u16(dst + 31 * stride + 24, vextq_u16(d1[1], d1[2], 1));
+}
+
+// -----------------------------------------------------------------------------
+
void vpx_highbd_d135_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {