/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
#include "vpx_ports/mem.h"

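// Find the min and max of the 8-bit absolute differences between an 8x8
// source block (s, stride p) and an 8x8 prediction block (d, stride dp).
// Each row is widened to 16-bit lanes so the signed max/min instructions
// can track running extrema; a horizontal reduction collapses the two
// vectors to scalars at the end.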
void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
                         int *min, int *max) {
  __m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, negdiff, absdiff0, absdiff;
  u0 = _mm_setzero_si128();
  // Row 0
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff0 = _mm_max_epi16(diff, negdiff);
  // Row 1
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(absdiff0, absdiff);
  minabsdiff = _mm_min_epi16(absdiff0, absdiff);
  // Row 2
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 2 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 3
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 3 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 4
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 4 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 5
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 5 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 5 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 6
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 6 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 6 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 7
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 7 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 7 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);

  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_si128(maxabsdiff, 8));
  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_epi64(maxabsdiff, 32));
  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_epi64(maxabsdiff, 16));
  *max = _mm_extract_epi16(maxabsdiff, 0);

  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_si128(minabsdiff, 8));
  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_epi64(minabsdiff, 32));
  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_epi64(minabsdiff, 16));
  *min = _mm_extract_epi16(minabsdiff, 0);
}

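// Average of an 8x8 block: widen each row of 8 bytes to 16-bit lanes and
// accumulate with saturating adds (the total of 64 pixels is at most
// 64 * 255 = 16320, so it fits comfortably in 16 bits), reduce the eight
// lanes horizontally, then round: (sum + 32) >> 6.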
unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) {
  __m128i s0, s1, u0;
  unsigned int avg = 0;
  u0 = _mm_setzero_si128();
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 5 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 6 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 7 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);

  s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 8));
  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 32));
  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
  avg = _mm_extract_epi16(s0, 0);
  return (avg + 32) >> 6;
}

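// Average of a 4x4 block: only the low four 16-bit lanes hold pixel sums,
// so the horizontal reduction shifts by 4 bytes and then 16 bits, and the
// rounding is (sum + 8) >> 4.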
unsigned int vpx_avg_4x4_sse2(const uint8_t *s, int p) {
  __m128i s0, s1, u0;
  unsigned int avg = 0;
  u0 = _mm_setzero_si128();
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);

  s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 4));
  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
  avg = _mm_extract_epi16(s0, 0);
  return (avg + 8) >> 4;
}

#if CONFIG_VP9_HIGHBITDEPTH
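// High bit-depth variants operate on 16-bit pixels (via
// CONVERT_TO_SHORTPTR), so each row loads directly into 16-bit lanes. The
// 8x8 sum is widened to 32 bits before the horizontal reduction, since 64
// samples of up to 12 bits can exceed the 16-bit range.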
unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t *s8, int p) {
  __m128i s0, s1;
  unsigned int avg;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  const __m128i zero = _mm_setzero_si128();
  s0 = _mm_loadu_si128((const __m128i *)(s));
  s1 = _mm_loadu_si128((const __m128i *)(s + p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 2 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 3 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 4 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 5 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 6 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 7 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpackhi_epi16(s0, zero);
  s0 = _mm_unpacklo_epi16(s0, zero);
  s0 = _mm_add_epi32(s0, s1);
  s0 = _mm_add_epi32(s0, _mm_srli_si128(s0, 8));
  s0 = _mm_add_epi32(s0, _mm_srli_si128(s0, 4));
  avg = _mm_cvtsi128_si32(s0);

  return (avg + 32) >> 6;
}

unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t *s8, int p) {
  __m128i s0, s1;
  unsigned int avg;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  s0 = _mm_loadl_epi64((const __m128i *)(s));
  s1 = _mm_loadl_epi64((const __m128i *)(s + p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadl_epi64((const __m128i *)(s + 2 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadl_epi64((const __m128i *)(s + 3 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s0 = _mm_add_epi16(s0, _mm_srli_si128(s0, 4));
  s0 = _mm_add_epi16(s0, _mm_srli_si128(s0, 2));
  avg = _mm_extract_epi16(s0, 0);

  return (avg + 8) >> 4;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

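// One pass of the 8-point Hadamard butterfly applied to eight columns at
// once. The first pass (iter == 0) finishes with an 8x8 transpose so the
// second call works on what were originally the rows; the second pass
// (iter == 1) stores its butterfly outputs directly, permuted into the
// order the transform expects.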
static void hadamard_col8_sse2(__m128i *in, int iter) {
  __m128i a0 = in[0];
  __m128i a1 = in[1];
  __m128i a2 = in[2];
  __m128i a3 = in[3];
  __m128i a4 = in[4];
  __m128i a5 = in[5];
  __m128i a6 = in[6];
  __m128i a7 = in[7];

  __m128i b0 = _mm_add_epi16(a0, a1);
  __m128i b1 = _mm_sub_epi16(a0, a1);
  __m128i b2 = _mm_add_epi16(a2, a3);
  __m128i b3 = _mm_sub_epi16(a2, a3);
  __m128i b4 = _mm_add_epi16(a4, a5);
  __m128i b5 = _mm_sub_epi16(a4, a5);
  __m128i b6 = _mm_add_epi16(a6, a7);
  __m128i b7 = _mm_sub_epi16(a6, a7);

  a0 = _mm_add_epi16(b0, b2);
  a1 = _mm_add_epi16(b1, b3);
  a2 = _mm_sub_epi16(b0, b2);
  a3 = _mm_sub_epi16(b1, b3);
  a4 = _mm_add_epi16(b4, b6);
  a5 = _mm_add_epi16(b5, b7);
  a6 = _mm_sub_epi16(b4, b6);
  a7 = _mm_sub_epi16(b5, b7);

  if (iter == 0) {
    b0 = _mm_add_epi16(a0, a4);
    b7 = _mm_add_epi16(a1, a5);
    b3 = _mm_add_epi16(a2, a6);
    b4 = _mm_add_epi16(a3, a7);
    b2 = _mm_sub_epi16(a0, a4);
    b6 = _mm_sub_epi16(a1, a5);
    b1 = _mm_sub_epi16(a2, a6);
    b5 = _mm_sub_epi16(a3, a7);

    a0 = _mm_unpacklo_epi16(b0, b1);
    a1 = _mm_unpacklo_epi16(b2, b3);
    a2 = _mm_unpackhi_epi16(b0, b1);
    a3 = _mm_unpackhi_epi16(b2, b3);
    a4 = _mm_unpacklo_epi16(b4, b5);
    a5 = _mm_unpacklo_epi16(b6, b7);
    a6 = _mm_unpackhi_epi16(b4, b5);
    a7 = _mm_unpackhi_epi16(b6, b7);

    b0 = _mm_unpacklo_epi32(a0, a1);
    b1 = _mm_unpacklo_epi32(a4, a5);
    b2 = _mm_unpackhi_epi32(a0, a1);
    b3 = _mm_unpackhi_epi32(a4, a5);
    b4 = _mm_unpacklo_epi32(a2, a3);
    b5 = _mm_unpacklo_epi32(a6, a7);
    b6 = _mm_unpackhi_epi32(a2, a3);
    b7 = _mm_unpackhi_epi32(a6, a7);

    in[0] = _mm_unpacklo_epi64(b0, b1);
    in[1] = _mm_unpackhi_epi64(b0, b1);
    in[2] = _mm_unpacklo_epi64(b2, b3);
    in[3] = _mm_unpackhi_epi64(b2, b3);
    in[4] = _mm_unpacklo_epi64(b4, b5);
    in[5] = _mm_unpackhi_epi64(b4, b5);
    in[6] = _mm_unpacklo_epi64(b6, b7);
    in[7] = _mm_unpackhi_epi64(b6, b7);
  } else {
    in[0] = _mm_add_epi16(a0, a4);
    in[7] = _mm_add_epi16(a1, a5);
    in[3] = _mm_add_epi16(a2, a6);
    in[4] = _mm_add_epi16(a3, a7);
    in[2] = _mm_sub_epi16(a0, a4);
    in[6] = _mm_sub_epi16(a1, a5);
    in[1] = _mm_sub_epi16(a2, a6);
    in[5] = _mm_sub_epi16(a3, a7);
  }
}

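// 8x8 Hadamard transform of a block of residuals: load eight rows of
// eight 16-bit differences, run the butterfly over the columns, then
// (after the internal transpose) over the rows, and store the result
// either through store_tran_low() or as raw int16_t depending on
// is_final.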
static INLINE void hadamard_8x8_sse2(const int16_t *src_diff,
                                     ptrdiff_t src_stride, tran_low_t *_coeff,
                                     int is_final) {
  __m128i src[8];
  src[0] = _mm_load_si128((const __m128i *)src_diff);
  src[1] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[2] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[3] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[4] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[5] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[6] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[7] = _mm_load_si128((const __m128i *)(src_diff += src_stride));

  hadamard_col8_sse2(src, 0);
  hadamard_col8_sse2(src, 1);

  if (is_final) {
    store_tran_low(src[0], _coeff);
    _coeff += 8;
    store_tran_low(src[1], _coeff);
    _coeff += 8;
    store_tran_low(src[2], _coeff);
    _coeff += 8;
    store_tran_low(src[3], _coeff);
    _coeff += 8;
    store_tran_low(src[4], _coeff);
    _coeff += 8;
    store_tran_low(src[5], _coeff);
    _coeff += 8;
    store_tran_low(src[6], _coeff);
    _coeff += 8;
    store_tran_low(src[7], _coeff);
  } else {
    int16_t *coeff = (int16_t *)_coeff;
    _mm_store_si128((__m128i *)coeff, src[0]);
    coeff += 8;
    _mm_store_si128((__m128i *)coeff, src[1]);
    coeff += 8;
    _mm_store_si128((__m128i *)coeff, src[2]);
    coeff += 8;
    _mm_store_si128((__m128i *)coeff, src[3]);
    coeff += 8;
    _mm_store_si128((__m128i *)coeff, src[4]);
    coeff += 8;
    _mm_store_si128((__m128i *)coeff, src[5]);
    coeff += 8;
    _mm_store_si128((__m128i *)coeff, src[6]);
    coeff += 8;
    _mm_store_si128((__m128i *)coeff, src[7]);
  }
}

void vpx_hadamard_8x8_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                           tran_low_t *coeff) {
  hadamard_8x8_sse2(src_diff, src_stride, coeff, 1);
}

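// 16x16 Hadamard: transform each of the four 8x8 quarters, then combine
// them with one more butterfly stage, shifting right by 1 to keep the
// coefficients within 16-bit dynamic range.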
void vpx_hadamard_16x16_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
#if CONFIG_VP9_HIGHBITDEPTH
  // For high bitdepths, it is unnecessary to store_tran_low
  // (mult/unpack/store), then load_tran_low (load/pack) the same memory in
  // the next stage. Output to an intermediate buffer first, then
  // store_tran_low() in the final stage.
  DECLARE_ALIGNED(32, int16_t, temp_coeff[16 * 16]);
  int16_t *t_coeff = temp_coeff;
#else
  int16_t *t_coeff = coeff;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
    hadamard_8x8_sse2(src_ptr, src_stride, (tran_low_t *)(t_coeff + idx * 64),
                      0);
  }

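  // Combine the four 8x8 transforms, eight coefficients at a time.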
  for (idx = 0; idx < 64; idx += 8) {
    __m128i coeff0 = _mm_load_si128((const __m128i *)t_coeff);
    __m128i coeff1 = _mm_load_si128((const __m128i *)(t_coeff + 64));
    __m128i coeff2 = _mm_load_si128((const __m128i *)(t_coeff + 128));
    __m128i coeff3 = _mm_load_si128((const __m128i *)(t_coeff + 192));

    __m128i b0 = _mm_add_epi16(coeff0, coeff1);
    __m128i b1 = _mm_sub_epi16(coeff0, coeff1);
    __m128i b2 = _mm_add_epi16(coeff2, coeff3);
    __m128i b3 = _mm_sub_epi16(coeff2, coeff3);

    b0 = _mm_srai_epi16(b0, 1);
    b1 = _mm_srai_epi16(b1, 1);
    b2 = _mm_srai_epi16(b2, 1);
    b3 = _mm_srai_epi16(b3, 1);

    coeff0 = _mm_add_epi16(b0, b2);
    coeff1 = _mm_add_epi16(b1, b3);
    store_tran_low(coeff0, coeff);
    store_tran_low(coeff1, coeff + 64);

    coeff2 = _mm_sub_epi16(b0, b2);
    coeff3 = _mm_sub_epi16(b1, b3);
    store_tran_low(coeff2, coeff + 128);
    store_tran_low(coeff3, coeff + 192);

    coeff += 8;
    t_coeff += 8;
  }
}

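// 32x32 Hadamard: one more level of the same recursion, combining four
// 16x16 transforms; here the combine stage shifts right by 2 to keep the
// larger sums within 16-bit range.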
void vpx_hadamard_32x32_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 16 * src_stride + (idx & 0x01) * 16;
    vpx_hadamard_16x16_sse2(src_ptr, src_stride, coeff + idx * 256);
  }

  for (idx = 0; idx < 256; idx += 8) {
    __m128i coeff0 = load_tran_low(coeff);
    __m128i coeff1 = load_tran_low(coeff + 256);
    __m128i coeff2 = load_tran_low(coeff + 512);
    __m128i coeff3 = load_tran_low(coeff + 768);

    __m128i b0 = _mm_add_epi16(coeff0, coeff1);
    __m128i b1 = _mm_sub_epi16(coeff0, coeff1);
    __m128i b2 = _mm_add_epi16(coeff2, coeff3);
    __m128i b3 = _mm_sub_epi16(coeff2, coeff3);

    b0 = _mm_srai_epi16(b0, 2);
    b1 = _mm_srai_epi16(b1, 2);
    b2 = _mm_srai_epi16(b2, 2);
    b3 = _mm_srai_epi16(b3, 2);

    coeff0 = _mm_add_epi16(b0, b2);
    coeff1 = _mm_add_epi16(b1, b3);
    store_tran_low(coeff0, coeff);
    store_tran_low(coeff1, coeff + 256);

    coeff2 = _mm_sub_epi16(b0, b2);
    coeff3 = _mm_sub_epi16(b1, b3);
    store_tran_low(coeff2, coeff + 512);
    store_tran_low(coeff3, coeff + 768);

    coeff += 8;
  }
}

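// Sum of absolute transform coefficients. abs() of each signed 16-bit
// lane is computed as max(x, -x), widened to 32 bits, and accumulated;
// length is assumed to be a multiple of 8.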
int vpx_satd_sse2(const tran_low_t *coeff, int length) {
  int i;
  const __m128i zero = _mm_setzero_si128();
  __m128i accum = zero;

  for (i = 0; i < length; i += 8) {
    const __m128i src_line = load_tran_low(coeff);
    const __m128i inv = _mm_sub_epi16(zero, src_line);
    const __m128i abs = _mm_max_epi16(src_line, inv);  // abs(src_line)
    const __m128i abs_lo = _mm_unpacklo_epi16(abs, zero);
    const __m128i abs_hi = _mm_unpackhi_epi16(abs, zero);
    const __m128i sum = _mm_add_epi32(abs_lo, abs_hi);
    accum = _mm_add_epi32(accum, sum);
    coeff += 8;
  }

  {  // cascading summation of accum
    __m128i hi = _mm_srli_si128(accum, 8);
    accum = _mm_add_epi32(accum, hi);
    hi = _mm_srli_epi64(accum, 32);
    accum = _mm_add_epi32(accum, hi);
  }

  return _mm_cvtsi128_si32(accum);
}

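// Vertical projection of a 16-pixel-wide column: sum each of the 16 byte
// columns over `height` rows into 16-bit lanes, then normalize by a
// height-dependent shift so the averages fit the int16_t hbuf. The loop
// is unrolled two rows at a time, with the first and last rows handled
// outside it.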
void vpx_int_pro_row_sse2(int16_t *hbuf, const uint8_t *ref,
                          const int ref_stride, const int height) {
  int idx;
  __m128i zero = _mm_setzero_si128();
  __m128i src_line = _mm_loadu_si128((const __m128i *)ref);
  __m128i s0 = _mm_unpacklo_epi8(src_line, zero);
  __m128i s1 = _mm_unpackhi_epi8(src_line, zero);
  __m128i t0, t1;
  int height_1 = height - 1;
  ref += ref_stride;

  for (idx = 1; idx < height_1; idx += 2) {
    src_line = _mm_loadu_si128((const __m128i *)ref);
    t0 = _mm_unpacklo_epi8(src_line, zero);
    t1 = _mm_unpackhi_epi8(src_line, zero);
    s0 = _mm_adds_epu16(s0, t0);
    s1 = _mm_adds_epu16(s1, t1);
    ref += ref_stride;

    src_line = _mm_loadu_si128((const __m128i *)ref);
    t0 = _mm_unpacklo_epi8(src_line, zero);
    t1 = _mm_unpackhi_epi8(src_line, zero);
    s0 = _mm_adds_epu16(s0, t0);
    s1 = _mm_adds_epu16(s1, t1);
    ref += ref_stride;
  }

  src_line = _mm_loadu_si128((const __m128i *)ref);
  t0 = _mm_unpacklo_epi8(src_line, zero);
  t1 = _mm_unpackhi_epi8(src_line, zero);
  s0 = _mm_adds_epu16(s0, t0);
  s1 = _mm_adds_epu16(s1, t1);

  if (height == 64) {
    s0 = _mm_srai_epi16(s0, 5);
    s1 = _mm_srai_epi16(s1, 5);
  } else if (height == 32) {
    s0 = _mm_srai_epi16(s0, 4);
    s1 = _mm_srai_epi16(s1, 4);
  } else {
    s0 = _mm_srai_epi16(s0, 3);
    s1 = _mm_srai_epi16(s1, 3);
  }

  _mm_storeu_si128((__m128i *)hbuf, s0);
  hbuf += 8;
  _mm_storeu_si128((__m128i *)hbuf, s1);
}

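// Horizontal projection: _mm_sad_epu8 against zero sums each 16-byte
// chunk of the row, and the per-chunk SADs accumulate into a single
// 16-bit total across `width` pixels.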
int16_t vpx_int_pro_col_sse2(const uint8_t *ref, const int width) {
  __m128i zero = _mm_setzero_si128();
  __m128i src_line = _mm_load_si128((const __m128i *)ref);
  __m128i s0 = _mm_sad_epu8(src_line, zero);
  __m128i s1;
  int i;

  for (i = 16; i < width; i += 16) {
    ref += 16;
    src_line = _mm_load_si128((const __m128i *)ref);
    s1 = _mm_sad_epu8(src_line, zero);
    s0 = _mm_adds_epu16(s0, s1);
  }

  s1 = _mm_srli_si128(s0, 8);
  s0 = _mm_adds_epu16(s0, s1);

  return _mm_extract_epi16(s0, 0);
}

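// Variance-style measure over a 1-D vector of (4 << bwl) 16-bit elements:
// signed differences accumulate in 16-bit lanes (their horizontal sum
// lands in `mean`) while _mm_madd_epi16 accumulates squared differences
// in 32-bit lanes. The return value is sse - (sum * sum) / n with
// n = 4 << bwl, i.e. n times the variance of the differences.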
int vpx_vector_var_sse2(const int16_t *ref, const int16_t *src, const int bwl) {
  int idx;
  int width = 4 << bwl;
  int16_t mean;
  __m128i v0 = _mm_loadu_si128((const __m128i *)ref);
  __m128i v1 = _mm_load_si128((const __m128i *)src);
  __m128i diff = _mm_subs_epi16(v0, v1);
  __m128i sum = diff;
  __m128i sse = _mm_madd_epi16(diff, diff);

  ref += 8;
  src += 8;

  for (idx = 8; idx < width; idx += 8) {
    v0 = _mm_loadu_si128((const __m128i *)ref);
    v1 = _mm_load_si128((const __m128i *)src);
    diff = _mm_subs_epi16(v0, v1);

    sum = _mm_add_epi16(sum, diff);
    v0 = _mm_madd_epi16(diff, diff);
    sse = _mm_add_epi32(sse, v0);

    ref += 8;
    src += 8;
  }

  v0 = _mm_srli_si128(sum, 8);
  sum = _mm_add_epi16(sum, v0);
  v0 = _mm_srli_epi64(sum, 32);
  sum = _mm_add_epi16(sum, v0);
  v0 = _mm_srli_epi32(sum, 16);
  sum = _mm_add_epi16(sum, v0);

  v1 = _mm_srli_si128(sse, 8);
  sse = _mm_add_epi32(sse, v1);
  v1 = _mm_srli_epi64(sse, 32);
  sse = _mm_add_epi32(sse, v1);

  mean = _mm_extract_epi16(sum, 0);

  return _mm_cvtsi128_si32(sse) - ((mean * mean) >> (bwl + 2));
}