/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
11 #include "vpx_config.h"
12 #include "vp8/common/variance.h"
13 #include "vpx_ports/mem.h"
14 #include "vp8/common/x86/filter_x86.h"
/*
 * Prototypes for the MMX assembly helpers defined in
 * vp8/common/x86/variance_impl_mmx.asm.  The *var* helpers return the
 * sum of squared errors through *SSE/*sumsquared and the signed pixel
 * difference sum through *Sum/*sum.
 */
extern void filter_block1d_h6_mmx
(
    const unsigned char *src_ptr,
    unsigned short *output_ptr,
    unsigned int src_pixels_per_line,
    unsigned int pixel_step,
    unsigned int output_height,
    unsigned int output_width,
    short *filter
);
extern void filter_block1d_v6_mmx
(
    const short *src_ptr,
    unsigned char *output_ptr,
    unsigned int pixels_per_line,
    unsigned int pixel_step,
    unsigned int output_height,
    unsigned int output_width,
    short *filter
);
extern unsigned int vp8_get_mb_ss_mmx(const short *src_ptr);
extern unsigned int vp8_get8x8var_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *SSE,
    int *Sum
);
extern unsigned int vp8_get4x4var_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *SSE,
    int *Sum
);
extern void vp8_filter_block2d_bil4x4_var_mmx
(
    const unsigned char *ref_ptr,
    int ref_pixels_per_line,
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    const short *HFilter,
    const short *VFilter,
    int *sum,
    unsigned int *sumsquared
);
extern void vp8_filter_block2d_bil_var_mmx
(
    const unsigned char *ref_ptr,
    int ref_pixels_per_line,
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    unsigned int Height,
    const short *HFilter,
    const short *VFilter,
    int *sum,
    unsigned int *sumsquared
);
/*
 * 4x4 variance: SSE minus the squared mean.  The assembly helper
 * returns the sum of squared differences (var) and the signed sum of
 * differences (avg); variance = SSE - sum^2/16, hence the >> 4.
 * *sse receives the raw SSE as a side output.
 */
unsigned int vp8_variance4x4_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int var;
    int avg;

    vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
    *sse = var;
    return (var - (((unsigned int)avg * avg) >> 4));
}
/*
 * 8x8 variance: SSE minus the squared mean over 64 pixels
 * (sum^2/64, hence the >> 6).  *sse receives the raw SSE.
 */
unsigned int vp8_variance8x8_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int var;
    int avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
    *sse = var;
    return (var - (((unsigned int)avg * avg) >> 6));
}
/*
 * 16x16 mean squared error (really the raw SSE over the block):
 * accumulate the SSE of the four 8x8 quadrants.  The per-quadrant sums
 * are computed by the helper but not needed for MSE.
 */
unsigned int vp8_mse16x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, sse2, sse3, var;
    int sum0, sum1, sum2, sum3;

    /* top-left, top-right, bottom-left, bottom-right 8x8 quadrants */
    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

    var = sse0 + sse1 + sse2 + sse3;
    *sse = var;
    return var;
}
/*
 * 16x16 variance: combine the SSE and signed sums of the four 8x8
 * quadrants, then subtract the squared mean (sum^2/256, hence >> 8).
 * *sse receives the combined raw SSE.
 */
unsigned int vp8_variance16x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, sse2, sse3, var;
    int sum0, sum1, sum2, sum3, avg;

    /* top-left, top-right, bottom-left, bottom-right 8x8 quadrants */
    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

    var = sse0 + sse1 + sse2 + sse3;
    avg = sum0 + sum1 + sum2 + sum3;
    *sse = var;
    return (var - (((unsigned int)avg * avg) >> 8));
}
/*
 * 16x8 variance: combine the two 8x8 halves (left and right), then
 * subtract the squared mean over 128 pixels (sum^2/128, hence >> 7).
 * *sse receives the combined raw SSE.
 */
unsigned int vp8_variance16x8_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, var;
    int sum0, sum1, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);

    var = sse0 + sse1;
    avg = sum0 + sum1;
    *sse = var;
    return (var - (((unsigned int)avg * avg) >> 7));
}
/*
 * 8x16 variance: combine the two 8x8 halves (top and bottom), then
 * subtract the squared mean over 128 pixels (sum^2/128, hence >> 7).
 * *sse receives the combined raw SSE.
 */
unsigned int vp8_variance8x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, var;
    int sum0, sum1, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);

    var = sse0 + sse1;
    avg = sum0 + sum1;
    *sse = var;
    return (var - (((unsigned int)avg * avg) >> 7));
}
201 unsigned int vp8_sub_pixel_variance4x4_mmx
203 const unsigned char *src_ptr,
204 int src_pixels_per_line,
207 const unsigned char *dst_ptr,
208 int dst_pixels_per_line,
214 vp8_filter_block2d_bil4x4_var_mmx(
215 src_ptr, src_pixels_per_line,
216 dst_ptr, dst_pixels_per_line,
217 vp8_bilinear_filters_x86_4[xoffset], vp8_bilinear_filters_x86_4[yoffset],
221 return (xxsum - (((unsigned int)xsum * xsum) >> 4));
225 unsigned int vp8_sub_pixel_variance8x8_mmx
227 const unsigned char *src_ptr,
228 int src_pixels_per_line,
231 const unsigned char *dst_ptr,
232 int dst_pixels_per_line,
239 vp8_filter_block2d_bil_var_mmx(
240 src_ptr, src_pixels_per_line,
241 dst_ptr, dst_pixels_per_line, 8,
242 vp8_bilinear_filters_x86_4[xoffset], vp8_bilinear_filters_x86_4[yoffset],
246 return (xxsum - (((unsigned int)xsum * xsum) >> 6));
249 unsigned int vp8_sub_pixel_variance16x16_mmx
251 const unsigned char *src_ptr,
252 int src_pixels_per_line,
255 const unsigned char *dst_ptr,
256 int dst_pixels_per_line,
262 unsigned int xxsum0, xxsum1;
265 vp8_filter_block2d_bil_var_mmx(
266 src_ptr, src_pixels_per_line,
267 dst_ptr, dst_pixels_per_line, 16,
268 vp8_bilinear_filters_x86_4[xoffset], vp8_bilinear_filters_x86_4[yoffset],
273 vp8_filter_block2d_bil_var_mmx(
274 src_ptr + 8, src_pixels_per_line,
275 dst_ptr + 8, dst_pixels_per_line, 16,
276 vp8_bilinear_filters_x86_4[xoffset], vp8_bilinear_filters_x86_4[yoffset],
284 return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
/*
 * 16x16 sub-pixel MSE: the variance routine already computes the raw
 * SSE into *sse as a side effect, so discard its variance return value
 * and report *sse (SSE over the block) instead.
 */
unsigned int vp8_sub_pixel_mse16x16_mmx(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
    return *sse;
}
303 unsigned int vp8_sub_pixel_variance16x8_mmx
305 const unsigned char *src_ptr,
306 int src_pixels_per_line,
309 const unsigned char *dst_ptr,
310 int dst_pixels_per_line,
315 unsigned int xxsum0, xxsum1;
318 vp8_filter_block2d_bil_var_mmx(
319 src_ptr, src_pixels_per_line,
320 dst_ptr, dst_pixels_per_line, 8,
321 vp8_bilinear_filters_x86_4[xoffset], vp8_bilinear_filters_x86_4[yoffset],
326 vp8_filter_block2d_bil_var_mmx(
327 src_ptr + 8, src_pixels_per_line,
328 dst_ptr + 8, dst_pixels_per_line, 8,
329 vp8_bilinear_filters_x86_4[xoffset], vp8_bilinear_filters_x86_4[yoffset],
337 return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 7));
340 unsigned int vp8_sub_pixel_variance8x16_mmx
342 const unsigned char *src_ptr,
343 int src_pixels_per_line,
346 const unsigned char *dst_ptr,
347 int dst_pixels_per_line,
353 vp8_filter_block2d_bil_var_mmx(
354 src_ptr, src_pixels_per_line,
355 dst_ptr, dst_pixels_per_line, 16,
356 vp8_bilinear_filters_x86_4[xoffset], vp8_bilinear_filters_x86_4[yoffset],
360 return (xxsum - (((unsigned int)xsum * xsum) >> 7));
/*
 * 16x16 variance at the horizontal half-pixel position: filter index 4
 * selects the (4/8, 0/8) = half-pel horizontal, full-pel vertical tap.
 */
unsigned int vp8_variance_halfpixvar16x16_h_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 0,
                                           ref_ptr, recon_stride, sse);
}
/*
 * 16x16 variance at the vertical half-pixel position: filter index 4
 * selects the (0/8, 4/8) = full-pel horizontal, half-pel vertical tap.
 */
unsigned int vp8_variance_halfpixvar16x16_v_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 4,
                                           ref_ptr, recon_stride, sse);
}
/*
 * 16x16 variance at the diagonal half-pixel position: half-pel offset
 * (filter index 4) in both dimensions.
 */
unsigned int vp8_variance_halfpixvar16x16_hv_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 4,
                                           ref_ptr, recon_stride, sse);
}