/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vp8/encoder/variance.h"
#include "vp8/common/pragmas.h"
#include "vpx_ports/mem.h"
/* Prototypes for the MMX assembly kernels this file wraps.  All are
 * implemented in the x86 assembly sources; the *var* helpers return the
 * sum of squared differences through *SSE/sumsquared and the plain sum
 * of differences through *Sum/sum so callers can derive the variance. */
extern void filter_block1d_h6_mmx
(
    const unsigned char *src_ptr,
    unsigned short *output_ptr,
    unsigned int src_pixels_per_line,
    unsigned int pixel_step,
    unsigned int output_height,
    unsigned int output_width,
    short *vp7_filter
);
extern void filter_block1d_v6_mmx
(
    const short *src_ptr,
    unsigned char *output_ptr,
    unsigned int pixels_per_line,
    unsigned int pixel_step,
    unsigned int output_height,
    unsigned int output_width,
    short *vp7_filter
);

extern unsigned int vp8_get_mb_ss_mmx(short *src_ptr);
extern unsigned int vp8_get8x8var_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *SSE,
    int *Sum
);
extern unsigned int vp8_get4x4var_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *SSE,
    int *Sum
);
extern unsigned int vp8_get4x4sse_cs_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride
);
extern void vp8_filter_block2d_bil4x4_var_mmx
(
    const unsigned char *ref_ptr,
    int ref_pixels_per_line,
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    const short *HFilter,
    const short *VFilter,
    int *sum,
    unsigned int *sumsquared
);
extern void vp8_filter_block2d_bil_var_mmx
(
    const unsigned char *ref_ptr,
    int ref_pixels_per_line,
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    unsigned int Height,
    const short *HFilter,
    const short *VFilter,
    int *sum,
    unsigned int *sumsquared
);
extern unsigned int vp8_get16x16pred_error_mmx
(
    unsigned char *src_ptr,
    int src_stride,
    unsigned char *ref_ptr,
    int ref_stride
);
/*
 * 16x16 SSE/sum, built from four 8x8 MMX passes (one per quadrant).
 * Writes the accumulated SSE through *SSE and the accumulated signed
 * pixel-difference sum through *SUM; returns the block variance,
 * SSE - sum^2/256 (256 = number of pixels in a 16x16 block).
 */
unsigned int vp8_get16x16var_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned *SSE,
    int *SUM
)
{
    unsigned int sse0, sse1, sse2, sse3, var;
    int sum0, sum1, sum2, sum3, avg;

    /* top-left, top-right, bottom-left, bottom-right 8x8 quadrants */
    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

    var = sse0 + sse1 + sse2 + sse3;
    avg = sum0 + sum1 + sum2 + sum3;

    *SSE = var;
    *SUM = avg;
    return (var - ((avg * avg) >> 8));
}
/*
 * 4x4 variance.  Stores the SSE through *sse and returns
 * SSE - sum^2/16 (16 = pixels in a 4x4 block).
 */
unsigned int vp8_variance4x4_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int var;
    int avg;

    vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
    *sse = var;
    return (var - ((avg * avg) >> 4));
}
/*
 * 8x8 variance.  Stores the SSE through *sse and returns
 * SSE - sum^2/64 (64 = pixels in an 8x8 block).
 */
unsigned int vp8_variance8x8_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int var;
    int avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
    *sse = var;
    return (var - ((avg * avg) >> 6));
}
/*
 * 16x16 mean squared error (really the raw SSE: no mean correction).
 * Stores the SSE through *sse and also returns it.
 */
unsigned int vp8_mse16x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, sse2, sse3, var;
    int sum0, sum1, sum2, sum3;

    /* sums are produced by the asm helper but unused for MSE */
    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

    var = sse0 + sse1 + sse2 + sse3;
    *sse = var;
    return var;
}
/*
 * 16x16 variance via four 8x8 passes.  Stores the SSE through *sse
 * and returns SSE - sum^2/256.
 */
unsigned int vp8_variance16x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, sse2, sse3, var;
    int sum0, sum1, sum2, sum3, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

    var = sse0 + sse1 + sse2 + sse3;
    avg = sum0 + sum1 + sum2 + sum3;
    *sse = var;
    return (var - ((avg * avg) >> 8));
}
/*
 * 16x8 variance: two side-by-side 8x8 passes.  Stores the SSE through
 * *sse and returns SSE - sum^2/128 (128 = pixels in a 16x8 block).
 */
unsigned int vp8_variance16x8_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, var;
    int sum0, sum1, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);

    var = sse0 + sse1;
    avg = sum0 + sum1;
    *sse = var;
    return (var - ((avg * avg) >> 7));
}
/*
 * 8x16 variance: two stacked 8x8 passes.  Stores the SSE through
 * *sse and returns SSE - sum^2/128 (128 = pixels in an 8x16 block).
 */
unsigned int vp8_variance8x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, var;
    int sum0, sum1, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);

    var = sse0 + sse1;
    avg = sum0 + sum1;
    *sse = var;
    return (var - ((avg * avg) >> 7));
}
///////////////////////////////////////////////////////////////////////////
// the mmx function that does the bilinear filtering and var calculation //
// in one pass                                                           //
///////////////////////////////////////////////////////////////////////////
252 DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[8][8]) =
254 { 128, 128, 128, 128, 0, 0, 0, 0 },
255 { 112, 112, 112, 112, 16, 16, 16, 16 },
256 { 96, 96, 96, 96, 32, 32, 32, 32 },
257 { 80, 80, 80, 80, 48, 48, 48, 48 },
258 { 64, 64, 64, 64, 64, 64, 64, 64 },
259 { 48, 48, 48, 48, 80, 80, 80, 80 },
260 { 32, 32, 32, 32, 96, 96, 96, 96 },
261 { 16, 16, 16, 16, 112, 112, 112, 112 }
264 unsigned int vp8_sub_pixel_variance4x4_mmx
266 const unsigned char *src_ptr,
267 int src_pixels_per_line,
270 const unsigned char *dst_ptr,
271 int dst_pixels_per_line,
277 vp8_filter_block2d_bil4x4_var_mmx(
278 src_ptr, src_pixels_per_line,
279 dst_ptr, dst_pixels_per_line,
280 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
284 return (xxsum - ((xsum * xsum) >> 4));
288 unsigned int vp8_sub_pixel_variance8x8_mmx
290 const unsigned char *src_ptr,
291 int src_pixels_per_line,
294 const unsigned char *dst_ptr,
295 int dst_pixels_per_line,
302 vp8_filter_block2d_bil_var_mmx(
303 src_ptr, src_pixels_per_line,
304 dst_ptr, dst_pixels_per_line, 8,
305 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
309 return (xxsum - ((xsum * xsum) >> 6));
312 unsigned int vp8_sub_pixel_variance16x16_mmx
314 const unsigned char *src_ptr,
315 int src_pixels_per_line,
318 const unsigned char *dst_ptr,
319 int dst_pixels_per_line,
325 unsigned int xxsum0, xxsum1;
328 vp8_filter_block2d_bil_var_mmx(
329 src_ptr, src_pixels_per_line,
330 dst_ptr, dst_pixels_per_line, 16,
331 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
336 vp8_filter_block2d_bil_var_mmx(
337 src_ptr + 8, src_pixels_per_line,
338 dst_ptr + 8, dst_pixels_per_line, 16,
339 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
347 return (xxsum0 - ((xsum0 * xsum0) >> 8));
/*
 * 16x16 sub-pixel MSE.  The variance call writes the SSE through
 * *sse; the MSE is that SSE, so return it without the mean correction.
 */
unsigned int vp8_sub_pixel_mse16x16_mmx(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
    return *sse;
}
366 unsigned int vp8_sub_pixel_variance16x8_mmx
368 const unsigned char *src_ptr,
369 int src_pixels_per_line,
372 const unsigned char *dst_ptr,
373 int dst_pixels_per_line,
378 unsigned int xxsum0, xxsum1;
381 vp8_filter_block2d_bil_var_mmx(
382 src_ptr, src_pixels_per_line,
383 dst_ptr, dst_pixels_per_line, 8,
384 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
389 vp8_filter_block2d_bil_var_mmx(
390 src_ptr + 8, src_pixels_per_line,
391 dst_ptr + 8, dst_pixels_per_line, 8,
392 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
400 return (xxsum0 - ((xsum0 * xsum0) >> 7));
403 unsigned int vp8_sub_pixel_variance8x16_mmx
405 const unsigned char *src_ptr,
406 int src_pixels_per_line,
409 const unsigned char *dst_ptr,
410 int dst_pixels_per_line,
416 vp8_filter_block2d_bil_var_mmx(
417 src_ptr, src_pixels_per_line,
418 dst_ptr, dst_pixels_per_line, 16,
419 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
423 return (xxsum - ((xsum * xsum) >> 7));
/*
 * 16x16 half-pixel horizontal variance: offset 4 of 8 selects the
 * (64,64) bilinear taps, i.e. a half-pel shift; vertical is full-pel.
 */
unsigned int vp8_variance_halfpixvar16x16_h_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 0,
                                           ref_ptr, recon_stride, sse);
}
/*
 * 16x16 half-pixel vertical variance: horizontal is full-pel,
 * vertical offset 4 selects the half-pel (64,64) bilinear taps.
 */
unsigned int vp8_variance_halfpixvar16x16_v_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 4,
                                           ref_ptr, recon_stride, sse);
}
/*
 * 16x16 half-pixel diagonal variance: half-pel shift in both
 * directions (offset 4 horizontally and vertically).
 */
unsigned int vp8_variance_halfpixvar16x16_hv_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 4,
                                           ref_ptr, recon_stride, sse);
}