/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vpx_ports/config.h"
#include "vpx_ports/x86.h"
#include "variance.h"
#include "onyx_int.h"


#if HAVE_MMX
/* The 8x4 FDCT is computed as two 4x4 FDCTs over the left and right
 * halves of the block.
 */
void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch)
{
    vp8_short_fdct4x4_mmx(input,     output,      pitch);
    vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
}

int vp8_fast_quantize_b_impl_mmx(short *coeff_ptr, short *zbin_ptr,
                                 short *qcoeff_ptr, short *dequant_ptr,
                                 short *scan_mask, short *round_ptr,
                                 short *quant_ptr, short *dqcoeff_ptr);

/* Unpack the BLOCK/BLOCKD fields into the flat pointer arguments the
 * assembly implementation expects, and record the returned end-of-block
 * index.
 */
void vp8_fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
{
    short *scan_mask   = vp8_default_zig_zag_mask; /* d->scan_order_mask_ptr; */
    short *coeff_ptr   = b->coeff;
    short *zbin_ptr    = b->zbin;
    short *round_ptr   = b->round;
    short *quant_ptr   = b->quant;
    short *qcoeff_ptr  = d->qcoeff;
    short *dqcoeff_ptr = d->dqcoeff;
    short *dequant_ptr = d->dequant;

    d->eob = vp8_fast_quantize_b_impl_mmx(
                 coeff_ptr,
                 zbin_ptr,
                 qcoeff_ptr,
                 dequant_ptr,
                 scan_mask,
                 round_ptr,
                 quant_ptr,
                 dqcoeff_ptr
             );
}
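
/* For reference: a hypothetical scalar routine sketching the algorithm the
 * fast-quantize SIMD implementations in this file correspond to, modeled on
 * the C fallback. The function name below is illustrative only, and the
 * end-of-block convention (index of the last nonzero coefficient, plus one)
 * is assumed to match what the assembly returns. Not compiled.
 */
#if 0
static void fast_quantize_b_reference(BLOCK *b, BLOCKD *d)
{
    int i, rc, eob = -1;
    int z, sz, x, y;

    for (i = 0; i < 16; i++)
    {
        rc = vp8_default_zig_zag1d[i];  /* visit coefficients in scan order */
        z  = b->coeff[rc];
        sz = (z >> 31);                 /* all ones if z is negative */
        x  = (z ^ sz) - sz;             /* abs(z) */

        y  = ((x + b->round[rc]) * b->quant[rc]) >> 16;  /* quantize */
        x  = (y ^ sz) - sz;             /* restore the sign */

        d->qcoeff[rc]  = x;
        d->dqcoeff[rc] = x * d->dequant[rc];

        if (y)
            eob = i;                    /* remember last nonzero coefficient */
    }

    d->eob = eob + 1;
}
#endif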

int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc)
{
    short *coeff_ptr = mb->block[0].coeff;
    short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
    return vp8_mbblock_error_mmx_impl(coeff_ptr, dcoef_ptr, dc);
}

int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
int vp8_mbuverror_mmx(MACROBLOCK *mb)
{
    /* The chroma (U/V) coefficients start at offset 256 in the
     * macroblock coefficient buffers.
     */
    short *s_ptr = &mb->coeff[256];
    short *d_ptr = &mb->e_mbd.dqcoeff[256];
    return vp8_mbuverror_mmx_impl(s_ptr, d_ptr);
}

void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
                             short *diff, unsigned char *predictor,
                             int pitch);
void vp8_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
{
    /* be->base_src points to the frame buffer pointer; be->src is the
     * block's offset within it.
     */
    unsigned char *z = *(be->base_src) + be->src;
    unsigned int  src_stride = be->src_stride;
    short *diff = &be->src_diff[0];
    unsigned char *predictor = &bd->predictor[0];
    vp8_subtract_b_mmx_impl(z, src_stride, diff, predictor, pitch);
}

#endif

#if HAVE_SSE2
int vp8_fast_quantize_b_impl_sse2(short *coeff_ptr,
                                  short *qcoeff_ptr, short *dequant_ptr,
                                  const short *inv_scan_order, short *round_ptr,
                                  short *quant_ptr, short *dqcoeff_ptr);
void vp8_fast_quantize_b_sse2(BLOCK *b, BLOCKD *d)
{
    short *coeff_ptr   = b->coeff;
    short *round_ptr   = b->round;
    short *quant_ptr   = b->quant;
    short *qcoeff_ptr  = d->qcoeff;
    short *dqcoeff_ptr = d->dqcoeff;
    short *dequant_ptr = d->dequant;

    d->eob = vp8_fast_quantize_b_impl_sse2(
                 coeff_ptr,
                 qcoeff_ptr,
                 dequant_ptr,
                 vp8_default_inv_zig_zag,
                 round_ptr,
                 quant_ptr,
                 dqcoeff_ptr
             );
}


int vp8_regular_quantize_b_impl_sse2(short *coeff_ptr, short *zbin_ptr,
                                     short *qcoeff_ptr, short *dequant_ptr,
                                     const int *default_zig_zag, short *round_ptr,
                                     short *quant_ptr, short *dqcoeff_ptr,
                                     unsigned short zbin_oq_value,
                                     short *zbin_boost_ptr);

/* zbin_ptr holds the base dead-zone threshold, zbin_oq_value (b->zbin_extra)
 * widens it, and zbin_boost_ptr raises it further as the run of zeros since
 * the last nonzero coefficient grows.
 */
void vp8_regular_quantize_b_sse2(BLOCK *b, BLOCKD *d)
{
    short *zbin_boost_ptr = b->zrun_zbin_boost;
    short *coeff_ptr      = b->coeff;
    short *zbin_ptr       = b->zbin;
    short *round_ptr      = b->round;
    short *quant_ptr      = b->quant;
    short *qcoeff_ptr     = d->qcoeff;
    short *dqcoeff_ptr    = d->dqcoeff;
    short *dequant_ptr    = d->dequant;
    unsigned short zbin_oq_value = b->zbin_extra;

    d->eob = vp8_regular_quantize_b_impl_sse2(
                 coeff_ptr,
                 zbin_ptr,
                 qcoeff_ptr,
                 dequant_ptr,
                 vp8_default_zig_zag1d,
                 round_ptr,
                 quant_ptr,
                 dqcoeff_ptr,
                 zbin_oq_value,
                 zbin_boost_ptr
             );
}

int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc)
{
    short *coeff_ptr = mb->block[0].coeff;
    short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
    return vp8_mbblock_error_xmm_impl(coeff_ptr, dcoef_ptr, dc);
}

int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
int vp8_mbuverror_xmm(MACROBLOCK *mb)
{
    short *s_ptr = &mb->coeff[256];
    short *d_ptr = &mb->e_mbd.dqcoeff[256];
    return vp8_mbuverror_xmm_impl(s_ptr, d_ptr);
}

void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
                              short *diff, unsigned char *predictor,
                              int pitch);
void vp8_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
{
    unsigned char *z = *(be->base_src) + be->src;
    unsigned int  src_stride = be->src_stride;
    short *diff = &be->src_diff[0];
    unsigned char *predictor = &bd->predictor[0];
    vp8_subtract_b_sse2_impl(z, src_stride, diff, predictor, pitch);
}

#endif

#if HAVE_SSSE3
int vp8_fast_quantize_b_impl_ssse3(short *coeff_ptr,
                                   short *qcoeff_ptr, short *dequant_ptr,
                                   short *round_ptr,
                                   short *quant_ptr, short *dqcoeff_ptr);
void vp8_fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d)
{
    d->eob = vp8_fast_quantize_b_impl_ssse3(
                 b->coeff,
                 d->qcoeff,
                 d->dequant,
                 b->round,
                 b->quant,
                 d->dqcoeff
             );
}
#endif


void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
{
#if CONFIG_RUNTIME_CPU_DETECT
    int flags = x86_simd_caps();
    int mmx_enabled = flags & HAS_MMX;
    int xmm_enabled = flags & HAS_SSE; /* currently unused in this file */
    int wmt_enabled = flags & HAS_SSE2;
    int SSE3Enabled = flags & HAS_SSE3;
    int SSSE3Enabled = flags & HAS_SSSE3;
    int SSE4_1Enabled = flags & HAS_SSE4_1;

    /* Note:
     *
     * This platform can also be built without runtime CPU detection. If
     * you modify any of the function mappings present in this file, be sure
     * to also update them in the static mappings (<arch>/filename_<arch>.h).
     */
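
    /* For illustration, the static mappings referred to above typically take
     * the form of preprocessor overrides in a per-architecture header. The
     * header and macro names in this sketch are assumptions, not taken from
     * this file:
     */
#if 0
    /* e.g. in a hypothetical <arch> header such as vp8/encoder/x86/dct_x86.h */
    #if !CONFIG_RUNTIME_CPU_DETECT
    #undef  vp8_fdct_short4x4
    #define vp8_fdct_short4x4 vp8_short_fdct4x4_mmx
    #endif
#endif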

    /* Override default functions with fastest ones for this CPU. */
#if HAVE_MMX
    if (mmx_enabled)
    {
        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_mmx;
        cpi->rtcd.variance.sad16x8               = vp8_sad16x8_mmx;
        cpi->rtcd.variance.sad8x16               = vp8_sad8x16_mmx;
        cpi->rtcd.variance.sad8x8                = vp8_sad8x8_mmx;
        cpi->rtcd.variance.sad4x4                = vp8_sad4x4_mmx;

        cpi->rtcd.variance.var4x4                = vp8_variance4x4_mmx;
        cpi->rtcd.variance.var8x8                = vp8_variance8x8_mmx;
        cpi->rtcd.variance.var8x16               = vp8_variance8x16_mmx;
        cpi->rtcd.variance.var16x8               = vp8_variance16x8_mmx;
        cpi->rtcd.variance.var16x16              = vp8_variance16x16_mmx;

        cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_mmx;
        cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_mmx;
        cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_mmx;
        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_mmx;
        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_mmx;
        cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_mmx;
        cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_mmx;
        cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_mmx;
        cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_mmx;

        cpi->rtcd.variance.mse16x16              = vp8_mse16x16_mmx;
        cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_mmx;

        cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_mmx;
        cpi->rtcd.variance.get8x8var             = vp8_get8x8var_mmx;
        cpi->rtcd.variance.get16x16var           = vp8_get16x16var_mmx;
        cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_mmx;

        cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_mmx;
        cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_mmx;
        cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_mmx;
        cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_mmx;

        cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_c;

        cpi->rtcd.encodemb.berr                  = vp8_block_error_mmx;
        cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_mmx;
        cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_mmx;
        cpi->rtcd.encodemb.subb                  = vp8_subtract_b_mmx;
        cpi->rtcd.encodemb.submby                = vp8_subtract_mby_mmx;
        cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_mmx;

        /*cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_mmx;*/
    }
#endif

#if HAVE_SSE2
    if (wmt_enabled)
    {
        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_wmt;
        cpi->rtcd.variance.sad16x8               = vp8_sad16x8_wmt;
        cpi->rtcd.variance.sad8x16               = vp8_sad8x16_wmt;
        cpi->rtcd.variance.sad8x8                = vp8_sad8x8_wmt;
        cpi->rtcd.variance.sad4x4                = vp8_sad4x4_wmt;

        cpi->rtcd.variance.var4x4                = vp8_variance4x4_wmt;
        cpi->rtcd.variance.var8x8                = vp8_variance8x8_wmt;
        cpi->rtcd.variance.var8x16               = vp8_variance8x16_wmt;
        cpi->rtcd.variance.var16x8               = vp8_variance16x8_wmt;
        cpi->rtcd.variance.var16x16              = vp8_variance16x16_wmt;

        cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_wmt;
        cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_wmt;
        cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_wmt;
        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_wmt;
        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_wmt;
        cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_wmt;
        cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_wmt;
        cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_wmt;
        cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_wmt;

        cpi->rtcd.variance.mse16x16              = vp8_mse16x16_wmt;
        cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_sse2;

        cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_sse2;
        cpi->rtcd.variance.get8x8var             = vp8_get8x8var_sse2;
        cpi->rtcd.variance.get16x16var           = vp8_get16x16var_sse2;
        /* cpi->rtcd.variance.get4x4sse_cs not implemented for wmt */

        cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_sse2;
        cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_sse2;
        cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_sse2;
        cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_sse2;

        cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_sse2;

        cpi->rtcd.encodemb.berr                  = vp8_block_error_xmm;
        cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_xmm;
        cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_xmm;
        cpi->rtcd.encodemb.subb                  = vp8_subtract_b_sse2;
        cpi->rtcd.encodemb.submby                = vp8_subtract_mby_sse2;
        cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_sse2;

        /*cpi->rtcd.quantize.quantb            = vp8_regular_quantize_b_sse2;*/
        cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_sse2;
    }
#endif

#if HAVE_SSE3
    if (SSE3Enabled)
    {
        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_sse3;
        cpi->rtcd.variance.sad16x16x3            = vp8_sad16x16x3_sse3;
        cpi->rtcd.variance.sad16x8x3             = vp8_sad16x8x3_sse3;
        cpi->rtcd.variance.sad8x16x3             = vp8_sad8x16x3_sse3;
        cpi->rtcd.variance.sad8x8x3              = vp8_sad8x8x3_sse3;
        cpi->rtcd.variance.sad4x4x3              = vp8_sad4x4x3_sse3;
        cpi->rtcd.search.full_search             = vp8_full_search_sadx3;

        cpi->rtcd.variance.sad16x16x4d           = vp8_sad16x16x4d_sse3;
        cpi->rtcd.variance.sad16x8x4d            = vp8_sad16x8x4d_sse3;
        cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_sse3;
        cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_sse3;
        cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_sse3;
        cpi->rtcd.search.diamond_search          = vp8_diamond_search_sadx4;
    }
#endif

#if HAVE_SSSE3
    if (SSSE3Enabled)
    {
        cpi->rtcd.variance.sad16x16x3            = vp8_sad16x16x3_ssse3;
        cpi->rtcd.variance.sad16x8x3             = vp8_sad16x8x3_ssse3;

        cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_ssse3;
    }
#endif

#if HAVE_SSE4_1
    if (SSE4_1Enabled)
    {
        cpi->rtcd.variance.sad16x16x8            = vp8_sad16x16x8_sse4;
        cpi->rtcd.variance.sad16x8x8             = vp8_sad16x8x8_sse4;
        cpi->rtcd.variance.sad8x16x8             = vp8_sad8x16x8_sse4;
        cpi->rtcd.variance.sad8x8x8              = vp8_sad8x8x8_sse4;
        cpi->rtcd.variance.sad4x4x8              = vp8_sad4x4x8_sse4;
        cpi->rtcd.search.full_search             = vp8_full_search_sadx8;
    }
#endif

#endif
}
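
/* Usage note (illustrative): after vp8_arch_x86_encoder_init() has run,
 * encoder code reaches these implementations through the rtcd table rather
 * than by name. The caller below is a hypothetical sketch, not code from
 * this project. Not compiled.
 */
#if 0
static void example_transform_block(VP8_COMP *cpi, short *input,
                                    short *output, int pitch)
{
    /* Dispatches to vp8_short_fdct4x4_mmx/_sse2 as selected above, or to
     * the C fallback when no SIMD extension was detected.
     */
    cpi->rtcd.fdct.short4x4(input, output, pitch);
}
#endif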