/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"

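/* Alternating 3/1 byte pattern; loaded into %mm6 by the loop-filter strength
 * code below when 'field' is set, so that interlaced content uses a smaller
 * motion-vector difference limit for the vertical component. */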
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1  ) = 0x0103010301030103ULL;

/***********************************/
/* IDCT */
#define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT (uint8_t *dst, int16_t *block, int stride);
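/* Each invocation below only declares a prototype; e.g.
 * IDCT_ADD_FUNC(8_dc, 10, sse2) expands to
 *     void ff_h264_idct8_dc_add_10_sse2(uint8_t *dst, int16_t *block, int stride);
 * The bodies live in the companion assembler (yasm) sources, hence the
 * HAVE_YASM guards in ff_h264dsp_init_x86() below. */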

IDCT_ADD_FUNC(, 8, mmx)
IDCT_ADD_FUNC(, 10, sse2)
IDCT_ADD_FUNC(_dc, 8, mmx2)
IDCT_ADD_FUNC(_dc, 10, mmx2)
IDCT_ADD_FUNC(8_dc, 8, mmx2)
IDCT_ADD_FUNC(8_dc, 10, sse2)
IDCT_ADD_FUNC(8, 8, mmx)
IDCT_ADD_FUNC(8, 8, sse2)
IDCT_ADD_FUNC(8, 10, sse2)
#if HAVE_AVX
IDCT_ADD_FUNC(, 10, avx)
IDCT_ADD_FUNC(8_dc, 10, avx)
IDCT_ADD_FUNC(8, 10, avx)
#endif


#define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
                              (uint8_t *dst, const int *block_offset, \
                              DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
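/* The add4/add8/add16/add16intra variants process several blocks per call:
 * block i is added at dst + block_offset[i], and blocks without coded
 * coefficients (according to the nnzc[] table) are skipped where possible. */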

IDCT_ADD_REP_FUNC(8, 4, 8, mmx)
IDCT_ADD_REP_FUNC(8, 4, 8, mmx2)
IDCT_ADD_REP_FUNC(8, 4, 8, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, avx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16, 8, sse2)
IDCT_ADD_REP_FUNC(, 16, 10, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
#if HAVE_AVX
IDCT_ADD_REP_FUNC(, 16, 10, avx)
IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
#endif


#define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
                              (uint8_t **dst, const int *block_offset, \
                              DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
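/* Same as above, except that dst is an array of plane pointers (uint8_t **);
 * used for the idct_add8 functions, which cover both chroma planes. */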
IDCT_ADD_REP_FUNC2(, 8, 8, mmx)
IDCT_ADD_REP_FUNC2(, 8, 8, mmx2)
IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
#if HAVE_AVX
IDCT_ADD_REP_FUNC2(, 8, 10, avx)
#endif

void ff_h264_luma_dc_dequant_idct_mmx (DCTELEM *output, DCTELEM *input, int qmul);
void ff_h264_luma_dc_dequant_idct_sse2(DCTELEM *output, DCTELEM *input, int qmul);

/***********************************/
/* deblocking */

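/* One pass of the loop-filter strength computation: for every 4-sample edge
 * along direction 'dir' it derives boundary strengths from reference-index
 * differences, motion-vector differences and the non-zero coefficient flags,
 * and stores the four resulting values into bS. */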
#define h264_loop_filter_strength_iteration_mmx2(bS, nz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
    do { \
        x86_reg b_idx; \
        mask_mv <<= 3; \
        for( b_idx=0; b_idx<edges; b_idx+=step ) { \
            if (!mask_dir) \
            __asm__ volatile( \
                    "pxor %%mm0, %%mm0 \n\t" \
                    :: \
            ); \
            if(!(mask_mv & b_idx)) { \
                if(bidir) { \
                    __asm__ volatile( \
                        "movd         %a3(%0,%2), %%mm2 \n" \
                        "punpckldq    %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
                        "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
                        "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
                        "pshufw $0x4E, %%mm2, %%mm3 \n" \
                        "psubb         %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
                        "psubb         %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
 \
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a5(%1,%2,4), %%mm1 \n" \
                        "movq   %a6(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
 \
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a7(%1,%2,4), %%mm1 \n" \
                        "movq   %a8(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
 \
                        "pshufw $0x4E, %%mm1, %%mm1 \n" \
                        "por           %%mm1, %%mm0 \n" \
                        "pshufw $0x4E, %%mm0, %%mm1 \n" \
                        "pminub        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx+52), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56), \
                          "i"(d_idx*4+208), \
                          "i"(d_idx*4+216) \
                    ); \
                } else { \
                    __asm__ volatile( \
                        "movd   12(%0,%2), %%mm0 \n" \
                        "psubb %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
                        "movq   48(%1,%2,4), %%mm1 \n" \
                        "movq   56(%1,%2,4), %%mm2 \n" \
                        "psubw %a4(%1,%2,4), %%mm1 \n" \
                        "psubw %a5(%1,%2,4), %%mm2 \n" \
                        "packsswb   %%mm2, %%mm1 \n" \
                        "paddb      %%mm6, %%mm1 \n" \
                        "psubusb    %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "packsswb   %%mm1, %%mm1 \n" \
                        "por        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56) \
                    ); \
                } \
            } \
            __asm__ volatile( \
                "movd 12(%0,%1), %%mm1 \n" \
                "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
                ::"r"(nnz), \
                  "r"(b_idx), \
                  "i"(d_idx+12) \
            ); \
            __asm__ volatile( \
                "pminub    %%mm7, %%mm1 \n" \
                "pminub    %%mm7, %%mm0 \n" \
                "psllw        $1, %%mm1 \n" \
                "pxor      %%mm2, %%mm2 \n" \
                "pmaxub    %%mm0, %%mm1 \n" \
                "punpcklbw %%mm2, %%mm1 \n" \
                "movq      %%mm1, %a1(%0,%2) \n" \
                ::"r"(bS), \
                  "i"(32*dir), \
                  "r"(b_idx) \
                :"memory" \
            ); \
        } \
    } while (0)

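/* Fill bS[dir][edge][b] with deblocking boundary strengths for one macroblock:
 * one pass per filtering direction, with the results of the second pass
 * transposed into place at the end. %mm7 holds ff_pb_1, %mm6 the per-component
 * mv-difference limits (ff_pb_3_1 in field mode) and %mm5 twice that limit. */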
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    step  <<= 3;
    edges <<= 3;
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,    32,    8, mask_mv0, 0, -1, -1);

    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

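/* Deblocking filter prototypes: LF_FUNC declares the inter filters, which take
 * the per-edge tc0 thresholds, LF_IFUNC the intra variants without tc0. The
 * implementations are in the assembler sources. */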
#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta);

#define LF_FUNCS(type, depth)\
LF_FUNC (h,  chroma,       depth, mmxext)\
LF_IFUNC(h,  chroma_intra, depth, mmxext)\
LF_FUNC (v,  chroma,       depth, mmxext)\
LF_IFUNC(v,  chroma_intra, depth, mmxext)\
LF_FUNC (h,  luma,         depth, mmxext)\
LF_IFUNC(h,  luma_intra,   depth, mmxext)\
LF_FUNC (h,  luma,         depth, sse2)\
LF_IFUNC(h,  luma_intra,   depth, sse2)\
LF_FUNC (v,  luma,         depth, sse2)\
LF_IFUNC(v,  luma_intra,   depth, sse2)\
LF_FUNC (h,  chroma,       depth, sse2)\
LF_IFUNC(h,  chroma_intra, depth, sse2)\
LF_FUNC (v,  chroma,       depth, sse2)\
LF_IFUNC(v,  chroma_intra, depth, sse2)\
LF_FUNC (h,  luma,         depth,  avx)\
LF_IFUNC(h,  luma_intra,   depth,  avx)\
LF_FUNC (v,  luma,         depth,  avx)\
LF_IFUNC(v,  luma_intra,   depth,  avx)\
LF_FUNC (h,  chroma,       depth,  avx)\
LF_IFUNC(h,  chroma_intra, depth,  avx)\
LF_FUNC (v,  chroma,       depth,  avx)\
LF_IFUNC(v,  chroma_intra, depth,  avx)

LF_FUNCS( uint8_t,  8)
LF_FUNCS(uint16_t, 10)

#if ARCH_X86_32
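/* The mmxext vertical luma filter only handles an 8-pixel-wide edge, so the
 * full 16-wide filter is built from two calls. tc0[i] < 0 marks a half edge
 * that must not be filtered; (tc0[0] & tc0[1]) >= 0 skips a half only when
 * both of its tc0 values are negative. */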
LF_FUNC (v8, luma,             8, mmxext)
static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra,        8, mmxext)
static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_deblock_v8_luma_intra_8_mmxext(pix+0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmxext(pix+8, stride, alpha, beta);
}
#endif /* ARCH_X86_32 */

LF_FUNC (v,  luma,            10, mmxext)
LF_IFUNC(v,  luma_intra,      10, mmxext)

/***********************************/
/* weighted prediction */

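/* Prototypes for the weighted-prediction kernels: ff_h264_weight_WxH applies
 * explicit weights to a single block, ff_h264_biweight_WxH blends two source
 * blocks with separate weights. One function per block size and instruction
 * set, again implemented in the assembler sources. */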
#define H264_WEIGHT(W, H, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, H, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int log2_denom, int weightd, \
    int weights, int offset);

#define H264_BIWEIGHT_MMX(W,H) \
H264_WEIGHT  (W, H, mmx2) \
H264_BIWEIGHT(W, H, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W,H) \
H264_BIWEIGHT_MMX(W, H) \
H264_WEIGHT      (W, H, sse2) \
H264_BIWEIGHT    (W, H, sse2) \
H264_BIWEIGHT    (W, H, ssse3)

H264_BIWEIGHT_MMX_SSE(16, 16)
H264_BIWEIGHT_MMX_SSE(16,  8)
H264_BIWEIGHT_MMX_SSE( 8, 16)
H264_BIWEIGHT_MMX_SSE( 8,  8)
H264_BIWEIGHT_MMX_SSE( 8,  4)
H264_BIWEIGHT_MMX    ( 4,  8)
H264_BIWEIGHT_MMX    ( 4,  4)
H264_BIWEIGHT_MMX    ( 4,  2)

void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
{
    int mm_flags = av_get_cpu_flags();

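    /* Function pointers are assigned from the least to the most capable
     * instruction set, so when several CPU flags are present the later
     * (faster) versions override the earlier ones. */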
    if (bit_depth == 8) {
    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
    }
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->h264_idct_dc_add         =
        c->h264_idct_add            = ff_h264_idct_add_8_mmx;
        c->h264_idct8_dc_add        =
        c->h264_idct8_add           = ff_h264_idct8_add_8_mmx;

        c->h264_idct_add16          = ff_h264_idct_add16_8_mmx;
        c->h264_idct8_add4          = ff_h264_idct8_add4_8_mmx;
        c->h264_idct_add8           = ff_h264_idct_add8_8_mmx;
        c->h264_idct_add16intra     = ff_h264_idct_add16intra_8_mmx;
        c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_mmx;

        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->h264_idct_dc_add    = ff_h264_idct_dc_add_8_mmx2;
            c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_8_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_8_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_8_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_8_mmx2;

            c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmxext;
            c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_8_mmxext;
            c->h264_h_loop_filter_chroma_intra= ff_deblock_h_chroma_intra_8_mmxext;
#if ARCH_X86_32
            c->h264_v_loop_filter_luma= ff_deblock_v_luma_8_mmxext;
            c->h264_h_loop_filter_luma= ff_deblock_h_luma_8_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif
            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct8_add           = ff_h264_idct8_add_8_sse2;

                c->h264_idct_add16          = ff_h264_idct_add16_8_sse2;
                c->h264_idct8_add4          = ff_h264_idct8_add4_8_sse2;
                c->h264_idct_add8           = ff_h264_idct_add8_8_sse2;
                c->h264_idct_add16intra     = ff_h264_idct_add16intra_8_sse2;
                c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;

                c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
                c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
                c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
                c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
                c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;

                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;

#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
#endif
            }
            if (mm_flags&AV_CPU_FLAG_SSSE3) {
                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
            }
            if (mm_flags&AV_CPU_FLAG_AVX) {
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
#endif
            }
        }
    }
#endif
    } else if (bit_depth == 10) {
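    /* The 10-bit functions are yasm-only. The mmxext deblocking filters are
     * registered only on x86-32; 64-bit CPUs always have SSE2, so the SSE2/AVX
     * versions are preferred there. */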
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        if (mm_flags & AV_CPU_FLAG_MMX2) {
#if ARCH_X86_32
            c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_mmxext;
            c->h264_v_loop_filter_luma= ff_deblock_v_luma_10_mmxext;
            c->h264_h_loop_filter_luma= ff_deblock_h_luma_10_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
#endif
            c->h264_idct_dc_add= ff_h264_idct_dc_add_10_mmx2;
            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct_add       = ff_h264_idct_add_10_sse2;
                c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_10_sse2;

                c->h264_idct_add16     = ff_h264_idct_add16_10_sse2;
                c->h264_idct_add8      = ff_h264_idct_add8_10_sse2;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_10_sse2;
#if HAVE_ALIGNED_STACK
                c->h264_idct8_add      = ff_h264_idct8_add_10_sse2;
                c->h264_idct8_add4     = ff_h264_idct8_add4_10_sse2;
#endif

                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
#endif
            }
#if HAVE_AVX
            if (mm_flags&AV_CPU_FLAG_AVX) {
                c->h264_idct_dc_add    =
                c->h264_idct_add       = ff_h264_idct_add_10_avx;
                c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_10_avx;

                c->h264_idct_add16     = ff_h264_idct_add16_10_avx;
                c->h264_idct_add8      = ff_h264_idct_add8_10_avx;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_10_avx;
#if HAVE_ALIGNED_STACK
                c->h264_idct8_add      = ff_h264_idct8_add_10_avx;
                c->h264_idct8_add4     = ff_h264_idct8_add4_10_avx;
#endif

                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_avx;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_avx;
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
#endif
            }
#endif /* HAVE_AVX */
        }
    }
#endif
    }
}