/*
 * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "vpx_ports/config.h"
#include "entropymode.h"
#include "quant_common.h"
#include "segmentation.h"
#include "setupintrarecon.h"
#include "encodeintra.h"
#include "reconinter.h"
#include "pickinter.h"
#include "findnearmv.h"
#include "reconintra.h"
#include "vpx_ports/vpx_timer.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x) &cpi->common.rtcd.x
#define IF_RTCD(x) (x)
#else
#define RTCD(x) NULL
#define IF_RTCD(x) NULL
#endif
extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
                                      MACROBLOCK *x,
                                      MB_ROW_COMP *mbr_ei,
                                      int mb_row,
                                      int count);
void vp8_build_block_offsets(MACROBLOCK *x);
void vp8_setup_block_ptrs(MACROBLOCK *x);
int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int y_modes[5] = {0, 0, 0, 0, 0};
unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
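
/* Global mode-usage counters: the y/uv/b_modes arrays accumulate key frame
   (intra) statistics and the inter_*_modes arrays accumulate inter frame
   statistics. They are updated in sum_intra_stats() and encode_mb_row() below. */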
static const int qrounding_factors[129] =
{
    56, 56, 56, 56, 48, 48, 56, 56,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
static const int qzbin_factors[129] =
{
    72, 72, 72, 72, 80, 80, 72, 72,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
static const int qrounding_factors_y2[129] =
{
    56, 56, 56, 56, 48, 48, 56, 56,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
static const int qzbin_factors_y2[129] =
{
    72, 72, 72, 72, 80, 80, 72, 72,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
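
/* The zbin and rounding factors above are in 1/128 units of the quantizer
   step: a zbin factor of 80 gives a zero bin of roughly 0.625 * Q (note the
   "+ 64) >> 7" scaling in vp8cx_init_quantizer() below), while a rounding
   factor of 48 adds roughly 0.375 * Q before quantization. */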
//#define EXACT_QUANT

#ifdef EXACT_QUANT
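/* When EXACT_QUANT is enabled, each quantizer step size Q is converted by
   vp8cx_invert_quant() into a 16-bit reciprocal plus a shift, so the forward
   quantizer can replace a divide with a multiply; conceptually (a sketch of
   the idea, not the actual quantizer code):

       y  = ((x * quant) >> 16) + x;   // x * t / 2^16, with t = 2^(16+l)/Q + 1
       y >>= quant_shift;              // finish dividing by Q

   Without EXACT_QUANT, the second vp8cx_init_quantizer() below simply stores
   (1 << 16) / Q and the quantizer uses that single factor. */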
static void vp8cx_invert_quant(short *quant, short *shift, short d)
{
    unsigned t = d;
    int l;
    for (l = 0; t > 1; l++)
        t >>= 1;
    t = 1 + (1 << (16 + l)) / d;
    *quant = (short)(t - (1 << 16));
    *shift = l;
}
void vp8cx_init_quantizer(VP8_COMP *cpi)
{
    int i;
    int r, c;
    int quant_val;
    int Q;

    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};

    for (Q = 0; Q < QINDEX_RANGE; Q++)
    {
        // dc values
        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
        vp8cx_invert_quant(cpi->Y1quant[Q][0] + 0,
                           cpi->Y1quant_shift[Q][0] + 0, quant_val);
        cpi->Y1zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->Y1round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.Y1dequant[Q][0][0] = quant_val;
        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
        vp8cx_invert_quant(cpi->Y2quant[Q][0] + 0,
                           cpi->Y2quant_shift[Q][0] + 0, quant_val);
        cpi->Y2zbin[Q][0][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
        cpi->Y2round[Q][0][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
        cpi->common.Y2dequant[Q][0][0] = quant_val;
        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
        vp8cx_invert_quant(cpi->UVquant[Q][0] + 0,
                           cpi->UVquant_shift[Q][0] + 0, quant_val);
        cpi->UVzbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->UVround[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.UVdequant[Q][0][0] = quant_val;
        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        // all the ac values
        for (i = 1; i < 16; i++)
        {
            int rc = vp8_default_zig_zag1d[i];
            r = rc >> 2;
            c = rc & 3;

            quant_val = vp8_ac_yquant(Q);
            vp8cx_invert_quant(cpi->Y1quant[Q][r] + c,
                               cpi->Y1quant_shift[Q][r] + c, quant_val);
            cpi->Y1zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->Y1round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.Y1dequant[Q][r][c] = quant_val;
            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
            vp8cx_invert_quant(cpi->Y2quant[Q][r] + c,
                               cpi->Y2quant_shift[Q][r] + c, quant_val);
            cpi->Y2zbin[Q][r][c] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
            cpi->Y2round[Q][r][c] = (qrounding_factors_y2[Q] * quant_val) >> 7;
            cpi->common.Y2dequant[Q][r][c] = quant_val;
            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
            vp8cx_invert_quant(cpi->UVquant[Q][r] + c,
                               cpi->UVquant_shift[Q][r] + c, quant_val);
            cpi->UVzbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->UVround[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.UVdequant[Q][r][c] = quant_val;
            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
        }
    }
}
#else
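/* Fallback (non-EXACT_QUANT) variant: identical table setup, but the
   quantizer is stored as a plain (1 << 16) / Q factor with no shift. */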
void vp8cx_init_quantizer(VP8_COMP *cpi)
{
    int i;
    int r, c;
    int quant_val;
    int Q;

    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};

    for (Q = 0; Q < QINDEX_RANGE; Q++)
    {
        // dc values
        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
        cpi->Y1quant[Q][0][0] = (1 << 16) / quant_val;
        cpi->Y1zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->Y1round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.Y1dequant[Q][0][0] = quant_val;
        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
        cpi->Y2quant[Q][0][0] = (1 << 16) / quant_val;
        cpi->Y2zbin[Q][0][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
        cpi->Y2round[Q][0][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
        cpi->common.Y2dequant[Q][0][0] = quant_val;
        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
        cpi->UVquant[Q][0][0] = (1 << 16) / quant_val;
        cpi->UVzbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->UVround[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.UVdequant[Q][0][0] = quant_val;
        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        // all the ac values
        for (i = 1; i < 16; i++)
        {
            int rc = vp8_default_zig_zag1d[i];
            r = rc >> 2;
            c = rc & 3;

            quant_val = vp8_ac_yquant(Q);
            cpi->Y1quant[Q][r][c] = (1 << 16) / quant_val;
            cpi->Y1zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->Y1round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.Y1dequant[Q][r][c] = quant_val;
            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
            cpi->Y2quant[Q][r][c] = (1 << 16) / quant_val;
            cpi->Y2zbin[Q][r][c] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
            cpi->Y2round[Q][r][c] = (qrounding_factors_y2[Q] * quant_val) >> 7;
            cpi->common.Y2dequant[Q][r][c] = quant_val;
            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
            cpi->UVquant[Q][r][c] = (1 << 16) / quant_val;
            cpi->UVzbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->UVround[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.UVdequant[Q][r][c] = quant_val;
            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
        }
    }
}
#endif
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
    int i;
    int QIndex;
    MACROBLOCKD *xd = &x->e_mbd;
    int zbin_extra;

    // Select the baseline MB Q index.
    if (xd->segmentation_enabled)
    {
        if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
            QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
        else
        {
            QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
            QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;    // Clamp to valid range
        }
    }
    else
        QIndex = cpi->common.base_qindex;
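
    /* Point this macroblock's block structures at the quantizer tables for
       QIndex. zbin_extra widens the zero bin by a fraction of the AC dequant
       value, driven by the encoder's zbin_over_quant and zbin_mode_boost state
       (the latter is set in vp8cx_encode_inter_macroblock()). */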
    zbin_extra = (cpi->common.Y1dequant[QIndex][0][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;

    for (i = 0; i < 16; i++)
    {
        x->block[i].quant = cpi->Y1quant[QIndex];
        x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
        x->block[i].zbin = cpi->Y1zbin[QIndex];
        x->block[i].round = cpi->Y1round[QIndex];
        x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
        x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
        x->block[i].zbin_extra = (short)zbin_extra;
    }

    zbin_extra = (cpi->common.UVdequant[QIndex][0][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;

    for (i = 16; i < 24; i++)
    {
        x->block[i].quant = cpi->UVquant[QIndex];
        x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
        x->block[i].zbin = cpi->UVzbin[QIndex];
        x->block[i].round = cpi->UVround[QIndex];
        x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
        x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
        x->block[i].zbin_extra = (short)zbin_extra;
    }

    zbin_extra = (cpi->common.Y2dequant[QIndex][0][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
    x->block[24].quant = cpi->Y2quant[QIndex];
    x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
    x->block[24].zbin = cpi->Y2zbin[QIndex];
    x->block[24].round = cpi->Y2round[QIndex];
    x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
    x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
    x->block[24].zbin_extra = (short)zbin_extra;
}
void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
{
    // vp8cx_init_quantizer() is first called in vp8_create_compressor(). A check is added
    // here so that vp8cx_init_quantizer() is only called again when the delta-q values are
    // not all zero.
    if (cpi->common.y1dc_delta_q | cpi->common.y2dc_delta_q | cpi->common.uvdc_delta_q | cpi->common.y2ac_delta_q | cpi->common.uvac_delta_q)
        vp8cx_init_quantizer(cpi);

    // MB level quantizer setup
    vp8cx_mb_init_quantizer(cpi, &cpi->mb);
}
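
/* Encode one row of macroblocks: for each MB this sets up the distance-to-edge
   and motion-vector limits, applies any segment-level quantizer change, then
   encodes the MB as intra (key frames) or inter, and finally updates the
   cyclic-refresh and segment bookkeeping before stepping to the next column. */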
void encode_mb_row(VP8_COMP *cpi,
                   VP8_COMMON *cm,
                   int mb_row,
                   MACROBLOCK *x,
                   MACROBLOCKD *xd,
                   TOKENEXTRA **tp,
                   int *segment_counts,
                   int *totalrate)
int recon_yoffset, recon_uvoffset;
int ref_fb_idx = cm->lst_fb_idx;
int dst_fb_idx = cm->new_fb_idx;
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
int seg_map_index = (mb_row * cpi->common.mb_cols);
// reset above block coeffs
xd->above_context = cm->above_context;
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8);
cpi->tplist[mb_row].start = *tp;
//printf("Main mb_row = %d\n", mb_row);
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
// Distance of MB to the various image edges.
// These are specified in 1/8th pel units as they are always compared to values in 1/8th pel units.
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
// Set up limit values for motion vectors, used to prevent them from extending outside the UMV borders
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
xd->left_available = (mb_col != 0);
// Is segmentation enabled
// MB level adjustment to quantizer
if (xd->segmentation_enabled)
// Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
xd->mode_info_context->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
x->active_ptr = cpi->active_map + seg_map_index + mb_col;
if (cm->frame_type == KEY_FRAME)
*totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
y_modes[xd->mbmi.mode] ++;
*totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset);
inter_y_modes[xd->mbmi.mode] ++;
if (xd->mbmi.mode == SPLITMV)
for (b = 0; b < xd->mbmi.partition_count; b++)
inter_b_modes[xd->mbmi.partition_bmi[b].mode] ++;
// Count of last ref frame 0,0 usage
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
// Special case code for cyclic refresh
// If cyclic update enabled then copy xd->mbmi.segment_id (which may have been updated based on mode
// during vp8cx_encode_inter_macroblock()) back into the global segmentation map
if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
// If the block has been refreshed mark it as clean (the magnitude of the negative value influences how long it will be before we consider another refresh);
// else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0);
// else mark it as dirty (1).
if (xd->mode_info_context->mbmi.segment_id)
cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1;
else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1)
cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0;
cpi->cyclic_refresh_map[seg_map_index+mb_col] = 1;
cpi->tplist[mb_row].stop = *tp;
x->gf_active_ptr++; // Increment pointer into gf usage flags structure for next mb
for (i = 0; i < 16; i++)
vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));
// adjust to the next column of macroblocks
x->src.y_buffer += 16;
x->src.u_buffer += 8;
x->src.v_buffer += 8;
// Keep track of segment usage
segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
xd->mode_info_context++;
cpi->current_mb_col_main = mb_col;
// extend the recon for intra prediction
vp8_extend_mb_row(
    &cm->yv12_fb[dst_fb_idx],
    xd->dst.y_buffer + 16,
    xd->dst.u_buffer + 8,
    xd->dst.v_buffer + 8);
// this is to account for the border
xd->mode_info_context++;
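
/* Top-level per-frame encode: choose the sub-pixel filter functions, reset the
   per-frame counters and entropy contexts, initialize the quantizer and RD/ME
   constants, then walk every macroblock row (single-threaded, or fanned out to
   the row-encoding threads when b_multi_threaded is set) and finally derive the
   segment tree and reference-frame probabilities from what was just encoded. */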
void vp8_encode_frame(VP8_COMP *cpi)
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
TOKENEXTRA *tp = cpi->tok;
int segment_counts[MAX_MB_SEGMENTS];
if (cm->frame_type != KEY_FRAME)
if (cm->mcomp_filter_type == SIXTAP)
xd->subpixel_predict = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap16x16);
xd->subpixel_predict = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear16x16);
// For key frames make sure the intra ref frame probability value
// is set to "all intra"
//cpi->prob_intra_coded = 255;
x->gf_active_ptr = (signed char *)cpi->gf_active_flags; // Point to base of GF active flags data structure
x->vector_range = 32;
// Count of MBs using the alternate Q if any
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
vpx_memset(segment_counts, 0, sizeof(segment_counts));
cpi->prediction_error = 0;
cpi->intra_error = 0;
cpi->skip_true_count = 0;
cpi->skip_false_count = 0;
cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
xd->mode_info = cm->mi - 1;
xd->mode_info_context = cm->mi;
xd->mode_info_stride = cm->mode_info_stride;
xd->frame_type = cm->frame_type;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
vp8_zero(cpi->MVcount);
// vp8_zero( Contexts)
vp8_zero(cpi->coef_counts);
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
vp8cx_frame_init_quantizer(cpi);
if (cpi->compressor_speed == 2)
if (cpi->oxcf.cpu_used < 0)
cpi->Speed = -(cpi->oxcf.cpu_used);
vp8_auto_select_speed(cpi);
vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
//vp8_initialize_rd_consts( cpi, vp8_dc_quant(cpi->avg_frame_qindex, cm->y1dc_delta_q) );
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
//vp8cx_initialize_me_consts( cpi, cpi->avg_frame_qindex);
// Copy data over into macro block data structures.
x->src = * cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
// set up the new frame for intra coded blocks
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp8_build_block_offsets(x);
vp8_setup_block_dptrs(&x->e_mbd);
vp8_setup_block_ptrs(x);
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
// Experimental rd code
// 2 Pass - Possibly set rdmult based on last frame distortion + this frame target bits or other metrics
// such as cpi->rate_correction_factor that indicate relative complexity.
/*if ( cpi->pass == 2 && (cpi->last_frame_distortion > 0) && (cpi->target_bits_per_mb > 0) )
//x->rdmult = ((cpi->last_frame_distortion * 256)/cpi->common.MBs)/ cpi->target_bits_per_mb;
x->rdmult = (int)(cpi->RDMULT * cpi->rate_correction_factor);
x->rdmult = cpi->RDMULT; */
//x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
xd->left_context = &cm->left_context;
vp8_zero(cpi->count_mb_ref_frame_usage)
vp8_zero(cpi->ymode_count)
vp8_zero(cpi->uv_mode_count)
vpx_memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
if (!cpi->b_multi_threaded)
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
vp8_zero(cm->left_context)
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
cpi->tok_count = tp - cpi->tok;
#if CONFIG_MULTITHREAD
vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
cpi->current_mb_col_main = -1;
for (i = 0; i < cpi->encoding_thread_count; i++)
if ((mb_row + i + 1) >= cm->mb_rows)
cpi->mb_row_ei[i].mb_row = mb_row + i + 1;
cpi->mb_row_ei[i].tp = cpi->tok + (mb_row + i + 1) * (cm->mb_cols * 16 * 24);
cpi->mb_row_ei[i].current_mb_col = -1;
//SetEvent(cpi->h_event_mbrencoding[i]);
sem_post(&cpi->h_event_mbrencoding[i]);
vp8_zero(cm->left_context)
tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
if (mb_row < cm->mb_rows - 1)
//WaitForSingleObject(cpi->h_event_main, INFINITE);
sem_wait(&cpi->h_event_main);
for (; mb_row < cm->mb_rows; mb_row++)
vp8_zero(cm->left_context)
tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;
if (xd->segmentation_enabled)
if (xd->segmentation_enabled)
for (i = 0; i < cpi->encoding_thread_count; i++)
for (j = 0; j < 4; j++)
segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
for (i = 0; i < cpi->encoding_thread_count; i++)
totalrate += cpi->mb_row_ei[i].totalrate;
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
// Work out the segment probabilities if segmentation is enabled
if (xd->segmentation_enabled)
vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
tot_count = segment_counts[0] + segment_counts[1];
xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
tot_count = segment_counts[2] + segment_counts[3];
xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
// Zero probabilities are not allowed
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
if (xd->mb_segment_tree_probs[i] == 0)
xd->mb_segment_tree_probs[i] = 1;
// 256 rate units to the bit
cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
// Make a note of the percentage of MBs coded intra.
if (cm->frame_type == KEY_FRAME)
cpi->this_frame_percent_intra = 100;
tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
          + cpi->count_mb_ref_frame_usage[LAST_FRAME]
          + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
          + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
int flag[2] = {0, 0};
for (cnt = 0; cnt < MVPcount; cnt++)
if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);
for (cnt = 0; cnt < MVPcount; cnt++)
if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
if (flag[0] || flag[1])
vp8_build_component_cost_table(cpi->mb.mvcost, cpi->mb.mvsadcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
// Adjust the projected reference frame usage probability numbers to reflect
// what we have just seen. This may be useful when we make multiple iterations
// of the recode loop rather than continuing to use values from the previous frame.
if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
if ((rf_intra + rf_inter) > 0)
cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);
if (cpi->prob_intra_coded < 1)
cpi->prob_intra_coded = 1;
if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
if (cpi->prob_last_coded < 1)
cpi->prob_last_coded = 1;
cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
    ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
if (cpi->prob_gf_coded < 1)
cpi->prob_gf_coded = 1;
// Keep a record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
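
/* vp8_setup_block_ptrs() maps each of the 25 BLOCK entries onto the MB's
   source-difference and coefficient buffers: the 16 luma 4x4 blocks cover
   src_diff[0..255], the four U and four V blocks start at offsets 256 and 320,
   and the Y2 (second-order DC) block sits at offset 384; every block also gets
   a 16-entry slice of the coefficient buffer. */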
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
    int r, c;
    int i;

    for (r = 0; r < 4; r++)
        for (c = 0; c < 4; c++)
            x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;

    for (r = 0; r < 2; r++)
        for (c = 0; c < 2; c++)
            x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;

    for (r = 0; r < 2; r++)
        for (c = 0; c < 2; c++)
            x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;

    x->block[24].src_diff = x->src_diff + 384;

    for (i = 0; i < 25; i++)
        x->block[i].coeff = x->coeff + i * 16;
}
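
/* vp8_build_block_offsets() gives every BLOCK a pointer to its source plane
   (base_src), the plane stride, and the byte offset of its 4x4 patch within
   that plane, walking the 16 Y, 4 U and 4 V blocks in raster order. */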
void vp8_build_block_offsets(MACROBLOCK *x)
{
    int block = 0;
    int br, bc;

    vp8_build_block_doffsets(&x->e_mbd);

    // y blocks
    for (br = 0; br < 4; br++)
    {
        for (bc = 0; bc < 4; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->src.y_buffer;
            this_block->src_stride = x->src.y_stride;
            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
            ++block;
        }
    }

    // u blocks
    for (br = 0; br < 2; br++)
    {
        for (bc = 0; bc < 2; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->src.u_buffer;
            this_block->src_stride = x->src.uv_stride;
            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
            ++block;
        }
    }

    // v blocks
    for (br = 0; br < 2; br++)
    {
        for (bc = 0; bc < 2; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->src.v_buffer;
            this_block->src_stride = x->src.uv_stride;
            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
            ++block;
        }
    }
}
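
/* sum_intra_stats() tallies which intra prediction modes were chosen: the
   per-frame ymode/uv_mode counters always, plus the global key-frame vs.
   inter-frame mode histograms when mode statistics are compiled in. */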
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
    const MACROBLOCKD *xd = & x->e_mbd;
    const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
    const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;

#ifdef MODE_STATS
    const int is_key = cpi->common.frame_type == KEY_FRAME;

    ++ (is_key ? uv_modes : inter_uv_modes)[uvm];

    if (m == B_PRED)
    {
        unsigned int *const bct = is_key ? b_modes : inter_b_modes;
        int b = 0;

        do
        {
            ++ bct[xd->block[b].bmi.mode];
        }
        while (++b < 16);
    }
#endif

    ++cpi->ymode_count[m];
    ++cpi->uv_mode_count[uvm];
}
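
/* Intra macroblock encoding. On the RD path the encoder rate-distortion
   searches B_PRED (4x4), the 16x16 modes and the chroma modes, then encodes
   whichever of 4x4 or 16x16 scored better; the non-RD (realtime) path below
   it instead does a cheaper prediction-error search over the 16x16 modes
   plus a fast 4x4 pick. */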
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
int Error4x4, Error16x16, error_uv;
B_PREDICTION_MODE intra_bmodes[16];
int rate4x4, rate16x16, rateuv;
int dist4x4, dist16x16, distuv;
int rate4x4_tokenonly = 0;
int rate16x16_tokenonly = 0;
int rateuv_tokenonly = 0;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
#if !(CONFIG_REALTIME_ONLY)
if (cpi->sf.RD || cpi->compressor_speed != 2)
Error4x4 = vp8_rd_pick_intra4x4mby_modes(cpi, x, &rate4x4, &rate4x4_tokenonly, &dist4x4);
// save the B modes for possible later use
for (i = 0; i < 16; i++)
intra_bmodes[i] = x->e_mbd.block[i].bmi.mode;
Error16x16 = vp8_rd_pick_intra16x16mby_mode(cpi, x, &rate16x16, &rate16x16_tokenonly, &dist16x16);
error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
if (Error4x4 < Error16x16)
x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
// get back the intra block modes
for (i = 0; i < 16; i++)
x->e_mbd.block[i].bmi.mode = intra_bmodes[i];
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error4x4;
// Experimental RD code
cpi->frame_distortion += dist4x4;
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
// Experimental RD code
cpi->prediction_error += Error16x16;
cpi->frame_distortion += dist16x16;
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t);
int rate2, distortion2;
MB_PREDICTION_MODE mode, best_mode = DC_PRED;
Error16x16 = INT_MAX;
for (mode = DC_PRED; mode <= TM_PRED; mode++)
x->e_mbd.mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
if (Error16x16 > this_rd)
Error16x16 = this_rd;
vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate2, &distortion2);
if (distortion2 == INT_MAX)
Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
if (Error4x4 < Error16x16)
x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error4x4;
x->e_mbd.mode_info_context->mbmi.mode = best_mode;
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error16x16;
vp8_pick_intra_mbuv_mode(x);
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t);
extern void vp8_fix_contexts(MACROBLOCKD *x);
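
/* Inter macroblock encoding: pick a mode (full RD search or the faster
   vp8_pick_inter_mode), apply any per-MB quantizer/zbin boost, then either
   encode an intra MB (if the chosen reference is INTRA_FRAME) or build the
   inter prediction from the selected reference buffer, accumulate the MV
   statistics, encode or skip the residual, and tokenize the result. */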
int vp8cx_encode_inter_macroblock
(
    VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
    int recon_yoffset, int recon_uvoffset
)
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error = 0;
if (xd->segmentation_enabled)
x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
x->encode_breakout = cpi->oxcf.encode_breakout;
#if !(CONFIG_REALTIME_ONLY)
inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
inter_error = vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
cpi->prediction_error += inter_error;
cpi->intra_error += intra_error;
// Experimental RD code
cpi->frame_distortion += distortion;
cpi->last_mb_distortion = distortion;
// MB level adjustment to quantizer setup
if (xd->segmentation_enabled || cpi->zbin_mode_boost_enabled)
// If cyclic update enabled
if (cpi->cyclic_refresh_mode_enabled)
// Clear segment_id back to 0 if not coded (last frame 0,0)
if ((xd->mode_info_context->mbmi.segment_id == 1) &&
    ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
xd->mode_info_context->mbmi.segment_id = 0;
// Experimental code. Special case for gf and arf zeromv modes: increase zbin size to suppress noise
if (cpi->zbin_mode_boost_enabled)
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME))
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
cpi->zbin_mode_boost = 0;
vp8cx_mb_init_quantizer(cpi, x);
cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
if (xd->mode_info_context->mbmi.mode == B_PRED)
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
sum_intra_stats(cpi, x);
vp8_find_near_mvs(xd, xd->mode_info_context,
                  &nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
vp8_build_uvmvs(xd, cpi->common.full_pixel);
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mode_info_context->mbmi.mode == SPLITMV)
for (i = 0; i < 16; i++)
if (xd->block[i].bmi.mode == NEW4X4)
cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
else if (xd->mode_info_context->mbmi.mode == NEWMV)
cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
if (!x->skip && !x->e_mbd.mode_info_context->mbmi.force_no_skip)
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
vp8_stuff_inter16x16(x);
vp8_tokenize_mb(cpi, xd, t);
if (cpi->common.mb_no_coeff_skip)
if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
xd->mode_info_context->mbmi.dc_diff = 0;
xd->mode_info_context->mbmi.dc_diff = 1;
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
cpi->skip_true_count ++;
vp8_fix_contexts(xd);
vp8_stuff_mb(cpi, xd, t);
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
cpi->skip_false_count ++;