/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <limits.h>

#include "vpx_ports/config.h"
#include "entropymode.h"
#include "quant_common.h"
#include "segmentation.h"
#include "setupintrarecon.h"
#include "encodeintra.h"
#include "reconinter.h"
#include "pickinter.h"
#include "findnearmv.h"
#include "reconintra.h"
#include "vpx_ports/vpx_timer.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x)     &cpi->common.rtcd.x
#define IF_RTCD(x)  (x)
#else
#define RTCD(x)     NULL
#define IF_RTCD(x)  NULL
#endif
extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);

extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
                                      MACROBLOCK *x,
                                      MB_ROW_COMP *mbr_ei,
                                      int mb_row,
                                      int count);

void vp8_build_block_offsets(MACROBLOCK *x);
void vp8_setup_block_ptrs(MACROBLOCK *x);
int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int y_modes[5] = {0, 0, 0, 0, 0};
unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
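
// The counters above record prediction mode usage per frame type; they are
// accumulated in sum_intra_stats() and encode_mb_row() below under the
// MODE_STATS guards.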
static const int qrounding_factors[129] =
{
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
static const int qzbin_factors[129] =
{
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
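
// The factor tables are fixed-point fractions of the quantizer step size in
// 1/128th units: the rounding offset is 48/128 (0.375) of a step, and the
// zero ("dead") bin is 84/128 (~0.656) of a step, relaxing to 80/128 (0.625)
// from Q index 48 upward.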
static const int qrounding_factors_y2[129] =
{
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
static const int qzbin_factors_y2[129] =
{
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
#define EXACT_QUANT

#ifdef EXACT_QUANT
static void vp8cx_invert_quant(short *quant, short *shift, short d)
{
    unsigned t;
    int l;
    t = d;

    for (l = 0; t > 1; l++)
        t >>= 1;

    t = 1 + (1 << (16 + l)) / d;
    *quant = (short)(t - (1 << 16));
    *shift = l;
}
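
// vp8cx_invert_quant() replaces division by the quantizer step d with a
// multiply/shift pair: l = ceil(log2(d)) and quant = ceil(2^(16+l)/d) - 2^16,
// so that (((x * quant) >> 16) + x) >> shift ~= x / d (the form consumed by
// the exact-quant quantizer). For example, d = 4 gives l = 2,
// t = 1 + (1 << 18) / 4 = 65537, quant = 1, shift = 2, and
// (((x * 1) >> 16) + x) >> 2 reduces to x / 4 over the coefficient range.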
void vp8cx_init_quantizer(VP8_COMP *cpi)
{
    int i;
    int quant_val;
    int Q;

    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};

    for (Q = 0; Q < QINDEX_RANGE; Q++)
    {
        // dc values
        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
        vp8cx_invert_quant(cpi->Y1quant[Q] + 0,
                           cpi->Y1quant_shift[Q] + 0, quant_val);
        cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.Y1dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
        vp8cx_invert_quant(cpi->Y2quant[Q] + 0,
                           cpi->Y2quant_shift[Q] + 0, quant_val);
        cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
        cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
        cpi->common.Y2dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
        vp8cx_invert_quant(cpi->UVquant[Q] + 0,
                           cpi->UVquant_shift[Q] + 0, quant_val);
        cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.UVdequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        // All the AC coefficients (positions 1..15 in zig-zag order) share
        // the same quantizer within a plane.
        for (i = 1; i < 16; i++)
        {
            int rc = vp8_default_zig_zag1d[i];

            quant_val = vp8_ac_yquant(Q);
            vp8cx_invert_quant(cpi->Y1quant[Q] + rc,
                               cpi->Y1quant_shift[Q] + rc, quant_val);
            cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.Y1dequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
            vp8cx_invert_quant(cpi->Y2quant[Q] + rc,
                               cpi->Y2quant_shift[Q] + rc, quant_val);
            cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
            cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
            cpi->common.Y2dequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
            vp8cx_invert_quant(cpi->UVquant[Q] + rc,
                               cpi->UVquant_shift[Q] + rc, quant_val);
            cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.UVdequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
        }
    }
}
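
// Worked example (the values follow directly from the expressions above):
// for a luma DC step of quant_val = 8, Y1zbin = ((84 * 8) + 64) >> 7 = 5 and
// Y1round = (48 * 8) >> 7 = 3, i.e. a coefficient is zeroed unless its
// magnitude clears roughly 0.66 of a step, and survivors are rounded by
// 0.375 of a step. zbin_boost[] additionally widens the zero bin as the run
// of zeros since the last coded coefficient grows.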
#else
void vp8cx_init_quantizer(VP8_COMP *cpi)
{
    int i;
    int quant_val;
    int Q;

    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};

    for (Q = 0; Q < QINDEX_RANGE; Q++)
    {
        // dc values
        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
        cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
        cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.Y1dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
        cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
        cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
        cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
        cpi->common.Y2dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
        cpi->UVquant[Q][0] = (1 << 16) / quant_val;
        cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
        cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
        cpi->common.UVdequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        // All the AC coefficients (positions 1..15 in zig-zag order) share
        // the same quantizer within a plane.
        for (i = 1; i < 16; i++)
        {
            int rc = vp8_default_zig_zag1d[i];

            quant_val = vp8_ac_yquant(Q);
            cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
            cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.Y1dequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
            cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
            cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
            cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
            cpi->common.Y2dequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
            cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
            cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
            cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
            cpi->common.UVdequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
        }
    }
}
#endif
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
    int i;
    int QIndex;
    MACROBLOCKD *xd = &x->e_mbd;
    int zbin_extra;

    // Select the baseline MB Q index.
    if (xd->segmentation_enabled)
    {
        // Abs value
        if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
            QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
        // Delta value
        else
        {
            QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
            QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;    // Clamp to valid range
        }
    }
    else
        QIndex = cpi->common.base_qindex;

    // Y
    zbin_extra = (cpi->common.Y1dequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;

    for (i = 0; i < 16; i++)
    {
        x->block[i].quant = cpi->Y1quant[QIndex];
        x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
        x->block[i].zbin = cpi->Y1zbin[QIndex];
        x->block[i].round = cpi->Y1round[QIndex];
        x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
        x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
        x->block[i].zbin_extra = (short)zbin_extra;
    }

    // UV
    zbin_extra = (cpi->common.UVdequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;

    for (i = 16; i < 24; i++)
    {
        x->block[i].quant = cpi->UVquant[QIndex];
        x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
        x->block[i].zbin = cpi->UVzbin[QIndex];
        x->block[i].round = cpi->UVround[QIndex];
        x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
        x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
        x->block[i].zbin_extra = (short)zbin_extra;
    }

    // Y2
    zbin_extra = (cpi->common.Y2dequant[QIndex][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
    x->block[24].quant = cpi->Y2quant[QIndex];
    x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
    x->block[24].zbin = cpi->Y2zbin[QIndex];
    x->block[24].round = cpi->Y2round[QIndex];
    x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
    x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
    x->block[24].zbin_extra = (short)zbin_extra;
}
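
// Note: zbin_extra widens the zero bin by a fraction of the first AC dequant
// step, scaled by the running zbin_over_quant and the per-mode
// zbin_mode_boost (the Y2 block uses half the over-quant contribution);
// vp8cx_encode_inter_macroblock() below re-runs this function whenever the
// segment or the mode boost changes.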
void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
{
    // Clear Zbin mode boost for default case
    cpi->zbin_mode_boost = 0;

    // vp8cx_init_quantizer() is first called in vp8_create_compressor(), so
    // it only needs to be re-run here when one of the per-plane delta Qs is
    // non-zero.
    if (cpi->common.y1dc_delta_q | cpi->common.y2dc_delta_q | cpi->common.uvdc_delta_q | cpi->common.y2ac_delta_q | cpi->common.uvac_delta_q)
    {
        vp8cx_init_quantizer(cpi);
    }

    // MB level quantizer setup
    vp8cx_mb_init_quantizer(cpi, &cpi->mb);
}
void encode_mb_row(VP8_COMP *cpi,
                   VP8_COMMON *cm,
                   int mb_row,
                   MACROBLOCK *x,
                   MACROBLOCKD *xd,
                   TOKENEXTRA **tp,
                   int *segment_counts,
                   int *totalrate)
{
    int i;
    int mb_col;
    int recon_yoffset, recon_uvoffset;
    int ref_fb_idx = cm->lst_fb_idx;
    int dst_fb_idx = cm->new_fb_idx;
    int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
    int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
    int seg_map_index = (mb_row * cpi->common.mb_cols);

    // reset above block coeffs
    xd->above_context = cm->above_context;

    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);
    recon_uvoffset = (mb_row * recon_uv_stride * 8);

    cpi->tplist[mb_row].start = *tp;
    //printf("Main mb_row = %d\n", mb_row);

    // Distance of MB to the top & bottom edges, specified in 1/8th pel units
    // as they are always compared to values that are in 1/8th pel units.
    xd->mb_to_top_edge = -((mb_row * 16) << 3);
    xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;

    // Set up limit values for vertical motion vector components
    // to prevent them extending beyond the UMV borders.
    x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
    x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
                    + (VP8BORDERINPIXELS - 16);
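
    // The reconstructed frame carries a VP8BORDERINPIXELS-pixel extension on
    // every side, so a 16x16 macroblock may reach at most
    // (VP8BORDERINPIXELS - 16) pixels beyond the visible frame; the same
    // clamp is applied per column to the horizontal components below.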
    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
    {
        // Distance of MB to the left & right edges, specified in
        // 1/8th pel units as they are always compared to values
        // that are in 1/8th pel units.
        xd->mb_to_left_edge = -((mb_col * 16) << 3);
        xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;

        // Set up limit values for horizontal motion vector components
        // to prevent them extending beyond the UMV borders.
        x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
        x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
                        + (VP8BORDERINPIXELS - 16);

        xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
        xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
        xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
        xd->left_available = (mb_col != 0);

        // If segmentation is enabled, apply the MB level adjustment to the
        // quantizer.
        if (xd->segmentation_enabled)
        {
            // Set segment id in xd->mbmi.segment_id for the current MB
            // (with range checking).
            if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
                xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
            else
                xd->mode_info_context->mbmi.segment_id = 0;

            vp8cx_mb_init_quantizer(cpi, x);
        }
        else
            xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default

        x->active_ptr = cpi->active_map + seg_map_index + mb_col;

        if (cm->frame_type == KEY_FRAME)
        {
            *totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
#ifdef MODE_STATS
            y_modes[xd->mbmi.mode] ++;
#endif
        }
        else
        {
            *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset);

#ifdef MODE_STATS
            inter_y_modes[xd->mbmi.mode] ++;

            if (xd->mbmi.mode == SPLITMV)
            {
                int b;

                for (b = 0; b < xd->mbmi.partition_count; b++)
                {
                    inter_b_modes[x->partition->bmi[b].mode] ++;
                }
            }
#endif

            // Count of last ref frame 0,0 usage
            if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
                cpi->inter_zz_count ++;

            // Special case code for cyclic refresh:
            // If cyclic update is enabled then copy xd->mbmi.segment_id
            // (which may have been updated based on mode during
            // vp8cx_encode_inter_macroblock()) back into the global
            // segmentation map.
            if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
            {
                cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;

                // If the block has been refreshed mark it as clean (the
                // magnitude of the -ve influences how long it will be before
                // we consider another refresh);
                // else if it was coded (last frame 0,0) and has not already
                // been refreshed then mark it as a candidate for cleanup
                // next time (marked 0);
                // else mark it as dirty (1).
                if (xd->mode_info_context->mbmi.segment_id)
                    cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1;
                else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
                {
                    if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1)
                        cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0;
                }
                else
                    cpi->cyclic_refresh_map[seg_map_index+mb_col] = 1;
            }
        }

        cpi->tplist[mb_row].stop = *tp;

        x->gf_active_ptr++; // Increment pointer into gf usage flags structure for next mb

        for (i = 0; i < 16; i++)
            vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));

        // adjust to the next column of macroblocks
        x->src.y_buffer += 16;
        x->src.u_buffer += 8;
        x->src.v_buffer += 8;

        recon_yoffset += 16;
        recon_uvoffset += 8;

        // Keep track of segment usage
        segment_counts[xd->mode_info_context->mbmi.segment_id] ++;

        // skip to next mb
        xd->mode_info_context++;
        x->partition_info++;

        xd->above_context++;
        cpi->current_mb_col_main = mb_col;
    }

    // extend the recon for intra prediction
    vp8_extend_mb_row(
        &cm->yv12_fb[dst_fb_idx],
        xd->dst.y_buffer + 16,
        xd->dst.u_buffer + 8,
        xd->dst.v_buffer + 8);

    // this is to account for the border
    xd->mode_info_context++;
    x->partition_info++;
}
void vp8_encode_frame(VP8_COMP *cpi)
{
    int mb_row;
    MACROBLOCK *const x = & cpi->mb;
    VP8_COMMON *const cm = & cpi->common;
    MACROBLOCKD *const xd = & x->e_mbd;

    int i;
    TOKENEXTRA *tp = cpi->tok;
    int segment_counts[MAX_MB_SEGMENTS];
    int totalrate = 0;

    // Functions setup for all frame types so we can use MC in AltRef
    if (cm->mcomp_filter_type == SIXTAP)
    {
        xd->subpixel_predict = SUBPIX_INVOKE(
                                   &cpi->common.rtcd.subpix, sixtap4x4);
        xd->subpixel_predict8x4 = SUBPIX_INVOKE(
                                      &cpi->common.rtcd.subpix, sixtap8x4);
        xd->subpixel_predict8x8 = SUBPIX_INVOKE(
                                      &cpi->common.rtcd.subpix, sixtap8x8);
        xd->subpixel_predict16x16 = SUBPIX_INVOKE(
                                        &cpi->common.rtcd.subpix, sixtap16x16);
    }
    else
    {
        xd->subpixel_predict = SUBPIX_INVOKE(
                                   &cpi->common.rtcd.subpix, bilinear4x4);
        xd->subpixel_predict8x4 = SUBPIX_INVOKE(
                                      &cpi->common.rtcd.subpix, bilinear8x4);
        xd->subpixel_predict8x8 = SUBPIX_INVOKE(
                                      &cpi->common.rtcd.subpix, bilinear8x8);
        xd->subpixel_predict16x16 = SUBPIX_INVOKE(
                                        &cpi->common.rtcd.subpix, bilinear16x16);
    }

    x->gf_active_ptr = (signed char *)cpi->gf_active_flags; // Point to base of GF active flags data structure

    x->vector_range = 32;

    // Count of MBs using the alternate Q if any

    // Reset frame count of inter 0,0 motion vector usage.
    cpi->inter_zz_count = 0;

    vpx_memset(segment_counts, 0, sizeof(segment_counts));
    cpi->prediction_error = 0;
    cpi->intra_error = 0;
    cpi->skip_true_count = 0;
    cpi->skip_false_count = 0;

    cpi->frame_distortion = 0;
    cpi->last_mb_distortion = 0;

    x->partition_info = x->pi;

    xd->mode_info_context = cm->mi;
    xd->mode_info_stride = cm->mode_info_stride;

    xd->frame_type = cm->frame_type;

    xd->frames_since_golden = cm->frames_since_golden;
    xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
    vp8_zero(cpi->MVcount);
    // vp8_zero( Contexts)
    vp8_zero(cpi->coef_counts);

    // reset intra mode contexts
    if (cm->frame_type == KEY_FRAME)
        vp8_init_mbmode_probs(cm);

    vp8cx_frame_init_quantizer(cpi);
    if (cpi->compressor_speed == 2)
    {
        if (cpi->oxcf.cpu_used < 0)
            cpi->Speed = -(cpi->oxcf.cpu_used);
        else
            vp8_auto_select_speed(cpi);
    }

    vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
    //vp8_initialize_rd_consts( cpi, vp8_dc_quant(cpi->avg_frame_qindex, cm->y1dc_delta_q) );
    vp8cx_initialize_me_consts(cpi, cm->base_qindex);
    //vp8cx_initialize_me_consts( cpi, cpi->avg_frame_qindex);
    // Copy data over into macro block data structures.
    x->src = * cpi->Source;
    xd->pre = cm->yv12_fb[cm->lst_fb_idx];
    xd->dst = cm->yv12_fb[cm->new_fb_idx];

    // set up the new frame for intra coded blocks
    vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);

    vp8_build_block_offsets(x);

    vp8_setup_block_dptrs(&x->e_mbd);

    vp8_setup_block_ptrs(x);

    x->rddiv = cpi->RDDIV;
    x->rdmult = cpi->RDMULT;
    // Experimental rd code
    // 2 Pass - Possibly set Rdmult based on last frame distortion + this
    // frame target bits or other metrics such as cpi->rate_correction_factor
    // that indicate relative complexity.
    /*if ( cpi->pass == 2 && (cpi->last_frame_distortion > 0) && (cpi->target_bits_per_mb > 0) )
    {
        //x->rdmult = ((cpi->last_frame_distortion * 256)/cpi->common.MBs)/ cpi->target_bits_per_mb;
        x->rdmult = (int)(cpi->RDMULT * cpi->rate_correction_factor);
    }
    else
        x->rdmult = cpi->RDMULT; */
    //x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));
    xd->mode_info_context->mbmi.mode = DC_PRED;
    xd->mode_info_context->mbmi.uv_mode = DC_PRED;

    xd->left_context = &cm->left_context;

    vp8_zero(cpi->count_mb_ref_frame_usage)
    vp8_zero(cpi->ymode_count)
    vp8_zero(cpi->uv_mode_count)

    x->mvc = cm->fc.mvc;

    vpx_memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);

    {
        struct vpx_usec_timer emr_timer;
        vpx_usec_timer_start(&emr_timer);
        if (!cpi->b_multi_threaded)
        {
            // for each macroblock row in image
            for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
            {
                vp8_zero(cm->left_context)

                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

                // adjust to the next row of mbs
                x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
            }

            cpi->tok_count = tp - cpi->tok;
        }
        else
        {
#if CONFIG_MULTITHREAD
            vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);

            for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
            {
                cpi->current_mb_col_main = -1;

                for (i = 0; i < cpi->encoding_thread_count; i++)
                {
                    if ((mb_row + i + 1) >= cm->mb_rows)
                        break;

                    cpi->mb_row_ei[i].mb_row = mb_row + i + 1;
                    cpi->mb_row_ei[i].tp = cpi->tok + (mb_row + i + 1) * (cm->mb_cols * 16 * 24);
                    cpi->mb_row_ei[i].current_mb_col = -1;
                    //SetEvent(cpi->h_event_mbrencoding[i]);
                    sem_post(&cpi->h_event_mbrencoding[i]);
                }

                vp8_zero(cm->left_context)

                tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);

                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

                // adjust to the next row of mbs
                x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;

                xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
                x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;

                if (mb_row < cm->mb_rows - 1)
                    //WaitForSingleObject(cpi->h_event_main, INFINITE);
                    sem_wait(&cpi->h_event_main);
            }
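
            // Each row writes tokens into its own slab of cpi->tok, spaced
            // cm->mb_cols * 16 * 24 entries apart, so worker rows can be
            // tokenized concurrently and the per-row extents recorded in
            // cpi->tplist re-assembled afterwards. The semaphore calls above
            // replace the commented-out Win32 event calls.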
            for (; mb_row < cm->mb_rows; mb_row ++)
            {
                vp8_zero(cm->left_context)

                tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);

                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

                // adjust to the next row of mbs
                x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
            }
            cpi->tok_count = 0;

            for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
            {
                cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;
            }

            if (xd->segmentation_enabled)
            {
                int j;

                for (i = 0; i < cpi->encoding_thread_count; i++)
                {
                    for (j = 0; j < 4; j++)
                        segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
                }
            }

            for (i = 0; i < cpi->encoding_thread_count; i++)
            {
                totalrate += cpi->mb_row_ei[i].totalrate;
            }
#endif
        }

        vpx_usec_timer_mark(&emr_timer);
        cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
    }
    // Work out the segment probabilities if segmentation is enabled
    if (xd->segmentation_enabled)
    {
        int tot_count;

        // Set to defaults
        vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));

        tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];

        if (tot_count)
        {
            xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;

            tot_count = segment_counts[0] + segment_counts[1];

            if (tot_count > 0)
                xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;

            tot_count = segment_counts[2] + segment_counts[3];

            if (tot_count > 0)
                xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;

            // Zero probabilities not allowed
            for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
            {
                if (xd->mb_segment_tree_probs[i] == 0)
                    xd->mb_segment_tree_probs[i] = 1;
            }
        }
    }
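
    // The three probabilities describe a two-level binary tree over the four
    // segment ids: probs[0] splits {0,1} from {2,3}, probs[1] splits 0 from
    // 1, and probs[2] splits 2 from 3, each as an 8-bit probability. For
    // example, counts of {10, 20, 30, 40} give
    // {30*255/100, 10*255/30, 30*255/70} = {76, 85, 109}.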
    // totalrate is accumulated in 1/256-bit units; >> 8 converts it to a
    // projected frame size in bits.
    cpi->projected_frame_size = totalrate >> 8;
    // Make a note of the percentage MBs coded Intra.
    if (cm->frame_type == KEY_FRAME)
    {
        cpi->this_frame_percent_intra = 100;
    }
    else
    {
        int tot_modes;

        tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
                    + cpi->count_mb_ref_frame_usage[LAST_FRAME]
                    + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
                    + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];

        if (tot_modes)
            cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
    }
    {
        int cnt;
        int flag[2] = {0, 0};

        for (cnt = 0; cnt < MVPcount; cnt++)
        {
            if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
            {
                flag[0] = 1;
                vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);
                break;
            }
        }

        for (cnt = 0; cnt < MVPcount; cnt++)
        {
            if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
            {
                flag[1] = 1;
                vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
                break;
            }
        }

        if (flag[0] || flag[1])
            vp8_build_component_cost_table(cpi->mb.mvcost, cpi->mb.mvsadcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
    }
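
    // Rebuilding the MV cost tables is comparatively expensive, so flag[]
    // restricts the rebuild to the row/column components whose probabilities
    // actually changed since the last frame.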
    // Adjust the projected reference frame usage probability numbers to
    // reflect what we have just seen. This may be useful when we make
    // multiple iterations of the recode loop rather than continuing to use
    // values from the previous frame.
    if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)
    {
        const int *const rfct = cpi->count_mb_ref_frame_usage;
        const int rf_intra = rfct[INTRA_FRAME];
        const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];

        if ((rf_intra + rf_inter) > 0)
        {
            cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);

            if (cpi->prob_intra_coded < 1)
                cpi->prob_intra_coded = 1;

            if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
            {
                cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;

                if (cpi->prob_last_coded < 1)
                    cpi->prob_last_coded = 1;

                cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                                     ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;

                if (cpi->prob_gf_coded < 1)
                    cpi->prob_gf_coded = 1;
            }
        }
    }

    // Keep a record of the total distortion this time around for future use
    cpi->last_frame_distortion = cpi->frame_distortion;
}
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
    int r, c;
    int i;

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
        {
            x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
        }
    }

    for (r = 0; r < 2; r++)
    {
        for (c = 0; c < 2; c++)
        {
            x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
        }
    }

    for (r = 0; r < 2; r++)
    {
        for (c = 0; c < 2; c++)
        {
            x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
        }
    }

    x->block[24].src_diff = x->src_diff + 384;

    for (i = 0; i < 25; i++)
    {
        x->block[i].coeff = x->coeff + i * 16;
    }
}
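
// src_diff layout implied by the offsets above: the 16 Y blocks occupy
// entries 0..255 (a 16x16 plane of residuals), the 4 U blocks 256..319, the
// 4 V blocks 320..383, and the 4x4 Y2 (second-order DC) block 384..399, for
// 400 entries in all; coeff is likewise 25 blocks of 16 coefficients.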
void vp8_build_block_offsets(MACROBLOCK *x)
{
    int block = 0;
    int br, bc;

    vp8_build_block_doffsets(&x->e_mbd);

    // y blocks
    for (br = 0; br < 4; br++)
    {
        for (bc = 0; bc < 4; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->src.y_buffer;
            this_block->src_stride = x->src.y_stride;
            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
            ++block;
        }
    }

    // u blocks
    for (br = 0; br < 2; br++)
    {
        for (bc = 0; bc < 2; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->src.u_buffer;
            this_block->src_stride = x->src.uv_stride;
            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
            ++block;
        }
    }

    // v blocks
    for (br = 0; br < 2; br++)
    {
        for (bc = 0; bc < 2; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->src.v_buffer;
            this_block->src_stride = x->src.uv_stride;
            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
            ++block;
        }
    }
}
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
    const MACROBLOCKD *xd = & x->e_mbd;
    const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
    const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;

#ifdef MODE_STATS
    const int is_key = cpi->common.frame_type == KEY_FRAME;

    ++ (is_key ? uv_modes : inter_uv_modes)[uvm];

    if (m == B_PRED)
    {
        unsigned int *const bct = is_key ? b_modes : inter_b_modes;
        int b;

        for (b = 0; b < 16; b++)
            ++ bct[xd->block[b].bmi.mode];
    }
#endif

    ++cpi->ymode_count[m];
    ++cpi->uv_mode_count[uvm];
}
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
{
    int Error4x4, Error16x16, error_uv;
    B_PREDICTION_MODE intra_bmodes[16];
    int rate4x4, rate16x16, rateuv;
    int dist4x4, dist16x16, distuv;
    int rate = 0;
    int rate4x4_tokenonly = 0;
    int rate16x16_tokenonly = 0;
    int rateuv_tokenonly = 0;
    int i;

    x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;

#if !(CONFIG_REALTIME_ONLY)

    if (cpi->sf.RD || cpi->compressor_speed != 2)
    {
        Error4x4 = vp8_rd_pick_intra4x4mby_modes(cpi, x, &rate4x4, &rate4x4_tokenonly, &dist4x4);

        // save the b modes for possible later use
        for (i = 0; i < 16; i++)
            intra_bmodes[i] = x->e_mbd.block[i].bmi.mode;

        Error16x16 = vp8_rd_pick_intra16x16mby_mode(cpi, x, &rate16x16, &rate16x16_tokenonly, &dist16x16);

        error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);

        vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
        rate += rateuv;

        if (Error4x4 < Error16x16)
        {
            rate += rate4x4;
            x->e_mbd.mode_info_context->mbmi.mode = B_PRED;

            // get back the intra block modes (the 16x16 search overwrote them)
            for (i = 0; i < 16; i++)
                x->e_mbd.block[i].bmi.mode = intra_bmodes[i];

            vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
            cpi->prediction_error += Error4x4;

            // Experimental RD code
            cpi->frame_distortion += dist4x4;
        }
        else
        {
            vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
            rate += rate16x16;

            // Experimental RD code
            cpi->prediction_error += Error16x16;
            cpi->frame_distortion += dist16x16;
        }

        sum_intra_stats(cpi, x);

        vp8_tokenize_mb(cpi, &x->e_mbd, t);
    }
    else
#endif
    {
        int rate2, distortion2;
        MB_PREDICTION_MODE mode, best_mode = DC_PRED;
        int this_rd;
        Error16x16 = INT_MAX;

        for (mode = DC_PRED; mode <= TM_PRED; mode ++)
        {
            x->e_mbd.mode_info_context->mbmi.mode = mode;
            vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
            distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
            rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
            this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);

            if (Error16x16 > this_rd)
            {
                Error16x16 = this_rd;
                best_mode = mode;
            }
        }
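
        // RD_ESTIMATE folds rate and distortion into a single cost (in this
        // code base it expands to approximately
        // ((128 + rate * rdmult) >> 8) + rddiv * distortion), and the loop
        // keeps whichever 16x16 mode minimises it; the 16 passed to
        // get16x16prederror is the stride of the 16x16 predictor buffer.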
        vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate2, &distortion2);

        if (distortion2 == INT_MAX)
            Error4x4 = INT_MAX;
        else
            Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);

        if (Error4x4 < Error16x16)
        {
            x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
            vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
            cpi->prediction_error += Error4x4;
        }
        else
        {
            x->e_mbd.mode_info_context->mbmi.mode = best_mode;
            vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
            cpi->prediction_error += Error16x16;
        }

        vp8_pick_intra_mbuv_mode(x);
        vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
        sum_intra_stats(cpi, x);
        vp8_tokenize_mb(cpi, &x->e_mbd, t);
    }

    return rate;
}
extern void vp8_fix_contexts(MACROBLOCKD *x);

int vp8cx_encode_inter_macroblock
(
    VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
    int recon_yoffset, int recon_uvoffset
)
{
    MACROBLOCKD *const xd = &x->e_mbd;
    int inter_error;
    int intra_error = 0;
    int rate;
    int distortion;

    x->skip = 0;

    if (xd->segmentation_enabled)
        x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
    else
        x->encode_breakout = cpi->oxcf.encode_breakout;
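
    // encode_breakout is the prediction error threshold under which the mode
    // search may take an early exit; segmentation allows a per-segment value
    // so that, for instance, background segments can break out sooner.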
#if !(CONFIG_REALTIME_ONLY)

    if (cpi->sf.RD)
    {
        inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
    }
    else
#endif
    {
        inter_error = vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
    }

    cpi->prediction_error += inter_error;
    cpi->intra_error += intra_error;

    // Experimental RD code
    cpi->frame_distortion += distortion;
    cpi->last_mb_distortion = distortion;
    // MB level adjustment to quantizer setup
    if (xd->segmentation_enabled || cpi->zbin_mode_boost_enabled)
    {
        // If cyclic update enabled
        if (cpi->cyclic_refresh_mode_enabled)
        {
            // Clear segment_id back to 0 if not coded (last frame 0,0)
            if ((xd->mode_info_context->mbmi.segment_id == 1) &&
                ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
            {
                xd->mode_info_context->mbmi.segment_id = 0;
            }
        }

        // Experimental code. Special case for gf and arf zeromv modes:
        // increase zbin size to suppress noise.
        if (cpi->zbin_mode_boost_enabled)
        {
            if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
                cpi->zbin_mode_boost = 0;
            else
            {
                if (xd->mode_info_context->mbmi.mode == ZEROMV)
                {
                    if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
                        cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
                    else
                        cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
                }
                else if (xd->mode_info_context->mbmi.mode == SPLITMV)
                    cpi->zbin_mode_boost = 0;
                else
                    cpi->zbin_mode_boost = MV_ZBIN_BOOST;
            }
        }
        else
            cpi->zbin_mode_boost = 0;

        vp8cx_mb_init_quantizer(cpi, x);
    }
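
    // Note: vp8cx_mb_init_quantizer() is re-run above because the chosen
    // mode can change both segment_id and zbin_mode_boost, and zbin_extra
    // depends on both.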
    cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;

    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);

        if (xd->mode_info_context->mbmi.mode == B_PRED)
        {
            vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
        }
        else
        {
            vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
        }

        sum_intra_stats(cpi, x);
    }
    else
    {
        MV best_ref_mv;
        MV nearest, nearby;
        int mdcounts[4];
        int ref_fb_idx;

        vp8_find_near_mvs(xd, xd->mode_info_context,
                          &nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);

        vp8_build_uvmvs(xd, cpi->common.full_pixel);

        if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
            ref_fb_idx = cpi->common.lst_fb_idx;
        else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
            ref_fb_idx = cpi->common.gld_fb_idx;
        else
            ref_fb_idx = cpi->common.alt_fb_idx;

        xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
        xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
        xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
        if (xd->mode_info_context->mbmi.mode == SPLITMV)
        {
            int i;

            for (i = 0; i < 16; i++)
            {
                if (xd->block[i].bmi.mode == NEW4X4)
                {
                    cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
                    cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
                }
            }
        }
        else if (xd->mode_info_context->mbmi.mode == NEWMV)
        {
            cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
            cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
        }
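
        // MVcount histograms the row (index 0) and column (index 1) motion
        // vector residuals at half their stored precision (>> 1), biased by
        // mv_max so the array index is non-negative; the counts later drive
        // the MV probability updates written to the bitstream.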
        if (!x->skip && !x->e_mbd.mode_info_context->mbmi.force_no_skip)
        {
            vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);

            // Clear mb_skip_coeff if mb_no_coeff_skip is not set
            if (!cpi->common.mb_no_coeff_skip)
                xd->mode_info_context->mbmi.mb_skip_coeff = 0;
        }
        else
            vp8_stuff_inter16x16(x);
    }
    if (!x->skip)
        vp8_tokenize_mb(cpi, xd, t);
    else
    {
        if (cpi->common.mb_no_coeff_skip)
        {
            if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
                xd->mode_info_context->mbmi.dc_diff = 0;
            else
                xd->mode_info_context->mbmi.dc_diff = 1;

            xd->mode_info_context->mbmi.mb_skip_coeff = 1;
            cpi->skip_true_count ++;
            vp8_fix_contexts(xd);
        }
        else
        {
            vp8_stuff_mb(cpi, xd, t);
            xd->mode_info_context->mbmi.mb_skip_coeff = 0;
            cpi->skip_false_count ++;
        }
    }

    return rate;
}