2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
12 #include "vpx_ports/config.h"
18 #include "entropymode.h"
19 #include "quant_common.h"
20 #include "segmentation.h"
21 #include "setupintrarecon.h"
22 #include "encodeintra.h"
23 #include "reconinter.h"
25 #include "pickinter.h"
26 #include "findnearmv.h"
27 #include "reconintra.h"
31 #include "vpx_ports/vpx_timer.h"
// Runtime CPU detection plumbing: when enabled, RTCD(x)/IF_RTCD(x) route
// through the per-instance function-pointer tables in cpi->common.rtcd;
// otherwise IF_RTCD(x) collapses to NULL so calls bind statically.
// NOTE(review): the #else arm defining RTCD(x) for the static case is not
// visible in this chunk — confirm against the full file.
33 #if CONFIG_RUNTIME_CPU_DETECT
34 #define RTCD(x) &cpi->common.rtcd.x
35 #define IF_RTCD(x) (x)
38 #define IF_RTCD(x) NULL
// Forward declarations for helpers defined elsewhere in the encoder
// (externs) and for the functions defined later in this file.
40 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
42 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
43 extern void vp8_auto_select_speed(VP8_COMP *cpi);
44 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
49 void vp8_build_block_offsets(MACROBLOCK *x);
50 void vp8_setup_block_ptrs(MACROBLOCK *x);
51 int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
52 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
// Global histograms of prediction-mode usage, bumped per macroblock in
// encode_mb_row() (y_modes / inter_y_modes / inter_b_modes) and in
// sum_intra_stats() (uv_modes / inter_uv_modes / b_modes).
// The inter_* arrays count delta frames; the others count key frames.
55 unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
56 unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
57 unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
58 unsigned int y_modes[5] = {0, 0, 0, 0, 0};
59 unsigned int uv_modes[4] = {0, 0, 0, 0};
60 unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Per-Q-index rounding factor for the Y1/UV quantizers, in 1/128 units
// (applied as (factor * quant_val) >> 7 in vp8cx_init_quantizer).
// 129 entries, one per quantizer index.
63 static const int qrounding_factors[129] =
65 56, 56, 56, 56, 48, 48, 56, 56,
66 48, 48, 48, 48, 48, 48, 48, 48,
67 48, 48, 48, 48, 48, 48, 48, 48,
68 48, 48, 48, 48, 48, 48, 48, 48,
69 48, 48, 48, 48, 48, 48, 48, 48,
70 48, 48, 48, 48, 48, 48, 48, 48,
71 48, 48, 48, 48, 48, 48, 48, 48,
72 48, 48, 48, 48, 48, 48, 48, 48,
73 48, 48, 48, 48, 48, 48, 48, 48,
74 48, 48, 48, 48, 48, 48, 48, 48,
75 48, 48, 48, 48, 48, 48, 48, 48,
76 48, 48, 48, 48, 48, 48, 48, 48,
77 48, 48, 48, 48, 48, 48, 48, 48,
78 48, 48, 48, 48, 48, 48, 48, 48,
79 48, 48, 48, 48, 48, 48, 48, 48,
80 48, 48, 48, 48, 48, 48, 48, 48,
// Per-Q-index zero-bin factor for the Y1/UV quantizers, in 1/128 units
// (applied as ((factor * quant_val) + 64) >> 7 in vp8cx_init_quantizer).
84 static const int qzbin_factors[129] =
86 72, 72, 72, 72, 80, 80, 72, 72,
87 80, 80, 80, 80, 80, 80, 80, 80,
88 80, 80, 80, 80, 80, 80, 80, 80,
89 80, 80, 80, 80, 80, 80, 80, 80,
90 80, 80, 80, 80, 80, 80, 80, 80,
91 80, 80, 80, 80, 80, 80, 80, 80,
92 80, 80, 80, 80, 80, 80, 80, 80,
93 80, 80, 80, 80, 80, 80, 80, 80,
94 80, 80, 80, 80, 80, 80, 80, 80,
95 80, 80, 80, 80, 80, 80, 80, 80,
96 80, 80, 80, 80, 80, 80, 80, 80,
97 80, 80, 80, 80, 80, 80, 80, 80,
98 80, 80, 80, 80, 80, 80, 80, 80,
99 80, 80, 80, 80, 80, 80, 80, 80,
100 80, 80, 80, 80, 80, 80, 80, 80,
101 80, 80, 80, 80, 80, 80, 80, 80,
// Per-Q-index rounding factor for the second-order (Y2) quantizer.
// Currently identical to qrounding_factors; kept separate so the Y2
// plane can be tuned independently.
105 static const int qrounding_factors_y2[129] =
107 56, 56, 56, 56, 48, 48, 56, 56,
108 48, 48, 48, 48, 48, 48, 48, 48,
109 48, 48, 48, 48, 48, 48, 48, 48,
110 48, 48, 48, 48, 48, 48, 48, 48,
111 48, 48, 48, 48, 48, 48, 48, 48,
112 48, 48, 48, 48, 48, 48, 48, 48,
113 48, 48, 48, 48, 48, 48, 48, 48,
114 48, 48, 48, 48, 48, 48, 48, 48,
115 48, 48, 48, 48, 48, 48, 48, 48,
116 48, 48, 48, 48, 48, 48, 48, 48,
117 48, 48, 48, 48, 48, 48, 48, 48,
118 48, 48, 48, 48, 48, 48, 48, 48,
119 48, 48, 48, 48, 48, 48, 48, 48,
120 48, 48, 48, 48, 48, 48, 48, 48,
121 48, 48, 48, 48, 48, 48, 48, 48,
122 48, 48, 48, 48, 48, 48, 48, 48,
// Per-Q-index zero-bin factor for the second-order (Y2) quantizer.
// Currently identical to qzbin_factors; kept separate for independent tuning.
126 static const int qzbin_factors_y2[129] =
128 72, 72, 72, 72, 80, 80, 72, 72,
129 80, 80, 80, 80, 80, 80, 80, 80,
130 80, 80, 80, 80, 80, 80, 80, 80,
131 80, 80, 80, 80, 80, 80, 80, 80,
132 80, 80, 80, 80, 80, 80, 80, 80,
133 80, 80, 80, 80, 80, 80, 80, 80,
134 80, 80, 80, 80, 80, 80, 80, 80,
135 80, 80, 80, 80, 80, 80, 80, 80,
136 80, 80, 80, 80, 80, 80, 80, 80,
137 80, 80, 80, 80, 80, 80, 80, 80,
138 80, 80, 80, 80, 80, 80, 80, 80,
139 80, 80, 80, 80, 80, 80, 80, 80,
140 80, 80, 80, 80, 80, 80, 80, 80,
141 80, 80, 80, 80, 80, 80, 80, 80,
142 80, 80, 80, 80, 80, 80, 80, 80,
143 80, 80, 80, 80, 80, 80, 80, 80,
147 //#define EXACT_QUANT
// Build a fixed-point multiplicative inverse of the dequant value d so the
// quantizer can use a multiply-and-shift instead of a per-coefficient divide:
// the loop finds l such that the reciprocal fits, then *quant holds the
// 16-bit fractional multiplier (bias of 1<<16 removed).
// NOTE(review): the initialisation of t and the final *shift store are not
// visible in this chunk — confirm against the full source before editing.
149 static void vp8cx_invert_quant(short *quant, short *shift, short d)
154 for(l = 0; t > 1; l++)
156 t = 1 + (1<<(16+l))/d;
157 *quant = (short)(t - (1<<16));
// EXACT_QUANT variant: for every quantizer index Q, precompute the
// quantizer multiplier/shift (via vp8cx_invert_quant), zero-bin, rounding,
// dequant and zero-run zbin-boost tables for the Y1, Y2 and UV planes.
// Coefficient 0 (DC) is handled first, then the 15 AC coefficients in
// zig-zag order.
161 void vp8cx_init_quantizer(VP8_COMP *cpi)
168 int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
170 for (Q = 0; Q < QINDEX_RANGE; Q++)
173 quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
174 vp8cx_invert_quant(cpi->Y1quant[Q][0] + 0,
175 cpi->Y1quant_shift[Q][0] + 0, quant_val);
176 cpi->Y1zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
177 cpi->Y1round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
178 cpi->common.Y1dequant[Q][0][0] = quant_val;
179 cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
181 quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
182 vp8cx_invert_quant(cpi->Y2quant[Q][0] + 0,
183 cpi->Y2quant_shift[Q][0] + 0, quant_val);
184 cpi->Y2zbin[Q][0][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
185 cpi->Y2round[Q][0][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
186 cpi->common.Y2dequant[Q][0][0] = quant_val;
187 cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
189 quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
190 vp8cx_invert_quant(cpi->UVquant[Q][0] + 0,
191 cpi->UVquant_shift[Q][0] + 0, quant_val);
// NOTE(review): stray second ';' below is a harmless empty statement.
192 cpi->UVzbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
193 cpi->UVround[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
194 cpi->common.UVdequant[Q][0][0] = quant_val;
195 cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
// Now fill in the 15 AC coefficients, addressed in zig-zag scan order.
197 // all the ac values = ;
198 for (i = 1; i < 16; i++)
200 int rc = vp8_default_zig_zag1d[i];
204 quant_val = vp8_ac_yquant(Q);
205 vp8cx_invert_quant(cpi->Y1quant[Q][r] + c,
206 cpi->Y1quant_shift[Q][r] + c, quant_val);
207 cpi->Y1zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
208 cpi->Y1round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
209 cpi->common.Y1dequant[Q][r][c] = quant_val;
210 cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
212 quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
213 vp8cx_invert_quant(cpi->Y2quant[Q][r] + c,
214 cpi->Y2quant_shift[Q][r] + c, quant_val);
215 cpi->Y2zbin[Q][r][c] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
216 cpi->Y2round[Q][r][c] = (qrounding_factors_y2[Q] * quant_val) >> 7;
217 cpi->common.Y2dequant[Q][r][c] = quant_val;
218 cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
220 quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
221 vp8cx_invert_quant(cpi->UVquant[Q][r] + c,
222 cpi->UVquant_shift[Q][r] + c, quant_val);
223 cpi->UVzbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
224 cpi->UVround[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
225 cpi->common.UVdequant[Q][r][c] = quant_val;
226 cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
// Non-EXACT_QUANT variant: same table setup as above, but the quantizer is
// a simple (1<<16)/quant_val reciprocal with no separate shift table.
231 void vp8cx_init_quantizer(VP8_COMP *cpi)
238 int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
240 for (Q = 0; Q < QINDEX_RANGE; Q++)
243 quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
244 cpi->Y1quant[Q][0][0] = (1 << 16) / quant_val;
245 cpi->Y1zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
246 cpi->Y1round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
247 cpi->common.Y1dequant[Q][0][0] = quant_val;
248 cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
250 quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
251 cpi->Y2quant[Q][0][0] = (1 << 16) / quant_val;
252 cpi->Y2zbin[Q][0][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
253 cpi->Y2round[Q][0][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
254 cpi->common.Y2dequant[Q][0][0] = quant_val;
255 cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
257 quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
258 cpi->UVquant[Q][0][0] = (1 << 16) / quant_val;
// NOTE(review): stray second ';' below is a harmless empty statement.
259 cpi->UVzbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
260 cpi->UVround[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
261 cpi->common.UVdequant[Q][0][0] = quant_val;
262 cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
// Now fill in the 15 AC coefficients, addressed in zig-zag scan order.
264 // all the ac values = ;
265 for (i = 1; i < 16; i++)
267 int rc = vp8_default_zig_zag1d[i];
271 quant_val = vp8_ac_yquant(Q);
272 cpi->Y1quant[Q][r][c] = (1 << 16) / quant_val;
273 cpi->Y1zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
274 cpi->Y1round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
275 cpi->common.Y1dequant[Q][r][c] = quant_val;
276 cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
278 quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
279 cpi->Y2quant[Q][r][c] = (1 << 16) / quant_val;
280 cpi->Y2zbin[Q][r][c] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
281 cpi->Y2round[Q][r][c] = (qrounding_factors_y2[Q] * quant_val) >> 7;
282 cpi->common.Y2dequant[Q][r][c] = quant_val;
283 cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
285 quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
286 cpi->UVquant[Q][r][c] = (1 << 16) / quant_val;
287 cpi->UVzbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
288 cpi->UVround[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
289 cpi->common.UVdequant[Q][r][c] = quant_val;
290 cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
// Point one macroblock's 25 block descriptors (16 Y, 8 UV, 1 Y2) at the
// precomputed quantizer tables for its effective Q index, and compute the
// per-plane zbin_extra adjustment from zbin_over_quant/zbin_mode_boost.
// The Q index comes from the segment data when segmentation is enabled
// (absolute or delta, clamped to [0, MAXQ]), else from base_qindex.
295 void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
299 MACROBLOCKD *xd = &x->e_mbd;
302 // Select the baseline MB Q index.
303 if (xd->segmentation_enabled)
// (mb_segement_abs_delta is the field's historical spelling — keep as-is.)
306 if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
308 QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
312 QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
313 QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
317 QIndex = cpi->common.base_qindex;
// Y (first order): blocks 0..15 share the Y1 tables.
320 zbin_extra = (cpi->common.Y1dequant[QIndex][0][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
322 for (i = 0; i < 16; i++)
324 x->block[i].quant = cpi->Y1quant[QIndex];
325 x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
326 x->block[i].zbin = cpi->Y1zbin[QIndex];
327 x->block[i].round = cpi->Y1round[QIndex];
328 x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
329 x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
330 x->block[i].zbin_extra = (short)zbin_extra;
// Chroma: blocks 16..23 share the UV tables.
334 zbin_extra = (cpi->common.UVdequant[QIndex][0][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
336 for (i = 16; i < 24; i++)
338 x->block[i].quant = cpi->UVquant[QIndex];
339 x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
340 x->block[i].zbin = cpi->UVzbin[QIndex];
341 x->block[i].round = cpi->UVround[QIndex];
342 x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
343 x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
344 x->block[i].zbin_extra = (short)zbin_extra;
// Y2 (second order): block 24; note zbin_over_quant is halved here.
348 zbin_extra = (cpi->common.Y2dequant[QIndex][0][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
349 x->block[24].quant = cpi->Y2quant[QIndex];
350 x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
351 x->block[24].zbin = cpi->Y2zbin[QIndex];
352 x->block[24].round = cpi->Y2round[QIndex];
353 x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
354 x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
355 x->block[24].zbin_extra = (short)zbin_extra;
// Per-frame quantizer setup: rebuild the full quantizer tables only when
// any delta-Q value is non-zero, then do MB-level setup for the main MB.
358 void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
360 // vp8cx_init_quantizer() is first called in vp8_create_compressor(). A check is added here so that vp8cx_init_quantizer() is only called
361 // when these values are not all zero.
362 if (cpi->common.y1dc_delta_q | cpi->common.y2dc_delta_q | cpi->common.uvdc_delta_q | cpi->common.y2ac_delta_q | cpi->common.uvac_delta_q)
364 vp8cx_init_quantizer(cpi);
367 // MB level quantizer setup
368 vp8cx_mb_init_quantizer(cpi, &cpi->mb);
// Encode one row of macroblocks: for each MB set up edge distances and MV
// limits, apply segmentation Q, encode intra or inter, maintain the cyclic
// refresh and segment maps, and advance source/recon pointers. Tokens for
// the row are recorded via *tp and bracketed by tplist[mb_row].start/stop.
374 void encode_mb_row(VP8_COMP *cpi,
384 int recon_yoffset, recon_uvoffset;
386 int ref_fb_idx = cm->lst_fb_idx;
387 int dst_fb_idx = cm->new_fb_idx;
388 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
389 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
390 int seg_map_index = (mb_row * cpi->common.mb_cols);
393 // reset above block coeffs
394 xd->above_context = cm->above_context;
396 xd->up_available = (mb_row != 0);
397 recon_yoffset = (mb_row * recon_y_stride * 16);
398 recon_uvoffset = (mb_row * recon_uv_stride * 8);
400 cpi->tplist[mb_row].start = *tp;
401 //printf("Main mb_row = %d\n", mb_row);
403 // for each macroblock col in image
404 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
406 // Distance of Mb to the various image edges.
407 // These specified to 8th pel as they are always compared to values that are in 1/8th pel units
408 xd->mb_to_left_edge = -((mb_col * 16) << 3);
409 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
410 xd->mb_to_top_edge = -((mb_row * 16) << 3);
411 xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
413 // Set up limit values for motion vectors used to prevent them extending outside the UMV borders
414 x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
415 x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
416 x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
417 x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
419 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
420 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
421 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
422 xd->left_available = (mb_col != 0);
424 // Is segmentation enabled
425 // MB level adjustment to quantizer
426 if (xd->segmentation_enabled)
428 // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
429 if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
430 xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
432 xd->mode_info_context->mbmi.segment_id = 0;
434 vp8cx_mb_init_quantizer(cpi, x);
437 xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
439 x->active_ptr = cpi->active_map + seg_map_index + mb_col;
441 if (cm->frame_type == KEY_FRAME)
443 *totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
445 y_modes[xd->mbmi.mode] ++;
450 *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset);
453 inter_y_modes[xd->mbmi.mode] ++;
455 if (xd->mbmi.mode == SPLITMV)
459 for (b = 0; b < xd->mbmi.partition_count; b++)
461 inter_b_modes[x->partition->bmi[b].mode] ++;
467 // Count of last ref frame 0,0 usage
468 if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
469 cpi->inter_zz_count ++;
471 // Special case code for cyclic refresh
472 // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
473 // during vp8cx_encode_inter_macroblock()) back into the global segmentation map
474 if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
476 cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
478 // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
479 // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
480 // else mark it as dirty (1).
481 if (xd->mode_info_context->mbmi.segment_id)
482 cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1;
483 else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
485 if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1)
486 cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0;
489 cpi->cyclic_refresh_map[seg_map_index+mb_col] = 1;
494 cpi->tplist[mb_row].stop = *tp;
496 x->gf_active_ptr++; // Increment pointer into gf usage flags structure for next mb
// Copy the 16 sub-block modes/MVs into the frame-level mode info.
498 for (i = 0; i < 16; i++)
499 vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));
501 // adjust to the next column of macroblocks
502 x->src.y_buffer += 16;
503 x->src.u_buffer += 8;
504 x->src.v_buffer += 8;
509 // Keep track of segment usage
510 segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
513 xd->mode_info_context++;
// Publish progress so encoding threads can sync with the main row.
517 cpi->current_mb_col_main = mb_col;
520 //extend the recon for intra prediction
522 &cm->yv12_fb[dst_fb_idx],
523 xd->dst.y_buffer + 16,
524 xd->dst.u_buffer + 8,
525 xd->dst.v_buffer + 8);
527 // this is to account for the border
528 xd->mode_info_context++;
// Top-level per-frame encode loop: select sub-pixel predictors, reset the
// per-frame counters and contexts, initialise quantizer and RD constants,
// run encode_mb_row() over all rows (single- or multi-threaded), then
// derive segment tree probabilities, projected frame size, intra percentage
// and reference-frame coding probabilities from what was just encoded.
536 void vp8_encode_frame(VP8_COMP *cpi)
539 MACROBLOCK *const x = & cpi->mb;
540 VP8_COMMON *const cm = & cpi->common;
541 MACROBLOCKD *const xd = & x->e_mbd;
544 TOKENEXTRA *tp = cpi->tok;
545 int segment_counts[MAX_MB_SEGMENTS];
548 // Functions setup for all frame types so we can use MC in AltRef
549 if (cm->mcomp_filter_type == SIXTAP)
551 xd->subpixel_predict = SUBPIX_INVOKE(
552 &cpi->common.rtcd.subpix, sixtap4x4);
553 xd->subpixel_predict8x4 = SUBPIX_INVOKE(
554 &cpi->common.rtcd.subpix, sixtap8x4);
555 xd->subpixel_predict8x8 = SUBPIX_INVOKE(
556 &cpi->common.rtcd.subpix, sixtap8x8);
557 xd->subpixel_predict16x16 = SUBPIX_INVOKE(
558 &cpi->common.rtcd.subpix, sixtap16x16);
562 xd->subpixel_predict = SUBPIX_INVOKE(
563 &cpi->common.rtcd.subpix, bilinear4x4);
564 xd->subpixel_predict8x4 = SUBPIX_INVOKE(
565 &cpi->common.rtcd.subpix, bilinear8x4);
566 xd->subpixel_predict8x8 = SUBPIX_INVOKE(
567 &cpi->common.rtcd.subpix, bilinear8x8);
568 xd->subpixel_predict16x16 = SUBPIX_INVOKE(
569 &cpi->common.rtcd.subpix, bilinear16x16);
572 x->gf_active_ptr = (signed char *)cpi->gf_active_flags; // Point to base of GF active flags data structure
574 x->vector_range = 32;
576 // Count of MBs using the alternate Q if any
579 // Reset frame count of inter 0,0 motion vector usage.
580 cpi->inter_zz_count = 0;
582 vpx_memset(segment_counts, 0, sizeof(segment_counts));
584 cpi->prediction_error = 0;
585 cpi->intra_error = 0;
586 cpi->skip_true_count = 0;
587 cpi->skip_false_count = 0;
591 cpi->frame_distortion = 0;
592 cpi->last_mb_distortion = 0;
597 x->partition_info = x->pi;
599 xd->mode_info_context = cm->mi;
600 xd->mode_info_stride = cm->mode_info_stride;
602 xd->frame_type = cm->frame_type;
604 xd->frames_since_golden = cm->frames_since_golden;
605 xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
606 vp8_zero(cpi->MVcount);
607 // vp8_zero( Contexts)
608 vp8_zero(cpi->coef_counts);
610 // reset intra mode contexts
611 if (cm->frame_type == KEY_FRAME)
612 vp8_init_mbmode_probs(cm);
615 vp8cx_frame_init_quantizer(cpi);
// Real-time mode: pick encoding speed automatically unless forced by cpu_used.
617 if (cpi->compressor_speed == 2)
619 if (cpi->oxcf.cpu_used < 0)
620 cpi->Speed = -(cpi->oxcf.cpu_used);
622 vp8_auto_select_speed(cpi);
625 vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
626 //vp8_initialize_rd_consts( cpi, vp8_dc_quant(cpi->avg_frame_qindex, cm->y1dc_delta_q) );
627 vp8cx_initialize_me_consts(cpi, cm->base_qindex);
628 //vp8cx_initialize_me_consts( cpi, cpi->avg_frame_qindex);
630 // Copy data over into macro block data structures.
632 x->src = * cpi->Source;
633 xd->pre = cm->yv12_fb[cm->lst_fb_idx];
634 xd->dst = cm->yv12_fb[cm->new_fb_idx];
636 // set up frame new frame for intra coded blocks
638 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
640 vp8_build_block_offsets(x);
642 vp8_setup_block_dptrs(&x->e_mbd);
644 vp8_setup_block_ptrs(x);
646 x->rddiv = cpi->RDDIV;
647 x->rdmult = cpi->RDMULT;
650 // Experimental rd code
651 // 2 Pass - Possibly set Rdmult based on last frame distortion + this frame target bits or other metrics
652 // such as cpi->rate_correction_factor that indicate relative complexity.
653 /*if ( cpi->pass == 2 && (cpi->last_frame_distortion > 0) && (cpi->target_bits_per_mb > 0) )
655 //x->rdmult = ((cpi->last_frame_distortion * 256)/cpi->common.MBs)/ cpi->target_bits_per_mb;
656 x->rdmult = (int)(cpi->RDMULT * cpi->rate_correction_factor);
659 x->rdmult = cpi->RDMULT; */
660 //x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));
663 xd->mode_info_context->mbmi.mode = DC_PRED;
664 xd->mode_info_context->mbmi.uv_mode = DC_PRED;
666 xd->left_context = &cm->left_context;
668 vp8_zero(cpi->count_mb_ref_frame_usage)
669 vp8_zero(cpi->ymode_count)
670 vp8_zero(cpi->uv_mode_count)
674 vpx_memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
677 struct vpx_usec_timer emr_timer;
678 vpx_usec_timer_start(&emr_timer);
// Single-threaded path: encode rows sequentially.
680 if (!cpi->b_multi_threaded)
682 // for each macroblock row in image
683 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
686 vp8_zero(cm->left_context)
688 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
690 // adjust to the next row of mbs
691 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
692 x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
693 x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
696 cpi->tok_count = tp - cpi->tok;
// Multi-threaded path: the main thread encodes every
// (encoding_thread_count + 1)-th row and hands the rows in between to
// worker threads, each with its own token-buffer slice.
701 #if CONFIG_MULTITHREAD
702 vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);
704 for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
707 cpi->current_mb_col_main = -1;
709 for (i = 0; i < cpi->encoding_thread_count; i++)
711 if ((mb_row + i + 1) >= cm->mb_rows)
714 cpi->mb_row_ei[i].mb_row = mb_row + i + 1;
715 cpi->mb_row_ei[i].tp = cpi->tok + (mb_row + i + 1) * (cm->mb_cols * 16 * 24);
716 cpi->mb_row_ei[i].current_mb_col = -1;
717 //SetEvent(cpi->h_event_mbrencoding[i]);
718 sem_post(&cpi->h_event_mbrencoding[i]);
721 vp8_zero(cm->left_context)
723 tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
725 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
727 // adjust to the next row of mbs
728 x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
729 x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
730 x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
732 xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
733 x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
735 if (mb_row < cm->mb_rows - 1)
736 //WaitForSingleObject(cpi->h_event_main, INFINITE);
737 sem_wait(&cpi->h_event_main);
// Mop up any rows left over when mb_rows is not a multiple of the stride.
741 for( ;mb_row<cm->mb_rows; mb_row ++)
743 vp8_zero( cm->left_context)
745 tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
747 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
748 // adjust to the next row of mbs
749 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
750 x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
751 x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
// Sum per-row token counts recorded by encode_mb_row into a frame total.
757 for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
759 cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;
762 if (xd->segmentation_enabled)
767 if (xd->segmentation_enabled)
// Merge per-thread segment counts and rate totals back into the main tallies.
770 for (i = 0; i < cpi->encoding_thread_count; i++)
772 for (j = 0; j < 4; j++)
773 segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
779 for (i = 0; i < cpi->encoding_thread_count; i++)
781 totalrate += cpi->mb_row_ei[i].totalrate;
788 vpx_usec_timer_mark(&emr_timer);
789 cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
794 // Work out the segment probabilities if segmentation is enabled
795 if (xd->segmentation_enabled)
801 vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
803 tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
807 xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
809 tot_count = segment_counts[0] + segment_counts[1];
813 xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
816 tot_count = segment_counts[2] + segment_counts[3];
819 xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
821 // Zero probabilities not allowed
822 for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
824 if (xd->mb_segment_tree_probs[i] == 0)
825 xd->mb_segment_tree_probs[i] = 1;
830 // 256 rate units to the bit
831 cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
833 // Make a note of the percentage MBs coded Intra.
834 if (cm->frame_type == KEY_FRAME)
836 cpi->this_frame_percent_intra = 100;
842 tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
843 + cpi->count_mb_ref_frame_usage[LAST_FRAME]
844 + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
845 + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
848 cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
// Rebuild MV cost tables only for the components whose probabilities
// actually changed since the last frame (flag[0]=row, flag[1]=col).
855 int flag[2] = {0, 0};
857 for (cnt = 0; cnt < MVPcount; cnt++)
859 if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
862 vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);
867 for (cnt = 0; cnt < MVPcount; cnt++)
869 if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
872 vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
877 if (flag[0] || flag[1])
878 vp8_build_component_cost_table(cpi->mb.mvcost, cpi->mb.mvsadcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
882 // Adjust the projected reference frame usage probability numbers to reflect
883 // what we have just seen. This may be useful when we make multiple iterations
884 // of the recode loop rather than continuing to use values from the previous frame.
885 if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)
887 const int *const rfct = cpi->count_mb_ref_frame_usage;
888 const int rf_intra = rfct[INTRA_FRAME];
889 const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
891 if ((rf_intra + rf_inter) > 0)
893 cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);
// Probabilities are 8-bit; clamp away from zero so they stay codeable.
895 if (cpi->prob_intra_coded < 1)
896 cpi->prob_intra_coded = 1;
898 if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
900 cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
902 if (cpi->prob_last_coded < 1)
903 cpi->prob_last_coded = 1;
905 cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
906 ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
908 if (cpi->prob_gf_coded < 1)
909 cpi->prob_gf_coded = 1;
915 // Keep record of the total distortion this time around for future use
916 cpi->last_frame_distortion = cpi->frame_distortion;
// Wire each of the 25 block descriptors to its slice of the MB-sized
// src_diff and coeff buffers: 16 Y 4x4 blocks (offsets 0..255), 8 UV
// blocks (U at 256+, V at 320+), and the Y2 block at 384. Each block
// also gets 16 coefficients out of x->coeff.
920 void vp8_setup_block_ptrs(MACROBLOCK *x)
925 for (r = 0; r < 4; r++)
927 for (c = 0; c < 4; c++)
929 x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
933 for (r = 0; r < 2; r++)
935 for (c = 0; c < 2; c++)
937 x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
942 for (r = 0; r < 2; r++)
944 for (c = 0; c < 2; c++)
946 x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
950 x->block[24].src_diff = x->src_diff + 384;
953 for (i = 0; i < 25; i++)
955 x->block[i].coeff = x->coeff + i * 16;
// Point each block descriptor at its source pixels: base buffer pointer,
// stride, and the 4x4 sub-block offset within the macroblock, for the
// 16 Y blocks, 4 U blocks and 4 V blocks. Destination-side offsets are
// handled by vp8_build_block_doffsets().
959 void vp8_build_block_offsets(MACROBLOCK *x)
964 vp8_build_block_doffsets(&x->e_mbd);
// Y blocks 0..15.
967 for (br = 0; br < 4; br++)
969 for (bc = 0; bc < 4; bc++)
971 BLOCK *this_block = &x->block[block];
972 this_block->base_src = &x->src.y_buffer;
973 this_block->src_stride = x->src.y_stride;
974 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
// U blocks 16..19.
980 for (br = 0; br < 2; br++)
982 for (bc = 0; bc < 2; bc++)
984 BLOCK *this_block = &x->block[block];
985 this_block->base_src = &x->src.u_buffer;
986 this_block->src_stride = x->src.uv_stride;
987 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
// V blocks 20..23.
993 for (br = 0; br < 2; br++)
995 for (bc = 0; bc < 2; bc++)
997 BLOCK *this_block = &x->block[block];
998 this_block->base_src = &x->src.v_buffer;
999 this_block->src_stride = x->src.uv_stride;
1000 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
// Accumulate intra-mode statistics for the current macroblock: bump the
// key-frame or inter-frame histograms (file-scope arrays above) for the
// Y mode, UV mode and per-sub-block B modes, plus the encoder's own
// ymode_count/uv_mode_count used for probability updates.
1006 static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
1008 const MACROBLOCKD *xd = & x->e_mbd;
1009 const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
1010 const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
1013 const int is_key = cpi->common.frame_type == KEY_FRAME;
1015 ++ (is_key ? uv_modes : inter_uv_modes)[uvm];
1019 unsigned int *const bct = is_key ? b_modes : inter_b_modes;
1025 ++ bct[xd->block[b].bmi.mode];
1032 ++cpi->ymode_count[m];
1033 ++cpi->uv_mode_count[uvm];
// Encode one intra macroblock. Two decision paths: the RD path (non
// CONFIG_REALTIME_ONLY builds, when RD is on or speed != 2) does a full
// rate-distortion search over 4x4 vs 16x16 luma and UV modes; the fast
// path estimates each 16x16 mode's cost from prediction error only.
// Either way the chosen modes are encoded and the MB is tokenized.
// Returns the rate contribution of this MB (added to *totalrate by the
// caller).
1036 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
1038 int Error4x4, Error16x16, error_uv;
1039 B_PREDICTION_MODE intra_bmodes[16];
1040 int rate4x4, rate16x16, rateuv;
1041 int dist4x4, dist16x16, distuv;
1043 int rate4x4_tokenonly = 0;
1044 int rate16x16_tokenonly = 0;
1045 int rateuv_tokenonly = 0;
1048 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
1050 #if !(CONFIG_REALTIME_ONLY)
1052 if (cpi->sf.RD || cpi->compressor_speed != 2)
1054 Error4x4 = vp8_rd_pick_intra4x4mby_modes(cpi, x, &rate4x4, &rate4x4_tokenonly, &dist4x4);
1056 //save the b modes for possible later use
1057 for (i = 0; i < 16; i++)
1058 intra_bmodes[i] = x->e_mbd.block[i].bmi.mode;
// The 16x16 search below overwrites the per-block modes, hence the save.
1060 Error16x16 = vp8_rd_pick_intra16x16mby_mode(cpi, x, &rate16x16, &rate16x16_tokenonly, &dist16x16);
1062 error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
1064 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
1066 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
// Pick whichever luma partitioning had the lower RD error.
1069 if (Error4x4 < Error16x16)
1072 x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
1074 // get back the intra block modes
1075 for (i = 0; i < 16; i++)
1076 x->e_mbd.block[i].bmi.mode = intra_bmodes[i];
1078 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1079 cpi->prediction_error += Error4x4 ;
1081 // Experimental RD code
1082 cpi->frame_distortion += dist4x4;
1087 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1091 // Experimental RD code
1092 cpi->prediction_error += Error16x16;
1093 cpi->frame_distortion += dist16x16;
1097 sum_intra_stats(cpi, x);
1099 vp8_tokenize_mb(cpi, &x->e_mbd, t);
// Fast (non-RD) path: estimate each 16x16 mode from prediction error alone.
1105 int rate2, distortion2;
1106 MB_PREDICTION_MODE mode, best_mode = DC_PRED;
1108 Error16x16 = INT_MAX;
1110 for (mode = DC_PRED; mode <= TM_PRED; mode ++)
1112 x->e_mbd.mode_info_context->mbmi.mode = mode;
1113 vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
1114 distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
1115 rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
1116 this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
1118 if (Error16x16 > this_rd)
1120 Error16x16 = this_rd;
1125 vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate2, &distortion2);
1127 if (distortion2 == INT_MAX)
1130 Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
1132 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
1134 if (Error4x4 < Error16x16)
1136 x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
1137 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1138 cpi->prediction_error += Error4x4;
1142 x->e_mbd.mode_info_context->mbmi.mode = best_mode;
1143 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1144 cpi->prediction_error += Error16x16;
1147 vp8_pick_intra_mbuv_mode(x);
1148 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
1149 sum_intra_stats(cpi, x);
1150 vp8_tokenize_mb(cpi, &x->e_mbd, t);
1159 extern void vp8_fix_contexts(MACROBLOCKD *x);
1161 int vp8cx_encode_inter_macroblock
1163 VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
1164 int recon_yoffset, int recon_uvoffset
1167 MACROBLOCKD *const xd = &x->e_mbd;
1169 int intra_error = 0;
1175 if (xd->segmentation_enabled)
1176 x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
1178 x->encode_breakout = cpi->oxcf.encode_breakout;
1180 #if !(CONFIG_REALTIME_ONLY)
1184 inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
1188 inter_error = vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
1191 cpi->prediction_error += inter_error;
1192 cpi->intra_error += intra_error;
1195 // Experimental RD code
1196 cpi->frame_distortion += distortion;
1197 cpi->last_mb_distortion = distortion;
1200 // MB level adjustment to quantizer setup
1201 if (xd->segmentation_enabled || cpi->zbin_mode_boost_enabled)
1203 // If cyclic update enabled
1204 if (cpi->cyclic_refresh_mode_enabled)
1206 // Clear segment_id back to 0 if not coded (last frame 0,0)
1207 if ((xd->mode_info_context->mbmi.segment_id == 1) &&
1208 ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
1210 xd->mode_info_context->mbmi.segment_id = 0;
1214 // Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to suppress noise
1215 if (cpi->zbin_mode_boost_enabled)
1217 if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME))
1218 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
1220 cpi->zbin_mode_boost = 0;
1223 vp8cx_mb_init_quantizer(cpi, x);
1226 cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
1228 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
1230 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
1232 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
1234 if (xd->mode_info_context->mbmi.mode == B_PRED)
1236 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1240 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1243 sum_intra_stats(cpi, x);
1252 vp8_find_near_mvs(xd, xd->mode_info_context,
1253 &nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
1255 vp8_build_uvmvs(xd, cpi->common.full_pixel);
1257 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
1258 ref_fb_idx = cpi->common.lst_fb_idx;
1259 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
1260 ref_fb_idx = cpi->common.gld_fb_idx;
1262 ref_fb_idx = cpi->common.alt_fb_idx;
1264 xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
1265 xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
1266 xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
1268 if (xd->mode_info_context->mbmi.mode == SPLITMV)
1272 for (i = 0; i < 16; i++)
1274 if (xd->block[i].bmi.mode == NEW4X4)
1276 cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
1277 cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
1281 else if (xd->mode_info_context->mbmi.mode == NEWMV)
1283 cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
1284 cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
1287 if (!x->skip && !x->e_mbd.mode_info_context->mbmi.force_no_skip)
1289 vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
1291 // Clear mb_skip_coeff if mb_no_coeff_skip is not set
1292 if (!cpi->common.mb_no_coeff_skip)
1293 xd->mode_info_context->mbmi.mb_skip_coeff = 0;
1297 vp8_stuff_inter16x16(x);
1301 vp8_tokenize_mb(cpi, xd, t);
1304 if (cpi->common.mb_no_coeff_skip)
1306 if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
1307 xd->mode_info_context->mbmi.dc_diff = 0;
1309 xd->mode_info_context->mbmi.dc_diff = 1;
1311 xd->mode_info_context->mbmi.mb_skip_coeff = 1;
1312 cpi->skip_true_count ++;
1313 vp8_fix_contexts(xd);
1317 vp8_stuff_mb(cpi, xd, t);
1318 xd->mode_info_context->mbmi.mb_skip_coeff = 0;
1319 cpi->skip_false_count ++;