2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
12 #include "vpx_ports/config.h"
18 #include "entropymode.h"
19 #include "quant_common.h"
20 #include "segmentation.h"
21 #include "setupintrarecon.h"
22 #include "encodeintra.h"
23 #include "reconinter.h"
25 #include "pickinter.h"
26 #include "findnearmv.h"
27 #include "reconintra.h"
31 #include "vpx_ports/vpx_timer.h"
33 #if CONFIG_RUNTIME_CPU_DETECT
34 #define RTCD(x) &cpi->common.rtcd.x
35 #define IF_RTCD(x) (x)
38 #define IF_RTCD(x) NULL
40 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
42 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
43 extern void vp8_auto_select_speed(VP8_COMP *cpi);
44 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
49 void vp8_build_block_offsets(MACROBLOCK *x);
50 void vp8_setup_block_ptrs(MACROBLOCK *x);
51 int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
52 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
// Frame-level histograms of prediction-mode choices, kept for encoder
// statistics.  The y_modes/uv_modes/b_modes tables are incremented on key
// frames and the inter_* tables on inter frames (see sum_intra_stats() and
// encode_mb_row() below).
unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int y_modes[5] = {0, 0, 0, 0, 0};
unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* Per-Q-index (0..128) rounding factors used when deriving the "round"
 * values in vp8cx_init_quantizer().  Values are in 1/128th units: the
 * products computed from this table are shifted right by 7. */
static const int qrounding_factors[129] =
{
    56, 56, 56, 56, 48, 48, 56, 56,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
/* Per-Q-index (0..128) zero-bin scale factors used when deriving the "zbin"
 * thresholds in vp8cx_init_quantizer().  Values are in 1/128th units: the
 * products computed from this table are rounded and shifted right by 7. */
static const int qzbin_factors[129] =
{
    72, 72, 72, 72, 80, 80, 72, 72,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
/* Second-order (Y2) variant of qrounding_factors; same layout and units,
 * consumed by the Y2 rows of vp8cx_init_quantizer(). */
static const int qrounding_factors_y2[129] =
{
    56, 56, 56, 56, 48, 48, 56, 56,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
/* Second-order (Y2) variant of qzbin_factors; same layout and units,
 * consumed by the Y2 rows of vp8cx_init_quantizer(). */
static const int qzbin_factors_y2[129] =
{
    72, 72, 72, 72, 80, 80, 72, 72,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
147 //#define EXACT_QUANT
/* Compute a fixed-point reciprocal for divisor d so the quantizer can
 * replace a division by d with a multiply and shift.
 *
 * On return:  *shift = floor(log2(d)), and
 *             *quant = (1 + (1 << (16 + *shift)) / d) - (1 << 16)
 * so that x/d can be evaluated as ((x * ((1<<16) + *quant)) >> 16) >> *shift.
 *
 * quant  [out] low 16 bits of the scaled reciprocal (may be negative).
 * shift  [out] post-multiply right-shift amount.
 * d      [in]  divisor; must be >= 1.
 */
static void vp8cx_invert_quant(short *quant, short *shift, short d)
{
    unsigned t;
    int l;

    /* l = floor(log2(d)) */
    t = d;
    for (l = 0; t > 1; l++)
        t >>= 1;

    /* Rounded-up reciprocal scaled by 2^(16+l). */
    t = 1 + (1 << (16 + l)) / d;
    *quant = (short)(t - (1 << 16));
    *shift = l;
}
161 void vp8cx_init_quantizer(VP8_COMP *cpi)
168 int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
170 for (Q = 0; Q < QINDEX_RANGE; Q++)
173 quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
174 vp8cx_invert_quant(cpi->Y1quant[Q][0] + 0,
175 cpi->Y1quant_shift[Q][0] + 0, quant_val);
176 cpi->Y1zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
177 cpi->Y1round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
178 cpi->common.Y1dequant[Q][0][0] = quant_val;
179 cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
181 quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
182 vp8cx_invert_quant(cpi->Y2quant[Q][0] + 0,
183 cpi->Y2quant_shift[Q][0] + 0, quant_val);
184 cpi->Y2zbin[Q][0][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
185 cpi->Y2round[Q][0][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
186 cpi->common.Y2dequant[Q][0][0] = quant_val;
187 cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
189 quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
190 vp8cx_invert_quant(cpi->UVquant[Q][0] + 0,
191 cpi->UVquant_shift[Q][0] + 0, quant_val);
192 cpi->UVzbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
193 cpi->UVround[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
194 cpi->common.UVdequant[Q][0][0] = quant_val;
195 cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
197 // all the ac values = ;
198 for (i = 1; i < 16; i++)
200 int rc = vp8_default_zig_zag1d[i];
204 quant_val = vp8_ac_yquant(Q);
205 vp8cx_invert_quant(cpi->Y1quant[Q][r] + c,
206 cpi->Y1quant_shift[Q][r] + c, quant_val);
207 cpi->Y1zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
208 cpi->Y1round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
209 cpi->common.Y1dequant[Q][r][c] = quant_val;
210 cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
212 quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
213 vp8cx_invert_quant(cpi->Y2quant[Q][r] + c,
214 cpi->Y2quant_shift[Q][r] + c, quant_val);
215 cpi->Y2zbin[Q][r][c] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
216 cpi->Y2round[Q][r][c] = (qrounding_factors_y2[Q] * quant_val) >> 7;
217 cpi->common.Y2dequant[Q][r][c] = quant_val;
218 cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
220 quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
221 vp8cx_invert_quant(cpi->UVquant[Q][r] + c,
222 cpi->UVquant_shift[Q][r] + c, quant_val);
223 cpi->UVzbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
224 cpi->UVround[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
225 cpi->common.UVdequant[Q][r][c] = quant_val;
226 cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
231 void vp8cx_init_quantizer(VP8_COMP *cpi)
238 int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
240 for (Q = 0; Q < QINDEX_RANGE; Q++)
243 quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
244 cpi->Y1quant[Q][0][0] = (1 << 16) / quant_val;
245 cpi->Y1zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
246 cpi->Y1round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
247 cpi->common.Y1dequant[Q][0][0] = quant_val;
248 cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
250 quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
251 cpi->Y2quant[Q][0][0] = (1 << 16) / quant_val;
252 cpi->Y2zbin[Q][0][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
253 cpi->Y2round[Q][0][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
254 cpi->common.Y2dequant[Q][0][0] = quant_val;
255 cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
257 quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
258 cpi->UVquant[Q][0][0] = (1 << 16) / quant_val;
259 cpi->UVzbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
260 cpi->UVround[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
261 cpi->common.UVdequant[Q][0][0] = quant_val;
262 cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
264 // all the ac values = ;
265 for (i = 1; i < 16; i++)
267 int rc = vp8_default_zig_zag1d[i];
271 quant_val = vp8_ac_yquant(Q);
272 cpi->Y1quant[Q][r][c] = (1 << 16) / quant_val;
273 cpi->Y1zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
274 cpi->Y1round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
275 cpi->common.Y1dequant[Q][r][c] = quant_val;
276 cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
278 quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
279 cpi->Y2quant[Q][r][c] = (1 << 16) / quant_val;
280 cpi->Y2zbin[Q][r][c] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
281 cpi->Y2round[Q][r][c] = (qrounding_factors_y2[Q] * quant_val) >> 7;
282 cpi->common.Y2dequant[Q][r][c] = quant_val;
283 cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
285 quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
286 cpi->UVquant[Q][r][c] = (1 << 16) / quant_val;
287 cpi->UVzbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
288 cpi->UVround[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
289 cpi->common.UVdequant[Q][r][c] = quant_val;
290 cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
295 void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
299 MACROBLOCKD *xd = &x->e_mbd;
302 // Select the baseline MB Q index.
303 if (xd->segmentation_enabled)
306 if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
308 QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
312 QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
313 QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
317 QIndex = cpi->common.base_qindex;
320 zbin_extra = (cpi->common.Y1dequant[QIndex][0][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
322 for (i = 0; i < 16; i++)
324 x->block[i].quant = cpi->Y1quant[QIndex];
325 x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
326 x->block[i].zbin = cpi->Y1zbin[QIndex];
327 x->block[i].round = cpi->Y1round[QIndex];
328 x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
329 x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
330 x->block[i].zbin_extra = (short)zbin_extra;
334 zbin_extra = (cpi->common.UVdequant[QIndex][0][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
336 for (i = 16; i < 24; i++)
338 x->block[i].quant = cpi->UVquant[QIndex];
339 x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
340 x->block[i].zbin = cpi->UVzbin[QIndex];
341 x->block[i].round = cpi->UVround[QIndex];
342 x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
343 x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
344 x->block[i].zbin_extra = (short)zbin_extra;
348 zbin_extra = (cpi->common.Y2dequant[QIndex][0][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
349 x->block[24].quant = cpi->Y2quant[QIndex];
350 x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
351 x->block[24].zbin = cpi->Y2zbin[QIndex];
352 x->block[24].round = cpi->Y2round[QIndex];
353 x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
354 x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
355 x->block[24].zbin_extra = (short)zbin_extra;
358 void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
360 // vp8cx_init_quantizer() is first called in vp8_create_compressor(). A check is added here so that vp8cx_init_quantizer() is only called
361 // when these values are not all zero.
362 if (cpi->common.y1dc_delta_q | cpi->common.y2dc_delta_q | cpi->common.uvdc_delta_q | cpi->common.y2ac_delta_q | cpi->common.uvac_delta_q)
364 vp8cx_init_quantizer(cpi);
367 // MB level quantizer setup
368 vp8cx_mb_init_quantizer(cpi, &cpi->mb);
// Encode one row of macroblocks: per-MB edge/MV limits, segment-aware
// quantizer setup, intra or inter MB encoding, statistics gathering, and
// cyclic-refresh map maintenance, then recon-border extension for the row.
// NOTE(review): this listing is truncated — the rest of the parameter list
// (the identifiers cm, mb_row, x, xd, tp, segment_counts, totalrate used
// below imply those parameters) and numerous brace/declaration lines are
// missing from the dump.  TODO: restore from upstream before building.
void encode_mb_row(VP8_COMP *cpi,
    int recon_yoffset, recon_uvoffset;
    // Prediction source is the last-frame buffer; output goes to the new frame.
    int ref_fb_idx = cm->lst_fb_idx;
    int dst_fb_idx = cm->new_fb_idx;
    int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
    int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
    int seg_map_index = (mb_row * cpi->common.mb_cols);

    // reset above block coeffs
    xd->above_context = cm->above_context;

    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);
    recon_uvoffset = (mb_row * recon_uv_stride * 8);

    // Remember where this row's tokens start so tok_count can be derived later.
    cpi->tplist[mb_row].start = *tp;
    //printf("Main mb_row = %d\n", mb_row);

    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
        // Distance of Mb to the various image edges.
        // These specified to 8th pel as they are always compared to values that are in 1/8th pel units
        xd->mb_to_left_edge = -((mb_col * 16) << 3);
        xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
        xd->mb_to_top_edge = -((mb_row * 16) << 3);
        xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;

        // Set up limit values for motion vectors used to prevent them extending outside the UMV borders
        x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
        x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
        x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
        x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);

        xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
        xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
        xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
        xd->left_available = (mb_col != 0);

        // Is segmentation enabled
        // MB level adjutment to quantizer
        if (xd->segmentation_enabled)
            // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
            if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
                xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
                xd->mode_info_context->mbmi.segment_id = 0;

            vp8cx_mb_init_quantizer(cpi, x);
            xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default

        x->active_ptr = cpi->active_map + seg_map_index + mb_col;

        if (cm->frame_type == KEY_FRAME)
            *totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
            y_modes[xd->mbmi.mode] ++;
            *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset);
            inter_y_modes[xd->mbmi.mode] ++;

            if (xd->mbmi.mode == SPLITMV)
                for (b = 0; b < xd->mbmi.partition_count; b++)
                    inter_b_modes[x->partition->bmi[b].mode] ++;

            // Count of last ref frame 0,0 useage
            if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
                cpi->inter_zz_count ++;

            // Special case code for cyclic refresh
            // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
            // during vp8cx_encode_inter_macroblock()) back into the global sgmentation map
            if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
                cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;

                // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
                // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
                // else mark it as dirty (1).
                if (xd->mode_info_context->mbmi.segment_id)
                    cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1;
                else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
                    if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1)
                        cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0;
                    cpi->cyclic_refresh_map[seg_map_index+mb_col] = 1;

        // Row's tokens end here.
        cpi->tplist[mb_row].stop = *tp;

        x->gf_active_ptr++; // Increment pointer into gf useage flags structure for next mb

        // Copy the per-block intra mode decisions into the mode info array.
        for (i = 0; i < 16; i++)
            vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));

        // adjust to the next column of macroblocks
        x->src.y_buffer += 16;
        x->src.u_buffer += 8;
        x->src.v_buffer += 8;

        // Keep track of segment useage
        segment_counts[xd->mode_info_context->mbmi.segment_id] ++;

        xd->mode_info_context++;

        // Published for the multithreaded encoder's row synchronisation.
        cpi->current_mb_col_main = mb_col;

    //extend the recon for intra prediction
        &cm->yv12_fb[dst_fb_idx],
        xd->dst.y_buffer + 16,
        xd->dst.u_buffer + 8,
        xd->dst.v_buffer + 8);

    // this is to account for the border
    xd->mode_info_context++;
// Top-level per-frame encode.  Sets up sub-pixel prediction function
// pointers, counters, quantizers, RD/ME constants and block pointers, then
// encodes every macroblock row (single-threaded or multi-threaded), and
// finally derives segment-tree probabilities, the projected frame size,
// intra percentage, MV cost tables and reference-frame probabilities from
// the counts gathered during encoding.
// NOTE(review): this listing is missing many brace/declaration lines
// (e.g. mb_row/i/j/totalrate/tot_count declarations, else branches and
// #if/#endif pairs).  TODO: restore from upstream before building.
void vp8_encode_frame(VP8_COMP *cpi)
    MACROBLOCK *const x = & cpi->mb;
    VP8_COMMON *const cm = & cpi->common;
    MACROBLOCKD *const xd = & x->e_mbd;
    TOKENEXTRA *tp = cpi->tok;
    int segment_counts[MAX_MB_SEGMENTS];

    // Inter frames pick sixtap or bilinear sub-pixel filters per the header.
    if (cm->frame_type != KEY_FRAME)
        if (cm->mcomp_filter_type == SIXTAP)
            xd->subpixel_predict = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap4x4);
            xd->subpixel_predict8x4 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap8x4);
            xd->subpixel_predict8x8 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap8x8);
            xd->subpixel_predict16x16 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap16x16);
            xd->subpixel_predict = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear4x4);
            xd->subpixel_predict8x4 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear8x4);
            xd->subpixel_predict8x8 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear8x8);
            xd->subpixel_predict16x16 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear16x16);

    // For key frames make sure the intra ref frame probability value
    // is set to "all intra"
    //cpi->prob_intra_coded = 255;

    x->gf_active_ptr = (signed char *)cpi->gf_active_flags; // Point to base of GF active flags data structure

    x->vector_range = 32;

    // Count of MBs using the alternate Q if any

    // Reset frame count of inter 0,0 motion vector useage.
    cpi->inter_zz_count = 0;

    vpx_memset(segment_counts, 0, sizeof(segment_counts));

    // Reset per-frame rate/distortion/skip statistics.
    cpi->prediction_error = 0;
    cpi->intra_error = 0;
    cpi->skip_true_count = 0;
    cpi->skip_false_count = 0;
    cpi->frame_distortion = 0;
    cpi->last_mb_distortion = 0;

    x->partition_info = x->pi;

    xd->mode_info_context = cm->mi;
    xd->mode_info_stride = cm->mode_info_stride;
    xd->frame_type = cm->frame_type;
    xd->frames_since_golden = cm->frames_since_golden;
    xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
    vp8_zero(cpi->MVcount);
    // vp8_zero( Contexts)
    vp8_zero(cpi->coef_counts);

    // reset intra mode contexts
    if (cm->frame_type == KEY_FRAME)
        vp8_init_mbmode_probs(cm);

    vp8cx_frame_init_quantizer(cpi);

    // In realtime mode, either honour the requested speed or auto-select it.
    if (cpi->compressor_speed == 2)
        if (cpi->oxcf.cpu_used < 0)
            cpi->Speed = -(cpi->oxcf.cpu_used);
            vp8_auto_select_speed(cpi);

    vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
    //vp8_initialize_rd_consts( cpi, vp8_dc_quant(cpi->avg_frame_qindex, cm->y1dc_delta_q) );
    vp8cx_initialize_me_consts(cpi, cm->base_qindex);
    //vp8cx_initialize_me_consts( cpi, cpi->avg_frame_qindex);

    // Copy data over into macro block data sturctures.
    x->src = * cpi->Source;
    xd->pre = cm->yv12_fb[cm->lst_fb_idx];
    xd->dst = cm->yv12_fb[cm->new_fb_idx];

    // set up frame new frame for intra coded blocks
    vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);

    vp8_build_block_offsets(x);

    vp8_setup_block_dptrs(&x->e_mbd);

    vp8_setup_block_ptrs(x);

    x->rddiv = cpi->RDDIV;
    x->rdmult = cpi->RDMULT;

    // Experimental rd code
    // 2 Pass - Possibly set Rdmult based on last frame distortion + this frame target bits or other metrics
    // such as cpi->rate_correction_factor that indicate relative complexity.
    /*if ( cpi->pass == 2 && (cpi->last_frame_distortion > 0) && (cpi->target_bits_per_mb > 0) )
        //x->rdmult = ((cpi->last_frame_distortion * 256)/cpi->common.MBs)/ cpi->target_bits_per_mb;
        x->rdmult = (int)(cpi->RDMULT * cpi->rate_correction_factor);
        x->rdmult = cpi->RDMULT; */
    //x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));

    xd->mode_info_context->mbmi.mode = DC_PRED;
    xd->mode_info_context->mbmi.uv_mode = DC_PRED;

    xd->left_context = &cm->left_context;

    vp8_zero(cpi->count_mb_ref_frame_usage)
    vp8_zero(cpi->ymode_count)
    vp8_zero(cpi->uv_mode_count)

    vpx_memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);

    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

    if (!cpi->b_multi_threaded)
        // for each macroblock row in image
        for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
            vp8_zero(cm->left_context)

            encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

            // adjust to the next row of mbs
            x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
            x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
            x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;

        cpi->tok_count = tp - cpi->tok;

#if CONFIG_MULTITHREAD
        vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);

        // Main thread encodes one row, worker threads the following rows;
        // token buffers are partitioned per row (mb_cols * 16 * 24 each).
        for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
            cpi->current_mb_col_main = -1;

            for (i = 0; i < cpi->encoding_thread_count; i++)
                if ((mb_row + i + 1) >= cm->mb_rows)
                    cpi->mb_row_ei[i].mb_row = mb_row + i + 1;
                    cpi->mb_row_ei[i].tp = cpi->tok + (mb_row + i + 1) * (cm->mb_cols * 16 * 24);
                    cpi->mb_row_ei[i].current_mb_col = -1;
                    //SetEvent(cpi->h_event_mbrencoding[i]);
                    sem_post(&cpi->h_event_mbrencoding[i]);

            vp8_zero(cm->left_context)

            tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);

            encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

            // adjust to the next row of mbs
            x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
            x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
            x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;

            xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
            x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;

            if (mb_row < cm->mb_rows - 1)
                //WaitForSingleObject(cpi->h_event_main, INFINITE);
                sem_wait(&cpi->h_event_main);

        // Encode any rows left over after the threaded passes.
        for( ;mb_row<cm->mb_rows; mb_row ++)
            vp8_zero( cm->left_context)

            tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);

            encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
            // adjust to the next row of mbs
            x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
            x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
            x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;

        // Sum token counts and per-thread statistics.
        for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
            cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;

        if (xd->segmentation_enabled)
            if (xd->segmentation_enabled)
                for (i = 0; i < cpi->encoding_thread_count; i++)
                    for (j = 0; j < 4; j++)
                        segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];

        for (i = 0; i < cpi->encoding_thread_count; i++)
            totalrate += cpi->mb_row_ei[i].totalrate;

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);

    // Work out the segment probabilites if segmentation is enabled
    if (xd->segmentation_enabled)
        vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));

        tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
            xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;

        tot_count = segment_counts[0] + segment_counts[1];
            xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;

        tot_count = segment_counts[2] + segment_counts[3];
            xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;

        // Zero probabilities not allowed
        for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
            if (xd->mb_segment_tree_probs[i] == 0)
                xd->mb_segment_tree_probs[i] = 1;

    // 256 rate units to the bit
    cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES

    // Make a note of the percentage MBs coded Intra.
    if (cm->frame_type == KEY_FRAME)
        cpi->this_frame_percent_intra = 100;
        tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
                    + cpi->count_mb_ref_frame_usage[LAST_FRAME]
                    + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
                    + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];

            cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;

    // Rebuild the MV cost tables only if the MV contexts actually changed.
    int flag[2] = {0, 0};

    for (cnt = 0; cnt < MVPcount; cnt++)
        if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
            vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);

    for (cnt = 0; cnt < MVPcount; cnt++)
        if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
            vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);

    if (flag[0] || flag[1])
        vp8_build_component_cost_table(cpi->mb.mvcost, cpi->mb.mvsadcost, (const MV_CONTEXT *) cm->fc.mvc, flag);

    // Adjust the projected reference frame useage probability numbers to reflect
    // what we have just seen. This may be usefull when we make multiple itterations
    // of the recode loop rather than continuing to use values from the previous frame.
    if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)
        const int *const rfct = cpi->count_mb_ref_frame_usage;
        const int rf_intra = rfct[INTRA_FRAME];
        const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];

        if ((rf_intra + rf_inter) > 0)
            cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);

            if (cpi->prob_intra_coded < 1)
                cpi->prob_intra_coded = 1;

            if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
                cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;

                if (cpi->prob_last_coded < 1)
                    cpi->prob_last_coded = 1;

                cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                                     ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;

                if (cpi->prob_gf_coded < 1)
                    cpi->prob_gf_coded = 1;

    // Keep record of the total distortion this time around for future use
    cpi->last_frame_distortion = cpi->frame_distortion;
922 void vp8_setup_block_ptrs(MACROBLOCK *x)
927 for (r = 0; r < 4; r++)
929 for (c = 0; c < 4; c++)
931 x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
935 for (r = 0; r < 2; r++)
937 for (c = 0; c < 2; c++)
939 x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
944 for (r = 0; r < 2; r++)
946 for (c = 0; c < 2; c++)
948 x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
952 x->block[24].src_diff = x->src_diff + 384;
955 for (i = 0; i < 25; i++)
957 x->block[i].coeff = x->coeff + i * 16;
961 void vp8_build_block_offsets(MACROBLOCK *x)
966 vp8_build_block_doffsets(&x->e_mbd);
969 for (br = 0; br < 4; br++)
971 for (bc = 0; bc < 4; bc++)
973 BLOCK *this_block = &x->block[block];
974 this_block->base_src = &x->src.y_buffer;
975 this_block->src_stride = x->src.y_stride;
976 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
982 for (br = 0; br < 2; br++)
984 for (bc = 0; bc < 2; bc++)
986 BLOCK *this_block = &x->block[block];
987 this_block->base_src = &x->src.u_buffer;
988 this_block->src_stride = x->src.uv_stride;
989 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
995 for (br = 0; br < 2; br++)
997 for (bc = 0; bc < 2; bc++)
999 BLOCK *this_block = &x->block[block];
1000 this_block->base_src = &x->src.v_buffer;
1001 this_block->src_stride = x->src.uv_stride;
1002 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1008 static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
1010 const MACROBLOCKD *xd = & x->e_mbd;
1011 const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
1012 const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
1015 const int is_key = cpi->common.frame_type == KEY_FRAME;
1017 ++ (is_key ? uv_modes : inter_uv_modes)[uvm];
1021 unsigned int *const bct = is_key ? b_modes : inter_b_modes;
1027 ++ bct[xd->block[b].bmi.mode];
1034 ++cpi->ymode_count[m];
1035 ++cpi->uv_mode_count[uvm];
// Choose and encode an intra coding for the current macroblock.  Two paths:
// an RD-based search over 4x4 (B_PRED) vs 16x16 luma modes plus a UV mode
// (non-realtime), and a faster prediction-error-based search.  Both paths
// finish by encoding chroma, updating intra statistics and tokenizing.
// NOTE(review): this listing is missing several brace/else/#else/#endif
// lines and local declarations (i, this_rd, best_mode assignment, the
// return statement).  TODO: restore from upstream before building.
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
    int Error4x4, Error16x16, error_uv;
    B_PREDICTION_MODE intra_bmodes[16];
    int rate4x4, rate16x16, rateuv;
    int dist4x4, dist16x16, distuv;
    int rate4x4_tokenonly = 0;
    int rate16x16_tokenonly = 0;
    int rateuv_tokenonly = 0;

    x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;

#if !(CONFIG_REALTIME_ONLY)

    // RD path: full rate-distortion mode selection.
    if (cpi->sf.RD || cpi->compressor_speed != 2)
        Error4x4 = vp8_rd_pick_intra4x4mby_modes(cpi, x, &rate4x4, &rate4x4_tokenonly, &dist4x4);

        //save the b modes for possible later use
        for (i = 0; i < 16; i++)
            intra_bmodes[i] = x->e_mbd.block[i].bmi.mode;

        Error16x16 = vp8_rd_pick_intra16x16mby_mode(cpi, x, &rate16x16, &rate16x16_tokenonly, &dist16x16);

        error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);

        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;

        vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);

        // Prefer the 4x4 split when its RD cost beats the 16x16 mode.
        if (Error4x4 < Error16x16)
            x->e_mbd.mode_info_context->mbmi.mode = B_PRED;

            // get back the intra block modes
            for (i = 0; i < 16; i++)
                x->e_mbd.block[i].bmi.mode = intra_bmodes[i];

            vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
            cpi->prediction_error += Error4x4 ;

            // Experimental RD code
            cpi->frame_distortion += dist4x4;
            vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);

            // Experimental RD code
            cpi->prediction_error += Error16x16;
            cpi->frame_distortion += dist16x16;

        sum_intra_stats(cpi, x);

        vp8_tokenize_mb(cpi, &x->e_mbd, t);

        // Fast path: pick the 16x16 mode with the smallest estimated RD cost
        // from the raw prediction error, then compare against the 4x4 pick.
        int rate2, distortion2;
        MB_PREDICTION_MODE mode, best_mode = DC_PRED;
        Error16x16 = INT_MAX;

        for (mode = DC_PRED; mode <= TM_PRED; mode ++)
            x->e_mbd.mode_info_context->mbmi.mode = mode;
            vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
            distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
            rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
            this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);

            if (Error16x16 > this_rd)
                Error16x16 = this_rd;

        vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate2, &distortion2);

        if (distortion2 == INT_MAX)
            Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);

        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;

        if (Error4x4 < Error16x16)
            x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
            vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
            cpi->prediction_error += Error4x4;
            x->e_mbd.mode_info_context->mbmi.mode = best_mode;
            vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
            cpi->prediction_error += Error16x16;

        vp8_pick_intra_mbuv_mode(x);
        vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
        sum_intra_stats(cpi, x);
        vp8_tokenize_mb(cpi, &x->e_mbd, t);
1161 extern void vp8_fix_contexts(MACROBLOCKD *x);
1163 int vp8cx_encode_inter_macroblock
1165 VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
1166 int recon_yoffset, int recon_uvoffset
1169 MACROBLOCKD *const xd = &x->e_mbd;
1171 int intra_error = 0;
1177 if (xd->segmentation_enabled)
1178 x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
1180 x->encode_breakout = cpi->oxcf.encode_breakout;
1182 #if !(CONFIG_REALTIME_ONLY)
1186 inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
1190 inter_error = vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
1193 cpi->prediction_error += inter_error;
1194 cpi->intra_error += intra_error;
1197 // Experimental RD code
1198 cpi->frame_distortion += distortion;
1199 cpi->last_mb_distortion = distortion;
1202 // MB level adjustment to quantizer setup
1203 if (xd->segmentation_enabled || cpi->zbin_mode_boost_enabled)
1205 // If cyclic update enabled
1206 if (cpi->cyclic_refresh_mode_enabled)
1208 // Clear segment_id back to 0 if not coded (last frame 0,0)
1209 if ((xd->mode_info_context->mbmi.segment_id == 1) &&
1210 ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
1212 xd->mode_info_context->mbmi.segment_id = 0;
1216 // Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to suppress noise
1217 if (cpi->zbin_mode_boost_enabled)
1219 if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME))
1220 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
1222 cpi->zbin_mode_boost = 0;
1225 vp8cx_mb_init_quantizer(cpi, x);
1228 cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
1230 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
1232 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
1234 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
1236 if (xd->mode_info_context->mbmi.mode == B_PRED)
1238 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1242 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1245 sum_intra_stats(cpi, x);
1254 vp8_find_near_mvs(xd, xd->mode_info_context,
1255 &nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
1257 vp8_build_uvmvs(xd, cpi->common.full_pixel);
1259 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
1260 ref_fb_idx = cpi->common.lst_fb_idx;
1261 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
1262 ref_fb_idx = cpi->common.gld_fb_idx;
1264 ref_fb_idx = cpi->common.alt_fb_idx;
1266 xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
1267 xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
1268 xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
1270 if (xd->mode_info_context->mbmi.mode == SPLITMV)
1274 for (i = 0; i < 16; i++)
1276 if (xd->block[i].bmi.mode == NEW4X4)
1278 cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
1279 cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
1283 else if (xd->mode_info_context->mbmi.mode == NEWMV)
1285 cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
1286 cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
1289 if (!x->skip && !x->e_mbd.mode_info_context->mbmi.force_no_skip)
1291 vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
1293 // Clear mb_skip_coeff if mb_no_coeff_skip is not set
1294 if (!cpi->common.mb_no_coeff_skip)
1295 xd->mode_info_context->mbmi.mb_skip_coeff = 0;
1299 vp8_stuff_inter16x16(x);
1303 vp8_tokenize_mb(cpi, xd, t);
1306 if (cpi->common.mb_no_coeff_skip)
1308 if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
1309 xd->mode_info_context->mbmi.dc_diff = 0;
1311 xd->mode_info_context->mbmi.dc_diff = 1;
1313 xd->mode_info_context->mbmi.mb_skip_coeff = 1;
1314 cpi->skip_true_count ++;
1315 vp8_fix_contexts(xd);
1319 vp8_stuff_mb(cpi, xd, t);
1320 xd->mode_info_context->mbmi.mb_skip_coeff = 0;
1321 cpi->skip_false_count ++;