/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
12 #include "vpx_ports/config.h"
18 #include "entropymode.h"
19 #include "quant_common.h"
20 #include "segmentation.h"
21 #include "setupintrarecon.h"
22 #include "encodeintra.h"
23 #include "reconinter.h"
25 #include "pickinter.h"
26 #include "findnearmv.h"
27 #include "reconintra.h"
31 #include "vpx_ports/vpx_timer.h"
33 #if CONFIG_RUNTIME_CPU_DETECT
34 #define RTCD(x) &cpi->common.rtcd.x
35 #define IF_RTCD(x) (x)
38 #define IF_RTCD(x) NULL
40 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
42 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
43 extern void vp8_auto_select_speed(VP8_COMP *cpi);
44 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
49 void vp8_build_block_offsets(MACROBLOCK *x);
50 void vp8_setup_block_ptrs(MACROBLOCK *x);
51 int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
52 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
/* Running tallies of how often each prediction mode is chosen, split by
 * key-frame use (y_modes / uv_modes / b_modes) and inter-frame use
 * (inter_*).  sum_intra_stats() below increments them.
 * NOTE(review): upstream keeps these under a MODE_STATS-style preprocessor
 * guard; confirm whether guard lines were lost from this copy. */
unsigned int inter_y_modes[10] = {0};
unsigned int inter_uv_modes[4] = {0};
unsigned int inter_b_modes[15] = {0};
unsigned int y_modes[5] = {0};
unsigned int uv_modes[4] = {0};
unsigned int b_modes[14] = {0};
/* Rounding offset table for Y1/UV quantization, indexed by Q index 0..128.
 * Values are in 1/128th units: round = (factor * quant_val) >> 7. */
static const int qrounding_factors[129] =
{
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
/* Zero-bin threshold factors for Y1/UV quantization, indexed by Q index
 * 0..128.  Low Q indices (0..47) use a slightly wider zero bin (84/128);
 * higher indices use 80/128: zbin = ((factor * quant_val) + 64) >> 7. */
static const int qzbin_factors[129] =
{
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
/* Rounding offset table for the second-order (Y2) block, indexed by Q
 * index 0..128; same 48/128 rounding as the Y1/UV table. */
static const int qrounding_factors_y2[129] =
{
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
/* Zero-bin threshold factors for the second-order (Y2) block, indexed by
 * Q index 0..128; 84/128 for Q indices 0..47, 80/128 above that. */
static const int qzbin_factors_y2[129] =
{
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    84, 84, 84, 84, 84, 84, 84, 84,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
/* Precompute the fixed-point reciprocal the quantizer uses for divisor d.
 *
 * improved_quant != 0: produce a (quant, shift) pair; quant stores the
 *   (16+l)-bit reciprocal of d minus 1<<16 so it fits in a short, and
 *   shift holds l = floor(log2(d)) extra precision bits.
 * improved_quant == 0: plain 16-bit reciprocal with shift of 0.
 */
static void vp8cx_invert_quant(int improved_quant, short *quant,
                               short *shift, short d)
{
    if (improved_quant)
    {
        unsigned t;
        int l;

        t = d;
        for (l = 0; t > 1; l++)
            t >>= 1;                    /* l = floor(log2(d)) */

        t = 1 + (1 << (16 + l)) / d;
        *quant = (short)(t - (1 << 16));
        *shift = l;
    }
    else
    {
        *quant = (1 << 16) / d;
        *shift = 0;
    }
}
170 void vp8cx_init_quantizer(VP8_COMP *cpi)
176 int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
178 for (Q = 0; Q < QINDEX_RANGE; Q++)
181 quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
182 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
183 cpi->Y1quant_shift[Q] + 0, quant_val);
184 cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
185 cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
186 cpi->common.Y1dequant[Q][0] = quant_val;
187 cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
189 quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
190 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
191 cpi->Y2quant_shift[Q] + 0, quant_val);
192 cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
193 cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
194 cpi->common.Y2dequant[Q][0] = quant_val;
195 cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
197 quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
198 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
199 cpi->UVquant_shift[Q] + 0, quant_val);
200 cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
201 cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
202 cpi->common.UVdequant[Q][0] = quant_val;
203 cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
205 // all the ac values = ;
206 for (i = 1; i < 16; i++)
208 int rc = vp8_default_zig_zag1d[i];
210 quant_val = vp8_ac_yquant(Q);
211 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
212 cpi->Y1quant_shift[Q] + rc, quant_val);
213 cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
214 cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
215 cpi->common.Y1dequant[Q][rc] = quant_val;
216 cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
218 quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
219 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
220 cpi->Y2quant_shift[Q] + rc, quant_val);
221 cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
222 cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
223 cpi->common.Y2dequant[Q][rc] = quant_val;
224 cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
226 quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
227 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
228 cpi->UVquant_shift[Q] + rc, quant_val);
229 cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
230 cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
231 cpi->common.UVdequant[Q][rc] = quant_val;
232 cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
237 void vp8cx_init_quantizer(VP8_COMP *cpi)
243 int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
245 for (Q = 0; Q < QINDEX_RANGE; Q++)
248 quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
249 cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
250 cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
251 cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
252 cpi->common.Y1dequant[Q][0] = quant_val;
253 cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
255 quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
256 cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
257 cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
258 cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
259 cpi->common.Y2dequant[Q][0] = quant_val;
260 cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
262 quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
263 cpi->UVquant[Q][0] = (1 << 16) / quant_val;
264 cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
265 cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
266 cpi->common.UVdequant[Q][0] = quant_val;
267 cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
269 // all the ac values = ;
270 for (i = 1; i < 16; i++)
272 int rc = vp8_default_zig_zag1d[i];
274 quant_val = vp8_ac_yquant(Q);
275 cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
276 cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
277 cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
278 cpi->common.Y1dequant[Q][rc] = quant_val;
279 cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
281 quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
282 cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
283 cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
284 cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
285 cpi->common.Y2dequant[Q][rc] = quant_val;
286 cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
288 quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
289 cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
290 cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
291 cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
292 cpi->common.UVdequant[Q][rc] = quant_val;
293 cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
298 void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
302 MACROBLOCKD *xd = &x->e_mbd;
305 // Select the baseline MB Q index.
306 if (xd->segmentation_enabled)
309 if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
311 QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
315 QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
316 QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
320 QIndex = cpi->common.base_qindex;
323 zbin_extra = (cpi->common.Y1dequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
325 for (i = 0; i < 16; i++)
327 x->block[i].quant = cpi->Y1quant[QIndex];
328 x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
329 x->block[i].zbin = cpi->Y1zbin[QIndex];
330 x->block[i].round = cpi->Y1round[QIndex];
331 x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
332 x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
333 x->block[i].zbin_extra = (short)zbin_extra;
337 zbin_extra = (cpi->common.UVdequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
339 for (i = 16; i < 24; i++)
341 x->block[i].quant = cpi->UVquant[QIndex];
342 x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
343 x->block[i].zbin = cpi->UVzbin[QIndex];
344 x->block[i].round = cpi->UVround[QIndex];
345 x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
346 x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
347 x->block[i].zbin_extra = (short)zbin_extra;
351 zbin_extra = (cpi->common.Y2dequant[QIndex][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
352 x->block[24].quant = cpi->Y2quant[QIndex];
353 x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
354 x->block[24].zbin = cpi->Y2zbin[QIndex];
355 x->block[24].round = cpi->Y2round[QIndex];
356 x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
357 x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
358 x->block[24].zbin_extra = (short)zbin_extra;
361 void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
363 // Clear Zbin mode boost for default case
364 cpi->zbin_mode_boost = 0;
366 // vp8cx_init_quantizer() is first called in vp8_create_compressor(). A check is added here so that vp8cx_init_quantizer() is only called
367 // when these values are not all zero.
368 if (cpi->common.y1dc_delta_q | cpi->common.y2dc_delta_q | cpi->common.uvdc_delta_q | cpi->common.y2ac_delta_q | cpi->common.uvac_delta_q)
370 vp8cx_init_quantizer(cpi);
373 // MB level quantizer setup
374 vp8cx_mb_init_quantizer(cpi, &cpi->mb);
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp8_activity_masking().
 */
#define VP8_ACTIVITY_AVG_MIN (64)

/* This is used as a reference when computing the source variance for the
 * purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 * which will be faster.
 */
static const unsigned char VP8_VAR_OFFS[16] =
{
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};
395 unsigned int vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
403 /* TODO: This could also be done over smaller areas (8x8), but that would
404 * require extensive changes elsewhere, as lambda is assumed to be fixed
405 * over an entire MB in most of the code.
406 * Another option is to compute four 8x8 variances, and pick a single
407 * lambda using a non-linear combination (e.g., the smallest, or second
410 VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)(x->src.y_buffer,
411 x->src.y_stride, VP8_VAR_OFFS, 0, &sse, &sum);
412 /* This requires a full 32 bits of precision. */
413 act = (sse<<8) - sum*sum;
414 /* Drop 4 to give us some headroom to work with. */
415 act = (act + 8) >> 4;
416 /* If the region is flat, lower the activity some more. */
418 act = act < 5<<12 ? act : 5<<12;
419 /* TODO: For non-flat regions, edge regions should receive less masking
420 * than textured regions, but identifying edge regions quickly and
421 * reliably enough is still a subject of experimentation.
422 * This will be most noticable near edges with a complex shape (e.g.,
423 * text), but the 4x4 transform size should make this less of a problem
424 * than it would be for an 8x8 transform.
426 /* Apply the masking to the RD multiplier. */
427 a = act + 4*cpi->activity_avg;
428 b = 4*act + cpi->activity_avg;
429 x->rdmult = (unsigned int)(((INT64)x->rdmult*b + (a>>1))/a);
// encode_mb_row(): encode one row of macroblocks.  Sets up row-level MV
// limits and reconstruction offsets, then for each MB column performs
// optional activity masking, per-MB quantizer setup, intra (key frame) or
// inter mode selection + tokenization, cyclic-refresh bookkeeping, and
// advances the source/mode pointers.
// NOTE(review): this copy is a damaged listing -- original line numbers are
// fused onto each line and many lines (parameter list tail, braces, #if
// CONFIG_MULTITHREAD guards, a function-call head near the end) are missing.
436 void encode_mb_row(VP8_COMP *cpi,
445 INT64 activity_sum = 0;
447 int recon_yoffset, recon_uvoffset;
449 int ref_fb_idx = cm->lst_fb_idx;
450 int dst_fb_idx = cm->new_fb_idx;
451 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
452 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
453 int seg_map_index = (mb_row * cpi->common.mb_cols);
456 // reset above block coeffs
457 xd->above_context = cm->above_context;
459 xd->up_available = (mb_row != 0);
460 recon_yoffset = (mb_row * recon_y_stride * 16);
461 recon_uvoffset = (mb_row * recon_uv_stride * 8);
// Record where this row's tokens begin (used later for tok_count).
463 cpi->tplist[mb_row].start = *tp;
464 //printf("Main mb_row = %d\n", mb_row);
466 // Distance of Mb to the top & bottom edges, specified in 1/8th pel
467 // units as they are always compared to values that are in 1/8th pel units
468 xd->mb_to_top_edge = -((mb_row * 16) << 3);
469 xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
471 // Set up limit values for vertical motion vector components
472 // to prevent them extending beyond the UMV borders
473 x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
474 x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
475 + (VP8BORDERINPIXELS - 16);
477 // for each macroblock col in image
478 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
480 // Distance of Mb to the left & right edges, specified in
481 // 1/8th pel units as they are always compared to values
482 // that are in 1/8th pel units
483 xd->mb_to_left_edge = -((mb_col * 16) << 3);
484 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
486 // Set up limit values for horizontal motion vector components
487 // to prevent them extending beyond the UMV borders
488 x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
489 x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
490 + (VP8BORDERINPIXELS - 16);
492 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
493 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
494 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
495 xd->left_available = (mb_col != 0);
// Reset the RD constants; activity masking below may rescale rdmult.
497 x->rddiv = cpi->RDDIV;
498 x->rdmult = cpi->RDMULT;
500 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
501 activity_sum += vp8_activity_masking(cpi, x);
503 // Is segmentation enabled
504 // MB level adjutment to quantizer
505 if (xd->segmentation_enabled)
507 // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
508 if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
509 xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
511 xd->mode_info_context->mbmi.segment_id = 0;
513 vp8cx_mb_init_quantizer(cpi, x);
516 xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
518 x->active_ptr = cpi->active_map + seg_map_index + mb_col;
// Key frames take the intra path; other frames pick intra/inter per MB.
520 if (cm->frame_type == KEY_FRAME)
522 *totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
// Mode-histogram updates; presumably under a MODE_STATS-style guard lost
// from this copy.
524 y_modes[xd->mbmi.mode] ++;
529 *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset);
532 inter_y_modes[xd->mbmi.mode] ++;
534 if (xd->mbmi.mode == SPLITMV)
538 for (b = 0; b < xd->mbmi.partition_count; b++)
540 inter_b_modes[x->partition->bmi[b].mode] ++;
546 // Count of last ref frame 0,0 useage
547 if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
548 cpi->inter_zz_count ++;
550 // Special case code for cyclic refresh
551 // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
552 // during vp8cx_encode_inter_macroblock()) back into the global sgmentation map
553 if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
555 cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
557 // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
558 // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
559 // else mark it as dirty (1).
560 if (xd->mode_info_context->mbmi.segment_id)
561 cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1;
562 else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
564 if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1)
565 cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0;
568 cpi->cyclic_refresh_map[seg_map_index+mb_col] = 1;
// Record where this row's tokens end.
573 cpi->tplist[mb_row].stop = *tp;
575 x->gf_active_ptr++; // Increment pointer into gf useage flags structure for next mb
// Copy per-block (4x4) mode info out to the row's mode_info entry.
577 for (i = 0; i < 16; i++)
578 vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));
580 // adjust to the next column of macroblocks
581 x->src.y_buffer += 16;
582 x->src.u_buffer += 8;
583 x->src.v_buffer += 8;
588 // Keep track of segment useage
589 segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
592 xd->mode_info_context++;
// NOTE(review): this progress update is presumably inside a
// CONFIG_MULTITHREAD guard that was lost from this copy -- confirm.
596 cpi->current_mb_col_main = mb_col;
599 //extend the recon for intra prediction
// NOTE(review): the call head on the next lines was lost; by its argument
// list this is presumably vp8_extend_mb_row() -- confirm against upstream.
601 &cm->yv12_fb[dst_fb_idx],
602 xd->dst.y_buffer + 16,
603 xd->dst.u_buffer + 8,
604 xd->dst.v_buffer + 8);
606 // this is to account for the border
607 xd->mode_info_context++;
// Fold this row's activity into the MB context for the frame average.
609 x->activity_sum += activity_sum;
// vp8_encode_frame(): top-level per-frame encode driver.  Selects subpixel
// prediction functions, resets per-frame statistics, initialises RD/ME
// constants and quantizers, encodes every MB row (single- or multi-threaded),
// then derives segment tree probabilities, the projected frame size, MV cost
// tables and reference-frame probabilities from the gathered counts.
// NOTE(review): damaged listing -- original line numbers are fused onto each
// line; local declarations (mb_row, i, j, totalrate, tot_count, cnt, ...),
// braces and several preprocessor guards are missing.
616 void vp8_encode_frame(VP8_COMP *cpi)
619 MACROBLOCK *const x = & cpi->mb;
620 VP8_COMMON *const cm = & cpi->common;
621 MACROBLOCKD *const xd = & x->e_mbd;
624 TOKENEXTRA *tp = cpi->tok;
625 int segment_counts[MAX_MB_SEGMENTS];
628 // Functions setup for all frame types so we can use MC in AltRef
629 if (cm->mcomp_filter_type == SIXTAP)
631 xd->subpixel_predict = SUBPIX_INVOKE(
632 &cpi->common.rtcd.subpix, sixtap4x4);
633 xd->subpixel_predict8x4 = SUBPIX_INVOKE(
634 &cpi->common.rtcd.subpix, sixtap8x4);
635 xd->subpixel_predict8x8 = SUBPIX_INVOKE(
636 &cpi->common.rtcd.subpix, sixtap8x8);
637 xd->subpixel_predict16x16 = SUBPIX_INVOKE(
638 &cpi->common.rtcd.subpix, sixtap16x16);
// else branch (lost from this copy): bilinear filters instead of sixtap.
642 xd->subpixel_predict = SUBPIX_INVOKE(
643 &cpi->common.rtcd.subpix, bilinear4x4);
644 xd->subpixel_predict8x4 = SUBPIX_INVOKE(
645 &cpi->common.rtcd.subpix, bilinear8x4);
646 xd->subpixel_predict8x8 = SUBPIX_INVOKE(
647 &cpi->common.rtcd.subpix, bilinear8x8);
648 xd->subpixel_predict16x16 = SUBPIX_INVOKE(
649 &cpi->common.rtcd.subpix, bilinear16x16);
652 x->gf_active_ptr = (signed char *)cpi->gf_active_flags; // Point to base of GF active flags data structure
654 x->vector_range = 32;
656 // Count of MBs using the alternate Q if any
659 // Reset frame count of inter 0,0 motion vector useage.
660 cpi->inter_zz_count = 0;
662 vpx_memset(segment_counts, 0, sizeof(segment_counts));
// Reset per-frame error/skip statistics.
664 cpi->prediction_error = 0;
665 cpi->intra_error = 0;
666 cpi->skip_true_count = 0;
667 cpi->skip_false_count = 0;
671 cpi->frame_distortion = 0;
672 cpi->last_mb_distortion = 0;
677 x->partition_info = x->pi;
679 xd->mode_info_context = cm->mi;
680 xd->mode_info_stride = cm->mode_info_stride;
682 xd->frame_type = cm->frame_type;
684 xd->frames_since_golden = cm->frames_since_golden;
685 xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
686 vp8_zero(cpi->MVcount);
687 // vp8_zero( Contexts)
688 vp8_zero(cpi->coef_counts);
690 // reset intra mode contexts
691 if (cm->frame_type == KEY_FRAME)
692 vp8_init_mbmode_probs(cm);
695 vp8cx_frame_init_quantizer(cpi);
// Real-time mode: pick encoder speed from cpu_used or auto-select.
697 if (cpi->compressor_speed == 2)
699 if (cpi->oxcf.cpu_used < 0)
700 cpi->Speed = -(cpi->oxcf.cpu_used);
702 vp8_auto_select_speed(cpi);
705 vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
706 //vp8_initialize_rd_consts( cpi, vp8_dc_quant(cpi->avg_frame_qindex, cm->y1dc_delta_q) );
707 vp8cx_initialize_me_consts(cpi, cm->base_qindex);
708 //vp8cx_initialize_me_consts( cpi, cpi->avg_frame_qindex);
710 // Copy data over into macro block data sturctures.
712 x->src = * cpi->Source;
713 xd->pre = cm->yv12_fb[cm->lst_fb_idx];
714 xd->dst = cm->yv12_fb[cm->new_fb_idx];
716 // set up frame new frame for intra coded blocks
718 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
720 vp8_build_block_offsets(x);
722 vp8_setup_block_dptrs(&x->e_mbd);
724 vp8_setup_block_ptrs(x);
729 // Experimental rd code
730 // 2 Pass - Possibly set Rdmult based on last frame distortion + this frame target bits or other metrics
731 // such as cpi->rate_correction_factor that indicate relative complexity.
732 /*if ( cpi->pass == 2 && (cpi->last_frame_distortion > 0) && (cpi->target_bits_per_mb > 0) )
734 //x->rdmult = ((cpi->last_frame_distortion * 256)/cpi->common.MBs)/ cpi->target_bits_per_mb;
735 x->rdmult = (int)(cpi->RDMULT * cpi->rate_correction_factor);
738 x->rdmult = cpi->RDMULT; */
739 //x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));
742 xd->mode_info_context->mbmi.mode = DC_PRED;
743 xd->mode_info_context->mbmi.uv_mode = DC_PRED;
745 xd->left_context = &cm->left_context;
// Reset the mode/reference usage counters accumulated during encoding.
747 vp8_zero(cpi->count_mb_ref_frame_usage)
748 vp8_zero(cpi->ymode_count)
749 vp8_zero(cpi->uv_mode_count)
753 vpx_memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
// Time the MB-row encode loop for the encoder's internal profiling.
756 struct vpx_usec_timer emr_timer;
757 vpx_usec_timer_start(&emr_timer);
// Single-threaded path: encode rows sequentially.
759 if (!cpi->b_multi_threaded)
761 // for each macroblock row in image
762 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
765 vp8_zero(cm->left_context)
767 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
769 // adjust to the next row of mbs
770 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
771 x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
772 x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
775 cpi->tok_count = tp - cpi->tok;
// Multi-threaded path: the main thread encodes every Nth row and hands the
// rows in between to worker threads via semaphores.
780 #if CONFIG_MULTITHREAD
783 vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);
785 for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
787 cpi->current_mb_col_main = -1;
789 for (i = 0; i < cpi->encoding_thread_count; i++)
791 if ((mb_row + i + 1) >= cm->mb_rows)
794 cpi->mb_row_ei[i].mb_row = mb_row + i + 1;
795 cpi->mb_row_ei[i].tp = cpi->tok + (mb_row + i + 1) * (cm->mb_cols * 16 * 24);
796 cpi->mb_row_ei[i].current_mb_col = -1;
797 //SetEvent(cpi->h_event_mbrencoding[i]);
798 sem_post(&cpi->h_event_mbrencoding[i]);
801 vp8_zero(cm->left_context)
803 tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
805 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
807 // adjust to the next row of mbs
808 x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
809 x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
810 x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
812 xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
813 x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
815 if (mb_row < cm->mb_rows - 1)
816 //WaitForSingleObject(cpi->h_event_main, INFINITE);
817 sem_wait(&cpi->h_event_main);
// Encode any leftover rows that didn't fit the thread-stride pattern.
821 for( ;mb_row<cm->mb_rows; mb_row ++)
823 vp8_zero( cm->left_context)
825 tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
827 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
828 // adjust to the next row of mbs
829 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
830 x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
831 x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
// Gather per-row token counts and merge worker-thread statistics.
837 for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
839 cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;
842 if (xd->segmentation_enabled)
847 if (xd->segmentation_enabled)
850 for (i = 0; i < cpi->encoding_thread_count; i++)
852 for (j = 0; j < 4; j++)
853 segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
859 for (i = 0; i < cpi->encoding_thread_count; i++)
861 totalrate += cpi->mb_row_ei[i].totalrate;
864 for (i = 0; i < cpi->encoding_thread_count; i++)
866 x->activity_sum += cpi->mb_row_ei[i].mb.activity_sum;
873 vpx_usec_timer_mark(&emr_timer);
874 cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
// Derive the 3-node segment tree probabilities from the segment counts;
// branches guard against divide-by-zero when a subtree count is 0.
879 // Work out the segment probabilites if segmentation is enabled
880 if (xd->segmentation_enabled)
886 vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
888 tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
892 xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
894 tot_count = segment_counts[0] + segment_counts[1];
898 xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
901 tot_count = segment_counts[2] + segment_counts[3];
904 xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
906 // Zero probabilities not allowed
907 for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
909 if (xd->mb_segment_tree_probs[i] == 0)
910 xd->mb_segment_tree_probs[i] = 1;
915 // 256 rate units to the bit
916 cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
918 // Make a note of the percentage MBs coded Intra.
919 if (cm->frame_type == KEY_FRAME)
921 cpi->this_frame_percent_intra = 100;
927 tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
928 + cpi->count_mb_ref_frame_usage[LAST_FRAME]
929 + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
930 + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
933 cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
// Rebuild MV cost tables only for the MV probability rows that changed.
940 int flag[2] = {0, 0};
942 for (cnt = 0; cnt < MVPcount; cnt++)
944 if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
947 vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);
952 for (cnt = 0; cnt < MVPcount; cnt++)
954 if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
957 vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
962 if (flag[0] || flag[1])
963 vp8_build_component_cost_table(cpi->mb.mvcost, cpi->mb.mvsadcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
967 // Adjust the projected reference frame useage probability numbers to reflect
968 // what we have just seen. This may be usefull when we make multiple itterations
969 // of the recode loop rather than continuing to use values from the previous frame.
970 if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)
972 const int *const rfct = cpi->count_mb_ref_frame_usage;
973 const int rf_intra = rfct[INTRA_FRAME];
974 const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
976 if ((rf_intra + rf_inter) > 0)
978 cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);
980 if (cpi->prob_intra_coded < 1)
981 cpi->prob_intra_coded = 1;
983 if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
985 cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
987 if (cpi->prob_last_coded < 1)
988 cpi->prob_last_coded = 1;
990 cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
991 ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
993 if (cpi->prob_gf_coded < 1)
994 cpi->prob_gf_coded = 1;
1000 // Keep record of the total distortion this time around for future use
1001 cpi->last_frame_distortion = cpi->frame_distortion;
1004 /* Update the average activity for the next frame.
1005 * This is feed-forward for now; it could also be saved in two-pass, or
1006 * done during lookahead when that is eventually added.
1008 cpi->activity_avg = (unsigned int )(x->activity_sum/cpi->common.MBs);
1009 if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
1010 cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
1013 void vp8_setup_block_ptrs(MACROBLOCK *x)
1018 for (r = 0; r < 4; r++)
1020 for (c = 0; c < 4; c++)
1022 x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
1026 for (r = 0; r < 2; r++)
1028 for (c = 0; c < 2; c++)
1030 x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
1035 for (r = 0; r < 2; r++)
1037 for (c = 0; c < 2; c++)
1039 x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
1043 x->block[24].src_diff = x->src_diff + 384;
1046 for (i = 0; i < 25; i++)
1048 x->block[i].coeff = x->coeff + i * 16;
1052 void vp8_build_block_offsets(MACROBLOCK *x)
1057 vp8_build_block_doffsets(&x->e_mbd);
1060 for (br = 0; br < 4; br++)
1062 for (bc = 0; bc < 4; bc++)
1064 BLOCK *this_block = &x->block[block];
1065 this_block->base_src = &x->src.y_buffer;
1066 this_block->src_stride = x->src.y_stride;
1067 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1073 for (br = 0; br < 2; br++)
1075 for (bc = 0; bc < 2; bc++)
1077 BLOCK *this_block = &x->block[block];
1078 this_block->base_src = &x->src.u_buffer;
1079 this_block->src_stride = x->src.uv_stride;
1080 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1086 for (br = 0; br < 2; br++)
1088 for (bc = 0; bc < 2; bc++)
1090 BLOCK *this_block = &x->block[block];
1091 this_block->base_src = &x->src.v_buffer;
1092 this_block->src_stride = x->src.uv_stride;
1093 this_block->src = 4 * br * this_block->src_stride + 4 * bc;
// Tally the intra prediction modes chosen for this macroblock into the
// encoder's per-frame counters (ymode_count / uv_mode_count) and into the
// global mode histograms, split by key-frame vs inter-frame usage.
// NOTE(review): damaged listing -- the B_PRED branch, its loop over the 16
// subblocks (where 'b' is declared) and likely a MODE_STATS-style guard were
// lost from this copy.
1099 static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
1101 const MACROBLOCKD *xd = & x->e_mbd;
1102 const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
1103 const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
// Key frames and inter frames accumulate into separate histogram arrays.
1106 const int is_key = cpi->common.frame_type == KEY_FRAME;
1108 ++ (is_key ? uv_modes : inter_uv_modes)[uvm];
// Per-4x4-subblock mode counts; only meaningful when m == B_PRED.
1112 unsigned int *const bct = is_key ? b_modes : inter_b_modes;
1118 ++ bct[xd->block[b].bmi.mode];
// Frame-level counts later used to update mode coding probabilities.
1125 ++cpi->ymode_count[m];
1126 ++cpi->uv_mode_count[uvm];
/* Encode one macroblock using intra prediction only.
 * Chooses between per-block 4x4 prediction (B_PRED) and a whole-MB
 * 16x16 luma mode (plus a chroma mode), encodes the winner, updates
 * mode statistics, and tokenizes the residual for the bitstream.
 * NOTE(review): this listing elides structural lines (braces, #else,
 * #endif, some assignments and the return) — confirm control flow
 * against the full file before relying on the exact nesting. */
1129 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
1131 int Error4x4, Error16x16, error_uv;
/* The 4x4 modes are saved here so they can be restored if B_PRED wins
 * after the 16x16 search has overwritten the per-block bmi modes. */
1132 B_PREDICTION_MODE intra_bmodes[16];
1133 int rate4x4, rate16x16, rateuv;
1134 int dist4x4, dist16x16, distuv;
1136 int rate4x4_tokenonly = 0;
1137 int rate16x16_tokenonly = 0;
1138 int rateuv_tokenonly = 0;
/* Mark this macroblock as intra coded. */
1141 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
1143 #if !(CONFIG_REALTIME_ONLY)
/* Full rate-distortion search path: taken when RD is enabled or the
 * encoder is not running at the fastest (realtime) compressor speed. */
1145 if (cpi->sf.RD || cpi->compressor_speed != 2)
1147 Error4x4 = vp8_rd_pick_intra4x4mby_modes(cpi, x, &rate4x4, &rate4x4_tokenonly, &dist4x4);
1149 //save the b modes for possible later use
1150 for (i = 0; i < 16; i++)
1151 intra_bmodes[i] = x->e_mbd.block[i].bmi.mode;
1153 Error16x16 = vp8_rd_pick_intra16x16mby_mode(cpi, x, &rate16x16, &rate16x16_tokenonly, &dist16x16);
1155 error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
/* Chroma is encoded with the chosen uv mode regardless of which luma
 * partitioning wins below. */
1157 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
/* Keep whichever luma partitioning has the lower RD cost. */
1160 if (Error4x4 < Error16x16)
1163 x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
1165 // get back the intra block modes
1166 for (i = 0; i < 16; i++)
1167 x->e_mbd.block[i].bmi.mode = intra_bmodes[i];
1169 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1170 cpi->prediction_error += Error4x4 ;
1172 // Experimental RD code
1173 cpi->frame_distortion += dist4x4;
/* else: 16x16 luma mode won the RD comparison. */
1178 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1182 // Experimental RD code
1183 cpi->prediction_error += Error16x16;
1184 cpi->frame_distortion += dist16x16;
1188 sum_intra_stats(cpi, x);
1190 vp8_tokenize_mb(cpi, &x->e_mbd, t);
/* Fast (non-RD) path: estimate the best 16x16 mode from the raw
 * prediction error of each candidate, then compare against a fast
 * 4x4 pick. */
1196 int rate2, distortion2;
1197 MB_PREDICTION_MODE mode, best_mode = DC_PRED;
1199 Error16x16 = INT_MAX;
/* Try every whole-MB luma mode; keep the lowest estimated RD cost. */
1201 for (mode = DC_PRED; mode <= TM_PRED; mode ++)
1203 x->e_mbd.mode_info_context->mbmi.mode = mode;
1204 vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
1205 distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
1206 rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
1207 this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
1209 if (Error16x16 > this_rd)
1211 Error16x16 = this_rd;
/* Fast 4x4 pick for comparison against the 16x16 estimate. */
1216 vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate2, &distortion2);
/* A distortion of INT_MAX flags the 4x4 search as invalid/aborted. */
1218 if (distortion2 == INT_MAX)
1221 Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
1223 if (Error4x4 < Error16x16)
1225 x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
1226 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1227 cpi->prediction_error += Error4x4;
/* else: encode with the best 16x16 mode found above. */
1231 x->e_mbd.mode_info_context->mbmi.mode = best_mode;
1232 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1233 cpi->prediction_error += Error16x16;
/* Chroma mode pick + encode, then stats and tokenization. */
1236 vp8_pick_intra_mbuv_mode(x);
1237 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
1238 sum_intra_stats(cpi, x);
1239 vp8_tokenize_mb(cpi, &x->e_mbd, t);
1248 extern void vp8_fix_contexts(MACROBLOCKD *x);
1250 int vp8cx_encode_inter_macroblock
1252 VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
1253 int recon_yoffset, int recon_uvoffset
1256 MACROBLOCKD *const xd = &x->e_mbd;
1258 int intra_error = 0;
1264 if (xd->segmentation_enabled)
1265 x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
1267 x->encode_breakout = cpi->oxcf.encode_breakout;
1269 #if !(CONFIG_REALTIME_ONLY)
1273 inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
1277 inter_error = vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
1280 cpi->prediction_error += inter_error;
1281 cpi->intra_error += intra_error;
1284 // Experimental RD code
1285 cpi->frame_distortion += distortion;
1286 cpi->last_mb_distortion = distortion;
1289 // MB level adjutment to quantizer setup
1290 if (xd->segmentation_enabled || cpi->zbin_mode_boost_enabled)
1292 // If cyclic update enabled
1293 if (cpi->cyclic_refresh_mode_enabled)
1295 // Clear segment_id back to 0 if not coded (last frame 0,0)
1296 if ((xd->mode_info_context->mbmi.segment_id == 1) &&
1297 ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
1299 xd->mode_info_context->mbmi.segment_id = 0;
1303 // Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to supress noise
1304 if (cpi->zbin_mode_boost_enabled)
1306 if ( xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME )
1307 cpi->zbin_mode_boost = 0;
1310 if (xd->mode_info_context->mbmi.mode == ZEROMV)
1312 if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
1313 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
1315 cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
1317 else if (xd->mode_info_context->mbmi.mode == SPLITMV)
1318 cpi->zbin_mode_boost = 0;
1320 cpi->zbin_mode_boost = MV_ZBIN_BOOST;
1324 cpi->zbin_mode_boost = 0;
1326 vp8cx_mb_init_quantizer(cpi, x);
1329 cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
1331 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
1333 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
1335 if (xd->mode_info_context->mbmi.mode == B_PRED)
1337 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1341 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1344 sum_intra_stats(cpi, x);
1353 vp8_find_near_mvs(xd, xd->mode_info_context,
1354 &nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
1356 vp8_build_uvmvs(xd, cpi->common.full_pixel);
1358 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
1359 ref_fb_idx = cpi->common.lst_fb_idx;
1360 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
1361 ref_fb_idx = cpi->common.gld_fb_idx;
1363 ref_fb_idx = cpi->common.alt_fb_idx;
1365 xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
1366 xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
1367 xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
1369 if (xd->mode_info_context->mbmi.mode == SPLITMV)
1373 for (i = 0; i < 16; i++)
1375 if (xd->block[i].bmi.mode == NEW4X4)
1377 cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
1378 cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
1382 else if (xd->mode_info_context->mbmi.mode == NEWMV)
1384 cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
1385 cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
1388 if (!x->skip && !x->e_mbd.mode_info_context->mbmi.force_no_skip)
1390 vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
1392 // Clear mb_skip_coeff if mb_no_coeff_skip is not set
1393 if (!cpi->common.mb_no_coeff_skip)
1394 xd->mode_info_context->mbmi.mb_skip_coeff = 0;
1398 vp8_stuff_inter16x16(x);
1402 vp8_tokenize_mb(cpi, xd, t);
1405 if (cpi->common.mb_no_coeff_skip)
1407 if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
1408 xd->mode_info_context->mbmi.dc_diff = 0;
1410 xd->mode_info_context->mbmi.dc_diff = 1;
1412 xd->mode_info_context->mbmi.mb_skip_coeff = 1;
1413 cpi->skip_true_count ++;
1414 vp8_fix_contexts(xd);
1418 vp8_stuff_mb(cpi, xd, t);
1419 xd->mode_info_context->mbmi.mb_skip_coeff = 0;
1420 cpi->skip_false_count ++;