/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
#define GF_ZEROMV_ZBIN_BOOST 0
#define LF_ZEROMV_ZBIN_BOOST 0
#define MV_ZBIN_BOOST        0
#define SPLIT_MV_ZBIN_BOOST  0
#define INTRA_ZBIN_BOOST     0
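// Note: all of the zero-bin boost values above are defined to 0, so any
// quantizer zero-bin boost derived from them is effectively a no-op in this
// version of the encoder.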
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);

// Motion vector component magnitude threshold for defining fast motion.
#define FAST_MOTION_MV_THRESH 24
// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};
#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};

static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};

static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH
static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi,
                                              const struct buf_2d *ref,
                                              BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
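// Note on the helper above: variance is measured against the flat 128
// reference in VP9_VAR_OFFS, so it captures how far the source deviates from
// a constant block; ROUND_POWER_OF_TWO() by num_pels_log2_lookup[bs] then
// converts the block variance into a per-pixel value.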
#if CONFIG_VP9_HIGHBITDEPTH
static unsigned int high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
                               0, &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
                               0, &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
                               0, &sse);
      break;
  }
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);
  const uint8_t *last_y = &last->y_buffer[mi_row * MI_SIZE * last->y_stride +
                                          mi_col * MI_SIZE];
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
157 static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi,
160 unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src,
173 static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
176 unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src,
// Lighter version of set_offsets that only sets the mode info pointers.
189 static INLINE void set_modeinfo_offsets(VP9_COMMON *const cm,
190 MACROBLOCKD *const xd,
193 const int idx_str = xd->mi_stride * mi_row + mi_col;
194 xd->mi = cm->mi + idx_str;
195 xd->mi[0].src_mi = &xd->mi[0];
198 static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
199 int mi_row, int mi_col, BLOCK_SIZE bsize) {
200 MACROBLOCK *const x = &cpi->mb;
201 VP9_COMMON *const cm = &cpi->common;
202 MACROBLOCKD *const xd = &x->e_mbd;
204 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
205 const int mi_height = num_8x8_blocks_high_lookup[bsize];
206 const struct segmentation *const seg = &cm->seg;
208 set_skip_context(xd, mi_row, mi_col);
210 set_modeinfo_offsets(cm, xd, mi_row, mi_col);
212 mbmi = &xd->mi[0].src_mi->mbmi;
214 // Set up destination pointers.
215 vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
  // Set up limit values for MV components.
  // MVs beyond this range do not produce new or different prediction blocks.
219 x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
220 x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
221 x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
222 x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
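  // Roughly speaking, (mi_row + mi_height) * MI_SIZE is the pixel distance
  // from the block's bottom edge to the top of the frame (and similarly for
  // the other limits), while VP9_INTERP_EXTEND accounts for the interpolation
  // filter border, so a vector at one of these limits already reads only
  // extended-border pixels.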
224 // Set up distance of MB to edge of frame in 1/8th pel units.
225 assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
226 set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
227 cm->mi_rows, cm->mi_cols);
229 // Set up source buffers.
230 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
233 x->rddiv = cpi->rd.RDDIV;
234 x->rdmult = cpi->rd.RDMULT;
238 if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
239 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
240 : cm->last_frame_seg_map;
241 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
243 vp9_init_plane_quantizers(cpi, x);
245 x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
247 mbmi->segment_id = 0;
248 x->encode_breakout = cpi->encode_breakout;
252 static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
253 int mi_row, int mi_col,
255 const int block_width = num_8x8_blocks_wide_lookup[bsize];
256 const int block_height = num_8x8_blocks_high_lookup[bsize];
258 for (j = 0; j < block_height; ++j)
259 for (i = 0; i < block_width; ++i) {
260 if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
261 xd->mi[j * xd->mi_stride + i].src_mi = &xd->mi[0];
265 static void set_block_size(VP9_COMP * const cpi,
266 int mi_row, int mi_col,
268 if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
269 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
270 set_modeinfo_offsets(&cpi->common, xd, mi_row, mi_col);
271 xd->mi[0].src_mi->mbmi.sb_type = bsize;
272 duplicate_mode_info_in_sb(&cpi->common, xd, mi_row, mi_col, bsize);
277 int64_t sum_square_error;
287 } partition_variance;
290 partition_variance part_variances;
295 partition_variance part_variances;
300 partition_variance part_variances;
305 partition_variance part_variances;
310 partition_variance *part_variances;
320 static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
322 node->part_variances = NULL;
323 vpx_memset(node->split, 0, sizeof(node->split));
326 v64x64 *vt = (v64x64 *) data;
327 node->part_variances = &vt->part_variances;
328 for (i = 0; i < 4; i++)
329 node->split[i] = &vt->split[i].part_variances.none;
333 v32x32 *vt = (v32x32 *) data;
334 node->part_variances = &vt->part_variances;
335 for (i = 0; i < 4; i++)
336 node->split[i] = &vt->split[i].part_variances.none;
340 v16x16 *vt = (v16x16 *) data;
341 node->part_variances = &vt->part_variances;
342 for (i = 0; i < 4; i++)
343 node->split[i] = &vt->split[i].part_variances.none;
347 v8x8 *vt = (v8x8 *) data;
348 node->part_variances = &vt->part_variances;
349 for (i = 0; i < 4; i++)
350 node->split[i] = &vt->split[i];
360 // Set variance values given sum square error, sum error, count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->count = c;
  v->variance = (int)(256 *
                      (v->sum_square_error - v->sum_error * v->sum_error /
                       v->count) / v->count);
}
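// The expression above is the population variance scaled by 256:
//   variance = 256 * (sum_square_error - sum_error^2 / count) / count
// The factor of 256 preserves precision in integer arithmetic when these
// values are later compared against thresholds.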
373 void sum_2_variances(const var *a, const var *b, var *r) {
374 fill_variance(a->sum_square_error + b->sum_square_error,
375 a->sum_error + b->sum_error, a->count + b->count, r);
378 static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
380 tree_to_node(data, bsize, &node);
381 sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
382 sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
383 sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
384 sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
385 sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
386 &node.part_variances->none);
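// Quadrant convention used above: split[0..3] are top-left, top-right,
// bottom-left and bottom-right, so horz[0]/horz[1] cover the top/bottom
// halves, vert[0]/vert[1] the left/right halves, and none the whole block.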
389 static int set_vt_partitioning(VP9_COMP *cpi,
394 VP9_COMMON * const cm = &cpi->common;
396 const int block_width = num_8x8_blocks_wide_lookup[bsize];
397 const int block_height = num_8x8_blocks_high_lookup[bsize];
398 // TODO(debargha): Choose this more intelligently.
399 const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 64 : 4;
  const int64_t threshold =
      (int64_t)(threshold_multiplier *
                vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth));
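  // The threshold scales with the quantizer step size, and key frames use a
  // 16x larger multiplier (64 vs 4), which biases them towards keeping larger
  // partitions at a given Q.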
403 assert(block_height == block_width);
404 tree_to_node(data, bsize, &vt);
406 // Split none is available only if we have more than half a block size
407 // in width and height inside the visible image.
408 if (mi_col + block_width / 2 < cm->mi_cols &&
409 mi_row + block_height / 2 < cm->mi_rows &&
410 vt.part_variances->none.variance < threshold) {
411 set_block_size(cpi, mi_row, mi_col, bsize);
415 // Only allow split for blocks above 16x16.
416 if (bsize > BLOCK_16X16) {
417 // Vertical split is available on all but the bottom border.
418 if (mi_row + block_height / 2 < cm->mi_rows &&
419 vt.part_variances->vert[0].variance < threshold &&
420 vt.part_variances->vert[1].variance < threshold) {
421 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
422 set_block_size(cpi, mi_row, mi_col, subsize);
423 set_block_size(cpi, mi_row, mi_col + block_width / 2, subsize);
427 // Horizontal split is available on all but the right border.
428 if (mi_col + block_width / 2 < cm->mi_cols &&
429 vt.part_variances->horz[0].variance < threshold &&
430 vt.part_variances->horz[1].variance < threshold) {
431 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
432 set_block_size(cpi, mi_row, mi_col, subsize);
433 set_block_size(cpi, mi_row + block_height / 2, mi_col, subsize);
438 // This will only allow 8x8 if the 16x16 variance is very large.
439 if (bsize == BLOCK_16X16) {
440 if (mi_col + block_width / 2 < cm->mi_cols &&
441 mi_row + block_height / 2 < cm->mi_rows &&
442 vt.part_variances->none.variance < (threshold << 6)) {
443 set_block_size(cpi, mi_row, mi_col, bsize);
450 // This function chooses partitioning based on the variance
451 // between source and reconstructed last, where variance is
452 // computed for 8x8 downsampled inputs. Some things to check:
453 // using the last source rather than reconstructed last, and
454 // allowing for small downsampling (4x4 or 2x2) for selection
455 // of smaller block sizes (i.e., < 16x16).
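// Sketch of the flow below: 8x8 averages of the source and of a simple
// LAST_FRAME prediction (a flat 128 reference on key frames) are differenced
// to fill the leaves of a 64x64 variance tree, fill_variance_tree() then
// aggregates the tree bottom-up, and set_vt_partitioning() walks it top-down,
// keeping a block size as soon as its variance falls below the threshold.
// The tree mirrors the partition hierarchy:
//   v64x64 vt
//     -> vt.split[i]              four 32x32 children
//       -> vt.split[i].split[j]   four 16x16 children
//         -> ...split[k]          four 8x8 leaves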
456 static void choose_partitioning(VP9_COMP *cpi,
457 const TileInfo *const tile,
458 int mi_row, int mi_col) {
459 VP9_COMMON * const cm = &cpi->common;
460 MACROBLOCK *x = &cpi->mb;
461 MACROBLOCKD *xd = &cpi->mb.e_mbd;
469 int pixels_wide = 64, pixels_high = 64;
470 int_mv nearest_mv, near_mv;
471 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
472 const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
474 vp9_clear_system_state();
476 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
478 if (xd->mb_to_right_edge < 0)
479 pixels_wide += (xd->mb_to_right_edge >> 3);
480 if (xd->mb_to_bottom_edge < 0)
481 pixels_high += (xd->mb_to_bottom_edge >> 3);
483 s = x->plane[0].src.buf;
484 sp = x->plane[0].src.stride;
486 if (cm->frame_type != KEY_FRAME) {
487 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf);
489 xd->mi[0].src_mi->mbmi.ref_frame[0] = LAST_FRAME;
490 xd->mi[0].src_mi->mbmi.sb_type = BLOCK_64X64;
491 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv,
492 xd->mi[0].src_mi->mbmi.ref_mvs[LAST_FRAME],
493 &nearest_mv, &near_mv);
495 xd->mi[0].src_mi->mbmi.mv[0] = nearest_mv;
496 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
498 d = xd->plane[0].dst.buf;
499 dp = xd->plane[0].dst.stride;
503 #if CONFIG_VP9_HIGHBITDEPTH
504 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
507 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
510 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
514 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
518 #endif // CONFIG_VP9_HIGHBITDEPTH
521 // Fill in the entire tree of 8x8 variances for splits.
522 for (i = 0; i < 4; i++) {
523 const int x32_idx = ((i & 1) << 5);
524 const int y32_idx = ((i >> 1) << 5);
525 for (j = 0; j < 4; j++) {
526 const int x16_idx = x32_idx + ((j & 1) << 4);
527 const int y16_idx = y32_idx + ((j >> 1) << 4);
528 v16x16 *vst = &vt.split[i].split[j];
529 for (k = 0; k < 4; k++) {
530 int x_idx = x16_idx + ((k & 1) << 3);
531 int y_idx = y16_idx + ((k >> 1) << 3);
532 unsigned int sse = 0;
535 if (x_idx < pixels_wide && y_idx < pixels_high) {
537 #if CONFIG_VP9_HIGHBITDEPTH
538 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
539 s_avg = vp9_highbd_avg_8x8(s + y_idx * sp + x_idx, sp);
540 d_avg = vp9_highbd_avg_8x8(d + y_idx * dp + x_idx, dp);
542 s_avg = vp9_avg_8x8(s + y_idx * sp + x_idx, sp);
543 d_avg = vp9_avg_8x8(d + y_idx * dp + x_idx, dp);
546 s_avg = vp9_avg_8x8(s + y_idx * sp + x_idx, sp);
547 d_avg = vp9_avg_8x8(d + y_idx * dp + x_idx, dp);
          // For an 8x8 block we have just one value, the average of all 64
          // pixels, so use a count of 1. This means of course that there is
          // no variance at the 8x8 level.
555 fill_variance(sse, sum, 1, &vst->split[k].part_variances.none);
559 // Fill the rest of the variance tree by summing split partition values.
560 for (i = 0; i < 4; i++) {
561 for (j = 0; j < 4; j++) {
562 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
564 fill_variance_tree(&vt.split[i], BLOCK_32X32);
566 fill_variance_tree(&vt, BLOCK_64X64);
  // Now go through the entire structure, splitting every block size until
  // we get to one that has a variance lower than our threshold or we reach
  // the 8x8 level.
571 if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
572 !set_vt_partitioning(cpi, &vt, BLOCK_64X64, mi_row, mi_col)) {
573 for (i = 0; i < 4; ++i) {
574 const int x32_idx = ((i & 1) << 2);
575 const int y32_idx = ((i >> 1) << 2);
576 if (!set_vt_partitioning(cpi, &vt.split[i], BLOCK_32X32,
577 (mi_row + y32_idx), (mi_col + x32_idx))) {
578 for (j = 0; j < 4; ++j) {
579 const int x16_idx = ((j & 1) << 1);
580 const int y16_idx = ((j >> 1) << 1);
          // NOTE: Since this uses 8x8 downsampling for the variance
          // calculation we cannot really select block size 8x8 (or even
          // 8x16/16x8), since we do not have sufficient samples for the
          // variance computation.
          // For now, an 8x8 partition is only set if the variance of the
          // 16x16 block is very high. This is controlled in
          // set_vt_partitioning.
586 if (!set_vt_partitioning(cpi, &vt.split[i].split[j],
588 mi_row + y32_idx + y16_idx,
589 mi_col + x32_idx + x16_idx)) {
590 for (k = 0; k < 4; ++k) {
591 const int x8_idx = (k & 1);
592 const int y8_idx = (k >> 1);
594 (mi_row + y32_idx + y16_idx + y8_idx),
595 (mi_col + x32_idx + x16_idx + x8_idx),
605 static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
606 int mi_row, int mi_col, BLOCK_SIZE bsize,
607 int output_enabled) {
609 VP9_COMMON *const cm = &cpi->common;
610 RD_OPT *const rd_opt = &cpi->rd;
611 MACROBLOCK *const x = &cpi->mb;
612 MACROBLOCKD *const xd = &x->e_mbd;
613 struct macroblock_plane *const p = x->plane;
614 struct macroblockd_plane *const pd = xd->plane;
615 MODE_INFO *mi = &ctx->mic;
616 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
617 MODE_INFO *mi_addr = &xd->mi[0];
618 const struct segmentation *const seg = &cm->seg;
620 const int mis = cm->mi_stride;
621 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
622 const int mi_height = num_8x8_blocks_high_lookup[bsize];
625 assert(mi->mbmi.sb_type == bsize);
628 mi_addr->src_mi = mi_addr;
  // If segmentation is in use.
631 if (seg->enabled && output_enabled) {
632 // For in frame complexity AQ copy the segment id from the segment map.
633 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
634 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
635 : cm->last_frame_seg_map;
636 mi_addr->mbmi.segment_id =
637 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
639 // Else for cyclic refresh mode update the segment map, set the segment id
640 // and then update the quantizer.
641 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
642 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi,
643 mi_row, mi_col, bsize, 1);
647 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
648 for (i = 0; i < max_plane; ++i) {
649 p[i].coeff = ctx->coeff_pbuf[i][1];
650 p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
651 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
652 p[i].eobs = ctx->eobs_pbuf[i][1];
655 for (i = max_plane; i < MAX_MB_PLANE; ++i) {
656 p[i].coeff = ctx->coeff_pbuf[i][2];
657 p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
658 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
659 p[i].eobs = ctx->eobs_pbuf[i][2];
  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it.
664 for (y = 0; y < mi_height; y++)
665 for (x_idx = 0; x_idx < mi_width; x_idx++)
666 if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
667 && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
668 xd->mi[x_idx + y * mis].src_mi = mi_addr;
671 if (cpi->oxcf.aq_mode)
672 vp9_init_plane_quantizers(cpi, x);
674 // FIXME(rbultje) I'm pretty sure this should go to the end of this block
675 // (i.e. after the output_enabled)
676 if (bsize < BLOCK_32X32) {
677 if (bsize < BLOCK_16X16)
678 ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
679 ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
682 if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
683 mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
684 mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
688 vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
689 sizeof(uint8_t) * ctx->num_4x4_blk);
694 if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
695 for (i = 0; i < TX_MODES; i++)
696 rd_opt->tx_select_diff[i] += ctx->tx_rd_diff[i];
699 #if CONFIG_INTERNAL_STATS
700 if (frame_is_intra_only(cm)) {
701 static const int kf_mode_index[] = {
703 THR_V_PRED /*V_PRED*/,
704 THR_H_PRED /*H_PRED*/,
705 THR_D45_PRED /*D45_PRED*/,
706 THR_D135_PRED /*D135_PRED*/,
707 THR_D117_PRED /*D117_PRED*/,
708 THR_D153_PRED /*D153_PRED*/,
709 THR_D207_PRED /*D207_PRED*/,
710 THR_D63_PRED /*D63_PRED*/,
713 ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
    // Note how often each mode is chosen as best.
716 ++cpi->mode_chosen_counts[ctx->best_mode_index];
719 if (!frame_is_intra_only(cm)) {
720 if (is_inter_block(mbmi)) {
721 vp9_update_mv_count(cm, xd);
723 if (cm->interp_filter == SWITCHABLE) {
724 const int ctx = vp9_get_pred_context_switchable_interp(xd);
725 ++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
729 rd_opt->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
730 rd_opt->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
731 rd_opt->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
733 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
734 rd_opt->filter_diff[i] += ctx->best_filter_diff[i];
738 void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
739 int mi_row, int mi_col) {
740 uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
741 const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride };
744 // Set current frame pointer.
745 x->e_mbd.cur_buf = src;
747 for (i = 0; i < MAX_MB_PLANE; i++)
748 setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
749 NULL, x->e_mbd.plane[i].subsampling_x,
750 x->e_mbd.plane[i].subsampling_y);
753 static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode, int *rate,
754 int64_t *dist, BLOCK_SIZE bsize) {
755 MACROBLOCKD *const xd = &x->e_mbd;
756 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
757 INTERP_FILTER filter_ref;
759 if (xd->up_available)
760 filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
761 else if (xd->left_available)
762 filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;
764 filter_ref = EIGHTTAP;
766 mbmi->sb_type = bsize;
768 mbmi->tx_size = MIN(max_txsize_lookup[bsize],
769 tx_mode_to_biggest_tx_size[tx_mode]);
771 mbmi->uv_mode = DC_PRED;
772 mbmi->ref_frame[0] = LAST_FRAME;
773 mbmi->ref_frame[1] = NONE;
774 mbmi->mv[0].as_int = 0;
775 mbmi->interp_filter = filter_ref;
777 xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = 0;
784 static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
785 int mi_row, int mi_col, RD_COST *rd_cost,
786 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
788 VP9_COMMON *const cm = &cpi->common;
789 MACROBLOCK *const x = &cpi->mb;
790 MACROBLOCKD *const xd = &x->e_mbd;
792 struct macroblock_plane *const p = x->plane;
793 struct macroblockd_plane *const pd = xd->plane;
794 const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
798 vp9_clear_system_state();
799 rdmult_ratio = 1.0; // avoid uninitialized warnings
801 // Use the lower precision, but faster, 32x32 fdct for mode selection.
802 x->use_lp32x32fdct = 1;
804 set_offsets(cpi, tile, mi_row, mi_col, bsize);
805 mbmi = &xd->mi[0].src_mi->mbmi;
806 mbmi->sb_type = bsize;
808 for (i = 0; i < MAX_MB_PLANE; ++i) {
809 p[i].coeff = ctx->coeff_pbuf[i][0];
810 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
811 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
812 p[i].eobs = ctx->eobs_pbuf[i][0];
818 // Set to zero to make sure we do not use the previous encoded frame stats
821 #if CONFIG_VP9_HIGHBITDEPTH
822 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
824 high_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize, xd->bd);
827 get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
830 x->source_variance = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
831 #endif // CONFIG_VP9_HIGHBITDEPTH
833 // Save rdmult before it might be changed, so it can be restored later.
834 orig_rdmult = x->rdmult;
836 if (aq_mode == VARIANCE_AQ) {
837 const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
838 : vp9_block_energy(cpi, x, bsize);
839 if (cm->frame_type == KEY_FRAME ||
840 cpi->refresh_alt_ref_frame ||
841 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
842 mbmi->segment_id = vp9_vaq_segment_id(energy);
844 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
845 : cm->last_frame_seg_map;
846 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
849 rdmult_ratio = vp9_vaq_rdmult_ratio(energy);
850 vp9_init_plane_quantizers(cpi, x);
851 vp9_clear_system_state();
852 x->rdmult = (int)round(x->rdmult * rdmult_ratio);
853 } else if (aq_mode == COMPLEXITY_AQ) {
854 const int mi_offset = mi_row * cm->mi_cols + mi_col;
855 unsigned char complexity = cpi->complexity_map[mi_offset];
856 const int is_edge = (mi_row <= 1) || (mi_row >= (cm->mi_rows - 2)) ||
857 (mi_col <= 1) || (mi_col >= (cm->mi_cols - 2));
858 if (!is_edge && (complexity > 128))
859 x->rdmult += ((x->rdmult * (complexity - 128)) / 256);
860 } else if (aq_mode == CYCLIC_REFRESH_AQ) {
861 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
862 : cm->last_frame_seg_map;
863 // If segment 1, use rdmult for that segment.
864 if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col))
865 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
868 // Find best coding mode & reconstruct the MB so it is available
869 // as a predictor for MBs that follow in the SB
870 if (frame_is_intra_only(cm)) {
871 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
873 if (bsize >= BLOCK_8X8) {
874 if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
875 vp9_rd_pick_inter_mode_sb_seg_skip(cpi, x, rd_cost, bsize,
878 vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col,
879 rd_cost, bsize, ctx, best_rd);
881 vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, rd_cost,
882 bsize, ctx, best_rd);
886 if (aq_mode == VARIANCE_AQ && rd_cost->rate != INT_MAX) {
887 vp9_clear_system_state();
888 rd_cost->rate = (int)round(rd_cost->rate * rdmult_ratio);
889 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
892 x->rdmult = orig_rdmult;
894 // TODO(jingning) The rate-distortion optimization flow needs to be
895 // refactored to provide proper exit/return handle.
896 if (rd_cost->rate == INT_MAX)
897 rd_cost->rdcost = INT64_MAX;
900 static void update_stats(VP9_COMMON *cm, const MACROBLOCK *x) {
901 const MACROBLOCKD *const xd = &x->e_mbd;
902 const MODE_INFO *const mi = xd->mi[0].src_mi;
903 const MB_MODE_INFO *const mbmi = &mi->mbmi;
905 if (!frame_is_intra_only(cm)) {
906 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
908 if (!seg_ref_active) {
909 FRAME_COUNTS *const counts = &cm->counts;
910 const int inter_block = is_inter_block(mbmi);
912 counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;
914 // If the segment reference feature is enabled we have only a single
915 // reference frame allowed for the segment so exclude it from
916 // the reference frame counts used to work out probabilities.
918 const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
920 if (cm->reference_mode == REFERENCE_MODE_SELECT)
921 counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
922 [has_second_ref(mbmi)]++;
924 if (has_second_ref(mbmi)) {
925 counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
926 [ref0 == GOLDEN_FRAME]++;
928 counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
929 [ref0 != LAST_FRAME]++;
930 if (ref0 != LAST_FRAME)
931 counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
932 [ref0 != GOLDEN_FRAME]++;
939 static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
940 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
941 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
942 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
944 MACROBLOCK *const x = &cpi->mb;
945 MACROBLOCKD *const xd = &x->e_mbd;
947 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
948 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
949 int mi_width = num_8x8_blocks_wide_lookup[bsize];
950 int mi_height = num_8x8_blocks_high_lookup[bsize];
951 for (p = 0; p < MAX_MB_PLANE; p++) {
953 xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
954 a + num_4x4_blocks_wide * p,
955 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
956 xd->plane[p].subsampling_x);
959 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
960 l + num_4x4_blocks_high * p,
961 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
962 xd->plane[p].subsampling_y);
964 vpx_memcpy(xd->above_seg_context + mi_col, sa,
965 sizeof(*xd->above_seg_context) * mi_width);
966 vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
967 sizeof(xd->left_seg_context[0]) * mi_height);
970 static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
971 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
972 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
973 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
975 const MACROBLOCK *const x = &cpi->mb;
976 const MACROBLOCKD *const xd = &x->e_mbd;
978 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
979 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
980 int mi_width = num_8x8_blocks_wide_lookup[bsize];
981 int mi_height = num_8x8_blocks_high_lookup[bsize];
  // Buffer the above/left context information of the block being searched.
984 for (p = 0; p < MAX_MB_PLANE; ++p) {
986 a + num_4x4_blocks_wide * p,
987 xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
988 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
989 xd->plane[p].subsampling_x);
991 l + num_4x4_blocks_high * p,
993 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
994 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
995 xd->plane[p].subsampling_y);
997 vpx_memcpy(sa, xd->above_seg_context + mi_col,
998 sizeof(*xd->above_seg_context) * mi_width);
999 vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
1000 sizeof(xd->left_seg_context[0]) * mi_height);
1003 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
1004 TOKENEXTRA **tp, int mi_row, int mi_col,
1005 int output_enabled, BLOCK_SIZE bsize,
1006 PICK_MODE_CONTEXT *ctx) {
1007 set_offsets(cpi, tile, mi_row, mi_col, bsize);
1008 update_state(cpi, ctx, mi_row, mi_col, bsize, output_enabled);
1009 encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize, ctx);
1011 if (output_enabled) {
1012 update_stats(&cpi->common, &cpi->mb);
1014 (*tp)->token = EOSB_TOKEN;
1019 static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
1020 TOKENEXTRA **tp, int mi_row, int mi_col,
1021 int output_enabled, BLOCK_SIZE bsize,
1023 VP9_COMMON *const cm = &cpi->common;
1024 MACROBLOCK *const x = &cpi->mb;
1025 MACROBLOCKD *const xd = &x->e_mbd;
1027 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
1029 PARTITION_TYPE partition;
1030 BLOCK_SIZE subsize = bsize;
1032 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1035 if (bsize >= BLOCK_8X8) {
1036 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
1037 subsize = get_subsize(bsize, pc_tree->partitioning);
1040 subsize = BLOCK_4X4;
1043 partition = partition_lookup[bsl][subsize];
1044 if (output_enabled && bsize != BLOCK_4X4)
1045 cm->counts.partition[ctx][partition]++;
1047 switch (partition) {
1048 case PARTITION_NONE:
1049 encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1052 case PARTITION_VERT:
1053 encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1054 &pc_tree->vertical[0]);
1055 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
1056 encode_b(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize,
1057 &pc_tree->vertical[1]);
1060 case PARTITION_HORZ:
1061 encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1062 &pc_tree->horizontal[0]);
1063 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
1064 encode_b(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize,
1065 &pc_tree->horizontal[1]);
1068 case PARTITION_SPLIT:
1069 if (bsize == BLOCK_8X8) {
1070 encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1071 pc_tree->leaf_split[0]);
1073 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1075 encode_sb(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize,
1077 encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize,
1079 encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
1080 subsize, pc_tree->split[3]);
      assert(0 && "Invalid partition type.");
1088 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
1089 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not, then return the largest allowed partition size.
1095 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
1096 int rows_left, int cols_left,
  if (rows_left <= 0 || cols_left <= 0) {
    return MIN(bsize, BLOCK_8X8);
  }
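  // Stepping bsize down by 3 visits only the square sizes
  // (64x64 -> 32x32 -> 16x16 -> 8x8 -> 4x4), since square sizes are spaced
  // three entries apart in the BLOCK_SIZE enum.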
1101 for (; bsize > 0; bsize -= 3) {
1102 *bh = num_8x8_blocks_high_lookup[bsize];
1103 *bw = num_8x8_blocks_wide_lookup[bsize];
1104 if ((*bh <= rows_left) && (*bw <= cols_left)) {
1112 static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
1113 int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
1114 BLOCK_SIZE bsize, MODE_INFO *mi_8x8) {
1117 for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
1119 for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
1120 const int index = r * mis + c;
1121 mi_8x8[index].src_mi = mi + index;
1122 mi_8x8[index].src_mi->mbmi.sb_type = find_partition_size(bsize,
1123 row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed, in which case this code attempts to choose the largest
// allowable partition.
1133 static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
1134 MODE_INFO *mi_8x8, int mi_row, int mi_col,
1136 VP9_COMMON *const cm = &cpi->common;
1137 const int mis = cm->mi_stride;
1138 const int row8x8_remaining = tile->mi_row_end - mi_row;
1139 const int col8x8_remaining = tile->mi_col_end - mi_col;
1140 int block_row, block_col;
1141 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
1142 int bh = num_8x8_blocks_high_lookup[bsize];
1143 int bw = num_8x8_blocks_wide_lookup[bsize];
1145 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
1147 // Apply the requested partition size to the SB64 if it is all "in image"
1148 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
1149 (row8x8_remaining >= MI_BLOCK_SIZE)) {
1150 for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
1151 for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
1152 int index = block_row * mis + block_col;
1153 mi_8x8[index].src_mi = mi_upper_left + index;
1154 mi_8x8[index].src_mi->mbmi.sb_type = bsize;
1158 // Else this is a partial SB64.
1159 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
1160 col8x8_remaining, bsize, mi_8x8);
1164 static void copy_partitioning(VP9_COMMON *cm, MODE_INFO *mi_8x8,
1165 MODE_INFO *prev_mi_8x8) {
1166 const int mis = cm->mi_stride;
1167 int block_row, block_col;
1169 for (block_row = 0; block_row < 8; ++block_row) {
1170 for (block_col = 0; block_col < 8; ++block_col) {
1171 MODE_INFO *const prev_mi =
1172 prev_mi_8x8[block_row * mis + block_col].src_mi;
1173 const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
1176 const ptrdiff_t offset = prev_mi - cm->prev_mi;
1177 mi_8x8[block_row * mis + block_col].src_mi = cm->mi + offset;
1178 mi_8x8[block_row * mis + block_col].src_mi->mbmi.sb_type = sb_type;
1184 static void constrain_copy_partitioning(VP9_COMP *const cpi,
1185 const TileInfo *const tile,
1187 MODE_INFO *prev_mi_8x8,
1188 int mi_row, int mi_col,
1190 VP9_COMMON *const cm = &cpi->common;
1191 const int mis = cm->mi_stride;
1192 const int row8x8_remaining = tile->mi_row_end - mi_row;
1193 const int col8x8_remaining = tile->mi_col_end - mi_col;
1194 MODE_INFO *const mi_upper_left = cm->mi + mi_row * mis + mi_col;
1195 const int bh = num_8x8_blocks_high_lookup[bsize];
1196 const int bw = num_8x8_blocks_wide_lookup[bsize];
1197 int block_row, block_col;
1199 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
  // If the SB64 is entirely "in image".
1202 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
1203 (row8x8_remaining >= MI_BLOCK_SIZE)) {
1204 for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
1205 for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
1206 const int index = block_row * mis + block_col;
1207 MODE_INFO *prev_mi = prev_mi_8x8[index].src_mi;
1208 const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
1209 // Use previous partition if block size is not larger than bsize.
1210 if (prev_mi && sb_type <= bsize) {
1211 int block_row2, block_col2;
1212 for (block_row2 = 0; block_row2 < bh; ++block_row2) {
1213 for (block_col2 = 0; block_col2 < bw; ++block_col2) {
1214 const int index2 = (block_row + block_row2) * mis +
1215 block_col + block_col2;
1216 prev_mi = prev_mi_8x8[index2].src_mi;
1218 const ptrdiff_t offset = prev_mi - cm->prev_mi;
1219 mi_8x8[index2].src_mi = cm->mi + offset;
1220 mi_8x8[index2].src_mi->mbmi.sb_type = prev_mi->mbmi.sb_type;
1225 // Otherwise, use fixed partition of size bsize.
1226 mi_8x8[index].src_mi = mi_upper_left + index;
1227 mi_8x8[index].src_mi->mbmi.sb_type = bsize;
1232 // Else this is a partial SB64, copy previous partition.
1233 copy_partitioning(cm, mi_8x8, prev_mi_8x8);
1240 } coord_lookup[16] = {
1242 {0, 0}, {0, 2}, {2, 0}, {2, 2},
1244 {0, 4}, {0, 6}, {2, 4}, {2, 6},
1246 {4, 0}, {4, 2}, {6, 0}, {6, 2},
1248 {4, 4}, {4, 6}, {6, 4}, {6, 6},
1251 static void set_source_var_based_partition(VP9_COMP *cpi,
1252 const TileInfo *const tile,
1254 int mi_row, int mi_col) {
1255 VP9_COMMON *const cm = &cpi->common;
1256 MACROBLOCK *const x = &cpi->mb;
1257 const int mis = cm->mi_stride;
1258 const int row8x8_remaining = tile->mi_row_end - mi_row;
1259 const int col8x8_remaining = tile->mi_col_end - mi_col;
1260 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
1262 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
1264 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
1267 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
1268 (row8x8_remaining >= MI_BLOCK_SIZE)) {
1272 const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
1273 int is_larger_better = 0;
1275 unsigned int thr = cpi->source_var_thresh;
1277 vpx_memset(d32, 0, 4 * sizeof(diff));
1279 for (i = 0; i < 4; i++) {
1282 for (j = 0; j < 4; j++) {
1283 int b_mi_row = coord_lookup[i * 4 + j].row;
1284 int b_mi_col = coord_lookup[i * 4 + j].col;
1285 int boffset = b_mi_row / 2 * cm->mb_cols +
1288 d16[j] = cpi->source_diff_var + offset + boffset;
1290 index = b_mi_row * mis + b_mi_col;
1291 mi_8x8[index].src_mi = mi_upper_left + index;
1292 mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_16X16;
1294 // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
1295 // size to further improve quality.
1298 is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
1299 (d16[2]->var < thr) && (d16[3]->var < thr);
1301 // Use 32x32 partition
1302 if (is_larger_better) {
1305 for (j = 0; j < 4; j++) {
1306 d32[i].sse += d16[j]->sse;
1307 d32[i].sum += d16[j]->sum;
1310 d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
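          // The >> 10 above divides by 1024, the pixel count of a 32x32
          // block, so d32[i].var is the unnormalized variance
          // sse - sum^2 / N of the combined area.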
1312 index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
1313 mi_8x8[index].src_mi = mi_upper_left + index;
1314 mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_32X32;
1318 if (use32x32 == 4) {
1320 is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
1321 (d32[2].var < thr) && (d32[3].var < thr);
1323 // Use 64x64 partition
1324 if (is_larger_better) {
1325 mi_8x8[0].src_mi = mi_upper_left;
1326 mi_8x8[0].src_mi->mbmi.sb_type = BLOCK_64X64;
1329 } else { // partial in-image SB64
1330 int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
1331 int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
1332 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
1333 row8x8_remaining, col8x8_remaining, BLOCK_16X16, mi_8x8);
1337 static int is_background(const VP9_COMP *cpi, const TileInfo *const tile,
1338 int mi_row, int mi_col) {
1339 // This assumes the input source frames are of the same dimension.
1340 const int row8x8_remaining = tile->mi_row_end - mi_row;
1341 const int col8x8_remaining = tile->mi_col_end - mi_col;
1342 const int x = mi_col * MI_SIZE;
1343 const int y = mi_row * MI_SIZE;
1344 const int src_stride = cpi->Source->y_stride;
1345 const uint8_t *const src = &cpi->Source->y_buffer[y * src_stride + x];
1346 const int pre_stride = cpi->Last_Source->y_stride;
1347 const uint8_t *const pre = &cpi->Last_Source->y_buffer[y * pre_stride + x];
1351 if (row8x8_remaining >= MI_BLOCK_SIZE &&
1352 col8x8_remaining >= MI_BLOCK_SIZE) {
1353 this_sad = cpi->fn_ptr[BLOCK_64X64].sdf(src, src_stride, pre, pre_stride);
1354 threshold = (1 << 12);
1357 for (r = 0; r < row8x8_remaining; r += 2)
1358 for (c = 0; c < col8x8_remaining; c += 2)
1359 this_sad += cpi->fn_ptr[BLOCK_16X16].sdf(src, src_stride,
1361 threshold = (row8x8_remaining * col8x8_remaining) << 6;
1364 return this_sad < 2 * threshold;
1367 static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO *prev_mi_8x8,
1368 const int motion_thresh) {
1369 const int mis = cm->mi_stride;
1370 int block_row, block_col;
1373 for (block_row = 0; block_row < 8; ++block_row) {
1374 for (block_col = 0; block_col < 8; ++block_col) {
1375 const MODE_INFO *prev_mi =
1376 prev_mi_8x8[block_row * mis + block_col].src_mi;
1378 if (abs(prev_mi->mbmi.mv[0].as_mv.row) > motion_thresh ||
1379 abs(prev_mi->mbmi.mv[0].as_mv.col) > motion_thresh)
1388 static void update_state_rt(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
1389 int mi_row, int mi_col, int bsize) {
1390 VP9_COMMON *const cm = &cpi->common;
1391 MACROBLOCK *const x = &cpi->mb;
1392 MACROBLOCKD *const xd = &x->e_mbd;
1393 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
1394 const struct segmentation *const seg = &cm->seg;
1396 *(xd->mi[0].src_mi) = ctx->mic;
1397 xd->mi[0].src_mi = &xd->mi[0];
1399 if (seg->enabled && cpi->oxcf.aq_mode) {
1400 // For in frame complexity AQ or variance AQ, copy segment_id from
1401 // segmentation_map.
1402 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ ||
1403 cpi->oxcf.aq_mode == VARIANCE_AQ ) {
1404 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
1405 : cm->last_frame_seg_map;
1406 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
1408 // Setting segmentation map for cyclic_refresh
1409 vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize, 1);
1411 vp9_init_plane_quantizers(cpi, x);
1414 if (is_inter_block(mbmi)) {
1415 vp9_update_mv_count(cm, xd);
1417 if (cm->interp_filter == SWITCHABLE) {
1418 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
1419 ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter];
1423 x->skip = ctx->skip;
1424 x->skip_txfm[0] = mbmi->segment_id ? 0 : ctx->skip_txfm[0];
1427 static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile,
1428 TOKENEXTRA **tp, int mi_row, int mi_col,
1429 int output_enabled, BLOCK_SIZE bsize,
1430 PICK_MODE_CONTEXT *ctx) {
1431 set_offsets(cpi, tile, mi_row, mi_col, bsize);
1432 update_state_rt(cpi, ctx, mi_row, mi_col, bsize);
1434 #if CONFIG_VP9_TEMPORAL_DENOISING
1435 if (cpi->oxcf.noise_sensitivity > 0 && output_enabled) {
1436 vp9_denoiser_denoise(&cpi->denoiser, &cpi->mb, mi_row, mi_col,
1437 MAX(BLOCK_8X8, bsize), ctx);
1441 encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize, ctx);
1442 update_stats(&cpi->common, &cpi->mb);
1444 (*tp)->token = EOSB_TOKEN;
1448 static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile,
1449 TOKENEXTRA **tp, int mi_row, int mi_col,
1450 int output_enabled, BLOCK_SIZE bsize,
1452 VP9_COMMON *const cm = &cpi->common;
1453 MACROBLOCK *const x = &cpi->mb;
1454 MACROBLOCKD *const xd = &x->e_mbd;
1456 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
1458 PARTITION_TYPE partition;
1461 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1464 if (bsize >= BLOCK_8X8) {
1465 const int idx_str = xd->mi_stride * mi_row + mi_col;
1466 MODE_INFO *mi_8x8 = cm->mi[idx_str].src_mi;
1467 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
1468 subsize = mi_8x8[0].src_mi->mbmi.sb_type;
1471 subsize = BLOCK_4X4;
1474 partition = partition_lookup[bsl][subsize];
1475 if (output_enabled && bsize != BLOCK_4X4)
1476 cm->counts.partition[ctx][partition]++;
1478 switch (partition) {
1479 case PARTITION_NONE:
1480 encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1483 case PARTITION_VERT:
1484 encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1485 &pc_tree->vertical[0]);
1486 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
1487 encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
1488 subsize, &pc_tree->vertical[1]);
1491 case PARTITION_HORZ:
1492 encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1493 &pc_tree->horizontal[0]);
1494 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
1495 encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
1496 subsize, &pc_tree->horizontal[1]);
1499 case PARTITION_SPLIT:
1500 subsize = get_subsize(bsize, PARTITION_SPLIT);
1501 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
1503 encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
1504 subsize, pc_tree->split[1]);
1505 encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
1506 subsize, pc_tree->split[2]);
1507 encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
1508 subsize, pc_tree->split[3]);
      assert(0 && "Invalid partition type.");
1515 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
1516 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
1519 static void rd_use_partition(VP9_COMP *cpi, const TileInfo *const tile,
1520 MODE_INFO *mi_8x8, TOKENEXTRA **tp,
1521 int mi_row, int mi_col,
1522 BLOCK_SIZE bsize, int *rate, int64_t *dist,
1523 int do_recon, PC_TREE *pc_tree) {
1524 VP9_COMMON *const cm = &cpi->common;
1525 MACROBLOCK *const x = &cpi->mb;
1526 MACROBLOCKD *const xd = &x->e_mbd;
1527 const int mis = cm->mi_stride;
1528 const int bsl = b_width_log2_lookup[bsize];
1529 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
1530 const int bss = (1 << bsl) / 4;
1532 PARTITION_TYPE partition = PARTITION_NONE;
1534 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
1535 PARTITION_CONTEXT sl[8], sa[8];
1536 RD_COST last_part_rdc, none_rdc, chosen_rdc;
1537 BLOCK_SIZE sub_subsize = BLOCK_4X4;
1538 int splits_below = 0;
1539 BLOCK_SIZE bs_type = mi_8x8[0].src_mi->mbmi.sb_type;
1540 int do_partition_search = 1;
1541 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
1543 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1546 assert(num_4x4_blocks_wide_lookup[bsize] ==
1547 num_4x4_blocks_high_lookup[bsize]);
1549 vp9_rd_cost_reset(&last_part_rdc);
1550 vp9_rd_cost_reset(&none_rdc);
1551 vp9_rd_cost_reset(&chosen_rdc);
1553 partition = partition_lookup[bsl][bs_type];
1554 subsize = get_subsize(bsize, partition);
1556 pc_tree->partitioning = partition;
1557 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
1559 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
1560 set_offsets(cpi, tile, mi_row, mi_col, bsize);
1561 x->mb_energy = vp9_block_energy(cpi, x, bsize);
1564 if (do_partition_search &&
1565 cpi->sf.partition_search_type == SEARCH_PARTITION &&
1566 cpi->sf.adjust_partitioning_from_last_frame) {
1567 // Check if any of the sub blocks are further split.
1568 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
1569 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
1571 for (i = 0; i < 4; i++) {
1572 int jj = i >> 1, ii = i & 0x01;
1573 MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss].src_mi;
1574 if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
  // If partition is not none, try none unless each of the 4 splits is further
  // split.
1582 if (partition != PARTITION_NONE && !splits_below &&
1583 mi_row + (mi_step >> 1) < cm->mi_rows &&
1584 mi_col + (mi_step >> 1) < cm->mi_cols) {
1585 pc_tree->partitioning = PARTITION_NONE;
1586 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rdc, bsize,
1589 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1591 if (none_rdc.rate < INT_MAX) {
1592 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
1593 none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
1597 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
1598 mi_8x8[0].src_mi->mbmi.sb_type = bs_type;
1599 pc_tree->partitioning = partition;
1603 switch (partition) {
1604 case PARTITION_NONE:
1605 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc,
1606 bsize, ctx, INT64_MAX);
1608 case PARTITION_HORZ:
1609 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc,
1610 subsize, &pc_tree->horizontal[0],
1612 if (last_part_rdc.rate != INT_MAX &&
1613 bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
1615 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
1616 vp9_rd_cost_init(&tmp_rdc);
1617 update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
1618 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
1619 rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
1620 subsize, &pc_tree->horizontal[1], INT64_MAX);
1621 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1622 vp9_rd_cost_reset(&last_part_rdc);
1625 last_part_rdc.rate += tmp_rdc.rate;
1626 last_part_rdc.dist += tmp_rdc.dist;
1627 last_part_rdc.rdcost += tmp_rdc.rdcost;
1630 case PARTITION_VERT:
1631 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc,
1632 subsize, &pc_tree->vertical[0], INT64_MAX);
1633 if (last_part_rdc.rate != INT_MAX &&
1634 bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
1636 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
1637 vp9_rd_cost_init(&tmp_rdc);
1638 update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
1639 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
1640 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
1641 subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
1643 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1644 vp9_rd_cost_reset(&last_part_rdc);
1647 last_part_rdc.rate += tmp_rdc.rate;
1648 last_part_rdc.dist += tmp_rdc.dist;
1649 last_part_rdc.rdcost += tmp_rdc.rdcost;
1652 case PARTITION_SPLIT:
1653 if (bsize == BLOCK_8X8) {
1654 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc,
1655 subsize, pc_tree->leaf_split[0], INT64_MAX);
1658 last_part_rdc.rate = 0;
1659 last_part_rdc.dist = 0;
1660 last_part_rdc.rdcost = 0;
1661 for (i = 0; i < 4; i++) {
1662 int x_idx = (i & 1) * (mi_step >> 1);
1663 int y_idx = (i >> 1) * (mi_step >> 1);
1664 int jj = i >> 1, ii = i & 0x01;
1666 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
1669 vp9_rd_cost_init(&tmp_rdc);
1670 rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
1671 mi_row + y_idx, mi_col + x_idx, subsize,
1672 &tmp_rdc.rate, &tmp_rdc.dist,
1673 i != 3, pc_tree->split[i]);
1674 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1675 vp9_rd_cost_reset(&last_part_rdc);
1678 last_part_rdc.rate += tmp_rdc.rate;
1679 last_part_rdc.dist += tmp_rdc.dist;
1687 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1688 if (last_part_rdc.rate < INT_MAX) {
1689 last_part_rdc.rate += cpi->partition_cost[pl][partition];
1690 last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
1691 last_part_rdc.rate, last_part_rdc.dist);
1694 if (do_partition_search
1695 && cpi->sf.adjust_partitioning_from_last_frame
1696 && cpi->sf.partition_search_type == SEARCH_PARTITION
1697 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
1698 && (mi_row + mi_step < cm->mi_rows ||
1699 mi_row + (mi_step >> 1) == cm->mi_rows)
1700 && (mi_col + mi_step < cm->mi_cols ||
1701 mi_col + (mi_step >> 1) == cm->mi_cols)) {
1702 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
1703 chosen_rdc.rate = 0;
1704 chosen_rdc.dist = 0;
1705 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
1706 pc_tree->partitioning = PARTITION_SPLIT;
1709 for (i = 0; i < 4; i++) {
1710 int x_idx = (i & 1) * (mi_step >> 1);
1711 int y_idx = (i >> 1) * (mi_step >> 1);
1713 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
1714 PARTITION_CONTEXT sl[8], sa[8];
1716 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
1719 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
1720 pc_tree->split[i]->partitioning = PARTITION_NONE;
1721 rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
1722 split_subsize, &pc_tree->split[i]->none, INT64_MAX);
1724 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
1726 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1727 vp9_rd_cost_reset(&chosen_rdc);
1731 chosen_rdc.rate += tmp_rdc.rate;
1732 chosen_rdc.dist += tmp_rdc.dist;
1735 encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0,
1736 split_subsize, pc_tree->split[i]);
1738 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
1740 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
1742 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1743 if (chosen_rdc.rate < INT_MAX) {
1744 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
1745 chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
1746 chosen_rdc.rate, chosen_rdc.dist);
  // If last_part is better, set the partitioning to that.
1751 if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
1752 mi_8x8[0].src_mi->mbmi.sb_type = bsize;
1753 if (bsize >= BLOCK_8X8)
1754 pc_tree->partitioning = partition;
1755 chosen_rdc = last_part_rdc;
  // If none was better, set the partitioning to that.
1758 if (none_rdc.rdcost < chosen_rdc.rdcost) {
1759 if (bsize >= BLOCK_8X8)
1760 pc_tree->partitioning = PARTITION_NONE;
1761 chosen_rdc = none_rdc;
1764 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
1766 // We must have chosen a partitioning and encoding or we'll fail later on.
1767 // No other opportunities for success.
1768 if (bsize == BLOCK_64X64)
1769 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
1772 int output_enabled = (bsize == BLOCK_64X64);
    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
1777 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
1778 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col,
1779 output_enabled, chosen_rdc.rate);
1782 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
1783 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
1784 chosen_rdc.rate, chosen_rdc.dist);
1785 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize,
1789 *rate = chosen_rdc.rate;
1790 *dist = chosen_rdc.dist;
1793 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
1794 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1795 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1796 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
1797 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
1801 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
1802 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
1803 BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
1804 BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
1805 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
1809 // Look at all the mode_info entries for blocks that are part of this
1810 // partition and find the min and max values for sb_type.
1811 // At the moment this is designed to work on a 64x64 SB but could be
1812 // adjusted to use a size parameter.
1814 // The min and max are assumed to have been initialized prior to calling this
1815 // function so repeat calls can accumulate a min and max of more than one sb64.
1816 static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO *mi_8x8,
1817 BLOCK_SIZE *min_block_size,
1818 BLOCK_SIZE *max_block_size,
1819 int bs_hist[BLOCK_SIZES]) {
1820 int sb_width_in_blocks = MI_BLOCK_SIZE;
1821 int sb_height_in_blocks = MI_BLOCK_SIZE;
1825 // Check the sb_type for each block that belongs to this region.
1826 for (i = 0; i < sb_height_in_blocks; ++i) {
1827 for (j = 0; j < sb_width_in_blocks; ++j) {
1828 MODE_INFO *mi = mi_8x8[index+j].src_mi;
1829 BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
1831 *min_block_size = MIN(*min_block_size, sb_type);
1832 *max_block_size = MAX(*max_block_size, sb_type);
1834 index += xd->mi_stride;
1838 // Next square block size less than or equal to the current block size.
1839 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
1840 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1841 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
1842 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
1843 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
1847 // Look at neighboring blocks and set a min and max partition size based on
1848 // those.
1849 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
1850 int mi_row, int mi_col,
1851 BLOCK_SIZE *min_block_size,
1852 BLOCK_SIZE *max_block_size) {
1853 VP9_COMMON *const cm = &cpi->common;
1854 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
1855 MODE_INFO *mi = xd->mi[0].src_mi;
1856 const int left_in_image = xd->left_available && mi[-1].src_mi;
1857 const int above_in_image = xd->up_available && mi[-xd->mi_stride].src_mi;
1858 const int row8x8_remaining = tile->mi_row_end - mi_row;
1859 const int col8x8_remaining = tile->mi_col_end - mi_col;
1861 BLOCK_SIZE min_size = BLOCK_4X4;
1862 BLOCK_SIZE max_size = BLOCK_64X64;
1864 int bs_hist[BLOCK_SIZES] = {0};
1866 // Trap case where we do not have a prediction.
1867 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
1868 // Default "min to max" and "max to min"
1869 min_size = BLOCK_64X64;
1870 max_size = BLOCK_4X4;
1872 // NOTE: each call to get_sb_partition_size_range() uses the previous
1873 // passed in values for min and max as a starting point.
1874 // Find the min and max partition used in previous frame at this location
1875 if (cm->frame_type != KEY_FRAME) {
1876 MODE_INFO *prev_mi =
1877 cm->prev_mip + cm->mi_stride + 1 + mi_row * xd->mi_stride + mi_col;
1879 get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
1881 // Find the min and max partition sizes used in the left SB64
1882 if (left_in_image) {
1883 MODE_INFO *left_sb64_mi = mi[-MI_BLOCK_SIZE].src_mi;
1884 get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
1887 // Find the min and max partition sizes used in the above SB64.
1888 if (above_in_image) {
1889 MODE_INFO *above_sb64_mi = mi[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
1890 get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
1894 // adjust observed min and max
1895 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
1896 min_size = min_partition_size[min_size];
1897 max_size = max_partition_size[max_size];
1898 } else if (cpi->sf.auto_min_max_partition_size ==
1899 CONSTRAIN_NEIGHBORING_MIN_MAX) {
1900 // adjust the search range based on the histogram of the observed
1901 // partition sizes from the left, above, and previous co-located blocks.
1903 int first_moment = 0;
1904 int second_moment = 0;
1905 int var_unnormalized = 0;
1907 for (i = 0; i < BLOCK_SIZES; i++) {
1909 first_moment += bs_hist[i] * i;
1910 second_moment += bs_hist[i] * i * i;
1913 // if variance is small enough,
1914 // adjust the range around its mean size, which gives a tighter range
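// Sketch of the arithmetic below: with h[] the histogram of observed
// block-size indices,
//   var_unnormalized = sum(h[i]*i*i) - (sum(h[i]*i))^2 / sum
//                    = sum * Var(i)   (up to integer truncation),
// so the test "var_unnormalized <= 4 * sum" asks for a variance of at most
// 4, i.e. a spread of roughly two block-size indices around the mean.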
1915 var_unnormalized = second_moment - first_moment * first_moment / sum;
1916 if (var_unnormalized <= 4 * sum) {
1917 int mean = first_moment / sum;
1918 min_size = min_partition_size[mean];
1919 max_size = max_partition_size[mean];
1921 min_size = min_partition_size[min_size];
1922 max_size = max_partition_size[max_size];
1927 // Check border cases where max and min from neighbors may not be legal.
1928 max_size = find_partition_size(max_size,
1929 row8x8_remaining, col8x8_remaining,
1931 min_size = MIN(min_size, max_size);
1933 // When use_square_partition_only is true, make sure at least one square
1934 // partition is allowed by selecting the next smaller square size as
1935 // the minimum allowed size.
1936 if (cpi->sf.use_square_partition_only &&
1937 next_square_size[max_size] < min_size) {
1938 min_size = next_square_size[max_size];
1941 *min_block_size = min_size;
1942 *max_block_size = max_size;
1945 static void auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
1946 int mi_row, int mi_col,
1947 BLOCK_SIZE *min_block_size,
1948 BLOCK_SIZE *max_block_size) {
1949 VP9_COMMON *const cm = &cpi->common;
1950 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
1951 MODE_INFO *mi_8x8 = xd->mi;
1952 const int left_in_image = xd->left_available && mi_8x8[-1].src_mi;
1953 const int above_in_image = xd->up_available &&
1954 mi_8x8[-xd->mi_stride].src_mi;
1955 int row8x8_remaining = tile->mi_row_end - mi_row;
1956 int col8x8_remaining = tile->mi_col_end - mi_col;
1958 BLOCK_SIZE min_size = BLOCK_32X32;
1959 BLOCK_SIZE max_size = BLOCK_8X8;
1960 int bsl = mi_width_log2_lookup[BLOCK_64X64];
1961 const int search_range_ctrl = (((mi_row + mi_col) >> bsl) +
1962 get_chessboard_index(cm->current_video_frame)) & 0x1;
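// search_range_ctrl forms a checkerboard over 64x64 block positions that
// get_chessboard_index() flips from frame to frame: roughly half of the
// superblocks run the neighbor-based range estimation below, while the rest
// fall through to the fixed BLOCK_8X8..BLOCK_32X32 default at the end of
// this function.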
1963 // Trap case where we do not have a prediction.
1964 if (search_range_ctrl &&
1965 (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
1970 // Find the min and max partition sizes used in the left SB64.
1971 if (left_in_image) {
1973 mi = mi_8x8[-1].src_mi;
1974 for (block = 0; block < MI_BLOCK_SIZE; ++block) {
1975 cur_mi = mi[block * xd->mi_stride].src_mi;
1976 sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
1977 min_size = MIN(min_size, sb_type);
1978 max_size = MAX(max_size, sb_type);
1981 // Find the min and max partition sizes used in the above SB64.
1982 if (above_in_image) {
1983 mi = mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
1984 for (block = 0; block < MI_BLOCK_SIZE; ++block) {
1985 sb_type = mi[block].src_mi ? mi[block].src_mi->mbmi.sb_type : 0;
1986 min_size = MIN(min_size, sb_type);
1987 max_size = MAX(max_size, sb_type);
1991 min_size = min_partition_size[min_size];
1992 max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
1994 min_size = MIN(min_size, max_size);
1995 min_size = MAX(min_size, BLOCK_8X8);
1996 max_size = MIN(max_size, BLOCK_32X32);
1998 min_size = BLOCK_8X8;
1999 max_size = BLOCK_32X32;
2002 *min_block_size = min_size;
2003 *max_block_size = max_size;
2006 // TODO(jingning) refactor functions setting partition search range
2007 static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
2008 int mi_row, int mi_col, BLOCK_SIZE bsize,
2009 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
2010 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2011 int mi_height = num_8x8_blocks_high_lookup[bsize];
2015 const int idx_str = cm->mi_stride * mi_row + mi_col;
2016 MODE_INFO *prev_mi = (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi;
2019 BLOCK_SIZE bs, min_size, max_size;
2021 min_size = BLOCK_64X64;
2022 max_size = BLOCK_4X4;
2025 for (idy = 0; idy < mi_height; ++idy) {
2026 for (idx = 0; idx < mi_width; ++idx) {
2027 mi = prev_mi[idy * cm->mi_stride + idx].src_mi;
2028 bs = mi ? mi->mbmi.sb_type : bsize;
2029 min_size = MIN(min_size, bs);
2030 max_size = MAX(max_size, bs);
2035 if (xd->left_available) {
2036 for (idy = 0; idy < mi_height; ++idy) {
2037 mi = xd->mi[idy * cm->mi_stride - 1].src_mi;
2038 bs = mi ? mi->mbmi.sb_type : bsize;
2039 min_size = MIN(min_size, bs);
2040 max_size = MAX(max_size, bs);
2044 if (xd->up_available) {
2045 for (idx = 0; idx < mi_width; ++idx) {
2046 mi = xd->mi[idx - cm->mi_stride].src_mi;
2047 bs = mi ? mi->mbmi.sb_type : bsize;
2048 min_size = MIN(min_size, bs);
2049 max_size = MAX(max_size, bs);
2053 if (min_size == max_size) {
2054 min_size = min_partition_size[min_size];
2055 max_size = max_partition_size[max_size];
2062 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2063 vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
2066 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2067 vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
2070 #if CONFIG_FP_MB_STATS
2071 const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
2072 {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
2073 const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
2074 {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
2075 const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
2076 {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
2077 const int qindex_split_threshold_lookup[BLOCK_SIZES] =
2078 {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
2079 const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
2080 {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
2091 static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
2092 if (fp_byte & FPMB_MOTION_ZERO_MASK) {
2094 } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
2096 } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
2098 } else if (fp_byte & FPMB_MOTION_UP_MASK) {
2105 static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
2106 MOTION_DIRECTION that_mv) {
2107 if (this_mv == that_mv) {
2110 return abs(this_mv - that_mv) == 2 ? 2 : 1;
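// Scoring sketch: the elided equal-direction branch presumably returns 0;
// otherwise directions whose enum values differ by 2 (opposite directions,
// given the usual MOTION_DIRECTION ordering) score 2 and any other pair
// scores 1, so the accumulated total is largest where the first-pass motion
// field is least coherent.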
2115 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
2116 // unlikely to be selected depending on previous rate-distortion optimization
2117 // results, for encoding speed-up.
2118 static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
2119 TOKENEXTRA **tp, int mi_row, int mi_col,
2120 BLOCK_SIZE bsize, RD_COST *rd_cost,
2121 int64_t best_rd, PC_TREE *pc_tree) {
2122 VP9_COMMON *const cm = &cpi->common;
2123 MACROBLOCK *const x = &cpi->mb;
2124 MACROBLOCKD *const xd = &x->e_mbd;
2125 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
2126 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2127 PARTITION_CONTEXT sl[8], sa[8];
2128 TOKENEXTRA *tp_orig = *tp;
2129 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2132 RD_COST this_rdc, sum_rdc, best_rdc;
2133 int do_split = bsize >= BLOCK_8X8;
2136 // Override skipping rectangular partition operations for edge blocks
2137 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
2138 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
2139 const int xss = x->e_mbd.plane[1].subsampling_x;
2140 const int yss = x->e_mbd.plane[1].subsampling_y;
2142 BLOCK_SIZE min_size = cpi->sf.min_partition_size;
2143 BLOCK_SIZE max_size = cpi->sf.max_partition_size;
2145 #if CONFIG_FP_MB_STATS
2146 unsigned int src_diff_var = UINT_MAX;
2147 int none_complexity = 0;
2150 int partition_none_allowed = !force_horz_split && !force_vert_split;
2151 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2153 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2157 assert(num_8x8_blocks_wide_lookup[bsize] ==
2158 num_8x8_blocks_high_lookup[bsize]);
2160 vp9_rd_cost_init(&this_rdc);
2161 vp9_rd_cost_init(&sum_rdc);
2162 vp9_rd_cost_reset(&best_rdc);
2163 best_rdc.rdcost = best_rd;
2165 set_offsets(cpi, tile, mi_row, mi_col, bsize);
2167 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
2168 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2170 if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
2171 int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
2172 + get_chessboard_index(cm->current_video_frame)) & 0x1;
2174 if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
2175 set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
2178 // Determine partition types in search according to the speed features.
2179 // The threshold set here has to be a square block size.
2180 if (cpi->sf.auto_min_max_partition_size) {
2181 partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
2182 partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
2184 partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
2186 do_split &= bsize > min_size;
2188 if (cpi->sf.use_square_partition_only) {
2189 partition_horz_allowed &= force_horz_split;
2190 partition_vert_allowed &= force_vert_split;
2193 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2195 #if CONFIG_FP_MB_STATS
2196 if (cpi->use_fp_mb_stats) {
2197 set_offsets(cpi, tile, mi_row, mi_col, bsize);
2198 src_diff_var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src,
2199 mi_row, mi_col, bsize);
2203 #if CONFIG_FP_MB_STATS
2204 // Decide whether we shall split directly and skip searching NONE by using
2205 // the first pass block statistics
2206 if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
2207 partition_none_allowed && src_diff_var > 4 &&
2208 cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
2209 int mb_row = mi_row >> 1;
2210 int mb_col = mi_col >> 1;
2212 MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2214 MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2217 // Compute a complexity measure, basically a measure of the inconsistency
2218 // of the motion vectors obtained from the first pass in the current block.
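// Worked example, assuming the usual BLOCK_SIZES ordering in the threshold
// table above: a fully in-frame 64x64 block spans a 4x4 grid of 16x16 MBs,
// giving 12 horizontal + 12 vertical neighbor pairs. If the inconsistency
// summed over those 24 pairs exceeds
// complexity_16x16_blocks_threshold[BLOCK_64X64] (6 in the table above),
// PARTITION_NONE is ruled out and the block goes straight to splitting.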
2219 for (r = mb_row; r < mb_row_end ; r++) {
2220 for (c = mb_col; c < mb_col_end; c++) {
2221 const int mb_index = r * cm->mb_cols + c;
2223 MOTION_DIRECTION this_mv;
2224 MOTION_DIRECTION right_mv;
2225 MOTION_DIRECTION bottom_mv;
2228 get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);
2231 if (c != mb_col_end - 1) {
2232 right_mv = get_motion_direction_fp(
2233 cpi->twopass.this_frame_mb_stats[mb_index + 1]);
2234 none_complexity += get_motion_inconsistency(this_mv, right_mv);
2238 if (r != mb_row_end - 1) {
2239 bottom_mv = get_motion_direction_fp(
2240 cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
2241 none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
2244 // do not count its left and top neighbors to avoid double counting
2248 if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
2249 partition_none_allowed = 0;
2255 if (partition_none_allowed) {
2256 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rdc, bsize, ctx,
2258 if (this_rdc.rate != INT_MAX) {
2259 if (bsize >= BLOCK_8X8) {
2260 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2261 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2262 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2263 this_rdc.rate, this_rdc.dist);
2266 if (this_rdc.rdcost < best_rdc.rdcost) {
2267 int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
2268 int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
2270 best_rdc = this_rdc;
2271 if (bsize >= BLOCK_8X8)
2272 pc_tree->partitioning = PARTITION_NONE;
2274 // Adjust dist breakout threshold according to the partition size.
2275 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
2276 b_height_log2_lookup[bsize]);
2278 rate_breakout_thr *= num_pels_log2_lookup[bsize];
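// Both thresholds are specified at 64x64 scale and rescaled here, assuming
// the usual lookup-table values: b_width_log2 + b_height_log2 is 8 for
// BLOCK_64X64 (no shift) and 2 for BLOCK_8X8 (threshold divided by 64), so
// dist_breakout_thr shrinks roughly in proportion to block area, while
// rate_breakout_thr scales with num_pels_log2_lookup[bsize] (12 for 64x64,
// 6 for 8x8).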
2280 // If all y, u, v transform blocks in this partition are skippable, and
2281 // the dist & rate are within the thresholds, the partition search is
2282 // terminated for current branch of the partition search tree.
2283 // The dist & rate thresholds are set to 0 at speed 0 to disable the
2284 // early termination at that speed.
2285 if (!x->e_mbd.lossless &&
2286 (ctx->skippable && best_rdc.dist < dist_breakout_thr &&
2287 best_rdc.rate < rate_breakout_thr)) {
2292 #if CONFIG_FP_MB_STATS
2293 // Check if every 16x16 first pass block's statistics show zero
2294 // motion and the corresponding first pass residue is small enough.
2295 // If that is the case, check the difference variance between the
2296 // current frame and the last frame. If the variance is small enough,
2297 // stop further splitting in RD optimization
2298 if (cpi->use_fp_mb_stats && do_split != 0 &&
2299 cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
2300 int mb_row = mi_row >> 1;
2301 int mb_col = mi_col >> 1;
2303 MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2305 MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2309 for (r = mb_row; r < mb_row_end; r++) {
2310 for (c = mb_col; c < mb_col_end; c++) {
2311 const int mb_index = r * cm->mb_cols + c;
2312 if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
2313 FPMB_MOTION_ZERO_MASK) ||
2314 !(cpi->twopass.this_frame_mb_stats[mb_index] &
2315 FPMB_ERROR_SMALL_MASK)) {
2325 if (src_diff_var == UINT_MAX) {
2326 set_offsets(cpi, tile, mi_row, mi_col, bsize);
2327 src_diff_var = get_sby_perpixel_diff_variance(
2328 cpi, &cpi->mb.plane[0].src, mi_row, mi_col, bsize);
2330 if (src_diff_var < 8) {
2339 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2342 // store estimated motion vector
2343 if (cpi->sf.adaptive_motion_search)
2344 store_pred_mv(x, ctx);
2347 // TODO(jingning): use the motion vectors given by the above search as
2348 // the starting point of motion search in the following partition type check.
2350 subsize = get_subsize(bsize, PARTITION_SPLIT);
2351 if (bsize == BLOCK_8X8) {
2353 if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
2354 pc_tree->leaf_split[0]->pred_interp_filter =
2355 ctx->mic.mbmi.interp_filter;
2356 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize,
2357 pc_tree->leaf_split[0], best_rdc.rdcost);
2358 if (sum_rdc.rate == INT_MAX)
2359 sum_rdc.rdcost = INT64_MAX;
2361 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
2362 const int x_idx = (i & 1) * mi_step;
2363 const int y_idx = (i >> 1) * mi_step;
2365 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
2368 if (cpi->sf.adaptive_motion_search)
2369 load_pred_mv(x, ctx);
2371 pc_tree->split[i]->index = i;
2372 rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
2374 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
2376 if (this_rdc.rate == INT_MAX) {
2377 sum_rdc.rdcost = INT64_MAX;
2380 sum_rdc.rate += this_rdc.rate;
2381 sum_rdc.dist += this_rdc.dist;
2382 sum_rdc.rdcost += this_rdc.rdcost;
2387 if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
2388 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2389 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2390 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2391 sum_rdc.rate, sum_rdc.dist);
2393 if (sum_rdc.rdcost < best_rdc.rdcost) {
2395 pc_tree->partitioning = PARTITION_SPLIT;
2398 // skip rectangular partition test when larger block size
2399 // gives better rd cost
2400 if (cpi->sf.less_rectangular_check)
2401 do_rect &= !partition_none_allowed;
2403 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2407 if (partition_horz_allowed && do_rect) {
2408 subsize = get_subsize(bsize, PARTITION_HORZ);
2409 if (cpi->sf.adaptive_motion_search)
2410 load_pred_mv(x, ctx);
2411 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2412 partition_none_allowed)
2413 pc_tree->horizontal[0].pred_interp_filter =
2414 ctx->mic.mbmi.interp_filter;
2415 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize,
2416 &pc_tree->horizontal[0], best_rdc.rdcost);
2418 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
2419 bsize > BLOCK_8X8) {
2420 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2421 update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
2422 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
2424 if (cpi->sf.adaptive_motion_search)
2425 load_pred_mv(x, ctx);
2426 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2427 partition_none_allowed)
2428 pc_tree->horizontal[1].pred_interp_filter =
2429 ctx->mic.mbmi.interp_filter;
2430 rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rdc,
2431 subsize, &pc_tree->horizontal[1],
2432 best_rdc.rdcost - sum_rdc.rdcost);
2433 if (this_rdc.rate == INT_MAX) {
2434 sum_rdc.rdcost = INT64_MAX;
2436 sum_rdc.rate += this_rdc.rate;
2437 sum_rdc.dist += this_rdc.dist;
2438 sum_rdc.rdcost += this_rdc.rdcost;
2442 if (sum_rdc.rdcost < best_rdc.rdcost) {
2443 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2444 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
2445 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
2446 if (sum_rdc.rdcost < best_rdc.rdcost) {
2448 pc_tree->partitioning = PARTITION_HORZ;
2451 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2454 if (partition_vert_allowed && do_rect) {
2455 subsize = get_subsize(bsize, PARTITION_VERT);
2457 if (cpi->sf.adaptive_motion_search)
2458 load_pred_mv(x, ctx);
2459 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2460 partition_none_allowed)
2461 pc_tree->vertical[0].pred_interp_filter =
2462 ctx->mic.mbmi.interp_filter;
2463 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize,
2464 &pc_tree->vertical[0], best_rdc.rdcost);
2465 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
2466 bsize > BLOCK_8X8) {
2467 update_state(cpi, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
2468 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize,
2469 &pc_tree->vertical[0]);
2471 if (cpi->sf.adaptive_motion_search)
2472 load_pred_mv(x, ctx);
2473 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2474 partition_none_allowed)
2475 pc_tree->vertical[1].pred_interp_filter =
2476 ctx->mic.mbmi.interp_filter;
2477 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rdc, subsize,
2478 &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
2479 if (this_rdc.rate == INT_MAX) {
2480 sum_rdc.rdcost = INT64_MAX;
2482 sum_rdc.rate += this_rdc.rate;
2483 sum_rdc.dist += this_rdc.dist;
2484 sum_rdc.rdcost += this_rdc.rdcost;
2488 if (sum_rdc.rdcost < best_rdc.rdcost) {
2489 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2490 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
2491 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2492 sum_rdc.rate, sum_rdc.dist);
2493 if (sum_rdc.rdcost < best_rdc.rdcost) {
2495 pc_tree->partitioning = PARTITION_VERT;
2498 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2501 // TODO(jbb): This code was added so that we avoid a static analysis
2502 // warning related to the fact that best_rd isn't used after this
2503 // point. This code should be refactored so that the duplicate
2504 // checks occur in some sub function and thus are used...
2506 *rd_cost = best_rdc;
2509 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
2510 pc_tree->index != 3) {
2511 int output_enabled = (bsize == BLOCK_64X64);
2513 // Check the projected output rate for this SB against its target
2514 // and, if necessary, apply a Q delta using segmentation to get
2515 // closer to the target.
2516 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map)
2517 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled,
2519 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
2520 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
2521 best_rdc.rate, best_rdc.dist);
2523 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree);
2526 if (bsize == BLOCK_64X64) {
2527 assert(tp_orig < *tp);
2528 assert(best_rdc.rate < INT_MAX);
2529 assert(best_rdc.dist < INT64_MAX);
2531 assert(tp_orig == *tp);
2535 static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
2536 int mi_row, TOKENEXTRA **tp) {
2537 VP9_COMMON *const cm = &cpi->common;
2538 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
2539 SPEED_FEATURES *const sf = &cpi->sf;
2542 // Initialize the left context for the new SB row
2543 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
2544 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
2546 // Code each SB in the row
2547 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
2548 mi_col += MI_BLOCK_SIZE) {
2554 const int idx_str = cm->mi_stride * mi_row + mi_col;
2555 MODE_INFO *mi = cm->mi + idx_str;
2556 MODE_INFO *prev_mi = NULL;
2558 if (cm->frame_type != KEY_FRAME)
2559 prev_mi = (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi;
2561 if (sf->adaptive_pred_interp_filter) {
2562 for (i = 0; i < 64; ++i)
2563 cpi->leaf_tree[i].pred_interp_filter = SWITCHABLE;
2565 for (i = 0; i < 64; ++i) {
2566 cpi->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
2567 cpi->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
2568 cpi->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
2569 cpi->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
2573 vp9_zero(cpi->mb.pred_mv);
2574 cpi->pc_root->index = 0;
2576 // TODO(yunqingwang): use_lastframe_partitioning is no longer used in good-
2577 // quality encoding. Need to evaluate it in real-time encoding later to
2578 // decide if it can be removed too. And then, do the code cleanup.
2579 cpi->mb.source_variance = UINT_MAX;
2580 if (sf->partition_search_type == FIXED_PARTITION) {
2581 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
2582 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col,
2583 sf->always_this_block_size);
2584 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
2585 &dummy_rate, &dummy_dist, 1, cpi->pc_root);
2586 } else if (cpi->partition_search_skippable_frame) {
2588 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
2589 bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
2590 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
2591 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
2592 &dummy_rate, &dummy_dist, 1, cpi->pc_root);
2593 } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
2594 cm->frame_type != KEY_FRAME) {
2595 choose_partitioning(cpi, tile, mi_row, mi_col);
2596 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
2597 &dummy_rate, &dummy_dist, 1, cpi->pc_root);
2598 } else if (sf->partition_search_type == SEARCH_PARTITION &&
2599 sf->use_lastframe_partitioning &&
2600 (cpi->rc.frames_since_key %
2601 sf->last_partitioning_redo_frequency) &&
2604 cm->frame_type != KEY_FRAME &&
2605 !cpi->rc.is_src_frame_alt_ref &&
2606 ((sf->use_lastframe_partitioning !=
2607 LAST_FRAME_PARTITION_LOW_MOTION) ||
2608 !sb_has_motion(cm, prev_mi, sf->lf_motion_threshold))) {
2609 if (sf->constrain_copy_partition &&
2610 sb_has_motion(cm, prev_mi, sf->lf_motion_threshold))
2611 constrain_copy_partitioning(cpi, tile, mi, prev_mi,
2612 mi_row, mi_col, BLOCK_16X16);
2614 copy_partitioning(cm, mi, prev_mi);
2615 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
2616 &dummy_rate, &dummy_dist, 1, cpi->pc_root);
2618 // If required set upper and lower partition size limits
2619 if (sf->auto_min_max_partition_size) {
2620 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
2621 rd_auto_partition_range(cpi, tile, mi_row, mi_col,
2622 &sf->min_partition_size,
2623 &sf->max_partition_size);
2625 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
2626 &dummy_rdc, INT64_MAX, cpi->pc_root);
2631 static void init_encode_frame_mb_context(VP9_COMP *cpi) {
2632 MACROBLOCK *const x = &cpi->mb;
2633 VP9_COMMON *const cm = &cpi->common;
2634 MACROBLOCKD *const xd = &x->e_mbd;
2635 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
2637 // Copy data over into macro block data structures.
2638 vp9_setup_src_planes(x, cpi->Source, 0, 0);
2640 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
2642 // Note: this memset assumes above_context[0], [1] and [2]
2643 // are allocated as part of the same buffer.
2644 vpx_memset(xd->above_context[0], 0,
2645 sizeof(*xd->above_context[0]) *
2646 2 * aligned_mi_cols * MAX_MB_PLANE);
2647 vpx_memset(xd->above_seg_context, 0,
2648 sizeof(*xd->above_seg_context) * aligned_mi_cols);
2651 static int check_dual_ref_flags(VP9_COMP *cpi) {
2652 const int ref_flags = cpi->ref_frame_flags;
2654 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
2657 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
2658 + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
2662 static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
2664 const int mis = cm->mi_stride;
2665 MODE_INFO *mi_ptr = cm->mi;
2667 for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
2668 for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
2669 if (mi_ptr[mi_col].src_mi->mbmi.tx_size > max_tx_size)
2670 mi_ptr[mi_col].src_mi->mbmi.tx_size = max_tx_size;
2675 static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
2676 if (frame_is_intra_only(&cpi->common))
2678 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
2679 return ALTREF_FRAME;
2680 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
2681 return GOLDEN_FRAME;
2686 static TX_MODE select_tx_mode(const VP9_COMP *cpi) {
2687 if (cpi->mb.e_mbd.lossless)
2689 if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
2691 else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
2692 cpi->sf.tx_size_search_method == USE_TX_8X8)
2693 return TX_MODE_SELECT;
2695 return cpi->common.tx_mode;
2698 static void nonrd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
2699 int mi_row, int mi_col,
2700 int *rate, int64_t *dist,
2701 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
2702 VP9_COMMON *const cm = &cpi->common;
2703 MACROBLOCK *const x = &cpi->mb;
2704 MACROBLOCKD *const xd = &x->e_mbd;
2706 set_offsets(cpi, tile, mi_row, mi_col, bsize);
2707 mbmi = &xd->mi[0].src_mi->mbmi;
2708 mbmi->sb_type = bsize;
2710 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
2711 if (mbmi->segment_id && x->in_static_area)
2712 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
2714 if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
2715 set_mode_info_seg_skip(x, cm->tx_mode, rate, dist, bsize);
2717 vp9_pick_inter_mode(cpi, x, tile, mi_row, mi_col, rate, dist, bsize, ctx);
2719 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2722 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
2723 int mi_row, int mi_col,
2724 BLOCK_SIZE bsize, BLOCK_SIZE subsize,
2726 MACROBLOCKD *xd = &x->e_mbd;
2727 int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
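// hbs is half the block width in 8x8 mode-info units: e.g. for BLOCK_64X64,
// bsl = 4 so hbs = (1 << 4) / 4 = 4 mi units (32 pixels), the row/column
// offset used below to reach the second half of a HORZ/VERT partition or
// the other quadrants of a SPLIT.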
2728 PARTITION_TYPE partition = pc_tree->partitioning;
2730 assert(bsize >= BLOCK_8X8);
2732 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
2735 switch (partition) {
2736 case PARTITION_NONE:
2737 set_modeinfo_offsets(cm, xd, mi_row, mi_col);
2738 *(xd->mi[0].src_mi) = pc_tree->none.mic;
2739 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2741 case PARTITION_VERT:
2742 set_modeinfo_offsets(cm, xd, mi_row, mi_col);
2743 *(xd->mi[0].src_mi) = pc_tree->vertical[0].mic;
2744 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2746 if (mi_col + hbs < cm->mi_cols) {
2747 set_modeinfo_offsets(cm, xd, mi_row, mi_col + hbs);
2748 *(xd->mi[0].src_mi) = pc_tree->vertical[1].mic;
2749 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, bsize);
2752 case PARTITION_HORZ:
2753 set_modeinfo_offsets(cm, xd, mi_row, mi_col);
2754 *(xd->mi[0].src_mi) = pc_tree->horizontal[0].mic;
2755 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2756 if (mi_row + hbs < cm->mi_rows) {
2757 set_modeinfo_offsets(cm, xd, mi_row + hbs, mi_col);
2758 *(xd->mi[0].src_mi) = pc_tree->horizontal[1].mic;
2759 duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, bsize);
2762 case PARTITION_SPLIT: {
2763 BLOCK_SIZE subsubsize = get_subsize(subsize, PARTITION_SPLIT);
2764 fill_mode_info_sb(cm, x, mi_row, mi_col, subsize,
2765 subsubsize, pc_tree->split[0]);
2766 fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
2767 subsubsize, pc_tree->split[1]);
2768 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
2769 subsubsize, pc_tree->split[2]);
2770 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
2771 subsubsize, pc_tree->split[3]);
2779 static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
2780 TOKENEXTRA **tp, int mi_row,
2781 int mi_col, BLOCK_SIZE bsize, int *rate,
2782 int64_t *dist, int do_recon, int64_t best_rd,
2784 const SPEED_FEATURES *const sf = &cpi->sf;
2785 const VP9EncoderConfig *const oxcf = &cpi->oxcf;
2786 VP9_COMMON *const cm = &cpi->common;
2787 MACROBLOCK *const x = &cpi->mb;
2788 MACROBLOCKD *const xd = &x->e_mbd;
2789 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
2790 TOKENEXTRA *tp_orig = *tp;
2791 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2793 BLOCK_SIZE subsize = bsize;
2794 int this_rate, sum_rate = 0, best_rate = INT_MAX;
2795 int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
2797 int do_split = bsize >= BLOCK_8X8;
2799 // Override skipping rectangular partition operations for edge blocks
2800 const int force_horz_split = (mi_row + ms >= cm->mi_rows);
2801 const int force_vert_split = (mi_col + ms >= cm->mi_cols);
2802 const int xss = x->e_mbd.plane[1].subsampling_x;
2803 const int yss = x->e_mbd.plane[1].subsampling_y;
2805 int partition_none_allowed = !force_horz_split && !force_vert_split;
2806 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2808 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2812 assert(num_8x8_blocks_wide_lookup[bsize] ==
2813 num_8x8_blocks_high_lookup[bsize]);
2815 // Determine partition types in search according to the speed features.
2816 // The threshold set here has to be a square block size.
2817 if (sf->auto_min_max_partition_size) {
2818 partition_none_allowed &= (bsize <= sf->max_partition_size &&
2819 bsize >= sf->min_partition_size);
2820 partition_horz_allowed &= ((bsize <= sf->max_partition_size &&
2821 bsize > sf->min_partition_size) ||
2823 partition_vert_allowed &= ((bsize <= sf->max_partition_size &&
2824 bsize > sf->min_partition_size) ||
2826 do_split &= bsize > sf->min_partition_size;
2828 if (sf->use_square_partition_only) {
2829 partition_horz_allowed &= force_horz_split;
2830 partition_vert_allowed &= force_vert_split;
2834 if (partition_none_allowed) {
2835 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
2836 &this_rate, &this_dist, bsize, ctx);
2837 ctx->mic.mbmi = xd->mi[0].src_mi->mbmi;
2838 ctx->skip_txfm[0] = x->skip_txfm[0];
2839 ctx->skip = x->skip;
2841 if (this_rate != INT_MAX) {
2842 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2843 this_rate += cpi->partition_cost[pl][PARTITION_NONE];
2844 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
2845 if (sum_rd < best_rd) {
2846 int dist_breakout_thr = sf->partition_search_breakout_dist_thr;
2847 int64_t rate_breakout_thr = sf->partition_search_breakout_rate_thr;
2849 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
2850 b_height_log2_lookup[bsize]);
2852 rate_breakout_thr *= num_pels_log2_lookup[bsize];
2854 best_rate = this_rate;
2855 best_dist = this_dist;
2857 if (bsize >= BLOCK_8X8)
2858 pc_tree->partitioning = PARTITION_NONE;
2860 if (!x->e_mbd.lossless &&
2861 this_rate < rate_breakout_thr &&
2862 this_dist < dist_breakout_thr) {
2870 // store estimated motion vector
2871 store_pred_mv(x, ctx);
2876 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2877 sum_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2878 subsize = get_subsize(bsize, PARTITION_SPLIT);
2879 for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
2880 const int x_idx = (i & 1) * ms;
2881 const int y_idx = (i >> 1) * ms;
2883 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
2885 load_pred_mv(x, ctx);
2886 nonrd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
2887 subsize, &this_rate, &this_dist, 0,
2888 best_rd - sum_rd, pc_tree->split[i]);
2890 if (this_rate == INT_MAX) {
2893 sum_rate += this_rate;
2894 sum_dist += this_dist;
2895 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2899 if (sum_rd < best_rd) {
2900 best_rate = sum_rate;
2901 best_dist = sum_dist;
2903 pc_tree->partitioning = PARTITION_SPLIT;
2905 // skip rectangular partition test when larger block size
2906 // gives better rd cost
2907 if (sf->less_rectangular_check)
2908 do_rect &= !partition_none_allowed;
2913 if (partition_horz_allowed && do_rect) {
2914 subsize = get_subsize(bsize, PARTITION_HORZ);
2915 if (sf->adaptive_motion_search)
2916 load_pred_mv(x, ctx);
2918 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
2919 &this_rate, &this_dist, subsize,
2920 &pc_tree->horizontal[0]);
2922 pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
2923 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
2924 pc_tree->horizontal[0].skip = x->skip;
2926 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2928 if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) {
2929 load_pred_mv(x, ctx);
2930 nonrd_pick_sb_modes(cpi, tile, mi_row + ms, mi_col,
2931 &this_rate, &this_dist, subsize,
2932 &pc_tree->horizontal[1]);
2934 pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
2935 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
2936 pc_tree->horizontal[1].skip = x->skip;
2938 if (this_rate == INT_MAX) {
2941 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2942 this_rate += cpi->partition_cost[pl][PARTITION_HORZ];
2943 sum_rate += this_rate;
2944 sum_dist += this_dist;
2945 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2948 if (sum_rd < best_rd) {
2950 best_rate = sum_rate;
2951 best_dist = sum_dist;
2952 pc_tree->partitioning = PARTITION_HORZ;
2957 if (partition_vert_allowed && do_rect) {
2958 subsize = get_subsize(bsize, PARTITION_VERT);
2960 if (sf->adaptive_motion_search)
2961 load_pred_mv(x, ctx);
2963 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
2964 &this_rate, &this_dist, subsize,
2965 &pc_tree->vertical[0]);
2966 pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
2967 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
2968 pc_tree->vertical[0].skip = x->skip;
2969 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2970 if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) {
2971 load_pred_mv(x, ctx);
2972 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + ms,
2973 &this_rate, &this_dist, subsize,
2974 &pc_tree->vertical[1]);
2975 pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
2976 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
2977 pc_tree->vertical[1].skip = x->skip;
2978 if (this_rate == INT_MAX) {
2981 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2982 this_rate += cpi->partition_cost[pl][PARTITION_VERT];
2983 sum_rate += this_rate;
2984 sum_dist += this_dist;
2985 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2988 if (sum_rd < best_rd) {
2989 best_rate = sum_rate;
2990 best_dist = sum_dist;
2992 pc_tree->partitioning = PARTITION_VERT;
2995 // TODO(JBB): The following line is here just to avoid a static warning
2996 // that occurs because at this point we never again reuse best_rd
2997 // despite setting it here. The code should be refactored to avoid this.
3003 if (best_rate == INT_MAX)
3006 // update mode info array
3007 subsize = get_subsize(bsize, pc_tree->partitioning);
3008 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, subsize,
3011 if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) {
3012 int output_enabled = (bsize == BLOCK_64X64);
3014 // Check the projected output rate for this SB against its target
3015 // and, if necessary, apply a Q delta using segmentation to get
3016 // closer to the target.
3017 if ((oxcf->aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
3018 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled,
3022 if (oxcf->aq_mode == CYCLIC_REFRESH_AQ)
3023 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
3024 best_rate, best_dist);
3026 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree);
3029 if (bsize == BLOCK_64X64) {
3030 assert(tp_orig < *tp);
3031 assert(best_rate < INT_MAX);
3032 assert(best_dist < INT64_MAX);
3034 assert(tp_orig == *tp);
3038 static void nonrd_use_partition(VP9_COMP *cpi,
3039 const TileInfo *const tile,
3042 int mi_row, int mi_col,
3043 BLOCK_SIZE bsize, int output_enabled,
3044 int *totrate, int64_t *totdist,
3046 VP9_COMMON *const cm = &cpi->common;
3047 MACROBLOCK *const x = &cpi->mb;
3048 MACROBLOCKD *const xd = &x->e_mbd;
3049 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3050 const int mis = cm->mi_stride;
3051 PARTITION_TYPE partition;
3054 int64_t dist = INT64_MAX;
3056 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3059 subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4;
3060 partition = partition_lookup[bsl][subsize];
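// The partitioning was decided earlier (e.g. by choose_partitioning() or
// set_fixed_partitioning()) and is stored implicitly as the sb_type of the
// top-left 8x8 mode-info unit; partition_lookup maps the pair (parent size,
// stored sub-size) back to a PARTITION_* type, e.g. a BLOCK_64X32 sub-size
// inside a BLOCK_64X64 parent yields PARTITION_HORZ.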
3062 switch (partition) {
3063 case PARTITION_NONE:
3064 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
3065 subsize, &pc_tree->none);
3066 pc_tree->none.mic.mbmi = xd->mi[0].src_mi->mbmi;
3067 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3068 pc_tree->none.skip = x->skip;
3070 case PARTITION_VERT:
3071 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
3072 subsize, &pc_tree->vertical[0]);
3073 pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3074 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3075 pc_tree->vertical[0].skip = x->skip;
3076 if (mi_col + hbs < cm->mi_cols) {
3077 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + hbs,
3078 &rate, &dist, subsize, &pc_tree->vertical[1]);
3079 pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3080 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3081 pc_tree->vertical[1].skip = x->skip;
3082 if (rate != INT_MAX && dist != INT64_MAX &&
3083 *totrate != INT_MAX && *totdist != INT64_MAX) {
3089 case PARTITION_HORZ:
3090 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
3091 subsize, &pc_tree->horizontal[0]);
3092 pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3093 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3094 pc_tree->horizontal[0].skip = x->skip;
3095 if (mi_row + hbs < cm->mi_rows) {
3096 nonrd_pick_sb_modes(cpi, tile, mi_row + hbs, mi_col,
3097 &rate, &dist, subsize, &pc_tree->horizontal[1]);
3098 pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3099 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3100 pc_tree->horizontal[1].skip = x->skip;
3101 if (rate != INT_MAX && dist != INT64_MAX &&
3102 *totrate != INT_MAX && *totdist != INT64_MAX) {
3108 case PARTITION_SPLIT:
3109 subsize = get_subsize(bsize, PARTITION_SPLIT);
3110 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
3111 subsize, output_enabled, totrate, totdist,
3113 nonrd_use_partition(cpi, tile, mi + hbs, tp,
3114 mi_row, mi_col + hbs, subsize, output_enabled,
3115 &rate, &dist, pc_tree->split[1]);
3116 if (rate != INT_MAX && dist != INT64_MAX &&
3117 *totrate != INT_MAX && *totdist != INT64_MAX) {
3121 nonrd_use_partition(cpi, tile, mi + hbs * mis, tp,
3122 mi_row + hbs, mi_col, subsize, output_enabled,
3123 &rate, &dist, pc_tree->split[2]);
3124 if (rate != INT_MAX && dist != INT64_MAX &&
3125 *totrate != INT_MAX && *totdist != INT64_MAX) {
3129 nonrd_use_partition(cpi, tile, mi + hbs * mis + hbs, tp,
3130 mi_row + hbs, mi_col + hbs, subsize, output_enabled,
3131 &rate, &dist, pc_tree->split[3]);
3132 if (rate != INT_MAX && dist != INT64_MAX &&
3133 *totrate != INT_MAX && *totdist != INT64_MAX) {
3139 assert(0 && "Invalid partition type.");
3143 if (bsize == BLOCK_64X64 && output_enabled) {
3144 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
3145 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
3146 *totrate, *totdist);
3147 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, bsize, pc_tree);
3151 static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
3152 int mi_row, TOKENEXTRA **tp) {
3153 SPEED_FEATURES *const sf = &cpi->sf;
3154 VP9_COMMON *const cm = &cpi->common;
3155 MACROBLOCK *const x = &cpi->mb;
3156 MACROBLOCKD *const xd = &x->e_mbd;
3159 // Initialize the left context for the new SB row
3160 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
3161 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
3163 // Code each SB in the row
3164 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
3165 mi_col += MI_BLOCK_SIZE) {
3167 int64_t dummy_dist = 0;
3168 const int idx_str = cm->mi_stride * mi_row + mi_col;
3169 MODE_INFO *mi = cm->mi + idx_str;
3171 x->in_static_area = 0;
3172 x->source_variance = UINT_MAX;
3173 vp9_zero(x->pred_mv);
3175 // Set the partition type of the 64X64 block
3176 switch (sf->partition_search_type) {
3177 case VAR_BASED_PARTITION:
3178 choose_partitioning(cpi, tile, mi_row, mi_col);
3179 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
3180 1, &dummy_rate, &dummy_dist, cpi->pc_root);
3182 case SOURCE_VAR_BASED_PARTITION:
3183 set_source_var_based_partition(cpi, tile, mi, mi_row, mi_col);
3184 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
3185 1, &dummy_rate, &dummy_dist, cpi->pc_root);
3187 case FIXED_PARTITION:
3188 bsize = sf->partition_search_type == FIXED_PARTITION ?
3189 sf->always_this_block_size :
3190 get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
3191 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
3192 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
3193 1, &dummy_rate, &dummy_dist, cpi->pc_root);
3195 case REFERENCE_PARTITION:
3196 if (sf->partition_check ||
3197 !(x->in_static_area = is_background(cpi, tile, mi_row, mi_col))) {
3198 set_modeinfo_offsets(cm, xd, mi_row, mi_col);
3199 auto_partition_range(cpi, tile, mi_row, mi_col,
3200 &sf->min_partition_size,
3201 &sf->max_partition_size);
3202 nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
3203 &dummy_rate, &dummy_dist, 1, INT64_MAX,
3206 choose_partitioning(cpi, tile, mi_row, mi_col);
3207 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
3208 BLOCK_64X64, 1, &dummy_rate, &dummy_dist,
3218 // end RTC play code
3220 static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
3221 const SPEED_FEATURES *const sf = &cpi->sf;
3222 const VP9_COMMON *const cm = &cpi->common;
3224 const uint8_t *src = cpi->Source->y_buffer;
3225 const uint8_t *last_src = cpi->Last_Source->y_buffer;
3226 const int src_stride = cpi->Source->y_stride;
3227 const int last_stride = cpi->Last_Source->y_stride;
3229 // Pick cutoff threshold
3230 const int cutoff = (MIN(cm->width, cm->height) >= 720) ?
3231 (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
3232 (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
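// The cutoff is a fixed percentage of the frame's macroblocks
// (VAR_HIST_LARGE_CUT_OFF for >= 720p material, VAR_HIST_SMALL_CUT_OFF
// otherwise). It is compared against the 16x16 diff-variance histogram
// built below; only when the "high variance" top bin stays under the
// cutoff is cpi->source_var_thresh set from the lower bins.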
3233 DECLARE_ALIGNED_ARRAY(16, int, hist, VAR_HIST_BINS);
3234 diff *var16 = cpi->source_diff_var;
3239 vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
3241 for (i = 0; i < cm->mb_rows; i++) {
3242 for (j = 0; j < cm->mb_cols; j++) {
3243 #if CONFIG_VP9_HIGHBITDEPTH
3244 if (cm->use_highbitdepth) {
3245 switch (cm->bit_depth) {
3247 vp9_highbd_get16x16var(src, src_stride, last_src, last_stride,
3248 &var16->sse, &var16->sum);
3251 vp9_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
3252 &var16->sse, &var16->sum);
3255 vp9_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
3256 &var16->sse, &var16->sum);
3259 assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
3264 vp9_get16x16var(src, src_stride, last_src, last_stride,
3265 &var16->sse, &var16->sum);
3268 vp9_get16x16var(src, src_stride, last_src, last_stride,
3269 &var16->sse, &var16->sum);
3270 #endif // CONFIG_VP9_HIGHBITDEPTH
3271 var16->var = var16->sse -
3272 (((uint32_t)var16->sum * var16->sum) >> 8);
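// Standard variance identity for a 16x16 (256-pixel) block of frame
// differences: N * Var = SSE - (sum * sum) / N, with the division by 256
// implemented as the >> 8 above.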
3274 if (var16->var >= VAR_HIST_MAX_BG_VAR)
3275 hist[VAR_HIST_BINS - 1]++;
3277 hist[var16->var / VAR_HIST_FACTOR]++;
3284 src = src - cm->mb_cols * 16 + 16 * src_stride;
3285 last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
3288 cpi->source_var_thresh = 0;
3290 if (hist[VAR_HIST_BINS - 1] < cutoff) {
3291 for (i = 0; i < VAR_HIST_BINS - 1; i++) {
3295 cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
3301 return sf->search_type_check_frequency;
3304 static void source_var_based_partition_search_method(VP9_COMP *cpi) {
3305 VP9_COMMON *const cm = &cpi->common;
3306 SPEED_FEATURES *const sf = &cpi->sf;
3308 if (cm->frame_type == KEY_FRAME) {
3309 // For key frame, use SEARCH_PARTITION.
3310 sf->partition_search_type = SEARCH_PARTITION;
3311 } else if (cm->intra_only) {
3312 sf->partition_search_type = FIXED_PARTITION;
3314 if (cm->last_width != cm->width || cm->last_height != cm->height) {
3315 if (cpi->source_diff_var)
3316 vpx_free(cpi->source_diff_var);
3318 CHECK_MEM_ERROR(cm, cpi->source_diff_var,
3319 vpx_calloc(cm->MBs, sizeof(diff)));
3322 if (!cpi->frames_till_next_var_check)
3323 cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);
3325 if (cpi->frames_till_next_var_check > 0) {
3326 sf->partition_search_type = FIXED_PARTITION;
3327 cpi->frames_till_next_var_check--;
3332 static int get_skip_encode_frame(const VP9_COMMON *cm) {
3333 unsigned int intra_count = 0, inter_count = 0;
3336 for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
3337 intra_count += cm->counts.intra_inter[j][0];
3338 inter_count += cm->counts.intra_inter[j][1];
3341 return (intra_count << 2) < inter_count &&
3342 cm->frame_type != KEY_FRAME &&
3346 static void encode_tiles(VP9_COMP *cpi) {
3347 const VP9_COMMON *const cm = &cpi->common;
3348 const int tile_cols = 1 << cm->log2_tile_cols;
3349 const int tile_rows = 1 << cm->log2_tile_rows;
3351 int tile_col, tile_row;
3352 TileInfo tile[4][1 << 6];
3353 TOKENEXTRA *tok[4][1 << 6];
3354 TOKENEXTRA *pre_tok = cpi->tok;
3357 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
3358 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
3359 vp9_tile_init(&tile[tile_row][tile_col], cm, tile_row, tile_col);
3361 tok[tile_row][tile_col] = pre_tok + tile_tok;
3362 pre_tok = tok[tile_row][tile_col];
3363 tile_tok = allocated_tokens(tile[tile_row][tile_col]);
3367 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
3368 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
3369 const TileInfo * const ptile = &tile[tile_row][tile_col];
3370 TOKENEXTRA * const old_tok = tok[tile_row][tile_col];
3373 for (mi_row = ptile->mi_row_start; mi_row < ptile->mi_row_end;
3374 mi_row += MI_BLOCK_SIZE) {
3375 if (cpi->sf.use_nonrd_pick_mode && !frame_is_intra_only(cm))
3376 encode_nonrd_sb_row(cpi, ptile, mi_row, &tok[tile_row][tile_col]);
3378 encode_rd_sb_row(cpi, ptile, mi_row, &tok[tile_row][tile_col]);
3380 cpi->tok_count[tile_row][tile_col] =
3381 (unsigned int)(tok[tile_row][tile_col] - old_tok);
3382 assert(tok[tile_row][tile_col] - old_tok <= allocated_tokens(*ptile));
3387 #if CONFIG_FP_MB_STATS
3388 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
3389 VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
3390 uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
3391 cm->current_video_frame * cm->MBs * sizeof(uint8_t);
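// The first pass stores one byte of statistics per macroblock per frame, so
// frame N's record starts N * MBs bytes into the stats buffer; the bounds
// check below guards against reading past the end of that buffer.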
3393 if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
3396 *this_frame_mb_stats = mb_stats_in;
3402 static void encode_frame_internal(VP9_COMP *cpi) {
3403 SPEED_FEATURES *const sf = &cpi->sf;
3404 RD_OPT *const rd_opt = &cpi->rd;
3405 MACROBLOCK *const x = &cpi->mb;
3406 VP9_COMMON *const cm = &cpi->common;
3407 MACROBLOCKD *const xd = &x->e_mbd;
3410 xd->mi[0].src_mi = &xd->mi[0];
3412 vp9_zero(cm->counts);
3413 vp9_zero(cpi->coef_counts);
3414 vp9_zero(rd_opt->comp_pred_diff);
3415 vp9_zero(rd_opt->filter_diff);
3416 vp9_zero(rd_opt->tx_select_diff);
3417 vp9_zero(rd_opt->tx_select_threshes);
3419 xd->lossless = cm->base_qindex == 0 &&
3420 cm->y_dc_delta_q == 0 &&
3421 cm->uv_dc_delta_q == 0 &&
3422 cm->uv_ac_delta_q == 0;
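// VP9 has no explicit lossless flag: a base qindex of 0 with all delta-Q
// values equal to 0 implies lossless coding, which below selects the 4x4
// Walsh-Hadamard transforms (vp9_fwht4x4 / vp9_iwht4x4_add) instead of the
// DCT.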
3424 cm->tx_mode = select_tx_mode(cpi);
3426 #if CONFIG_VP9_HIGHBITDEPTH
3427 if (cm->use_highbitdepth)
3428 x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vp9_highbd_fdct4x4;
3429 else
3430 x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
3431 x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add :
3432 vp9_highbd_idct4x4_add;
3434 x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
3435 #endif // CONFIG_VP9_HIGHBITDEPTH
3436 x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
3440 cm->lf.filter_level = 0;
3441 cpi->zbin_mode_boost_enabled = 0;
3444 vp9_frame_init_quantizer(cpi);
3446 vp9_initialize_rd_consts(cpi);
3447 vp9_initialize_me_consts(cpi, cm->base_qindex);
3448 init_encode_frame_mb_context(cpi);
3451 x->quant_fp = cpi->sf.use_quant_fp;
3452 vp9_zero(x->skip_txfm);
3453 if (sf->use_nonrd_pick_mode) {
3454 // Initialize internal buffer pointers for rtc coding, where non-RD
3455 // mode decision is used and hence no buffer pointer swap is needed.
3457 struct macroblock_plane *const p = x->plane;
3458 struct macroblockd_plane *const pd = xd->plane;
3459 PICK_MODE_CONTEXT *ctx = &cpi->pc_root->none;
3461 for (i = 0; i < MAX_MB_PLANE; ++i) {
3462 p[i].coeff = ctx->coeff_pbuf[i][0];
3463 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
3464 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
3465 p[i].eobs = ctx->eobs_pbuf[i][0];
3467 vp9_zero(x->zcoeff_blk);
3469 if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
3470 source_var_based_partition_search_method(cpi);
3474 struct vpx_usec_timer emr_timer;
3475 vpx_usec_timer_start(&emr_timer);
3477 #if CONFIG_FP_MB_STATS
3478 if (cpi->use_fp_mb_stats) {
3479 input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
3480 &cpi->twopass.this_frame_mb_stats);
3486 vpx_usec_timer_mark(&emr_timer);
3487 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
3490 sf->skip_encode_frame = sf->skip_encode_sb ? get_skip_encode_frame(cm) : 0;
3493 // Keep record of the total distortion this time around for future use
3494 cpi->last_frame_distortion = cpi->frame_distortion;
3498 static INTERP_FILTER get_interp_filter(
3499 const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
3501 threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
3502 threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
3503 threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
3504 return EIGHTTAP_SMOOTH;
3505 } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
3506 threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
3507 return EIGHTTAP_SHARP;
3508 } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
3515 void vp9_encode_frame(VP9_COMP *cpi) {
3516 VP9_COMMON *const cm = &cpi->common;
3517 RD_OPT *const rd_opt = &cpi->rd;
3519 // In the longer term the encoder should be generalized to match the
3520 // decoder such that we allow compound where one of the 3 buffers has a
3521 // different sign bias and that buffer is then the fixed ref. However, this
3522 // requires further work in the rd loop. For now the only supported encoder
3523 // side behavior is where the ALT ref buffer has opposite sign bias to
3524 // the other two references (last and golden).
3525 if (!frame_is_intra_only(cm)) {
3526 if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
3527 cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
3528 (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
3529 cm->ref_frame_sign_bias[LAST_FRAME])) {
3530 cm->allow_comp_inter_inter = 0;
3532 cm->allow_comp_inter_inter = 1;
3533 cm->comp_fixed_ref = ALTREF_FRAME;
3534 cm->comp_var_ref[0] = LAST_FRAME;
3535 cm->comp_var_ref[1] = GOLDEN_FRAME;
3539 if (cpi->sf.frame_parameter_update) {
3542 // This code does a single RD pass over the whole frame assuming
3543 // either compound, single or hybrid prediction as per whatever has
3544 // worked best for that type of frame in the past.
3545 // It also predicts whether another coding mode would have worked
3546 // better than this coding mode. If that is the case, it remembers
3547 // that for subsequent frames.
3548 // It does the same analysis for transform size selection also.
3549 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
3550 int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
3551 int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
3552 int *const tx_thrs = rd_opt->tx_select_threshes[frame_type];
3553 const int is_alt_ref = frame_type == ALTREF_FRAME;
3555 /* prediction (compound, single or hybrid) mode selection */
3556 if (is_alt_ref || !cm->allow_comp_inter_inter)
3557 cm->reference_mode = SINGLE_REFERENCE;
3558 else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
3559 mode_thrs[COMPOUND_REFERENCE] >
3560 mode_thrs[REFERENCE_MODE_SELECT] &&
3561 check_dual_ref_flags(cpi) &&
3562 cpi->static_mb_pct == 100)
3563 cm->reference_mode = COMPOUND_REFERENCE;
3564 else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
3565 cm->reference_mode = SINGLE_REFERENCE;
3567 cm->reference_mode = REFERENCE_MODE_SELECT;
3569 if (cm->interp_filter == SWITCHABLE)
3570 cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);
3572 encode_frame_internal(cpi);
3574 for (i = 0; i < REFERENCE_MODES; ++i)
3575 mode_thrs[i] = (mode_thrs[i] + rd_opt->comp_pred_diff[i] / cm->MBs) / 2;
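// Each threshold is a simple running average: the new per-MB RD difference
// is averaged 50/50 with the stored value, so a prediction mode (and,
// below, a filter or tx mode) has to keep winning over several frames of
// this frame type before it dominates the choices made at the top of
// vp9_encode_frame().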
3577 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
3578 filter_thrs[i] = (filter_thrs[i] + rd_opt->filter_diff[i] / cm->MBs) / 2;
3580 for (i = 0; i < TX_MODES; ++i) {
3581 int64_t pd = rd_opt->tx_select_diff[i];
3582 if (i == TX_MODE_SELECT)
3583 pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZES - 1), 0);
3584 tx_thrs[i] = (tx_thrs[i] + (int)(pd / cm->MBs)) / 2;
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
        single_count_zero += cm->counts.comp_inter[i][0];
        comp_count_zero += cm->counts.comp_inter[i][1];
      }

      if (comp_count_zero == 0) {
        cm->reference_mode = SINGLE_REFERENCE;
        vp9_zero(cm->counts.comp_inter);
      } else if (single_count_zero == 0) {
        cm->reference_mode = COMPOUND_REFERENCE;
        vp9_zero(cm->counts.comp_inter);
      }
    }

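    // Likewise for transform sizes: if some sizes were never actually picked,
    // narrow tx_mode so the unused choices no longer have to be signalled.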
    if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        count4x4 += cm->counts.tx.p32x32[i][TX_4X4];
        count4x4 += cm->counts.tx.p16x16[i][TX_4X4];
        count4x4 += cm->counts.tx.p8x8[i][TX_4X4];

        count8x8_lp += cm->counts.tx.p32x32[i][TX_8X8];
        count8x8_lp += cm->counts.tx.p16x16[i][TX_8X8];
        count8x8_8x8p += cm->counts.tx.p8x8[i][TX_8X8];

        count16x16_16x16p += cm->counts.tx.p16x16[i][TX_16X16];
        count16x16_lp += cm->counts.tx.p32x32[i][TX_16X16];
        count32x32 += cm->counts.tx.p32x32[i][TX_32X32];
      }

      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
        reset_skip_tx_size(cm, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
                 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
        cm->tx_mode = ONLY_4X4;
        reset_skip_tx_size(cm, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_16X16;
        reset_skip_tx_size(cm, TX_16X16);
      }
    }
  } else {
    cm->reference_mode = SINGLE_REFERENCE;
    encode_frame_internal(cpi);
  }
}

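// Accumulate intra prediction mode counts for this block. Sub-8x8 blocks
// contribute one count per 4x4 prediction unit; larger blocks contribute a
// single count in their size group.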
static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
  const PREDICTION_MODE y_mode = mi->mbmi.mode;
  const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_h)
      for (idx = 0; idx < 2; idx += num_4x4_w)
        ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }

  ++counts->uv_mode[y_mode][uv_mode];
}

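// Pick the quantizer zero-bin boost for a block from its prediction mode.
// The boost widens the zero bin so that small coefficients are more readily
// quantized away, which helps suppress noise for low-motion modes.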
static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) {
  if (!enabled) return 0;

  if (is_inter_block(mbmi)) {
    if (mbmi->mode == ZEROMV)
      return mbmi->ref_frame[0] != LAST_FRAME ? GF_ZEROMV_ZBIN_BOOST
                                              : LF_ZEROMV_ZBIN_BOOST;
    return mbmi->sb_type < BLOCK_8X8 ? SPLIT_MV_ZBIN_BOOST : MV_ZBIN_BOOST;
  }
  return INTRA_ZBIN_BOOST;
}

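// Final encode of one superblock using the mode decisions stored in the mode
// info and pick-mode context: build the predictor, transform/quantize the
// residual, tokenize the coefficients and, when output is enabled, update the
// counts used for probability adaptation.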
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi_8x8 = xd->mi;
  MODE_INFO *mi = mi_8x8;
  MB_MODE_INFO *mbmi = &mi->mbmi;
  const int seg_skip = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
                                             SEG_LVL_SKIP);
  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];

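  // Per-block shortcut flags derived from the active speed features: where
  // allowed, reuse results from the mode-decision pass instead of redoing the
  // transform/quantize and coefficient-optimization work.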
  x->skip_recode = !x->select_tx_size && mbmi->sb_type >= BLOCK_8X8 &&
                   cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
                   cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
                   cpi->sf.allow_skip_recode;

  if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
    vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));

  x->skip_optimize = ctx->is_coded;
  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
                    x->q_index < QIDX_SKIP_THRESH);

  if (x->skip_encode)
    return;

  set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);

  // Experimental code. Special case for gf and arf zeromv modes.
  // Increase zbin size to suppress noise.
  cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi,
                                             cpi->zbin_mode_boost_enabled);
  vp9_update_zbin_extra(cpi, x);

  if (!is_inter_block(mbmi)) {
    int plane;
    mbmi->skip = 1;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
      vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);
    if (output_enabled)
      sum_intra_stats(&cm->counts, mi);
    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  } else {
    int ref;
    const int is_compound = has_second_ref(mbmi);
    for (ref = 0; ref < 1 + is_compound; ++ref) {
      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
                                                     mbmi->ref_frame[ref]);
      vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                           &xd->block_refs[ref]->sf);
    }
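    // Build the inter predictors (the luma predictor may already have been
    // produced by the non-RD pick-mode path and is only rebuilt when that
    // reuse is disabled or the segment forces skip), then encode and tokenize
    // the residual.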
    if (!cpi->sf.reuse_inter_pred_sby || seg_skip)
      vp9_build_inter_predictors_sby(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));

    vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));

    vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  }

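  // Statistics and bookkeeping for blocks that are part of the real output
  // (as opposed to a rate-distortion trial pass).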
  if (output_enabled) {
    if (cm->tx_mode == TX_MODE_SELECT &&
        mbmi->sb_type >= BLOCK_8X8 &&
        !(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
      ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd),
                      &cm->counts.tx)[mbmi->tx_size];
    } else {
      int x, y;
      TX_SIZE tx_size;
      // The new intra coding scheme requires no change of transform size.
      if (is_inter_block(&mi->mbmi)) {
        tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                      max_txsize_lookup[bsize]);
      } else {
        tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
      }

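      // Propagate the effective transform size to every 8x8 mode-info unit
      // covered by this block, clamped to the visible frame area.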
      for (y = 0; y < mi_height; y++)
        for (x = 0; x < mi_width; x++)
          if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
            mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size;