/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>
#include <math.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_ports/system_state.h"

#if CONFIG_MISMATCH_DEBUG
#include "vpx_util/vpx_debug_util.h"
#endif  // CONFIG_MISMATCH_DEBUG

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#if !CONFIG_REALTIME_ONLY
#include "vp9/encoder/vp9_aq_360.h"
#include "vp9/encoder/vp9_aq_complexity.h"
#endif
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#if !CONFIG_REALTIME_ONLY
#include "vp9/encoder/vp9_aq_variance.h"
#endif
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_multi_thread.h"
#include "vp9/encoder/vp9_partition_models.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"

static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                              int output_enabled, int mi_row, int mi_col,
                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);

// This is used as a reference when computing the source variance for the
// purpose of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};

#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};

static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
};

static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH

unsigned int vp9_get_sby_variance(VP9_COMP *cpi, const struct buf_2d *ref,
                                  BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var =
      cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP9_VAR_OFFS, 0, &sse);
  return var;
}
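// Note: vf() above returns sse minus the squared mean of the difference
// against the flat 128-level reference, which algebraically equals the sum
// of squared deviations of the source block from its own mean; the constant
// 128 offset cancels out of the result.
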
#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_variance(VP9_COMP *cpi, const struct buf_2d *ref,
                                       BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var =
          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                             CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), 0, &sse);
      break;
    case 12:
      var =
          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                             CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), 0, &sse);
      break;
    case 8:
    default:
      var =
          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                             CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), 0, &sse);
      break;
  }
  return var;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  return ROUND_POWER_OF_TWO(vp9_get_sby_variance(cpi, ref, bs),
                            num_pels_log2_lookup[bs]);
}
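// Example: for BLOCK_64X64, num_pels_log2_lookup[bs] is 12 (64 * 64 = 4096
// pixels), so the block variance is divided by 4096 with rounding to give a
// per-pixel value.
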
#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_perpixel_variance(VP9_COMP *cpi,
                                                const struct buf_2d *ref,
                                                BLOCK_SIZE bs, int bd) {
  return (unsigned int)ROUND64_POWER_OF_TWO(
      (int64_t)vp9_high_get_sby_variance(cpi, ref, bs, bd),
      num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if !CONFIG_REALTIME_ONLY
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse, var;
  uint8_t *last_y;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row, int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(
      cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
  if (var < 8)
    return BLOCK_64X64;
  else if (var < 128)
    return BLOCK_32X32;
  else if (var < 2048)
    return BLOCK_16X16;
  else
    return BLOCK_8X8;
}
#endif  // !CONFIG_REALTIME_ONLY

static void set_segment_index(VP9_COMP *cpi, MACROBLOCK *const x, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, int segment_index) {
  VP9_COMMON *const cm = &cpi->common;
  const struct segmentation *const seg = &cm->seg;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];

  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  const uint8_t *const map =
      seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;

  // Initialize the segmentation index as 0.
  mi->segment_id = 0;

  // Skip the rest if AQ mode is disabled.
  if (!seg->enabled) return;

  switch (aq_mode) {
    case CYCLIC_REFRESH_AQ:
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
      break;
#if !CONFIG_REALTIME_ONLY
    case VARIANCE_AQ:
      if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
          cpi->force_update_segmentation ||
          (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
        int min_energy;
        int max_energy;
        // Get sub block energy range.
        if (bsize >= BLOCK_32X32) {
          vp9_get_sub_block_energy(cpi, x, mi_row, mi_col, bsize, &min_energy,
                                   &max_energy);
        } else {
          min_energy = bsize <= BLOCK_16X16 ? x->mb_energy
                                            : vp9_block_energy(cpi, x, bsize);
        }
        mi->segment_id = vp9_vaq_segment_id(min_energy);
      } else {
        mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
      }
      break;
    case EQUATOR360_AQ:
      if (cm->frame_type == KEY_FRAME || cpi->force_update_segmentation)
        mi->segment_id = vp9_360aq_segment_id(mi_row, cm->mi_rows);
      else
        mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
      break;
#endif
    case LOOKAHEAD_AQ:
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
      break;
    case PSNR_AQ: mi->segment_id = segment_index; break;
    case PERCEPTUAL_AQ: mi->segment_id = x->segment_id; break;
    default: break;
  }

  // Set segment index from ROI map if it's enabled.
  if (cpi->roi.enabled)
    mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);

  vp9_init_plane_quantizers(cpi, x);
}

// Lighter version of set_offsets that only sets the mode info pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCK *const x,
                                         MACROBLOCKD *const xd, int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
  x->mbmi_ext = x->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}

static void set_ssim_rdmult(VP9_COMP *const cpi, MACROBLOCK *const x,
                            const BLOCK_SIZE bsize, const int mi_row,
                            const int mi_col, int *const rdmult) {
  const VP9_COMMON *const cm = &cpi->common;

  const int bsize_base = BLOCK_16X16;
  const int num_8x8_w = num_8x8_blocks_wide_lookup[bsize_base];
  const int num_8x8_h = num_8x8_blocks_high_lookup[bsize_base];
  const int num_cols = (cm->mi_cols + num_8x8_w - 1) / num_8x8_w;
  const int num_rows = (cm->mi_rows + num_8x8_h - 1) / num_8x8_h;
  const int num_bcols =
      (num_8x8_blocks_wide_lookup[bsize] + num_8x8_w - 1) / num_8x8_w;
  const int num_brows =
      (num_8x8_blocks_high_lookup[bsize] + num_8x8_h - 1) / num_8x8_h;
  int row, col;
  double num_of_mi = 0.0;
  double geom_mean_of_scale = 0.0;

  assert(cpi->oxcf.tuning == VP8_TUNE_SSIM);

  for (row = mi_row / num_8x8_w;
       row < num_rows && row < mi_row / num_8x8_w + num_brows; ++row) {
    for (col = mi_col / num_8x8_h;
         col < num_cols && col < mi_col / num_8x8_h + num_bcols; ++col) {
      const int index = row * num_cols + col;
      geom_mean_of_scale += log(cpi->mi_ssim_rdmult_scaling_factors[index]);
      num_of_mi += 1.0;
    }
  }
  geom_mean_of_scale = exp(geom_mean_of_scale / num_of_mi);

  *rdmult = (int)((double)(*rdmult) * geom_mean_of_scale);
  *rdmult = VPXMAX(*rdmult, 0);
  set_error_per_bit(x, *rdmult);
  vpx_clear_system_state();
}

static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const VP9EncoderConfig *const oxcf = &cpi->oxcf;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  MvLimits *const mv_limits = &x->mv_limits;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, x, xd, mi_row, mi_col);

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond the range do not produce a new/different prediction block.
  mv_limits->row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  mv_limits->col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  mv_limits->row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  mv_limits->col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
                 cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;
  if (oxcf->tuning == VP8_TUNE_SSIM) {
    set_ssim_rdmult(cpi, x, bsize, mi_row, mi_col, &x->rdmult);
  }

  // required by vp9_append_sub8x8_mvs_for_idx() and vp9_find_best_ref_mvs()
  xd->tile = *tile;
}

static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width =
      VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
  const int block_height =
      VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
  const int mi_stride = xd->mi_stride;
  MODE_INFO *const src_mi = xd->mi[0];
  int i, j;

  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) xd->mi[j * mi_stride + i] = src_mi;
}

static void set_block_size(VP9_COMP *const cpi, MACROBLOCK *const x,
                           MACROBLOCKD *const xd, int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col);
    xd->mi[0]->sb_type = bsize;
  }
}

// This struct is used for computing variance in choose_partitioning(), where
// the max number of samples within a superblock is 16x16 (with 4x4 avg). Even
// in high bitdepth, uint32_t is enough for sum_square_error (2^12 * 2^12 * 16
// * 16 = 2^32).
typedef struct {
  uint32_t sum_square_error;
  int32_t sum_error;
  int log2_count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    default: {
      v4x4 *vt = (v4x4 *)data;
      assert(bsize == BLOCK_4X4);
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++) node->split[i] = &vt->split[i];
      break;
    }
  }
}

// Set variance values given sum square error, sum error, count.
static void fill_variance(uint32_t s2, int32_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}

static void get_variance(var *v) {
  v->variance =
      (int)(256 * (v->sum_square_error -
                   (uint32_t)(((int64_t)v->sum_error * v->sum_error) >>
                              v->log2_count)) >>
            v->log2_count);
}

static void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}
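
// Worked example for get_variance() above: with sum_square_error = 1000,
// sum_error = 40 and log2_count = 4 (16 samples), the squared-mean term is
// (40 * 40) >> 4 = 100, so variance = (256 * (1000 - 100)) >> 4 = 14400,
// i.e. the per-sample variance 56.25 scaled by 256 to keep integer precision.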

static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  memset(&node, 0, sizeof(node));
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}
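
// The four split[] entries are the quadrants in raster order (0 = top-left,
// 1 = top-right, 2 = bottom-left, 3 = bottom-right), so horz[0]/horz[1] above
// are the top/bottom halves, vert[0]/vert[1] the left/right halves, and none
// is the whole block.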

static int set_vt_partitioning(VP9_COMP *cpi, MACROBLOCK *const x,
                               MACROBLOCKD *const xd, void *data,
                               BLOCK_SIZE bsize, int mi_row, int mi_col,
                               int64_t threshold, BLOCK_SIZE bsize_min,
                               int force_split) {
  VP9_COMMON *const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  if (force_split == 1) return 0;

  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_min) {
    // Variance already computed to set the force_split.
    if (frame_is_intra_only(cm)) get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    // Variance already computed to set the force_split.
    if (frame_is_intra_only(cm)) get_variance(&vt.part_variances->none);
    // For key frame: take split for bsize above 32X32 or very high variance.
    if (frame_is_intra_only(cm) &&
        (bsize > BLOCK_32X32 ||
         vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }
    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }

    return 0;
  }
  return 0;
}
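
// Return semantics of set_vt_partitioning(): 1 means a partition (none, horz
// or vert) was selected and written at this level, 0 means the caller must
// recurse into the four sub-blocks, as choose_partitioning() does below.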

static int64_t scale_part_thresh_sumdiff(int64_t threshold_base, int speed,
                                         int width, int height,
                                         int content_state) {
  if (speed >= 8) {
    if (width <= 640 && height <= 480)
      return (5 * threshold_base) >> 2;
    else if ((content_state == kLowSadLowSumdiff) ||
             (content_state == kHighSadLowSumdiff) ||
             (content_state == kLowVarHighSumdiff))
      return (5 * threshold_base) >> 2;
  } else if (speed == 7) {
    if ((content_state == kLowSadLowSumdiff) ||
        (content_state == kHighSadLowSumdiff) ||
        (content_state == kLowVarHighSumdiff)) {
      return (5 * threshold_base) >> 2;
    }
  }
  return threshold_base;
}

// Set the variance split thresholds for the following block sizes:
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frame.
static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q,
                               int content_state) {
  VP9_COMMON *const cm = &cpi->common;
  const int is_key_frame = frame_is_intra_only(cm);
  const int threshold_multiplier =
      is_key_frame ? 20 : cpi->sf.variance_part_thresh_mult;
  int64_t threshold_base =
      (int64_t)(threshold_multiplier * cpi->y_dequant[q][1]);

  if (is_key_frame) {
    thresholds[0] = threshold_base;
    thresholds[1] = threshold_base >> 2;
    thresholds[2] = threshold_base >> 2;
    thresholds[3] = threshold_base << 2;
  } else {
    // Increase base variance threshold based on estimated noise level.
    if (cpi->noise_estimate.enabled && cm->width >= 640 && cm->height >= 480) {
      NOISE_LEVEL noise_level =
          vp9_noise_estimate_extract_level(&cpi->noise_estimate);
      if (noise_level == kHigh)
        threshold_base = 3 * threshold_base;
      else if (noise_level == kMedium)
        threshold_base = threshold_base << 1;
      else if (noise_level < kLow)
        threshold_base = (7 * threshold_base) >> 3;
    }
#if CONFIG_VP9_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
        cpi->oxcf.speed > 5 && cpi->denoiser.denoising_level >= kDenLow)
      threshold_base =
          vp9_scale_part_thresh(threshold_base, cpi->denoiser.denoising_level,
                                content_state, cpi->svc.temporal_layer_id);
    else
      threshold_base =
          scale_part_thresh_sumdiff(threshold_base, cpi->oxcf.speed, cm->width,
                                    cm->height, content_state);
#else
    // Increase base variance threshold based on content_state/sum_diff level.
    threshold_base = scale_part_thresh_sumdiff(
        threshold_base, cpi->oxcf.speed, cm->width, cm->height, content_state);
#endif
    thresholds[0] = threshold_base;
    thresholds[2] = threshold_base << cpi->oxcf.speed;
    if (cm->width >= 1280 && cm->height >= 720 && cpi->oxcf.speed < 7)
      thresholds[2] = thresholds[2] << 1;
    if (cm->width <= 352 && cm->height <= 288) {
      thresholds[0] = threshold_base >> 3;
      thresholds[1] = threshold_base >> 1;
      thresholds[2] = threshold_base << 3;
    } else if (cm->width < 1280 && cm->height < 720) {
      thresholds[1] = (5 * threshold_base) >> 2;
    } else if (cm->width < 1920 && cm->height < 1080) {
      thresholds[1] = threshold_base << 1;
    } else {
      thresholds[1] = (5 * threshold_base) >> 1;
    }
    if (cpi->sf.disable_16x16part_nonkey) thresholds[2] = INT64_MAX;
  }
}
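
// Worked example for the key-frame branch above: threshold_base is
// 20 * y_dequant[q][1], so the four thresholds come out as
// { base, base >> 2, base >> 2, base << 2 } for the 64x64, 32x32, 16x16 and
// 8x8 decisions respectively.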

void vp9_set_variance_partition_thresholds(VP9_COMP *cpi, int q,
                                           int content_state) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int is_key_frame = frame_is_intra_only(cm);
  if (sf->partition_search_type != VAR_BASED_PARTITION &&
      sf->partition_search_type != REFERENCE_PARTITION) {
    return;
  } else {
    set_vbp_thresholds(cpi, cpi->vbp_thresholds, q, content_state);
    // The thresholds below are not changed locally.
    if (is_key_frame) {
      cpi->vbp_threshold_sad = 0;
      cpi->vbp_threshold_copy = 0;
      cpi->vbp_bsize_min = BLOCK_8X8;
    } else {
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_sad = 10;
      else
        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000
                                     ? (cpi->y_dequant[q][1] << 1)
                                     : 1000;
      cpi->vbp_bsize_min = BLOCK_16X16;
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_copy = 4000;
      else if (cm->width <= 640 && cm->height <= 360)
        cpi->vbp_threshold_copy = 8000;
      else
        cpi->vbp_threshold_copy = (cpi->y_dequant[q][1] << 3) > 8000
                                      ? (cpi->y_dequant[q][1] << 3)
                                      : 8000;
      if (cpi->rc.high_source_sad ||
          (cpi->use_svc && cpi->svc.high_source_sad_superframe)) {
        cpi->vbp_threshold_sad = 0;
        cpi->vbp_threshold_copy = 0;
      }
    }
    cpi->vbp_threshold_minmax = 15 + (q >> 3);
  }
}

// Compute the minmax over the 8x8 subblocks.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                              int dp, int x16_idx, int y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                              int highbd_flag,
#endif
                              int pixels_wide, int pixels_high) {
  int k;
  int minmax_max = 0;
  int minmax_min = 255;
  // Loop over the 4 8x8 subblocks.
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    int min = 0;
    int max = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                              d + y8_idx * dp + x8_idx, dp, &min, &max);
      } else {
        vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx,
                       dp, &min, &max);
      }
#else
      vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp,
                     &min, &max);
#endif
      if ((max - min) > minmax_max) minmax_max = (max - min);
      if ((max - min) < minmax_min) minmax_min = (max - min);
    }
  }
  return (minmax_max - minmax_min);
}
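
// The returned value is the spread of the (max - min) ranges over the four
// 8x8 sub-blocks: a large spread means some sub-blocks are flat while others
// are busy, which choose_partitioning() below uses to force an 8x8 split even
// when the average 16x16 variance alone would not.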

static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x8_idx, int y8_idx, v8x8 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide, int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x4_idx = x8_idx + ((k & 1) << 2);
    int y4_idx = y8_idx + ((k >> 1) << 2);
    unsigned int sse = 0;
    int sum = 0;
    if (x4_idx < pixels_wide && y4_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      } else {
        s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      }
#else
      s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
      if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}

static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x16_idx, int y16_idx, v16x16 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide, int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    unsigned int sse = 0;
    int sum = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      } else {
        s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      }
#else
      s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
      if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
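
// Note: s_avg/d_avg reduce each 8x8 sub-block to its average, so the
// "variance" accumulated at the 16x16 level is computed over four averaged
// samples rather than 256 pixels. This is the 8x8 down-sampling that the
// variance-partition comments elsewhere in this file refer to; the 4x4
// variant above is the finer down-sampling used for key frames and low
// resolutions.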

// Check if most of the superblock is skin content, and if so, force split to
// 32x32, and set x->sb_is_skin for use in mode selection.
static int skin_sb_split(VP9_COMP *cpi, MACROBLOCK *x, const int low_res,
                         int mi_row, int mi_col, int *force_split) {
  VP9_COMMON *const cm = &cpi->common;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth) return 0;
#endif
  // Avoid checking superblocks on/near boundary and avoid low resolutions.
  // Note superblock may still pick 64X64 if y_sad is very small
  // (i.e., y_sad < cpi->vbp_threshold_sad) below. For now leave this as is.
  if (!low_res && (mi_col >= 8 && mi_col + 8 < cm->mi_cols && mi_row >= 8 &&
                   mi_row + 8 < cm->mi_rows)) {
    int num_16x16_skin = 0;
    int num_16x16_nonskin = 0;
    uint8_t *ysignal = x->plane[0].src.buf;
    uint8_t *usignal = x->plane[1].src.buf;
    uint8_t *vsignal = x->plane[2].src.buf;
    int sp = x->plane[0].src.stride;
    int spuv = x->plane[1].src.stride;
    const int block_index = mi_row * cm->mi_cols + mi_col;
    const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
    const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64];
    const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
    const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
    int i, j;
    // Loop through the 16x16 sub-blocks.
    for (i = 0; i < ymis; i += 2) {
      for (j = 0; j < xmis; j += 2) {
        int bl_index = block_index + i * cm->mi_cols + j;
        int is_skin = cpi->skin_map[bl_index];
        num_16x16_skin += is_skin;
        num_16x16_nonskin += (1 - is_skin);
        if (num_16x16_nonskin > 3) {
          // Exit loop if at least 4 of the 16x16 blocks are not skin.
          i = ymis;
          break;
        }
        ysignal += 16;
        usignal += 8;
        vsignal += 8;
      }
      ysignal += (sp << 4) - 64;
      usignal += (spuv << 3) - 32;
      vsignal += (spuv << 3) - 32;
    }
    if (num_16x16_skin > 12) {
      *force_split = 1;
      return 1;
    }
  }
  return 0;
}

static void set_low_temp_var_flag(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                  v64x64 *vt, int64_t thresholds[],
                                  MV_REFERENCE_FRAME ref_frame_partition,
                                  int mi_col, int mi_row) {
  int i, j;
  VP9_COMMON *const cm = &cpi->common;
  const int mv_thr = cm->width > 640 ? 8 : 4;
  // Check temporal variance for bsize >= 16x16, if LAST_FRAME was selected and
  // int_pro mv is small. If the temporal variance is small set the flag
  // variance_low for the block. The variance threshold can be adjusted, the
  // higher the more aggressive.
  if (ref_frame_partition == LAST_FRAME &&
      (cpi->sf.short_circuit_low_temp_var == 1 ||
       (xd->mi[0]->mv[0].as_mv.col < mv_thr &&
        xd->mi[0]->mv[0].as_mv.col > -mv_thr &&
        xd->mi[0]->mv[0].as_mv.row < mv_thr &&
        xd->mi[0]->mv[0].as_mv.row > -mv_thr))) {
    if (xd->mi[0]->sb_type == BLOCK_64X64) {
      if ((vt->part_variances).none.variance < (thresholds[0] >> 1))
        x->variance_low[0] = 1;
    } else if (xd->mi[0]->sb_type == BLOCK_64X32) {
      for (i = 0; i < 2; i++) {
        if (vt->part_variances.horz[i].variance < (thresholds[0] >> 2))
          x->variance_low[i + 1] = 1;
      }
    } else if (xd->mi[0]->sb_type == BLOCK_32X64) {
      for (i = 0; i < 2; i++) {
        if (vt->part_variances.vert[i].variance < (thresholds[0] >> 2))
          x->variance_low[i + 3] = 1;
      }
    } else {
      for (i = 0; i < 4; i++) {
        const int idx[4][2] = { { 0, 0 }, { 0, 4 }, { 4, 0 }, { 4, 4 } };
        const int idx_str =
            cm->mi_stride * (mi_row + idx[i][0]) + mi_col + idx[i][1];
        MODE_INFO **this_mi = cm->mi_grid_visible + idx_str;

        if (cm->mi_cols <= mi_col + idx[i][1] ||
            cm->mi_rows <= mi_row + idx[i][0])
          continue;

        if ((*this_mi)->sb_type == BLOCK_32X32) {
          int64_t threshold_32x32 = (cpi->sf.short_circuit_low_temp_var == 1 ||
                                     cpi->sf.short_circuit_low_temp_var == 3)
                                        ? ((5 * thresholds[1]) >> 3)
                                        : (thresholds[1] >> 1);
          if (vt->split[i].part_variances.none.variance < threshold_32x32)
            x->variance_low[i + 5] = 1;
        } else if (cpi->sf.short_circuit_low_temp_var >= 2) {
          // For 32x16 and 16x32 blocks, the flag is set on each 16x16 block
          // inside.
          if ((*this_mi)->sb_type == BLOCK_16X16 ||
              (*this_mi)->sb_type == BLOCK_32X16 ||
              (*this_mi)->sb_type == BLOCK_16X32) {
            for (j = 0; j < 4; j++) {
              if (vt->split[i].split[j].part_variances.none.variance <
                  (thresholds[2] >> 8))
                x->variance_low[(i << 2) + j + 9] = 1;
            }
          }
        }
      }
    }
  }
}
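
// Layout of x->variance_low[25] as set above: index 0 is the 64x64 block,
// 1-2 the two 64x32 halves, 3-4 the two 32x64 halves, 5-8 the four 32x32
// quadrants, and 9-24 the sixteen 16x16 blocks ((i << 2) + j + 9).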

static void copy_partitioning_helper(VP9_COMP *cpi, MACROBLOCK *x,
                                     MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                     int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  BLOCK_SIZE *prev_part = cpi->prev_partition;
  int start_pos = mi_row * cm->mi_stride + mi_col;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) >> 2;
  BLOCK_SIZE subsize;
  PARTITION_TYPE partition;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  partition = partition_lookup[bsl][prev_part[start_pos]];
  subsize = get_subsize(bsize, partition);

  if (subsize < BLOCK_8X8) {
    set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
        break;
      case PARTITION_HORZ:
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row + bs, mi_col, subsize);
        break;
      case PARTITION_VERT:
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row, mi_col + bs, subsize);
        break;
      default:
        assert(partition == PARTITION_SPLIT);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row, mi_col);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row + bs, mi_col);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row, mi_col + bs);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row + bs, mi_col + bs);
        break;
    }
  }
}

static int copy_partitioning(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                             int mi_row, int mi_col, int segment_id,
                             int sb_offset) {
  int svc_copy_allowed = 1;
  int frames_since_key_thresh = 1;
  if (cpi->use_svc) {
    // For SVC, don't allow copy if base spatial layer is key frame, or if
    // frame is not a temporal enhancement layer frame.
    int layer = LAYER_IDS_TO_IDX(0, cpi->svc.temporal_layer_id,
                                 cpi->svc.number_temporal_layers);
    const LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer];
    if (lc->is_key_frame || !cpi->svc.non_reference_frame) svc_copy_allowed = 0;
    frames_since_key_thresh = cpi->svc.number_spatial_layers << 1;
  }
  if (cpi->rc.frames_since_key > frames_since_key_thresh && svc_copy_allowed &&
      !cpi->resize_pending && segment_id == CR_SEGMENT_ID_BASE &&
      cpi->prev_segment_id[sb_offset] == CR_SEGMENT_ID_BASE &&
      cpi->copied_frame_cnt[sb_offset] < cpi->max_copied_frame) {
    if (cpi->prev_partition != NULL) {
      copy_partitioning_helper(cpi, x, xd, BLOCK_64X64, mi_row, mi_col);
      cpi->copied_frame_cnt[sb_offset] += 1;
      memcpy(x->variance_low, &(cpi->prev_variance_low[sb_offset * 25]),
             sizeof(x->variance_low));
      return 1;
    }
  }
  return 0;
}

static int scale_partitioning_svc(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int mi_row_high, int mi_col_high) {
  VP9_COMMON *const cm = &cpi->common;
  SVC *const svc = &cpi->svc;
  BLOCK_SIZE *prev_part = svc->prev_partition_svc;
  // Variables with _high are for higher resolution.
  int bsize_high = 0;
  int subsize_high = 0;
  const int bsl_high = b_width_log2_lookup[bsize];
  const int bs_high = (1 << bsl_high) >> 2;
  const int has_rows = (mi_row_high + bs_high) < cm->mi_rows;
  const int has_cols = (mi_col_high + bs_high) < cm->mi_cols;

  const int row_boundary_block_scale_factor[BLOCK_SIZES] = { 13, 13, 13, 1, 0,
                                                             1, 2, 2, 3, 4,
                                                             4, 5, 6 };
  const int col_boundary_block_scale_factor[BLOCK_SIZES] = { 13, 13, 13, 2, 2,
                                                             0, 3, 3, 2, 5,
                                                             5, 4, 6 };
  int start_pos;
  BLOCK_SIZE bsize_low;
  PARTITION_TYPE partition_high;

  if (mi_row_high >= cm->mi_rows || mi_col_high >= cm->mi_cols) return 0;
  if (mi_row >= svc->mi_rows[svc->spatial_layer_id - 1] ||
      mi_col >= svc->mi_cols[svc->spatial_layer_id - 1])
    return 0;

  // Find corresponding (mi_col/mi_row) block down-scaled by 2x2.
  start_pos = mi_row * (svc->mi_stride[svc->spatial_layer_id - 1]) + mi_col;
  bsize_low = prev_part[start_pos];
  // The block size is too big for boundaries. Do variance based partitioning.
  if ((!has_rows || !has_cols) && bsize_low > BLOCK_16X16) return 1;

  // For reference frames: return 1 (do variance-based partitioning) if the
  // superblock is not low source sad and lower-resolution bsize is below
  // 32x32.
  if (!cpi->svc.non_reference_frame && !x->skip_low_source_sad &&
      bsize_low < BLOCK_32X32)
    return 1;

  // Scale up block size by 2x2. Force 64x64 for size larger than 32x32.
  if (bsize_low < BLOCK_32X32) {
    bsize_high = bsize_low + 3;
  } else if (bsize_low >= BLOCK_32X32) {
    bsize_high = BLOCK_64X64;
  }
  // Scale up blocks on boundary.
  if (!has_cols && has_rows) {
    bsize_high = bsize_low + row_boundary_block_scale_factor[bsize_low];
  } else if (has_cols && !has_rows) {
    bsize_high = bsize_low + col_boundary_block_scale_factor[bsize_low];
  } else if (!has_cols && !has_rows) {
    bsize_high = bsize_low;
  }

  partition_high = partition_lookup[bsl_high][bsize_high];
  subsize_high = get_subsize(bsize, partition_high);

  if (subsize_high < BLOCK_8X8) {
    set_block_size(cpi, x, xd, mi_row_high, mi_col_high, bsize_high);
  } else {
    const int bsl = b_width_log2_lookup[bsize];
    const int bs = (1 << bsl) >> 2;
    switch (partition_high) {
      case PARTITION_NONE:
        set_block_size(cpi, x, xd, mi_row_high, mi_col_high, bsize_high);
        break;
      case PARTITION_HORZ:
        set_block_size(cpi, x, xd, mi_row_high, mi_col_high, subsize_high);
        if (subsize_high < BLOCK_64X64)
          set_block_size(cpi, x, xd, mi_row_high + bs_high, mi_col_high,
                         subsize_high);
        break;
      case PARTITION_VERT:
        set_block_size(cpi, x, xd, mi_row_high, mi_col_high, subsize_high);
        if (subsize_high < BLOCK_64X64)
          set_block_size(cpi, x, xd, mi_row_high, mi_col_high + bs_high,
                         subsize_high);
        break;
      default:
        assert(partition_high == PARTITION_SPLIT);
        if (scale_partitioning_svc(cpi, x, xd, subsize_high, mi_row, mi_col,
                                   mi_row_high, mi_col_high))
          return 1;
        if (scale_partitioning_svc(cpi, x, xd, subsize_high, mi_row + (bs >> 1),
                                   mi_col, mi_row_high + bs_high, mi_col_high))
          return 1;
        if (scale_partitioning_svc(cpi, x, xd, subsize_high, mi_row,
                                   mi_col + (bs >> 1), mi_row_high,
                                   mi_col_high + bs_high))
          return 1;
        if (scale_partitioning_svc(cpi, x, xd, subsize_high, mi_row + (bs >> 1),
                                   mi_col + (bs >> 1), mi_row_high + bs_high,
                                   mi_col_high + bs_high))
          return 1;
        break;
    }
  }

  return 0;
}

static void update_partition_svc(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row,
                                 int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  BLOCK_SIZE *prev_part = cpi->svc.prev_partition_svc;
  int start_pos = mi_row * cm->mi_stride + mi_col;
  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) >> 2;
  BLOCK_SIZE subsize;
  PARTITION_TYPE partition;
  const MODE_INFO *mi = NULL;
  int xx, yy;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  mi = cm->mi_grid_visible[start_pos];
  partition = partition_lookup[bsl][mi->sb_type];
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    prev_part[start_pos] = bsize;
  } else {
    switch (partition) {
      case PARTITION_NONE:
        prev_part[start_pos] = bsize;
        if (bsize == BLOCK_64X64) {
          for (xx = 0; xx < 8; xx += 4)
            for (yy = 0; yy < 8; yy += 4) {
              if ((mi_row + xx < cm->mi_rows) && (mi_col + yy < cm->mi_cols))
                prev_part[start_pos + xx * cm->mi_stride + yy] = bsize;
            }
        }
        break;
      case PARTITION_HORZ:
        prev_part[start_pos] = subsize;
        if (mi_row + bs < cm->mi_rows)
          prev_part[start_pos + bs * cm->mi_stride] = subsize;
        break;
      case PARTITION_VERT:
        prev_part[start_pos] = subsize;
        if (mi_col + bs < cm->mi_cols) prev_part[start_pos + bs] = subsize;
        break;
      default:
        assert(partition == PARTITION_SPLIT);
        update_partition_svc(cpi, subsize, mi_row, mi_col);
        update_partition_svc(cpi, subsize, mi_row + bs, mi_col);
        update_partition_svc(cpi, subsize, mi_row, mi_col + bs);
        update_partition_svc(cpi, subsize, mi_row + bs, mi_col + bs);
        break;
    }
  }
}

static void update_prev_partition_helper(VP9_COMP *cpi, BLOCK_SIZE bsize,
                                         int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  BLOCK_SIZE *prev_part = cpi->prev_partition;
  int start_pos = mi_row * cm->mi_stride + mi_col;
  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) >> 2;
  BLOCK_SIZE subsize;
  PARTITION_TYPE partition;
  const MODE_INFO *mi = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  mi = cm->mi_grid_visible[start_pos];
  partition = partition_lookup[bsl][mi->sb_type];
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    prev_part[start_pos] = bsize;
  } else {
    switch (partition) {
      case PARTITION_NONE: prev_part[start_pos] = bsize; break;
      case PARTITION_HORZ:
        prev_part[start_pos] = subsize;
        if (mi_row + bs < cm->mi_rows)
          prev_part[start_pos + bs * cm->mi_stride] = subsize;
        break;
      case PARTITION_VERT:
        prev_part[start_pos] = subsize;
        if (mi_col + bs < cm->mi_cols) prev_part[start_pos + bs] = subsize;
        break;
      default:
        assert(partition == PARTITION_SPLIT);
        update_prev_partition_helper(cpi, subsize, mi_row, mi_col);
        update_prev_partition_helper(cpi, subsize, mi_row + bs, mi_col);
        update_prev_partition_helper(cpi, subsize, mi_row, mi_col + bs);
        update_prev_partition_helper(cpi, subsize, mi_row + bs, mi_col + bs);
        break;
    }
  }
}

static void update_prev_partition(VP9_COMP *cpi, MACROBLOCK *x, int segment_id,
                                  int mi_row, int mi_col, int sb_offset) {
  update_prev_partition_helper(cpi, BLOCK_64X64, mi_row, mi_col);
  cpi->prev_segment_id[sb_offset] = segment_id;
  memcpy(&(cpi->prev_variance_low[sb_offset * 25]), x->variance_low,
         sizeof(x->variance_low));
  // Reset the counter for copy partitioning.
  cpi->copied_frame_cnt[sb_offset] = 0;
}
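
// Each superblock therefore snapshots its 25-entry variance_low map (see
// set_low_temp_var_flag() above) at prev_variance_low[sb_offset * 25], so a
// later copy_partitioning() can restore both the partition and the flags.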

static void chroma_check(VP9_COMP *cpi, MACROBLOCK *x, int bsize,
                         unsigned int y_sad, int is_key_frame,
                         int scene_change_detected) {
  int i;
  MACROBLOCKD *xd = &x->e_mbd;
  int shift = 2;

  if (is_key_frame) return;

  // For speed > 8, avoid the chroma check if y_sad is above threshold.
  if (cpi->oxcf.speed > 8) {
    if (y_sad > cpi->vbp_thresholds[1] &&
        (!cpi->noise_estimate.enabled ||
         vp9_noise_estimate_extract_level(&cpi->noise_estimate) < kMedium))
      return;
  }

  if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && scene_change_detected)
    shift = 5;

  for (i = 1; i <= 2; ++i) {
    unsigned int uv_sad = UINT_MAX;
    struct macroblock_plane *p = &x->plane[i];
    struct macroblockd_plane *pd = &xd->plane[i];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);

    if (bs != BLOCK_INVALID)
      uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf,
                                   pd->dst.stride);

    // TODO(marpan): Investigate if we should lower this threshold if
    // superblock is detected as skin.
    x->color_sensitivity[i - 1] = uv_sad > (y_sad >> shift);
  }
}

static uint64_t avg_source_sad(VP9_COMP *cpi, MACROBLOCK *x, int shift,
                               int sb_offset) {
  unsigned int tmp_sse;
  uint64_t tmp_sad;
  unsigned int tmp_variance;
  const BLOCK_SIZE bsize = BLOCK_64X64;
  uint8_t *src_y = cpi->Source->y_buffer;
  int src_ystride = cpi->Source->y_stride;
  uint8_t *last_src_y = cpi->Last_Source->y_buffer;
  int last_src_ystride = cpi->Last_Source->y_stride;
  uint64_t avg_source_sad_threshold = 10000;
  uint64_t avg_source_sad_threshold2 = 12000;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cpi->common.use_highbitdepth) return 0;
#endif
  src_y += shift;
  last_src_y += shift;
  tmp_sad =
      cpi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y, last_src_ystride);
  tmp_variance = vpx_variance64x64(src_y, src_ystride, last_src_y,
                                   last_src_ystride, &tmp_sse);
  // Note: tmp_sse - tmp_variance = ((sum * sum) >> 12)
  if (tmp_sad < avg_source_sad_threshold)
    x->content_state_sb = ((tmp_sse - tmp_variance) < 25) ? kLowSadLowSumdiff
                                                          : kLowSadHighSumdiff;
  else
    x->content_state_sb = ((tmp_sse - tmp_variance) < 25) ? kHighSadLowSumdiff
                                                          : kHighSadHighSumdiff;
  // Detect large lighting change.
  if (cpi->oxcf.content != VP9E_CONTENT_SCREEN &&
      cpi->oxcf.rc_mode == VPX_CBR && tmp_variance < (tmp_sse >> 3) &&
      (tmp_sse - tmp_variance) > 10000)
    x->content_state_sb = kLowVarHighSumdiff;
  else if (tmp_sad > (avg_source_sad_threshold << 1))
    x->content_state_sb = kVeryHighSad;

  if (cpi->content_state_sb_fd != NULL) {
    if (tmp_sad < avg_source_sad_threshold2) {
      // Cap the increment to 255.
      if (cpi->content_state_sb_fd[sb_offset] < 255)
        cpi->content_state_sb_fd[sb_offset]++;
    } else {
      cpi->content_state_sb_fd[sb_offset] = 0;
    }
  }
  if (tmp_sad == 0) x->zero_temp_sad_source = 1;
  return tmp_sad;
}
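
// Since the 64x64 block has 4096 = 2^12 pixels, vpx_variance64x64() returns
// sse - ((sum * sum) >> 12), so (tmp_sse - tmp_variance) above is exactly
// (sum * sum) >> 12: a measure of the mean frame difference. That is why it
// separates the LowSumdiff/HighSumdiff content states and, combined with low
// variance, flags large lighting changes.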

// This function chooses partitioning based on the variance between source and
// reconstructed last, where variance is computed for down-sampled inputs.
static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                               MACROBLOCK *x, int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int i, j, k, m;
  v64x64 vt;
  v16x16 *vt2 = NULL;
  int force_split[21];
  int avg_32x32;
  int max_var_32x32 = 0;
  int min_var_32x32 = INT_MAX;
  int var_32x32;
  int avg_16x16[4];
  int maxvar_16x16[4];
  int minvar_16x16[4];
  int64_t threshold_4x4avg;
  NOISE_LEVEL noise_level = kLow;
  int content_state = 0;
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int compute_minmax_variance = 1;
  unsigned int y_sad = UINT_MAX;
  BLOCK_SIZE bsize = BLOCK_64X64;
  // Ref frame used in partitioning.
  MV_REFERENCE_FRAME ref_frame_partition = LAST_FRAME;
  int pixels_wide = 64, pixels_high = 64;
  int64_t thresholds[4] = { cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
                            cpi->vbp_thresholds[2], cpi->vbp_thresholds[3] };
  int scene_change_detected =
      cpi->rc.high_source_sad ||
      (cpi->use_svc && cpi->svc.high_source_sad_superframe);
  int force_64_split = scene_change_detected ||
                       (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
                        cpi->compute_source_sad_onepass &&
                        cpi->sf.use_source_sad && !x->zero_temp_sad_source);

  // For the variance computation under SVC mode, we treat the frame as key if
  // the reference (base layer frame) is key frame (i.e., is_key_frame == 1).
  int is_key_frame =
      (frame_is_intra_only(cm) ||
       (is_one_pass_cbr_svc(cpi) &&
        cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame));
  // Always use 4x4 partition for key frame.
  const int use_4x4_partition = frame_is_intra_only(cm);
  const int low_res = (cm->width <= 352 && cm->height <= 288);
  int variance4x4downsample[16];
  int segment_id;
  int sb_offset = (cm->mi_stride >> 3) * (mi_row >> 3) + (mi_col >> 3);

  // For SVC: check if LAST frame is NULL or if the resolution of LAST is
  // different than the current frame resolution, and if so, treat this frame
  // as a key frame, for the purpose of the superblock partitioning.
  // LAST == NULL can happen in some cases where enhancement spatial layers are
  // enabled dynamically in the stream and the only reference is the spatial
  // reference (GOLDEN).
  if (cpi->use_svc) {
    const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, LAST_FRAME);
    if (ref == NULL || ref->y_crop_height != cm->height ||
        ref->y_crop_width != cm->width)
      is_key_frame = 1;
  }

  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
  set_segment_index(cpi, x, mi_row, mi_col, BLOCK_64X64, 0);
  segment_id = xd->mi[0]->segment_id;

  if (cpi->oxcf.speed >= 8 || (cpi->use_svc && cpi->svc.non_reference_frame))
    compute_minmax_variance = 0;

  memset(x->variance_low, 0, sizeof(x->variance_low));

  if (cpi->sf.use_source_sad && !is_key_frame) {
    int sb_offset2 = ((cm->mi_cols + 7) >> 3) * (mi_row >> 3) + (mi_col >> 3);
    content_state = x->content_state_sb;
    x->skip_low_source_sad = (content_state == kLowSadLowSumdiff ||
                              content_state == kLowSadHighSumdiff)
                                 ? 1
                                 : 0;
    x->lowvar_highsumdiff = (content_state == kLowVarHighSumdiff) ? 1 : 0;
    if (cpi->content_state_sb_fd != NULL)
      x->last_sb_high_content = cpi->content_state_sb_fd[sb_offset2];

    // For SVC on top spatial layer: use/scale the partition from
    // the lower spatial resolution if svc_use_lowres_part is enabled.
    if (cpi->sf.svc_use_lowres_part &&
        cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1 &&
        cpi->svc.prev_partition_svc != NULL && content_state != kVeryHighSad) {
      if (!scale_partitioning_svc(cpi, x, xd, BLOCK_64X64, mi_row >> 1,
                                  mi_col >> 1, mi_row, mi_col)) {
        if (cpi->sf.copy_partition_flag) {
          update_prev_partition(cpi, x, segment_id, mi_row, mi_col, sb_offset);
        }
        chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
        return 0;
      }
    }
    // If source_sad is low copy the partition without computing the y_sad.
    if (x->skip_low_source_sad && cpi->sf.copy_partition_flag &&
        !force_64_split &&
        copy_partitioning(cpi, x, xd, mi_row, mi_col, segment_id, sb_offset)) {
      x->sb_use_mv_part = 1;
      if (cpi->sf.svc_use_lowres_part &&
          cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
        update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col);
      chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
      return 0;
    }
  }

  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
      cyclic_refresh_segment_id_boosted(segment_id)) {
    int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
    set_vbp_thresholds(cpi, thresholds, q, content_state);
  } else {
    set_vbp_thresholds(cpi, thresholds, cm->base_qindex, content_state);
  }

  // Decrease 32x32 split threshold for screen on base layer, for scene
  // change/high motion frames.
  if (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
      cpi->svc.spatial_layer_id == 0 && force_64_split)
    thresholds[1] = 3 * thresholds[1] >> 2;

  // For non keyframes, disable 4x4 average for low resolution when speed >= 8.
  threshold_4x4avg = (cpi->oxcf.speed < 8) ? thresholds[1] << 1 : INT64_MAX;

  if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
  // 5-20 for the 16x16 blocks.
  force_split[0] = force_64_split;
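  // So force_split[0] covers the 64x64, force_split[1 + i] the i-th 32x32
  // (raster order), and force_split[5 + 4 * i + j] the j-th 16x16 inside the
  // i-th 32x32: 1 + 4 + 16 = 21 entries in total.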

  if (!is_key_frame) {
    // In the case of spatial/temporal scalable coding, the assumption here is
    // that the temporal reference frame will always be of type LAST_FRAME.
    // TODO(marpan): If that assumption is broken, we need to revisit this code.
    MODE_INFO *mi = xd->mi[0];
    YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);

    const YV12_BUFFER_CONFIG *yv12_g = NULL;
    unsigned int y_sad_g, y_sad_thr, y_sad_last;
    bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 +
            (mi_row + 4 < cm->mi_rows);

    assert(yv12 != NULL);

    if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id) ||
        cpi->svc.use_gf_temporal_ref_current_layer) {
      // For now, GOLDEN will not be used for non-zero spatial layers, since
      // it may not be a temporal reference.
      yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    }

    // Only compute y_sad_g (sad for golden reference) for speed < 8.
    if (cpi->oxcf.speed < 8 && yv12_g && yv12_g != yv12 &&
        (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(
          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
          xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }

    if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
        cpi->rc.is_src_frame_alt_ref) {
      yv12 = get_ref_frame_buffer(cpi, ALTREF_FRAME);
      vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                           &cm->frame_refs[ALTREF_FRAME - 1].sf);
      mi->ref_frame[0] = ALTREF_FRAME;
      y_sad_g = UINT_MAX;
    } else {
      vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                           &cm->frame_refs[LAST_FRAME - 1].sf);
      mi->ref_frame[0] = LAST_FRAME;
    }
    mi->ref_frame[1] = NONE;
    mi->sb_type = BLOCK_64X64;
    mi->mv[0].as_int = 0;
    mi->interp_filter = BILINEAR;

    if (cpi->oxcf.speed >= 8 && !low_res &&
        x->content_state_sb != kVeryHighSad) {
      y_sad = cpi->fn_ptr[bsize].sdf(
          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
          xd->plane[0].pre[0].stride);
    } else {
      const MV dummy_mv = { 0, 0 };
      y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col,
                                            &dummy_mv);
      x->sb_use_mv_part = 1;
      x->sb_mvcol_part = mi->mv[0].as_mv.col;
      x->sb_mvrow_part = mi->mv[0].as_mv.row;
      if (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
          cpi->svc.spatial_layer_id == cpi->svc.first_spatial_layer_to_encode &&
          cpi->svc.high_num_blocks_with_motion && !x->zero_temp_sad_source &&
          cm->width > 640 && cm->height > 480) {
        // Disable split below 16x16 block size when scroll motion (horz or
        // vert) is detected.
        // TODO(marpan/jianj): Improve this condition: issue is that search
        // range is hard-coded/limited in vp9_int_pro_motion_estimation() so
        // scroll motion may not be detected here.
        if (((abs(x->sb_mvrow_part) >= 48 && abs(x->sb_mvcol_part) <= 8) ||
             (abs(x->sb_mvcol_part) >= 48 && abs(x->sb_mvrow_part) <= 8)) &&
            y_sad < 100000) {
          compute_minmax_variance = 0;
          thresholds[2] = INT64_MAX;
        }
      }
    }

    y_sad_last = y_sad;
    // Pick the ref frame for partitioning; bias the last frame when y_sad_g
    // and y_sad are close, if short_circuit_low_temp_var is on.
    y_sad_thr = cpi->sf.short_circuit_low_temp_var ? (y_sad * 7) >> 3 : y_sad;
    if (y_sad_g < y_sad_thr) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mi->ref_frame[0] = GOLDEN_FRAME;
      mi->mv[0].as_int = 0;
      y_sad = y_sad_g;
      ref_frame_partition = GOLDEN_FRAME;
    } else {
      x->pred_mv[LAST_FRAME] = mi->mv[0].as_mv;
      ref_frame_partition = LAST_FRAME;
    }

    set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);

    if (cpi->use_skin_detection)
      x->sb_is_skin =
          skin_sb_split(cpi, x, low_res, mi_row, mi_col, force_split);

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;

    // If the y_sad is very small, take 64x64 as partition and exit.
    // Don't check on boosted segment for now, as 64x64 is suppressed there.
    if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) {
      const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
      const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
      if (mi_col + block_width / 2 < cm->mi_cols &&
          mi_row + block_height / 2 < cm->mi_rows) {
        set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64);
        x->variance_low[0] = 1;
        chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
        if (cpi->sf.svc_use_lowres_part &&
            cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
          update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col);
        if (cpi->sf.copy_partition_flag) {
          update_prev_partition(cpi, x, segment_id, mi_row, mi_col, sb_offset);
        }
        return 0;
      }
    }

    // If the y_sad is small enough, copy the partition of the superblock from
    // the last frame to the current frame, but only if the last frame is not
    // a keyframe. Stop the copy every cpi->max_copied_frame frames to refresh
    // the partition.
    // TODO(jianj) : tune the threshold.
    if (cpi->sf.copy_partition_flag && y_sad_last < cpi->vbp_threshold_copy &&
        copy_partitioning(cpi, x, xd, mi_row, mi_col, segment_id, sb_offset)) {
      chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
      if (cpi->sf.svc_use_lowres_part &&
          cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
        update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col);
      return 0;
    }
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break;
        case 12: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); break;
        case 8:
        default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  if (low_res && threshold_4x4avg < INT64_MAX)
    CHECK_MEM_ERROR(cm, vt2, vpx_calloc(16, sizeof(*vt2)));
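  // vt2 holds one v16x16 per 16x16 block of the superblock. It is only needed
  // on the low-resolution inter path, where a high-variance 16x16 block is
  // re-measured with 4x4 down-sampling without overwriting the 8x8-based
  // tree in vt.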
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    const int i2 = i << 2;
    force_split[i + 1] = 0;
    avg_16x16[i] = 0;
    maxvar_16x16[i] = 0;
    minvar_16x16[i] = INT_MAX;
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      const int split_index = 5 + i2 + j;
      v16x16 *vst = &vt.split[i].split[j];
      force_split[split_index] = 0;
      variance4x4downsample[i2 + j] = 0;
      if (!is_key_frame) {
        fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
#if CONFIG_VP9_HIGHBITDEPTH
                             xd->cur_buf->flags,
#endif
                             pixels_wide, pixels_high, is_key_frame);
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
        get_variance(&vt.split[i].split[j].part_variances.none);
        avg_16x16[i] += vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance < minvar_16x16[i])
          minvar_16x16[i] = vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance > maxvar_16x16[i])
          maxvar_16x16[i] = vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance > thresholds[2]) {
          // 16X16 variance is above threshold for split, so force split to 8x8
          // for this 16x16 block (this also forces splits for upper levels).
          force_split[split_index] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        } else if (compute_minmax_variance &&
                   vt.split[i].split[j].part_variances.none.variance >
                       thresholds[1] &&
                   !cyclic_refresh_segment_id_boosted(segment_id)) {
          // We have some nominal amount of 16x16 variance (based on average),
          // compute the minmax over the 8x8 sub-blocks, and if above threshold,
          // force split to 8x8 block for this 16x16 block.
          int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                                          xd->cur_buf->flags,
#endif
                                          pixels_wide, pixels_high);
          int thresh_minmax = (int)cpi->vbp_threshold_minmax;
          if (x->content_state_sb == kVeryHighSad)
            thresh_minmax = thresh_minmax << 1;
          if (minmax > thresh_minmax) {
            force_split[split_index] = 1;
            force_split[i + 1] = 1;
            force_split[0] = 1;
          }
        }
      }
      if (is_key_frame ||
          (low_res && vt.split[i].split[j].part_variances.none.variance >
                          threshold_4x4avg)) {
        force_split[split_index] = 0;
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
          v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
          fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
#if CONFIG_VP9_HIGHBITDEPTH
                               xd->cur_buf->flags,
#endif
                               pixels_wide, pixels_high, is_key_frame);
        }
      }
    }
  }

1665 if (cpi->noise_estimate.enabled)
1666 noise_level = vp9_noise_estimate_extract_level(&cpi->noise_estimate);
1667 // Fill the rest of the variance tree by summing split partition values.
1669 for (i = 0; i < 4; i++) {
1670 const int i2 = i << 2;
1671 for (j = 0; j < 4; j++) {
1672 if (variance4x4downsample[i2 + j] == 1) {
1673 v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : &vt.split[i].split[j];
1674 for (m = 0; m < 4; m++) fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
1675 fill_variance_tree(vtemp, BLOCK_16X16);
1676 // If variance of this 16x16 block is above the threshold, force block
1677 // to split. This also forces a split on the upper levels.
1678 get_variance(&vtemp->part_variances.none);
1679 if (vtemp->part_variances.none.variance > thresholds[2]) {
1680 force_split[5 + i2 + j] = 1;
1681 force_split[i + 1] = 1;
1686 fill_variance_tree(&vt.split[i], BLOCK_32X32);
1687 // If variance of this 32x32 block is above the threshold, or if its above
1688 // (some threshold of) the average variance over the sub-16x16 blocks, then
1689 // force this block to split. This also forces a split on the upper
1691 if (!force_split[i + 1]) {
1692 get_variance(&vt.split[i].part_variances.none);
1693 var_32x32 = vt.split[i].part_variances.none.variance;
1694 max_var_32x32 = VPXMAX(var_32x32, max_var_32x32);
1695 min_var_32x32 = VPXMIN(var_32x32, min_var_32x32);
      if (vt.split[i].part_variances.none.variance > thresholds[1] ||
          (!is_key_frame &&
           vt.split[i].part_variances.none.variance > (thresholds[1] >> 1) &&
           vt.split[i].part_variances.none.variance > (avg_16x16[i] >> 1))) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
1702 } else if (!is_key_frame && noise_level < kLow && cm->height <= 360 &&
1703 (maxvar_16x16[i] - minvar_16x16[i]) > (thresholds[1] >> 1) &&
1704 maxvar_16x16[i] > thresholds[1]) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      }
      avg_32x32 += var_32x32;
    }
  }
1711 if (!force_split[0]) {
1712 fill_variance_tree(&vt, BLOCK_64X64);
1713 get_variance(&vt.part_variances.none);
1714 // If variance of this 64x64 block is above (some threshold of) the average
1715 // variance over the sub-32x32 blocks, then force this block to split.
1716 // Only checking this for noise level >= medium for now.
    if (!is_key_frame && noise_level >= kMedium &&
        vt.part_variances.none.variance > (9 * avg_32x32) >> 5)
      force_split[0] = 1;
    // Else if the maximum 32x32 variance minus the minimum 32x32 variance in
    // a 64x64 block is greater than the threshold and the maximum 32x32
    // variance is above a minimum threshold, then force the split of a 64x64
    // block. Only check this for low noise.
    else if (!is_key_frame && noise_level < kMedium &&
             (max_var_32x32 - min_var_32x32) > 3 * (thresholds[0] >> 3) &&
             max_var_32x32 > thresholds[0] >> 1)
      force_split[0] = 1;
  }
  // Now go through the entire structure, splitting every block size until
  // we reach one whose variance is lower than our threshold.
1732 if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
1733 !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
1734 thresholds[0], BLOCK_16X16, force_split[0])) {
1735 for (i = 0; i < 4; ++i) {
1736 const int x32_idx = ((i & 1) << 2);
1737 const int y32_idx = ((i >> 1) << 2);
1738 const int i2 = i << 2;
1739 if (!set_vt_partitioning(cpi, x, xd, &vt.split[i], BLOCK_32X32,
1740 (mi_row + y32_idx), (mi_col + x32_idx),
1741 thresholds[1], BLOCK_16X16,
1742 force_split[i + 1])) {
1743 for (j = 0; j < 4; ++j) {
1744 const int x16_idx = ((j & 1) << 1);
1745 const int y16_idx = ((j >> 1) << 1);
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use
          // vt2 in set_vt_partitioning(); otherwise use vt.
          v16x16 *vtemp = (!is_key_frame && variance4x4downsample[i2 + j] == 1)
                              ? &vt2[i2 + j]
                              : &vt.split[i].split[j];
1752 if (!set_vt_partitioning(
1753 cpi, x, xd, vtemp, BLOCK_16X16, mi_row + y32_idx + y16_idx,
1754 mi_col + x32_idx + x16_idx, thresholds[2], cpi->vbp_bsize_min,
1755 force_split[5 + i2 + j])) {
1756 for (k = 0; k < 4; ++k) {
1757 const int x8_idx = (k & 1);
1758 const int y8_idx = (k >> 1);
1759 if (use_4x4_partition) {
              if (!set_vt_partitioning(cpi, x, xd, &vtemp->split[k], BLOCK_8X8,
                                       mi_row + y32_idx + y16_idx + y8_idx,
                                       mi_col + x32_idx + x16_idx + x8_idx,
                                       thresholds[3], BLOCK_8X8, 0)) {
                set_block_size(
                    cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
                    (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_4X4);
              }
            } else {
              set_block_size(
                  cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
                  (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_8X8);
            }
1781 if (!frame_is_intra_only(cm) && cpi->sf.copy_partition_flag) {
    update_prev_partition(cpi, x, segment_id, mi_row, mi_col, sb_offset);
  }
1785 if (!frame_is_intra_only(cm) && cpi->sf.svc_use_lowres_part &&
1786 cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
1787 update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col);
1789 if (cpi->sf.short_circuit_low_temp_var) {
1790 set_low_temp_var_flag(cpi, x, xd, &vt, thresholds, ref_frame_partition,
1794 chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
  if (vt2) vpx_free(vt2);
  return 0;
}
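// Illustrative sketch (not part of libvpx; for exposition only): the
// variance values compared against thresholds[] above are derived from
// accumulated sum / sum-of-squares pairs, i.e. var = E[x^2] - E[x]^2 in
// integer arithmetic. A minimal version, assuming n = 1 << log2_count
// samples:
static INLINE unsigned int example_block_variance(int64_t sum_square_error,
                                                  int64_t sum_error,
                                                  int log2_count) {
  return (unsigned int)((sum_square_error -
                         ((sum_error * sum_error) >> log2_count)) >>
                        log2_count);
}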
1799 #if !CONFIG_REALTIME_ONLY
1800 static void update_state(VP9_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
1801 int mi_row, int mi_col, BLOCK_SIZE bsize,
1802 int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
1805 RD_COUNTS *const rdc = &td->rd_counts;
1806 MACROBLOCK *const x = &td->mb;
1807 MACROBLOCKD *const xd = &x->e_mbd;
1808 struct macroblock_plane *const p = x->plane;
1809 struct macroblockd_plane *const pd = xd->plane;
1810 MODE_INFO *mi = &ctx->mic;
1811 MODE_INFO *const xdmi = xd->mi[0];
1812 MODE_INFO *mi_addr = xd->mi[0];
1813 const struct segmentation *const seg = &cm->seg;
1814 const int bw = num_8x8_blocks_wide_lookup[mi->sb_type];
1815 const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
1816 const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
1817 const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
1818 MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;
  const int mis = cm->mi_stride;
1822 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
1823 const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->sb_type == bsize);

  *mi_addr = *mi;
  *x->mbmi_ext = ctx->mbmi_ext;
  // If segmentation is in use:
  if (seg->enabled) {
    // For in-frame complexity AQ, copy the segment id from the segment map.
1834 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
1835 const uint8_t *const map =
1836 seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
      mi_addr->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else, for cyclic refresh mode, update the segment map, set the segment
    // id, and then update the quantizer.
1841 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
1842 vp9_cyclic_refresh_update_segment(cpi, xd->mi[0], mi_row, mi_col, bsize,
                                        ctx->rate, ctx->dist, x->skip, p);
    }
  }
1847 max_plane = is_inter_block(xdmi) ? MAX_MB_PLANE : 1;
1848 for (i = 0; i < max_plane; ++i) {
1849 p[i].coeff = ctx->coeff_pbuf[i][1];
1850 p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
1851 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];
  }
1855 for (i = max_plane; i < MAX_MB_PLANE; ++i) {
1856 p[i].coeff = ctx->coeff_pbuf[i][2];
1857 p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
1858 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  }
  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it.
1864 for (y = 0; y < mi_height; y++)
1865 for (x_idx = 0; x_idx < mi_width; x_idx++)
1866 if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
1867 (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi[x_idx + y * mis] = mi_addr;
      }
1871 if (cpi->oxcf.aq_mode != NO_AQ) vp9_init_plane_quantizers(cpi, x);
1873 if (is_inter_block(xdmi) && xdmi->sb_type < BLOCK_8X8) {
1874 xdmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    xdmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }
1878 x->skip = ctx->skip;
1879 memcpy(x->zcoeff_blk[xdmi->tx_size], ctx->zcoeff_blk,
1880 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
1882 if (!output_enabled) return;
1884 #if CONFIG_INTERNAL_STATS
1885 if (frame_is_intra_only(cm)) {
1886 static const int kf_mode_index[] = {
1887 THR_DC /*DC_PRED*/, THR_V_PRED /*V_PRED*/,
1888 THR_H_PRED /*H_PRED*/, THR_D45_PRED /*D45_PRED*/,
1889 THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
1890 THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED /*D63_PRED*/,   THR_TM /*TM_PRED*/,
    };
    ++cpi->mode_chosen_counts[kf_mode_index[xdmi->mode]];
  } else {
    // Note how often each mode is chosen as best.
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif
1899 if (!frame_is_intra_only(cm)) {
1900 if (is_inter_block(xdmi)) {
1901 vp9_update_mv_count(td);
1903 if (cm->interp_filter == SWITCHABLE) {
1904 const int ctx = get_pred_context_switchable_interp(xd);
        ++td->counts->switchable_interp[ctx][xdmi->interp_filter];
      }
    }
1909 rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
1910 rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
1911 rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
1913 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      rdc->filter_diff[i] += ctx->best_filter_diff[i];
  }
1917 for (h = 0; h < y_mis; ++h) {
1918 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
1919 for (w = 0; w < x_mis; ++w) {
1920 MV_REF *const mv = frame_mv + w;
1921 mv->ref_frame[0] = mi->ref_frame[0];
1922 mv->ref_frame[1] = mi->ref_frame[1];
1923 mv->mv[0].as_int = mi->mv[0].as_int;
      mv->mv[1].as_int = mi->mv[1].as_int;
    }
  }
}
1928 #endif // !CONFIG_REALTIME_ONLY
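// Illustrative sketch (assumption; this helper does not exist in libvpx):
// the cm->cur_frame->mvs buffer written above is a flat row-major grid with
// one MV_REF per 8x8 mode-info unit, so a (mi_row, mi_col) position maps to
// a single index with stride cm->mi_cols.
static INLINE int example_mv_ref_index(int mi_row, int mi_col, int mi_cols) {
  return mi_row * mi_cols + mi_col;  // row-major, stride == mi_cols
}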
1930 void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
1931 int mi_row, int mi_col) {
1932 uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
1933 const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
  int i;

  // Set current frame pointer.
1937 x->e_mbd.cur_buf = src;
1939 for (i = 0; i < MAX_MB_PLANE; i++)
1940 setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
1941 NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}
1945 static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
1946 RD_COST *rd_cost, BLOCK_SIZE bsize) {
1947 MACROBLOCKD *const xd = &x->e_mbd;
1948 MODE_INFO *const mi = xd->mi[0];
1949 INTERP_FILTER filter_ref;
1951 filter_ref = get_pred_context_switchable_interp(xd);
1952 if (filter_ref == SWITCHABLE_FILTERS) filter_ref = EIGHTTAP;
  mi->sb_type = bsize;
  mi->mode = ZEROMV;
  mi->tx_size =
      VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[tx_mode]);
1959 mi->uv_mode = DC_PRED;
1960 mi->ref_frame[0] = LAST_FRAME;
1961 mi->ref_frame[1] = NONE;
1962 mi->mv[0].as_int = 0;
1963 mi->interp_filter = filter_ref;
1965 xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
  x->skip = 1;

  vp9_rd_cost_init(rd_cost);
}
1971 #if !CONFIG_REALTIME_ONLY
1972 static void set_segment_rdmult(VP9_COMP *const cpi, MACROBLOCK *const x,
1973 int mi_row, int mi_col, BLOCK_SIZE bsize,
                               AQ_MODE aq_mode) {
  VP9_COMMON *const cm = &cpi->common;
1976 const VP9EncoderConfig *const oxcf = &cpi->oxcf;
1977 const uint8_t *const map =
1978 cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
1980 vp9_init_plane_quantizers(cpi, x);
1981 vpx_clear_system_state();
1983 if (aq_mode == NO_AQ || aq_mode == PSNR_AQ) {
1984 if (cpi->sf.enable_tpl_model) x->rdmult = x->cb_rdmult;
1985 } else if (aq_mode == PERCEPTUAL_AQ) {
1986 x->rdmult = x->cb_rdmult;
1987 } else if (aq_mode == CYCLIC_REFRESH_AQ) {
1988 // If segment is boosted, use rdmult for that segment.
1989 if (cyclic_refresh_segment_id_boosted(
1990 get_segment_id(cm, map, bsize, mi_row, mi_col)))
1991 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
    else
      x->rdmult = vp9_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
  }
1996 if (oxcf->tuning == VP8_TUNE_SSIM) {
    set_ssim_rdmult(cpi, x, bsize, mi_row, mi_col, &x->rdmult);
  }
}
2001 static void rd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data,
2002 MACROBLOCK *const x, int mi_row, int mi_col,
2003 RD_COST *rd_cost, BLOCK_SIZE bsize,
2004 PICK_MODE_CONTEXT *ctx, int rate_in_best_rd,
2005 int64_t dist_in_best_rd) {
2006 VP9_COMMON *const cm = &cpi->common;
2007 TileInfo *const tile_info = &tile_data->tile_info;
2008 MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi;
  struct macroblock_plane *const p = x->plane;
2011 struct macroblockd_plane *const pd = xd->plane;
  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  int i, orig_rdmult;
  int64_t best_rd = INT64_MAX;
2016 vpx_clear_system_state();
2018 // Use the lower precision, but faster, 32x32 fdct for mode selection.
2019 x->use_lp32x32fdct = 1;
2021 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  mi = xd->mi[0];
  mi->sb_type = bsize;
2025 for (i = 0; i < MAX_MB_PLANE; ++i) {
2026 p[i].coeff = ctx->coeff_pbuf[i][0];
2027 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
2028 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
    p[i].eobs = ctx->eobs_pbuf[i][0];
  }
  ctx->is_coded = 0;
  ctx->skippable = 1;
  ctx->pred_pixel_ready = 0;

  // Set to zero to make sure we do not use the previous encoded frame stats.
  mi->skip = 0;
2039 #if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    x->source_variance = vp9_high_get_sby_perpixel_variance(
        cpi, &x->plane[0].src, bsize, xd->bd);
  } else {
    x->source_variance =
        vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
  }
#else
  x->source_variance =
      vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
2050 #endif // CONFIG_VP9_HIGHBITDEPTH
2052 // Save rdmult before it might be changed, so it can be restored later.
2053 orig_rdmult = x->rdmult;
2055 if ((cpi->sf.tx_domain_thresh > 0.0) || (cpi->sf.quant_opt_thresh > 0.0)) {
2056 double logvar = vp9_log_block_var(cpi, x, bsize);
    // Check block complexity as part of the decision on using pixel- or
    // transform-domain distortion in RD tests.
2059 x->block_tx_domain = cpi->sf.allow_txfm_domain_distortion &&
2060 (logvar >= cpi->sf.tx_domain_thresh);
    // Check block complexity as part of the decision on using quantized
    // coefficient optimisation inside the RD loop.
2064 x->block_qcoeff_opt =
2065 cpi->sf.allow_quant_coeff_opt && (logvar <= cpi->sf.quant_opt_thresh);
  } else {
    x->block_tx_domain = cpi->sf.allow_txfm_domain_distortion;
    x->block_qcoeff_opt = cpi->sf.allow_quant_coeff_opt;
  }
2071 set_segment_index(cpi, x, mi_row, mi_col, bsize, 0);
2072 set_segment_rdmult(cpi, x, mi_row, mi_col, bsize, aq_mode);
2073 if (rate_in_best_rd < INT_MAX && dist_in_best_rd < INT64_MAX) {
    best_rd = vp9_calculate_rd_cost(x->rdmult, x->rddiv, rate_in_best_rd,
                                    dist_in_best_rd);
  }
2078 // Find best coding mode & reconstruct the MB so it is available
2079 // as a predictor for MBs that follow in the SB
2080 if (frame_is_intra_only(cm)) {
2081 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
  } else {
    if (bsize >= BLOCK_8X8) {
      if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
        vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
                                           ctx, best_rd);
      else
        vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                                  bsize, ctx, best_rd);
    } else {
      vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                                    bsize, ctx, best_rd);
    }
  }
  // Examine the resulting rate and, for AQ mode 2, make a segment choice.
2097 if ((rd_cost->rate != INT_MAX) && (aq_mode == COMPLEXITY_AQ) &&
2098 (bsize >= BLOCK_16X16) &&
2099 (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
2100 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
    vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
  }
2104 // TODO(jingning) The rate-distortion optimization flow needs to be
2105 // refactored to provide proper exit/return handle.
2106 if (rd_cost->rate == INT_MAX || rd_cost->dist == INT64_MAX)
2107 rd_cost->rdcost = INT64_MAX;
  else
    rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
2111 x->rdmult = orig_rdmult;
2113 ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}
2116 #endif // !CONFIG_REALTIME_ONLY
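// Illustrative sketch (a simplified stand-in, not the exact RDCOST macro or
// vp9_calculate_rd_cost()): the rdcost values combined above follow the
// Lagrangian form lambda * rate + distortion, with x->rdmult playing the
// role of a fixed-point lambda.
static INLINE int64_t example_lagrangian_cost(int rdmult, int rate,
                                              int64_t dist) {
  // The ">> 8" normalization here is an assumption for illustration, not the
  // encoder's exact shift.
  return (((int64_t)rate * rdmult) >> 8) + dist;
}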
2118 static void update_stats(VP9_COMMON *cm, ThreadData *td) {
2119 const MACROBLOCK *x = &td->mb;
2120 const MACROBLOCKD *const xd = &x->e_mbd;
2121 const MODE_INFO *const mi = xd->mi[0];
2122 const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2123 const BLOCK_SIZE bsize = mi->sb_type;
2125 if (!frame_is_intra_only(cm)) {
2126 FRAME_COUNTS *const counts = td->counts;
2127 const int inter_block = is_inter_block(mi);
2128 const int seg_ref_active =
2129 segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_REF_FRAME);
2130 if (!seg_ref_active) {
2131 counts->intra_inter[get_intra_inter_context(xd)][inter_block]++;
      // If the segment reference feature is enabled, we have only a single
      // reference frame allowed for the segment, so exclude it from
      // the reference frame counts used to work out probabilities.
      if (inter_block) {
        const MV_REFERENCE_FRAME ref0 = mi->ref_frame[0];
2137 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2138 counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
2139 [has_second_ref(mi)]++;
2141 if (has_second_ref(mi)) {
2142 const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
2143 const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
2144 const int bit = mi->ref_frame[!idx] == cm->comp_var_ref[1];
2145 counts->comp_ref[ctx][bit]++;
        } else {
          counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
2148 [ref0 != LAST_FRAME]++;
2149 if (ref0 != LAST_FRAME)
2150 counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
2151 [ref0 != GOLDEN_FRAME]++;
        }
      }
    }

    if (inter_block &&
        !segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) {
2157 const int mode_ctx = mbmi_ext->mode_context[mi->ref_frame[0]];
2158 if (bsize >= BLOCK_8X8) {
2159 const PREDICTION_MODE mode = mi->mode;
2160 ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
      } else {
        const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
2163 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
        int idx, idy;
        for (idy = 0; idy < 2; idy += num_4x4_h) {
2166 for (idx = 0; idx < 2; idx += num_4x4_w) {
2167 const int j = idy * 2 + idx;
2168 const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
            ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
          }
        }
      }
    }
  }
}
2177 #if !CONFIG_REALTIME_ONLY
2178 static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
2179 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
2180 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
2181 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                            BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
2186 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
2187 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2188 int mi_height = num_8x8_blocks_high_lookup[bsize];
2189 for (p = 0; p < MAX_MB_PLANE; p++) {
2190 memcpy(xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
2191 a + num_4x4_blocks_wide * p,
2192 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
2193 xd->plane[p].subsampling_x);
2194 memcpy(xd->left_context[p] +
2195 ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
2196 l + num_4x4_blocks_high * p,
2197 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
2198 xd->plane[p].subsampling_y);
2200 memcpy(xd->above_seg_context + mi_col, sa,
2201 sizeof(*xd->above_seg_context) * mi_width);
2202 memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
         sizeof(xd->left_seg_context[0]) * mi_height);
}
2206 static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
2207 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
2208 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
2209 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                         BLOCK_SIZE bsize) {
  const MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
2214 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
2215 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2216 int mi_height = num_8x8_blocks_high_lookup[bsize];
  // Buffer the above/left context information of the block being searched.
2219 for (p = 0; p < MAX_MB_PLANE; ++p) {
2220 memcpy(a + num_4x4_blocks_wide * p,
2221 xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
2222 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
2223 xd->plane[p].subsampling_x);
2224 memcpy(l + num_4x4_blocks_high * p,
2225 xd->left_context[p] +
2226 ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
2227 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
2228 xd->plane[p].subsampling_y);
2230 memcpy(sa, xd->above_seg_context + mi_col,
2231 sizeof(*xd->above_seg_context) * mi_width);
2232 memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
         sizeof(xd->left_seg_context[0]) * mi_height);
}
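// Illustrative sketch (assumption; exposition only): the per-plane memcpy
// size in save_context()/restore_context() above is the block's width in
// 4x4 units scaled down by the plane's chroma subsampling, which is what the
// ">> subsampling_x" on the byte count implements.
static INLINE int example_ctx_bytes(int num_4x4_blocks_wide,
                                    int subsampling_x) {
  return (int)(sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> subsampling_x;
}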
2236 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, ThreadData *td,
2237 TOKENEXTRA **tp, int mi_row, int mi_col,
2238 int output_enabled, BLOCK_SIZE bsize,
2239 PICK_MODE_CONTEXT *ctx) {
2240 MACROBLOCK *const x = &td->mb;
2241 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
2243 if (cpi->sf.enable_tpl_model &&
2244 (cpi->oxcf.aq_mode == NO_AQ || cpi->oxcf.aq_mode == PERCEPTUAL_AQ)) {
2245 const VP9EncoderConfig *const oxcf = &cpi->oxcf;
2246 x->rdmult = x->cb_rdmult;
2247 if (oxcf->tuning == VP8_TUNE_SSIM) {
      set_ssim_rdmult(cpi, x, bsize, mi_row, mi_col, &x->rdmult);
    }
  }
2252 update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
2253 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
2255 if (output_enabled) {
2256 update_stats(&cpi->common, td);
    (*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}
2263 static void encode_sb(VP9_COMP *cpi, ThreadData *td, const TileInfo *const tile,
2264 TOKENEXTRA **tp, int mi_row, int mi_col,
2265 int output_enabled, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
2266 VP9_COMMON *const cm = &cpi->common;
2267 MACROBLOCK *const x = &td->mb;
2268 MACROBLOCKD *const xd = &x->e_mbd;
2270 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
2273 BLOCK_SIZE subsize = bsize;
2275 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2277 if (bsize >= BLOCK_8X8) {
2278 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
2279 subsize = get_subsize(bsize, pc_tree->partitioning);
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }
2285 partition = partition_lookup[bsl][subsize];
2286 if (output_enabled && bsize != BLOCK_4X4)
2287 td->counts->partition[ctx][partition]++;
2289 switch (partition) {
2290 case PARTITION_NONE:
      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->none);
      break;
2294 case PARTITION_VERT:
2295 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
2296 &pc_tree->vertical[0]);
2297 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
2298 encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
                 subsize, &pc_tree->vertical[1]);
      }
      break;
2302 case PARTITION_HORZ:
2303 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
2304 &pc_tree->horizontal[0]);
2305 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
2306 encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
                 subsize, &pc_tree->horizontal[1]);
      }
      break;
    default:
      assert(partition == PARTITION_SPLIT);
2312 if (bsize == BLOCK_8X8) {
2313 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
2314 pc_tree->leaf_split[0]);
      } else {
        encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  pc_tree->split[0]);
2318 encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
2319 subsize, pc_tree->split[1]);
2320 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
2321 subsize, pc_tree->split[2]);
2322 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                  subsize, pc_tree->split[3]);
      }
      break;
  }
2328 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
2331 #endif // !CONFIG_REALTIME_ONLY
2333 // Check to see if the given partition size is allowed for a specified number
2334 // of 8x8 block rows and columns remaining in the image.
// If not, then return the largest allowed partition size.
2336 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
2337 int cols_left, int *bh, int *bw) {
2338 if (rows_left <= 0 || cols_left <= 0) {
    return VPXMIN(bsize, BLOCK_8X8);
  }
2341 for (; bsize > 0; bsize -= 3) {
2342 *bh = num_8x8_blocks_high_lookup[bsize];
2343 *bw = num_8x8_blocks_wide_lookup[bsize];
    if ((*bh <= rows_left) && (*bw <= cols_left)) {
      break;
    }
  }
  return bsize;
}
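// Usage sketch (illustrative only; this wrapper is not called anywhere in
// the encoder): clamp a requested partition size to the largest size that
// fits in the remaining 8x8 rows/columns at a frame border.
static INLINE BLOCK_SIZE example_border_partition(BLOCK_SIZE requested,
                                                  int rows_left,
                                                  int cols_left) {
  int bh, bw;
  return find_partition_size(requested, rows_left, cols_left, &bh, &bw);
}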
2352 static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, int bh_in,
2353 int bw_in, int row8x8_remaining,
2354 int col8x8_remaining, BLOCK_SIZE bsize,
                                         MODE_INFO **mi_8x8) {
  int bh = bh_in;
  int r, c;
  for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
    int bw = bw_in;
    for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
2361 const int index = r * mis + c;
2362 mi_8x8[index] = mi + index;
2363 mi_8x8[index]->sb_type = find_partition_size(
          bsize, row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
    }
  }
}
2369 // This function attempts to set all mode info entries in a given SB64
2370 // to the same block partition size.
2371 // However, at the bottom and right borders of the image the requested size
// may not be allowed, in which case this code attempts to choose the largest
2373 // allowable partition.
2374 static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
2375 MODE_INFO **mi_8x8, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
2378 const int mis = cm->mi_stride;
2379 const int row8x8_remaining = tile->mi_row_end - mi_row;
2380 const int col8x8_remaining = tile->mi_col_end - mi_col;
2381 int block_row, block_col;
2382 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
2383 int bh = num_8x8_blocks_high_lookup[bsize];
2384 int bw = num_8x8_blocks_wide_lookup[bsize];
2386 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
2388 // Apply the requested partition size to the SB64 if it is all "in image"
2389 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
2390 (row8x8_remaining >= MI_BLOCK_SIZE)) {
2391 for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
2392 for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
2393 int index = block_row * mis + block_col;
2394 mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->sb_type = bsize;
      }
    }
  } else {
    // Else this is a partial SB64.
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
                                 col8x8_remaining, bsize, mi_8x8);
  }
}
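// Illustrative sketch (assumption; not encoder code): stepping a 64x64 SB in
// 8x8 mode-info units with a fixed block size, as the loops above do, visits
// (MI_BLOCK_SIZE / bh) * (MI_BLOCK_SIZE / bw) blocks per SB64.
static INLINE int example_blocks_per_sb64(BLOCK_SIZE bsize) {
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  return (MI_BLOCK_SIZE / bh) * (MI_BLOCK_SIZE / bw);  // e.g. 4 for 32x32
}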
static const struct {
  int row;
  int col;
} coord_lookup[16] = {
2431 static void set_source_var_based_partition(VP9_COMP *cpi,
2432 const TileInfo *const tile,
2433 MACROBLOCK *const x,
2434 MODE_INFO **mi_8x8, int mi_row,
                                            int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
2437 const int mis = cm->mi_stride;
2438 const int row8x8_remaining = tile->mi_row_end - mi_row;
2439 const int col8x8_remaining = tile->mi_col_end - mi_col;
2440 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
2442 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
2444 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
2447 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
2448 (row8x8_remaining >= MI_BLOCK_SIZE)) {
    int i, j;
    int index;
    diff d32[4];
    const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
    int is_larger_better = 0;
    int use32x32 = 0;
    unsigned int thr = cpi->source_var_thresh;
2457 memset(d32, 0, 4 * sizeof(diff));
    for (i = 0; i < 4; i++) {
      diff *d16[4];
2462 for (j = 0; j < 4; j++) {
2463 int b_mi_row = coord_lookup[i * 4 + j].row;
2464 int b_mi_col = coord_lookup[i * 4 + j].col;
2465 int boffset = b_mi_row / 2 * cm->mb_cols + b_mi_col / 2;
2467 d16[j] = cpi->source_diff_var + offset + boffset;
2469 index = b_mi_row * mis + b_mi_col;
2470 mi_8x8[index] = mi_upper_left + index;
2471 mi_8x8[index]->sb_type = BLOCK_16X16;
2473 // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
2474 // size to further improve quality.
2477 is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
2478 (d16[2]->var < thr) && (d16[3]->var < thr);
2480 // Use 32x32 partition
2481 if (is_larger_better) {
        use32x32 += 1;
        for (j = 0; j < 4; j++) {
2485 d32[i].sse += d16[j]->sse;
2486 d32[i].sum += d16[j]->sum;
        }

        d32[i].var =
            (unsigned int)(d32[i].sse -
                           (unsigned int)(((int64_t)d32[i].sum * d32[i].sum) >>
                                          10));

        index = coord_lookup[i * 4].row * mis + coord_lookup[i * 4].col;
2495 mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->sb_type = BLOCK_32X32;
      }
    }
2500 if (use32x32 == 4) {
2502 is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
2503 (d32[2].var < thr) && (d32[3].var < thr);
2505 // Use 64x64 partition
2506 if (is_larger_better) {
2507 mi_8x8[0] = mi_upper_left;
        mi_8x8[0]->sb_type = BLOCK_64X64;
      }
    }
2511 } else { // partial in-image SB64
2512 int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
2513 int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
2514 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
                                 col8x8_remaining, BLOCK_16X16, mi_8x8);
  }
}
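// Illustrative sketch (assumption; exposition only): the promotion tests
// above ("use 32x32", "use 64x64") reduce to "merge four sub-blocks into the
// parent size iff every sub-block's source-difference variance is below the
// threshold".
static INLINE int example_merge_ok(const unsigned int var[4],
                                   unsigned int thr) {
  return var[0] < thr && var[1] < thr && var[2] < thr && var[3] < thr;
}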
2519 static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
2520 PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
                            BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
2523 MACROBLOCK *const x = &td->mb;
2524 MACROBLOCKD *const xd = &x->e_mbd;
2525 MODE_INFO *const mi = xd->mi[0];
2526 struct macroblock_plane *const p = x->plane;
2527 const struct segmentation *const seg = &cm->seg;
2528 const int bw = num_8x8_blocks_wide_lookup[mi->sb_type];
2529 const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
2530 const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
2531 const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
2533 *(xd->mi[0]) = ctx->mic;
2534 *(x->mbmi_ext) = ctx->mbmi_ext;
2536 if (seg->enabled && (cpi->oxcf.aq_mode != NO_AQ || cpi->roi.enabled)) {
2537 // Setting segmentation map for cyclic_refresh.
2538 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
2539 vp9_cyclic_refresh_update_segment(cpi, mi, mi_row, mi_col, bsize,
                                        ctx->rate, ctx->dist, x->skip, p);
    } else {
      const uint8_t *const map =
2543 seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
2544 mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);
  }
2549 if (is_inter_block(mi)) {
2550 vp9_update_mv_count(td);
2551 if (cm->interp_filter == SWITCHABLE) {
2552 const int pred_ctx = get_pred_context_switchable_interp(xd);
      ++td->counts->switchable_interp[pred_ctx][mi->interp_filter];
    }
2556 if (mi->sb_type < BLOCK_8X8) {
2557 mi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
      mi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
    }
  }
2562 if (cm->use_prev_frame_mvs || !cm->error_resilient_mode ||
2563 (cpi->svc.use_base_mv && cpi->svc.number_spatial_layers > 1 &&
2564 cpi->svc.spatial_layer_id != cpi->svc.number_spatial_layers - 1)) {
2565 MV_REF *const frame_mvs =
2566 cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
2569 for (h = 0; h < y_mis; ++h) {
2570 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
2571 for (w = 0; w < x_mis; ++w) {
2572 MV_REF *const mv = frame_mv + w;
2573 mv->ref_frame[0] = mi->ref_frame[0];
2574 mv->ref_frame[1] = mi->ref_frame[1];
2575 mv->mv[0].as_int = mi->mv[0].as_int;
        mv->mv[1].as_int = mi->mv[1].as_int;
      }
    }
  }
2581 x->skip = ctx->skip;
  x->skip_txfm[0] = (mi->segment_id || xd->lossless) ? 0 : ctx->skip_txfm[0];
}
2585 static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
2586 const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
2587 int mi_col, int output_enabled, BLOCK_SIZE bsize,
2588 PICK_MODE_CONTEXT *ctx) {
2589 MACROBLOCK *const x = &td->mb;
2590 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
2591 update_state_rt(cpi, td, ctx, mi_row, mi_col, bsize);
2593 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
2594 update_stats(&cpi->common, td);
  (*tp)->token = EOSB_TOKEN;
  (*tp)++;
}
2600 static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
2601 const TileInfo *const tile, TOKENEXTRA **tp,
2602 int mi_row, int mi_col, int output_enabled,
2603 BLOCK_SIZE bsize, PC_TREE *pc_tree) {
2604 VP9_COMMON *const cm = &cpi->common;
2605 MACROBLOCK *const x = &td->mb;
2606 MACROBLOCKD *const xd = &x->e_mbd;
2608 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
2613 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2615 if (bsize >= BLOCK_8X8) {
2616 const int idx_str = xd->mi_stride * mi_row + mi_col;
2617 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
2618 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
2619 subsize = mi_8x8[0]->sb_type;
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }
2625 partition = partition_lookup[bsl][subsize];
2626 if (output_enabled && bsize != BLOCK_4X4)
2627 td->counts->partition[ctx][partition]++;
2629 switch (partition) {
2630 case PARTITION_NONE:
      encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->none);
      break;
2634 case PARTITION_VERT:
2635 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2636 &pc_tree->vertical[0]);
2637 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
2638 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
                    subsize, &pc_tree->vertical[1]);
      }
      break;
2642 case PARTITION_HORZ:
2643 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2644 &pc_tree->horizontal[0]);
2645 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
2646 encode_b_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
                    subsize, &pc_tree->horizontal[1]);
      }
      break;
    default:
      assert(partition == PARTITION_SPLIT);
2652 subsize = get_subsize(bsize, PARTITION_SPLIT);
      encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                   pc_tree->split[0]);
2655 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
2656 subsize, pc_tree->split[1]);
2657 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
2658 subsize, pc_tree->split[2]);
2659 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
                   output_enabled, subsize, pc_tree->split[3]);
      break;
  }
2664 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
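// Illustrative sketch (assumption; not encoder code): for PARTITION_SPLIT
// the recursion above visits the four children in raster order at offsets of
// hbs (half the block's extent in mode-info units).
static INLINE void example_split_offsets(int hbs, int child, int *row_off,
                                         int *col_off) {
  *row_off = (child >> 1) * hbs;  // children 2 and 3 form the bottom half
  *col_off = (child & 1) * hbs;   // children 1 and 3 form the right half
}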
2668 #if !CONFIG_REALTIME_ONLY
2669 static void rd_use_partition(VP9_COMP *cpi, ThreadData *td,
2670 TileDataEnc *tile_data, MODE_INFO **mi_8x8,
2671 TOKENEXTRA **tp, int mi_row, int mi_col,
2672 BLOCK_SIZE bsize, int *rate, int64_t *dist,
2673 int do_recon, PC_TREE *pc_tree) {
2674 VP9_COMMON *const cm = &cpi->common;
2675 TileInfo *const tile_info = &tile_data->tile_info;
2676 MACROBLOCK *const x = &td->mb;
2677 MACROBLOCKD *const xd = &x->e_mbd;
2678 const int mis = cm->mi_stride;
2679 const int bsl = b_width_log2_lookup[bsize];
2680 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
2681 const int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE subsize;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2686 PARTITION_CONTEXT sl[8], sa[8];
2687 RD_COST last_part_rdc, none_rdc, chosen_rdc;
2688 BLOCK_SIZE sub_subsize = BLOCK_4X4;
2689 int splits_below = 0;
2690 BLOCK_SIZE bs_type = mi_8x8[0]->sb_type;
2691 int do_partition_search = 1;
2692 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2694 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2696 assert(num_4x4_blocks_wide_lookup[bsize] ==
2697 num_4x4_blocks_high_lookup[bsize]);
2699 vp9_rd_cost_reset(&last_part_rdc);
2700 vp9_rd_cost_reset(&none_rdc);
2701 vp9_rd_cost_reset(&chosen_rdc);
2703 partition = partition_lookup[bsl][bs_type];
2704 subsize = get_subsize(bsize, partition);
2706 pc_tree->partitioning = partition;
2707 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2709 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode != NO_AQ) {
2710 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2711 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2714 if (do_partition_search &&
2715 cpi->sf.partition_search_type == SEARCH_PARTITION &&
2716 cpi->sf.adjust_partitioning_from_last_frame) {
2717 // Check if any of the sub blocks are further split.
2718 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
2719 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
      splits_below = 1;
      for (i = 0; i < 4; i++) {
2722 int jj = i >> 1, ii = i & 0x01;
2723 MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
        if (this_mi && this_mi->sb_type >= sub_subsize) {
          splits_below = 0;
        }
      }
    }
    // If partition is not none, try none unless each of the 4 splits is
    // split even further.
    if (partition != PARTITION_NONE && !splits_below &&
2733 mi_row + (mi_step >> 1) < cm->mi_rows &&
2734 mi_col + (mi_step >> 1) < cm->mi_cols) {
2735 pc_tree->partitioning = PARTITION_NONE;
2736 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, ctx,
2737 INT_MAX, INT64_MAX);
2739 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2741 if (none_rdc.rate < INT_MAX) {
2742 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
        none_rdc.rdcost =
            RDCOST(x->rdmult, x->rddiv, none_rdc.rate, none_rdc.dist);
      }
2747 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2748 mi_8x8[0]->sb_type = bs_type;
      pc_tree->partitioning = partition;
    }
  }
2753 switch (partition) {
2754 case PARTITION_NONE:
2755 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, bsize,
                       ctx, INT_MAX, INT64_MAX);
      break;
2758 case PARTITION_HORZ:
2759 pc_tree->horizontal[0].skip_ref_frame_mask = 0;
2760 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2761 subsize, &pc_tree->horizontal[0], INT_MAX, INT64_MAX);
2762 if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
2763 mi_row + (mi_step >> 1) < cm->mi_rows) {
        RD_COST tmp_rdc;
        PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2766 vp9_rd_cost_init(&tmp_rdc);
2767 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2768 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2769 pc_tree->horizontal[1].skip_ref_frame_mask = 0;
2770 rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col,
                         &tmp_rdc, subsize, &pc_tree->horizontal[1], INT_MAX,
                         INT64_MAX);
2773 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
          vp9_rd_cost_reset(&last_part_rdc);
          break;
        }
2777 last_part_rdc.rate += tmp_rdc.rate;
2778 last_part_rdc.dist += tmp_rdc.dist;
        last_part_rdc.rdcost += tmp_rdc.rdcost;
      }
      break;
2782 case PARTITION_VERT:
2783 pc_tree->vertical[0].skip_ref_frame_mask = 0;
2784 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2785 subsize, &pc_tree->vertical[0], INT_MAX, INT64_MAX);
2786 if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
2787 mi_col + (mi_step >> 1) < cm->mi_cols) {
        RD_COST tmp_rdc;
        PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
2790 vp9_rd_cost_init(&tmp_rdc);
2791 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2792 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2793 pc_tree->vertical[bsize > BLOCK_8X8].skip_ref_frame_mask = 0;
        rd_pick_sb_modes(
            cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
2796 subsize, &pc_tree->vertical[bsize > BLOCK_8X8], INT_MAX, INT64_MAX);
2797 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
          vp9_rd_cost_reset(&last_part_rdc);
          break;
        }
2801 last_part_rdc.rate += tmp_rdc.rate;
2802 last_part_rdc.dist += tmp_rdc.dist;
        last_part_rdc.rdcost += tmp_rdc.rdcost;
      }
      break;
    default:
2807 assert(partition == PARTITION_SPLIT);
2808 if (bsize == BLOCK_8X8) {
2809 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
                         subsize, pc_tree->leaf_split[0], INT_MAX, INT64_MAX);
        break;
      }
2813 last_part_rdc.rate = 0;
2814 last_part_rdc.dist = 0;
2815 last_part_rdc.rdcost = 0;
2816 for (i = 0; i < 4; i++) {
2817 int x_idx = (i & 1) * (mi_step >> 1);
2818 int y_idx = (i >> 1) * (mi_step >> 1);
        int jj = i >> 1, ii = i & 0x01;
        RD_COST tmp_rdc;
        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;
2824 vp9_rd_cost_init(&tmp_rdc);
2825 rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss,
2826 tp, mi_row + y_idx, mi_col + x_idx, subsize,
                         &tmp_rdc.rate, &tmp_rdc.dist, i != 3,
                         pc_tree->split[i]);
2829 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
          vp9_rd_cost_reset(&last_part_rdc);
          break;
        }
2833 last_part_rdc.rate += tmp_rdc.rate;
        last_part_rdc.dist += tmp_rdc.dist;
      }
      break;
  }
2839 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2840 if (last_part_rdc.rate < INT_MAX) {
2841 last_part_rdc.rate += cpi->partition_cost[pl][partition];
2842 last_part_rdc.rdcost =
2843 RDCOST(x->rdmult, x->rddiv, last_part_rdc.rate, last_part_rdc.dist);
2846 if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame &&
2847 cpi->sf.partition_search_type == SEARCH_PARTITION &&
2848 partition != PARTITION_SPLIT && bsize > BLOCK_8X8 &&
2849 (mi_row + mi_step < cm->mi_rows ||
2850 mi_row + (mi_step >> 1) == cm->mi_rows) &&
2851 (mi_col + mi_step < cm->mi_cols ||
2852 mi_col + (mi_step >> 1) == cm->mi_cols)) {
2853 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
2854 chosen_rdc.rate = 0;
2855 chosen_rdc.dist = 0;
2856 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2857 pc_tree->partitioning = PARTITION_SPLIT;
2860 for (i = 0; i < 4; i++) {
2861 int x_idx = (i & 1) * (mi_step >> 1);
2862 int y_idx = (i >> 1) * (mi_step >> 1);
      RD_COST tmp_rdc;
      ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2865 PARTITION_CONTEXT sl[8], sa[8];
      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
        continue;
2870 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2871 pc_tree->split[i]->partitioning = PARTITION_NONE;
2872 rd_pick_sb_modes(cpi, tile_data, x, mi_row + y_idx, mi_col + x_idx,
2873 &tmp_rdc, split_subsize, &pc_tree->split[i]->none,
2874 INT_MAX, INT64_MAX);
2876 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2878 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
        vp9_rd_cost_reset(&chosen_rdc);
        break;
      }
2883 chosen_rdc.rate += tmp_rdc.rate;
2884 chosen_rdc.dist += tmp_rdc.dist;
      if (i != 3)
        encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
2888 split_subsize, pc_tree->split[i]);
      pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
                                   split_subsize);
      chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
    }
2894 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2895 if (chosen_rdc.rate < INT_MAX) {
2896 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      chosen_rdc.rdcost =
          RDCOST(x->rdmult, x->rddiv, chosen_rdc.rate, chosen_rdc.dist);
    }
  }
2902 // If last_part is better set the partitioning to that.
2903 if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
2904 mi_8x8[0]->sb_type = bsize;
2905 if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition;
    chosen_rdc = last_part_rdc;
  }
2908 // If none was better set the partitioning to that.
2909 if (none_rdc.rdcost < chosen_rdc.rdcost) {
2910 if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
    chosen_rdc = none_rdc;
  }
2914 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2916 // We must have chosen a partitioning and encoding or we'll fail later on.
2917 // No other opportunities for success.
2918 if (bsize == BLOCK_64X64)
2919 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
  if (do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);
    encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
              pc_tree);
  }
2927 *rate = chosen_rdc.rate;
  *dist = chosen_rdc.dist;
}
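// Illustrative sketch (assumption; exposition only): the three-way choice
// above (last-frame partitioning vs. forced NONE vs. re-searched SPLIT) is a
// running minimum over rdcost, carrying the winner's partitioning along.
static INLINE int example_pick_cheapest(const int64_t rdcost[3]) {
  int best = 0, i;
  for (i = 1; i < 3; ++i)
    if (rdcost[i] < rdcost[best]) best = i;
  return best;  // index of the cheapest candidate
}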
2931 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
2932 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2933 BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, BLOCK_16X16,
  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16
};
2937 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
2938 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
2939 BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
  BLOCK_64X64, BLOCK_64X64, BLOCK_64X64
};
2943 // Look at all the mode_info entries for blocks that are part of this
2944 // partition and find the min and max values for sb_type.
2945 // At the moment this is designed to work on a 64x64 SB but could be
2946 // adjusted to use a size parameter.
2948 // The min and max are assumed to have been initialized prior to calling this
2949 // function so repeat calls can accumulate a min and max of more than one sb64.
2950 static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
2951 BLOCK_SIZE *min_block_size,
2952 BLOCK_SIZE *max_block_size,
2953 int bs_hist[BLOCK_SIZES]) {
2954 int sb_width_in_blocks = MI_BLOCK_SIZE;
  int sb_height_in_blocks = MI_BLOCK_SIZE;
  int i, j;
  int index = 0;
2959 // Check the sb_type for each block that belongs to this region.
2960 for (i = 0; i < sb_height_in_blocks; ++i) {
2961 for (j = 0; j < sb_width_in_blocks; ++j) {
2962 MODE_INFO *mi = mi_8x8[index + j];
      BLOCK_SIZE sb_type = mi ? mi->sb_type : 0;
      bs_hist[sb_type]++;
2965 *min_block_size = VPXMIN(*min_block_size, sb_type);
2966 *max_block_size = VPXMAX(*max_block_size, sb_type);
    index += xd->mi_stride;
  }
}
// Next square block size less than or equal to the current block size.
2973 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
2974 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8,
2975 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
  BLOCK_32X32, BLOCK_32X32, BLOCK_64X64
};
// Look at neighboring blocks and set a min and max partition size based on
// what they chose.
2981 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2982 MACROBLOCKD *const xd, int mi_row,
2983 int mi_col, BLOCK_SIZE *min_block_size,
2984 BLOCK_SIZE *max_block_size) {
2985 VP9_COMMON *const cm = &cpi->common;
2986 MODE_INFO **mi = xd->mi;
2987 const int left_in_image = !!xd->left_mi;
2988 const int above_in_image = !!xd->above_mi;
2989 const int row8x8_remaining = tile->mi_row_end - mi_row;
2990 const int col8x8_remaining = tile->mi_col_end - mi_col;
  int bh, bw;
  BLOCK_SIZE min_size = BLOCK_4X4;
2993 BLOCK_SIZE max_size = BLOCK_64X64;
2994 int bs_hist[BLOCK_SIZES] = { 0 };
2996 // Trap case where we do not have a prediction.
2997 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
2998 // Default "min to max" and "max to min"
2999 min_size = BLOCK_64X64;
3000 max_size = BLOCK_4X4;
3002 // NOTE: each call to get_sb_partition_size_range() uses the previous
3003 // passed in values for min and max as a starting point.
3004 // Find the min and max partition used in previous frame at this location
3005 if (cm->frame_type != KEY_FRAME) {
3006 MODE_INFO **prev_mi =
3007 &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
3008 get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
3010 // Find the min and max partition sizes used in the left SB64
3011 if (left_in_image) {
3012 MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
      get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
                                  bs_hist);
    }
3016 // Find the min and max partition sizes used in the above SB64.
3017 if (above_in_image) {
3018 MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
      get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
                                  bs_hist);
    }
3023 // Adjust observed min and max for "relaxed" auto partition case.
3024 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
3025 min_size = min_partition_size[min_size];
      max_size = max_partition_size[max_size];
    }
  }
3030 // Check border cases where max and min from neighbors may not be legal.
  max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
                                 &bh, &bw);
3033 // Test for blocks at the edge of the active image.
  // This may be the actual edge of the image or where there are formatting
  // bars.
3036 if (vp9_active_edge_sb(cpi, mi_row, mi_col)) {
    min_size = BLOCK_4X4;
  } else {
    min_size =
        VPXMIN(cpi->sf.rd_auto_partition_min_limit, VPXMIN(min_size, max_size));
  }
3043 // When use_square_partition_only is true, make sure at least one square
  // partition is allowed by selecting the next smaller square size as
  // min_size.
3046 if (cpi->sf.use_square_partition_only &&
3047 next_square_size[max_size] < min_size) {
    min_size = next_square_size[max_size];
  }
3051 *min_block_size = min_size;
  *max_block_size = max_size;
}
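// Illustrative sketch (assumption; not encoder code): the RELAXED adjustment
// above widens the observed [min, max] range by one notch in each direction
// through the lookup tables defined earlier in this file.
static INLINE void example_relax_range(BLOCK_SIZE *min_size,
                                       BLOCK_SIZE *max_size) {
  *min_size = min_partition_size[*min_size];  // one step smaller
  *max_size = max_partition_size[*max_size];  // one step larger
}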
3055 // TODO(jingning) refactor functions setting partition search range
3056 static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, int mi_row,
3057 int mi_col, BLOCK_SIZE bsize,
3058 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
3059 int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  int idx, idy;
  MODE_INFO *mi;
  const int idx_str = cm->mi_stride * mi_row + mi_col;
3065 MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
3066 BLOCK_SIZE bs, min_size, max_size;
3068 min_size = BLOCK_64X64;
3069 max_size = BLOCK_4X4;
3072 for (idy = 0; idy < mi_height; ++idy) {
3073 for (idx = 0; idx < mi_width; ++idx) {
3074 mi = prev_mi[idy * cm->mi_stride + idx];
3075 bs = mi ? mi->sb_type : bsize;
3076 min_size = VPXMIN(min_size, bs);
      max_size = VPXMAX(max_size, bs);
    }
  }

  if (xd->left_mi) {
    for (idy = 0; idy < mi_height; ++idy) {
3084 mi = xd->mi[idy * cm->mi_stride - 1];
3085 bs = mi ? mi->sb_type : bsize;
3086 min_size = VPXMIN(min_size, bs);
      max_size = VPXMAX(max_size, bs);
    }
  }

  if (xd->above_mi) {
    for (idx = 0; idx < mi_width; ++idx) {
3093 mi = xd->mi[idx - cm->mi_stride];
3094 bs = mi ? mi->sb_type : bsize;
3095 min_size = VPXMIN(min_size, bs);
      max_size = VPXMAX(max_size, bs);
    }
  }
3100 if (min_size == max_size) {
3101 min_size = min_partition_size[min_size];
    max_size = max_partition_size[max_size];
  }

  *min_bs = min_size;
  *max_bs = max_size;
}
3108 #endif // !CONFIG_REALTIME_ONLY
3110 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
  memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
}
3114 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
  memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
}
3118 #if CONFIG_FP_MB_STATS
3119 const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
3121 const int num_16x16_blocks_high_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
3123 const int qindex_skip_threshold_lookup[BLOCK_SIZES] = { 0, 10, 10, 30, 40,
3126 const int qindex_split_threshold_lookup[BLOCK_SIZES] = { 0, 3, 3, 7, 15,
3129 const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = { 1, 1, 1, 1, 1,
3142 static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
  if (fp_byte & FPMB_MOTION_ZERO_MASK) {
    return MV_ZERO;
  } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
    return MV_LEFT;
  } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
    return MV_RIGHT;
  } else if (fp_byte & FPMB_MOTION_UP_MASK) {
    return MV_UP;
  } else {
    return MV_DOWN;
  }
}
3156 static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
3157 MOTION_DIRECTION that_mv) {
  if (this_mv == that_mv) {
    return 0;
  }
  return abs(this_mv - that_mv) == 2 ? 2 : 1;
}
// Calculate prediction based on the given input features and neural net
// config. Assume there are no more than NN_MAX_NODES_PER_LAYER nodes in each
// hidden layer.
static void nn_predict(const float *features, const NN_CONFIG *nn_config,
                       float *output) {
  int num_input_nodes = nn_config->num_inputs;
  int buf_index = 0;
  float buf[2][NN_MAX_NODES_PER_LAYER];
3174 const float *input_nodes = features;
3176 // Propagate hidden layers.
  const int num_layers = nn_config->num_hidden_layers;
  int layer, node, i;
3179 assert(num_layers <= NN_MAX_HIDDEN_LAYERS);
3180 for (layer = 0; layer < num_layers; ++layer) {
3181 const float *weights = nn_config->weights[layer];
3182 const float *bias = nn_config->bias[layer];
3183 float *output_nodes = buf[buf_index];
3184 const int num_output_nodes = nn_config->num_hidden_nodes[layer];
3185 assert(num_output_nodes < NN_MAX_NODES_PER_LAYER);
3186 for (node = 0; node < num_output_nodes; ++node) {
      float val = 0.0f;
      for (i = 0; i < num_input_nodes; ++i) val += weights[i] * input_nodes[i];
      val += bias[node];
3190 // ReLU as activation function.
3191 val = VPXMAX(val, 0.0f);
3192 output_nodes[node] = val;
3193 weights += num_input_nodes;
3195 num_input_nodes = num_output_nodes;
3196 input_nodes = output_nodes;
3197 buf_index = 1 - buf_index;
3200 // Final output layer.
  {
    const float *weights = nn_config->weights[num_layers];
3203 for (node = 0; node < nn_config->num_outputs; ++node) {
3204 const float *bias = nn_config->bias[num_layers];
      float val = 0.0f;
      for (i = 0; i < num_input_nodes; ++i) val += weights[i] * input_nodes[i];
3207 output[node] = val + bias[node];
      weights += num_input_nodes;
    }
  }
}
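// Illustrative sketch (exposition only): the hidden-layer activation used in
// nn_predict() above is a plain ReLU; the final layer is left un-squashed so
// callers can threshold the raw score themselves.
static INLINE float example_relu(float x) { return x > 0.0f ? x : 0.0f; }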
3213 #if !CONFIG_REALTIME_ONLY
3215 // Machine-learning based partition search early termination.
3216 // Return 1 to skip split and rect partitions.
3217 static int ml_pruning_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd,
3218 PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
                                BLOCK_SIZE bsize) {
  const int mag_mv =
      abs(ctx->mic.mv[0].as_mv.col) + abs(ctx->mic.mv[0].as_mv.row);
3222 const int left_in_image = !!xd->left_mi;
3223 const int above_in_image = !!xd->above_mi;
3224 MODE_INFO **prev_mi =
3225 &cm->prev_mi_grid_visible[mi_col + cm->mi_stride * mi_row];
3226 int above_par = 0; // above_partitioning
3227 int left_par = 0; // left_partitioning
3228 int last_par = 0; // last_partitioning
  int offset = 0;
  int i;
  BLOCK_SIZE context_size;
3232 const NN_CONFIG *nn_config = NULL;
3233 const float *mean, *sd, *linear_weights;
3234 float nn_score, linear_score;
3235 float features[FEATURES];
3237 assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]);
3238 vpx_clear_system_state();
  switch (bsize) {
    case BLOCK_64X64:
      offset = 0;
      nn_config = &vp9_partition_nnconfig_64x64;
      break;
    case BLOCK_32X32:
      offset = 8;
      nn_config = &vp9_partition_nnconfig_32x32;
      break;
    case BLOCK_16X16:
      offset = 16;
      nn_config = &vp9_partition_nnconfig_16x16;
      break;
    default: assert(0 && "Unexpected block size."); return 0;
  }
3256 if (above_in_image) {
3257 context_size = xd->above_mi->sb_type;
    if (context_size < bsize)
      above_par = 2;
    else if (context_size == bsize)
      above_par = 1;
  }
3264 if (left_in_image) {
3265 context_size = xd->left_mi->sb_type;
    if (context_size < bsize)
      left_par = 2;
    else if (context_size == bsize)
      left_par = 1;
  }

  if (prev_mi[0]) {
3273 context_size = prev_mi[0]->sb_type;
    if (context_size < bsize)
      last_par = 2;
    else if (context_size == bsize)
      last_par = 1;
  }
3280 mean = &vp9_partition_feature_mean[offset];
3281 sd = &vp9_partition_feature_std[offset];
3282 features[0] = ((float)ctx->rate - mean[0]) / sd[0];
3283 features[1] = ((float)ctx->dist - mean[1]) / sd[1];
3284 features[2] = ((float)mag_mv / 2 - mean[2]) * sd[2];
3285 features[3] = ((float)(left_par + above_par) / 2 - mean[3]) * sd[3];
3286 features[4] = ((float)ctx->sum_y_eobs - mean[4]) / sd[4];
3287 features[5] = ((float)cm->base_qindex - mean[5]) * sd[5];
3288 features[6] = ((float)last_par - mean[6]) * sd[6];
3290 // Predict using linear model.
3291 linear_weights = &vp9_partition_linear_weights[offset];
3292 linear_score = linear_weights[FEATURES];
3293 for (i = 0; i < FEATURES; ++i)
3294 linear_score += linear_weights[i] * features[i];
3295 if (linear_score > 0.1f) return 0;
3297 // Predict using neural net model.
3298 nn_predict(features, nn_config, &nn_score);
3300 if (linear_score < -0.0f && nn_score < 0.1f) return 1;
  if (nn_score < -0.0f && linear_score < 0.1f) return 1;
  return 0;
}
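// Illustrative sketch (assumption; a restatement of the gate above, not a
// second implementation): early termination fires only when both the linear
// model and the neural net lean negative and neither is confidently positive.
static INLINE int example_dual_model_gate(float linear_score, float nn_score) {
  if (linear_score > 0.1f) return 0;  // linear model votes to keep searching
  if (linear_score < 0.0f && nn_score < 0.1f) return 1;
  if (nn_score < 0.0f && linear_score < 0.1f) return 1;
  return 0;
}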
3307 // ML-based partition search breakout.
3308 static int ml_predict_breakout(VP9_COMP *const cpi, BLOCK_SIZE bsize,
3309 const MACROBLOCK *const x,
3310 const RD_COST *const rd_cost) {
3311 DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 };
3312 const VP9_COMMON *const cm = &cpi->common;
3313 float features[FEATURES];
3314 const float *linear_weights = NULL; // Linear model weights.
3315 float linear_score = 0.0f;
3316 const int qindex = cm->base_qindex;
3317 const int q_ctx = qindex >= 200 ? 0 : (qindex >= 150 ? 1 : 2);
3318 const int is_720p_or_larger = VPXMIN(cm->width, cm->height) >= 720;
3319 const int resolution_ctx = is_720p_or_larger ? 1 : 0;
  switch (bsize) {
    case BLOCK_64X64:
      linear_weights = vp9_partition_breakout_weights_64[resolution_ctx][q_ctx];
      break;
    case BLOCK_32X32:
      linear_weights = vp9_partition_breakout_weights_32[resolution_ctx][q_ctx];
      break;
    case BLOCK_16X16:
      linear_weights = vp9_partition_breakout_weights_16[resolution_ctx][q_ctx];
      break;
    case BLOCK_8X8:
      linear_weights = vp9_partition_breakout_weights_8[resolution_ctx][q_ctx];
      break;
    default: assert(0 && "Unexpected block size."); return 0;
  }
3336 if (!linear_weights) return 0;
3338 { // Generate feature values.
3339 #if CONFIG_VP9_HIGHBITDEPTH
    const int ac_q =
        vp9_ac_quant(cm->base_qindex, 0, cm->bit_depth) >> (x->e_mbd.bd - 8);
#else
    const int ac_q = vp9_ac_quant(qindex, 0, cm->bit_depth);
3344 #endif // CONFIG_VP9_HIGHBITDEPTH
3345 const int num_pels_log2 = num_pels_log2_lookup[bsize];
3346 int feature_index = 0;
3347 unsigned int var, sse;
3348 float rate_f, dist_f;
3350 #if CONFIG_VP9_HIGHBITDEPTH
    if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      var =
          vp9_high_get_sby_variance(cpi, &x->plane[0].src, bsize, x->e_mbd.bd);
    } else {
      var = cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                                  vp9_64_zeros, 0, &sse);
    }
#else
    var = cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                                vp9_64_zeros, 0, &sse);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    var = var >> num_pels_log2;
3364 vpx_clear_system_state();
3366 rate_f = (float)VPXMIN(rd_cost->rate, INT_MAX);
3367 dist_f = (float)(VPXMIN(rd_cost->dist, INT_MAX) >> num_pels_log2);
    rate_f =
        ((float)x->rdmult / 128.0f / 512.0f / (float)(1 << num_pels_log2)) *
        rate_f;
3372 features[feature_index++] = rate_f;
3373 features[feature_index++] = dist_f;
3374 features[feature_index++] = (float)var;
3375 features[feature_index++] = (float)ac_q;
3376 assert(feature_index == FEATURES);
3379 { // Calculate the output score.
    int i;
    linear_score = linear_weights[FEATURES];
3382 for (i = 0; i < FEATURES; ++i)
3383 linear_score += linear_weights[i] * features[i];
  }

  return linear_score >= cpi->sf.rd_ml_partition.search_breakout_thresh[q_ctx];
}
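// Illustrative sketch (assumption; exposition only): the linear models in
// this file score a feature vector as bias + w.x, with the bias stored one
// past the weights (at the FEATURES index used above).
static INLINE float example_linear_score(const float *weights,
                                         const float *features, int n) {
  float score = weights[n];  // bias term stored after the n weights
  int i;
  for (i = 0; i < n; ++i) score += weights[i] * features[i];
  return score;
}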
3392 static void ml_prune_rect_partition(VP9_COMP *const cpi, MACROBLOCK *const x,
                                    BLOCK_SIZE bsize,
                                    const PC_TREE *const pc_tree,
                                    int *allow_horz, int *allow_vert,
                                    int64_t ref_rd) {
  const NN_CONFIG *nn_config = NULL;
  float score[LABELS] = {
    0.0f,
  };
  int thresh = -1;
  int i;
3405 if (ref_rd <= 0 || ref_rd > 1000000000) return;
  switch (bsize) {
    case BLOCK_8X8: break;
    case BLOCK_16X16:
3410 nn_config = &vp9_rect_part_nnconfig_16;
3411 thresh = cpi->sf.rd_ml_partition.prune_rect_thresh[1];
      break;
    case BLOCK_32X32:
      nn_config = &vp9_rect_part_nnconfig_32;
3415 thresh = cpi->sf.rd_ml_partition.prune_rect_thresh[2];
      break;
    case BLOCK_64X64:
      nn_config = &vp9_rect_part_nnconfig_64;
3419 thresh = cpi->sf.rd_ml_partition.prune_rect_thresh[3];
      break;
    default: assert(0 && "Unexpected block size."); return;
  }
3423 if (!nn_config || thresh < 0) return;
3425 // Feature extraction and model score calculation.
  {
    const VP9_COMMON *const cm = &cpi->common;
3428 #if CONFIG_VP9_HIGHBITDEPTH
    const int dc_q =
        vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) >> (x->e_mbd.bd - 8);
#else
    const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
3433 #endif // CONFIG_VP9_HIGHBITDEPTH
3434 const int bs = 4 * num_4x4_blocks_wide_lookup[bsize];
3435 int feature_index = 0;
3436 float features[FEATURES];
3438 features[feature_index++] = logf((float)dc_q + 1.0f);
3439 features[feature_index++] =
3440 (float)(pc_tree->partitioning == PARTITION_NONE);
3441 features[feature_index++] = logf((float)ref_rd / bs / bs + 1.0f);
3444 const float norm_factor = 1.0f / ((float)ref_rd + 1.0f);
3445 const int64_t none_rdcost = pc_tree->none.rdcost;
3446 float rd_ratio = 2.0f;
3447 if (none_rdcost > 0 && none_rdcost < 1000000000)
3448 rd_ratio = (float)none_rdcost * norm_factor;
3449 features[feature_index++] = VPXMIN(rd_ratio, 2.0f);
3451 for (i = 0; i < 4; ++i) {
3452 const int64_t this_rd = pc_tree->split[i]->none.rdcost;
3453 const int rd_valid = this_rd > 0 && this_rd < 1000000000;
3454 // Ratio between sub-block RD and whole block RD.
3455 features[feature_index++] =
3456 rd_valid ? (float)this_rd * norm_factor : 1.0f;
3460 assert(feature_index == FEATURES);
3461 nn_predict(features, nn_config, score);
3464 // Make decisions based on the model score.
3466 int max_score = -1000;
3467 int horz = 0, vert = 0;
3468 int int_score[LABELS];
3469 for (i = 0; i < LABELS; ++i) {
3470 int_score[i] = (int)(100 * score[i]);
3471 max_score = VPXMAX(int_score[i], max_score);
3473 thresh = max_score - thresh;
3474 for (i = 0; i < LABELS; ++i) {
3475 if (int_score[i] >= thresh) {
3476 if ((i >> 0) & 1) horz = 1;
3477 if ((i >> 1) & 1) vert = 1;
3480 *allow_horz = *allow_horz && horz;
3481 *allow_vert = *allow_vert && vert;
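
// Note: in rd_pick_partition() below this is invoked as
//   ml_prune_rect_partition(cpi, x, bsize, pc_tree, &partition_horz_allowed,
//                           &partition_vert_allowed, best_rdc.rdcost);
// so a rectangular partition type can only ever be pruned here, never
// re-enabled.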

// Perform fast and coarse motion search for the given block. This is a
// pre-processing step for the ML based partition search speedup.
static void simple_motion_search(const VP9_COMP *const cpi, MACROBLOCK *const x,
                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
                                 MV ref_mv, MV_REFERENCE_FRAME ref,
                                 uint8_t *const pred_buf) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  const YV12_BUFFER_CONFIG *const yv12 = get_ref_frame_buffer(cpi, ref);
  const int step_param = 1;
  const MvLimits tmp_mv_limits = x->mv_limits;
  const SEARCH_METHODS search_method = NSTEP;
  const int sadpb = x->sadperbit16;
  MV ref_mv_full = { ref_mv.row >> 3, ref_mv.col >> 3 };
  MV best_mv = { 0, 0 };
  int cost_list[5];

  assert(yv12 != NULL);
  if (!yv12) return;
  vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                       &cm->frame_refs[ref - 1].sf);
  mi->ref_frame[0] = ref;
  mi->ref_frame[1] = NONE;
  mi->sb_type = bsize;
  vp9_set_mv_search_range(&x->mv_limits, &ref_mv);
  vp9_full_pixel_search(cpi, x, bsize, &ref_mv_full, step_param, search_method,
                        sadpb, cond_cost_list(cpi, cost_list), &ref_mv,
                        &best_mv, 0, 0);
  // Convert the full-pel result back to 1/8-pel units before storing it.
  best_mv.row *= 8;
  best_mv.col *= 8;
  x->mv_limits = tmp_mv_limits;
  mi->mv[0].as_mv = best_mv;

  set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
  xd->plane[0].dst.buf = pred_buf;
  xd->plane[0].dst.stride = 64;
  vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
}
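
// The prediction produced above is written into a 64x64 scratch buffer with a
// fixed stride of 64, so ml_predict_var_rd_paritioning() below can index
// whole-block and quarter-block offsets into it directly. On high-bit-depth
// builds the scratch buffer stores 16-bit samples and is passed around via
// CONVERT_TO_BYTEPTR().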

// Use a neural net model to prune partition-none and partition-split search.
// Features used: QP; spatial block size contexts; variance of prediction
// residue after simple_motion_search.
#define FEATURES 12
static void ml_predict_var_rd_paritioning(const VP9_COMP *const cpi,
                                          MACROBLOCK *const x,
                                          PC_TREE *const pc_tree,
                                          BLOCK_SIZE bsize, int mi_row,
                                          int mi_col, int *none, int *split) {
  const VP9_COMMON *const cm = &cpi->common;
  const NN_CONFIG *nn_config = NULL;
#if CONFIG_VP9_HIGHBITDEPTH
  MACROBLOCKD *xd = &x->e_mbd;
  DECLARE_ALIGNED(16, uint8_t, pred_buffer[64 * 64 * 2]);
  uint8_t *const pred_buf = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
                                ? (CONVERT_TO_BYTEPTR(pred_buffer))
                                : pred_buffer;
#else
  DECLARE_ALIGNED(16, uint8_t, pred_buffer[64 * 64]);
  uint8_t *const pred_buf = pred_buffer;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  const int speed = cpi->oxcf.speed;
  float thresh = 0.0f;

  switch (bsize) {
    case BLOCK_64X64:
      nn_config = &vp9_part_split_nnconfig_64;
      thresh = speed > 0 ? 2.8f : 3.0f;
      break;
    case BLOCK_32X32:
      nn_config = &vp9_part_split_nnconfig_32;
      thresh = speed > 0 ? 3.5f : 3.0f;
      break;
    case BLOCK_16X16:
      nn_config = &vp9_part_split_nnconfig_16;
      thresh = speed > 0 ? 3.8f : 4.0f;
      break;
    case BLOCK_8X8:
      nn_config = &vp9_part_split_nnconfig_8;
      if (cm->width >= 720 && cm->height >= 720)
        thresh = speed > 0 ? 2.5f : 2.0f;
      else
        thresh = speed > 0 ? 3.8f : 2.0f;
      break;
    default: assert(0 && "Unexpected block size."); return;
  }

  if (!nn_config) return;

  // Do a simple single motion search to find a prediction for current block.
  // The variance of the residue will be used as input features.
  {
    MV ref_mv;
    const MV_REFERENCE_FRAME ref =
        cpi->rc.is_src_frame_alt_ref ? ALTREF_FRAME : LAST_FRAME;
    // If bsize is 64x64, use zero MV as reference; otherwise, use MV result
    // of previous (larger) block as reference.
    if (bsize == BLOCK_64X64)
      ref_mv.row = ref_mv.col = 0;
    else
      ref_mv = pc_tree->mv;
    vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
    simple_motion_search(cpi, x, bsize, mi_row, mi_col, ref_mv, ref, pred_buf);
    pc_tree->mv = x->e_mbd.mi[0]->mv[0].as_mv;
  }

  vpx_clear_system_state();

  {
    float features[FEATURES] = { 0.0f };
#if CONFIG_VP9_HIGHBITDEPTH
    const int dc_q =
        vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) >> (xd->bd - 8);
#else
    const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    int feature_idx = 0;
    float score;

    // Generate model input features.
    features[feature_idx++] = logf((float)dc_q + 1.0f);

    // Get the variance of the residue as input features.
    {
      const int bs = 4 * num_4x4_blocks_wide_lookup[bsize];
      const BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
      const uint8_t *pred = pred_buf;
      const uint8_t *src = x->plane[0].src.buf;
      const int src_stride = x->plane[0].src.stride;
      const int pred_stride = 64;
      unsigned int sse;
      int i;
      // Variance of whole block.
      const unsigned int var =
          cpi->fn_ptr[bsize].vf(src, src_stride, pred, pred_stride, &sse);
      const float factor = (var == 0) ? 1.0f : (1.0f / (float)var);
      const MACROBLOCKD *const xd = &x->e_mbd;
      const int has_above = !!xd->above_mi;
      const int has_left = !!xd->left_mi;
      const BLOCK_SIZE above_bsize = has_above ? xd->above_mi->sb_type : bsize;
      const BLOCK_SIZE left_bsize = has_left ? xd->left_mi->sb_type : bsize;

      features[feature_idx++] = (float)has_above;
      features[feature_idx++] = (float)b_width_log2_lookup[above_bsize];
      features[feature_idx++] = (float)b_height_log2_lookup[above_bsize];
      features[feature_idx++] = (float)has_left;
      features[feature_idx++] = (float)b_width_log2_lookup[left_bsize];
      features[feature_idx++] = (float)b_height_log2_lookup[left_bsize];
      features[feature_idx++] = logf((float)var + 1.0f);
      for (i = 0; i < 4; ++i) {
        const int x_idx = (i & 1) * bs / 2;
        const int y_idx = (i >> 1) * bs / 2;
        const int src_offset = y_idx * src_stride + x_idx;
        const int pred_offset = y_idx * pred_stride + x_idx;
        // Variance of quarter block.
        const unsigned int sub_var =
            cpi->fn_ptr[subsize].vf(src + src_offset, src_stride,
                                    pred + pred_offset, pred_stride, &sse);
        const float var_ratio = (var == 0) ? 1.0f : factor * (float)sub_var;
        features[feature_idx++] = var_ratio;
      }
    }
    assert(feature_idx == FEATURES);

    // Feed the features into the model to get the confidence score.
    nn_predict(features, nn_config, &score);

    // Higher score means that the model has higher confidence that the split
    // partition is better than the non-split partition. So if the score is
    // high enough, we skip the non-split partition search; if the score is
    // low enough, we skip the split partition search.
    if (score > thresh) *none = 0;
    if (score < -thresh) *split = 0;
  }
}
#undef FEATURES
#endif  // !CONFIG_REALTIME_ONLY
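
// Perceptual AQ (aq_mode == PERCEPTUAL_AQ) helpers. log_wiener_var() maps the
// per-macroblock wiener variance onto a log2 scale; build_kmeans_segmentation()
// pools that value over each 64x64 superblock and clusters the pooled values
// with k-means to derive the segment boundaries used further below.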

static double log_wiener_var(int64_t wiener_variance) {
  return log(1.0 + wiener_variance) / log(2.0);
}

static void build_kmeans_segmentation(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;
  BLOCK_SIZE bsize = BLOCK_64X64;
  KMEANS_DATA *kmeans_data;

  vp9_disable_segmentation(&cm->seg);
  if (cm->show_frame) {
    int mi_row, mi_col;
    cpi->kmeans_data_size = 0;
    cpi->kmeans_ctr_num = 8;

    for (mi_row = 0; mi_row < cm->mi_rows; mi_row += MI_BLOCK_SIZE) {
      for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
        int mb_row_start = mi_row >> 1;
        int mb_col_start = mi_col >> 1;
        int mb_row_end = VPXMIN(
            (mi_row + num_8x8_blocks_high_lookup[bsize]) >> 1, cm->mb_rows);
        int mb_col_end = VPXMIN(
            (mi_col + num_8x8_blocks_wide_lookup[bsize]) >> 1, cm->mb_cols);
        int row, col;
        int64_t wiener_variance = 0;

        for (row = mb_row_start; row < mb_row_end; ++row)
          for (col = mb_col_start; col < mb_col_end; ++col)
            wiener_variance += cpi->mb_wiener_variance[row * cm->mb_cols + col];

        wiener_variance /=
            (mb_row_end - mb_row_start) * (mb_col_end - mb_col_start);

#if CONFIG_MULTITHREAD
        pthread_mutex_lock(&cpi->kmeans_mutex);
#endif  // CONFIG_MULTITHREAD

        kmeans_data = &cpi->kmeans_data_arr[cpi->kmeans_data_size++];
        kmeans_data->value = log_wiener_var(wiener_variance);
        kmeans_data->pos = mi_row * cpi->kmeans_data_stride + mi_col;
#if CONFIG_MULTITHREAD
        pthread_mutex_unlock(&cpi->kmeans_mutex);
#endif  // CONFIG_MULTITHREAD
      }
    }

    vp9_kmeans(cpi->kmeans_ctr_ls, cpi->kmeans_boundary_ls,
               cpi->kmeans_count_ls, cpi->kmeans_ctr_num, cpi->kmeans_data_arr,
               cpi->kmeans_data_size);

    vp9_perceptual_aq_mode_setup(cpi, &cm->seg);
  }
}

#if !CONFIG_REALTIME_ONLY
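// Pick the segment id of a block as the majority vote over the k-means group
// indices of the 16x16 macroblocks it covers.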
static int wiener_var_segment(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row,
                              int mi_col) {
  VP9_COMMON *cm = &cpi->common;
  int mb_row_start = mi_row >> 1;
  int mb_col_start = mi_col >> 1;
  int mb_row_end =
      VPXMIN((mi_row + num_8x8_blocks_high_lookup[bsize]) >> 1, cm->mb_rows);
  int mb_col_end =
      VPXMIN((mi_col + num_8x8_blocks_wide_lookup[bsize]) >> 1, cm->mb_cols);
  int row, col, idx;
  int64_t wiener_variance = 0;
  int segment_id;
  int8_t seg_hist[MAX_SEGMENTS] = { 0 };
  int8_t max_count = 0, max_index = -1;

  vpx_clear_system_state();

  assert(cpi->norm_wiener_variance > 0);

  for (row = mb_row_start; row < mb_row_end; ++row) {
    for (col = mb_col_start; col < mb_col_end; ++col) {
      wiener_variance = cpi->mb_wiener_variance[row * cm->mb_cols + col];
      segment_id =
          vp9_get_group_idx(log_wiener_var(wiener_variance),
                            cpi->kmeans_boundary_ls, cpi->kmeans_ctr_num);
      ++seg_hist[segment_id];
    }
  }

  for (idx = 0; idx < cpi->kmeans_ctr_num; ++idx) {
    if (seg_hist[idx] > max_count) {
      max_count = seg_hist[idx];
      max_index = idx;
    }
  }

  assert(max_index >= 0);
  segment_id = max_index;

  return segment_id;
}
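
// Scale the base rdmult using the TPL (temporal dependency model) stats: rk is
// this block's intra-cost / mc-dep-cost ratio, r0 the frame-level ratio stored
// when the TPL stats were built, and beta = r0 / rk drives
// vp9_get_adaptive_rdmult(). The result is clamped to
// [orig_rdmult / 2, orig_rdmult * 3 / 2].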
static int get_rdmult_delta(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row,
                            int mi_col, int orig_rdmult) {
  const int gf_group_index = cpi->twopass.gf_group.index;
  TplDepFrame *tpl_frame = &cpi->tpl_stats[gf_group_index];
  TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
  int tpl_stride = tpl_frame->stride;
  int64_t intra_cost = 0;
  int64_t mc_dep_cost = 0;
  int mi_wide = num_8x8_blocks_wide_lookup[bsize];
  int mi_high = num_8x8_blocks_high_lookup[bsize];
  int row, col;

  int dr = 0;

  double r0, rk, beta;

  if (tpl_frame->is_valid == 0) return orig_rdmult;

  if (cpi->twopass.gf_group.layer_depth[gf_group_index] > 1) return orig_rdmult;

  if (gf_group_index >= MAX_ARF_GOP_SIZE) return orig_rdmult;

  for (row = mi_row; row < mi_row + mi_high; ++row) {
    for (col = mi_col; col < mi_col + mi_wide; ++col) {
      TplDepStats *this_stats = &tpl_stats[row * tpl_stride + col];

      if (row >= cpi->common.mi_rows || col >= cpi->common.mi_cols) continue;

      intra_cost += this_stats->intra_cost;
      mc_dep_cost += this_stats->mc_dep_cost;
    }
  }

  vpx_clear_system_state();

  r0 = cpi->rd.r0;
  rk = (double)intra_cost / mc_dep_cost;
  beta = r0 / rk;
  dr = vp9_get_adaptive_rdmult(cpi, beta);

  dr = VPXMIN(dr, orig_rdmult * 3 / 2);
  dr = VPXMAX(dr, orig_rdmult * 1 / 2);

  return dr;
}
#endif  // !CONFIG_REALTIME_ONLY

#if CONFIG_RATE_CTRL
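// The helpers below export the chosen partitioning and motion vectors of each
// coded superblock at 4x4-block granularity for the external rate-control
// API. Positions and sizes are stored in pixel units, hence the << 2 shifts.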
static void assign_partition_info(
    const int row_start_4x4, const int col_start_4x4, const int block_width_4x4,
    const int block_height_4x4, const int num_unit_rows,
    const int num_unit_cols, PARTITION_INFO *partition_info) {
  int i, j;
  for (i = 0; i < block_height_4x4; ++i) {
    for (j = 0; j < block_width_4x4; ++j) {
      const int row_4x4 = row_start_4x4 + i;
      const int col_4x4 = col_start_4x4 + j;
      const int unit_index = row_4x4 * num_unit_cols + col_4x4;
      if (row_4x4 >= num_unit_rows || col_4x4 >= num_unit_cols) continue;
      partition_info[unit_index].row = row_4x4 << 2;
      partition_info[unit_index].column = col_4x4 << 2;
      partition_info[unit_index].row_start = row_start_4x4 << 2;
      partition_info[unit_index].column_start = col_start_4x4 << 2;
      partition_info[unit_index].width = block_width_4x4 << 2;
      partition_info[unit_index].height = block_height_4x4 << 2;
    }
  }
}

static void assign_motion_vector_info(const int block_width_4x4,
                                      const int block_height_4x4,
                                      const int row_start_4x4,
                                      const int col_start_4x4,
                                      const int num_unit_rows,
                                      const int num_unit_cols, MV *source_mv[2],
                                      MV_REFERENCE_FRAME source_ref_frame[2],
                                      MOTION_VECTOR_INFO *motion_vector_info) {
  int i, j;
  for (i = 0; i < block_height_4x4; ++i) {
    for (j = 0; j < block_width_4x4; ++j) {
      const int row_4x4 = row_start_4x4 + i;
      const int col_4x4 = col_start_4x4 + j;
      const int unit_index = row_4x4 * num_unit_cols + col_4x4;
      if (row_4x4 >= num_unit_rows || col_4x4 >= num_unit_cols) continue;
      if (source_ref_frame[1] == NONE) {
        assert(source_mv[1]->row == 0 && source_mv[1]->col == 0);
      }
      motion_vector_info[unit_index].ref_frame[0] = source_ref_frame[0];
      motion_vector_info[unit_index].ref_frame[1] = source_ref_frame[1];
      motion_vector_info[unit_index].mv[0].as_mv.row = source_mv[0]->row;
      motion_vector_info[unit_index].mv[0].as_mv.col = source_mv[0]->col;
      motion_vector_info[unit_index].mv[1].as_mv.row = source_mv[1]->row;
      motion_vector_info[unit_index].mv[1].as_mv.col = source_mv[1]->col;
    }
  }
}
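
// Traverse the PC_TREE of a coded 64x64 superblock and record partition and
// motion-vector info for every 4x4 unit. square_size_4x4 is the width of the
// current (square) node in 4x4 units; leaf nodes (NONE / HORZ / VERT, or a
// fully split 4x4) are written out, while SPLIT recurses into its quadrants.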
static void store_superblock_info(
    const PC_TREE *const pc_tree, MODE_INFO **mi_grid_visible,
    const int mi_stride, const int square_size_4x4, const int num_unit_rows,
    const int num_unit_cols, const int row_start_4x4, const int col_start_4x4,
    PARTITION_INFO *partition_info, MOTION_VECTOR_INFO *motion_vector_info) {
  const int subblock_square_size_4x4 = square_size_4x4 >> 1;
  if (row_start_4x4 >= num_unit_rows || col_start_4x4 >= num_unit_cols) return;
  assert(pc_tree->partitioning != PARTITION_INVALID);
  // End node, no split.
  if (pc_tree->partitioning == PARTITION_NONE ||
      pc_tree->partitioning == PARTITION_HORZ ||
      pc_tree->partitioning == PARTITION_VERT || square_size_4x4 == 1) {
    const int mi_row = row_start_4x4 >> 1;
    const int mi_col = col_start_4x4 >> 1;
    const int mi_idx = mi_stride * mi_row + mi_col;
    MODE_INFO **mi = mi_grid_visible + mi_idx;
    MV *source_mv[2];
    MV_REFERENCE_FRAME source_ref_frame[2];

    // partition info
    const int block_width_4x4 = (pc_tree->partitioning == PARTITION_VERT)
                                    ? square_size_4x4 >> 1
                                    : square_size_4x4;
    const int block_height_4x4 = (pc_tree->partitioning == PARTITION_HORZ)
                                     ? square_size_4x4 >> 1
                                     : square_size_4x4;
    assign_partition_info(row_start_4x4, col_start_4x4, block_width_4x4,
                          block_height_4x4, num_unit_rows, num_unit_cols,
                          partition_info);
    if (pc_tree->partitioning == PARTITION_VERT) {
      assign_partition_info(row_start_4x4, col_start_4x4 + block_width_4x4,
                            block_width_4x4, block_height_4x4, num_unit_rows,
                            num_unit_cols, partition_info);
    } else if (pc_tree->partitioning == PARTITION_HORZ) {
      assign_partition_info(row_start_4x4 + block_height_4x4, col_start_4x4,
                            block_width_4x4, block_height_4x4, num_unit_rows,
                            num_unit_cols, partition_info);
    }

    // motion vector info
    if (pc_tree->partitioning == PARTITION_HORZ) {
      int is_valid_second_rectangle = 0;
      assert(square_size_4x4 > 1);
      // First rectangle.
      source_ref_frame[0] = mi[0]->ref_frame[0];
      source_ref_frame[1] = mi[0]->ref_frame[1];
      source_mv[0] = &mi[0]->mv[0].as_mv;
      source_mv[1] = &mi[0]->mv[1].as_mv;
      assign_motion_vector_info(block_width_4x4, block_height_4x4,
                                row_start_4x4, col_start_4x4, num_unit_rows,
                                num_unit_cols, source_mv, source_ref_frame,
                                motion_vector_info);
      // Second rectangle.
      if (square_size_4x4 == 2) {
        // Inside an 8x8 block, the second rectangle's motion comes from the
        // sub-8x8 block mode info (bmi) of the same MODE_INFO.
        is_valid_second_rectangle = 1;
        source_ref_frame[0] = mi[0]->ref_frame[0];
        source_ref_frame[1] = mi[0]->ref_frame[1];
        source_mv[0] = &mi[0]->bmi[2].as_mv[0].as_mv;
        source_mv[1] = &mi[0]->bmi[2].as_mv[1].as_mv;
      } else {
        const int mi_row_2 = mi_row + (block_height_4x4 >> 1);
        const int mi_col_2 = mi_col;
        if (mi_row_2 * 2 < num_unit_rows && mi_col_2 * 2 < num_unit_cols) {
          const int mi_idx_2 = mi_stride * mi_row_2 + mi_col_2;
          is_valid_second_rectangle = 1;
          mi = mi_grid_visible + mi_idx_2;
          source_ref_frame[0] = mi[0]->ref_frame[0];
          source_ref_frame[1] = mi[0]->ref_frame[1];
          source_mv[0] = &mi[0]->mv[0].as_mv;
          source_mv[1] = &mi[0]->mv[1].as_mv;
        }
      }
      if (is_valid_second_rectangle) {
        assign_motion_vector_info(
            block_width_4x4, block_height_4x4, row_start_4x4 + block_height_4x4,
            col_start_4x4, num_unit_rows, num_unit_cols, source_mv,
            source_ref_frame, motion_vector_info);
      }
    } else if (pc_tree->partitioning == PARTITION_VERT) {
      int is_valid_second_rectangle = 0;
      assert(square_size_4x4 > 1);
      // First rectangle.
      source_ref_frame[0] = mi[0]->ref_frame[0];
      source_ref_frame[1] = mi[0]->ref_frame[1];
      source_mv[0] = &mi[0]->mv[0].as_mv;
      source_mv[1] = &mi[0]->mv[1].as_mv;
      assign_motion_vector_info(block_width_4x4, block_height_4x4,
                                row_start_4x4, col_start_4x4, num_unit_rows,
                                num_unit_cols, source_mv, source_ref_frame,
                                motion_vector_info);
      // Second rectangle.
      if (square_size_4x4 == 2) {
        is_valid_second_rectangle = 1;
        source_ref_frame[0] = mi[0]->ref_frame[0];
        source_ref_frame[1] = mi[0]->ref_frame[1];
        source_mv[0] = &mi[0]->bmi[1].as_mv[0].as_mv;
        source_mv[1] = &mi[0]->bmi[1].as_mv[1].as_mv;
      } else {
        const int mi_row_2 = mi_row;
        const int mi_col_2 = mi_col + (block_width_4x4 >> 1);
        if (mi_row_2 * 2 < num_unit_rows && mi_col_2 * 2 < num_unit_cols) {
          const int mi_idx_2 = mi_stride * mi_row_2 + mi_col_2;
          is_valid_second_rectangle = 1;
          mi = mi_grid_visible + mi_idx_2;
          source_ref_frame[0] = mi[0]->ref_frame[0];
          source_ref_frame[1] = mi[0]->ref_frame[1];
          source_mv[0] = &mi[0]->mv[0].as_mv;
          source_mv[1] = &mi[0]->mv[1].as_mv;
        }
      }
      if (is_valid_second_rectangle) {
        assign_motion_vector_info(
            block_width_4x4, block_height_4x4, row_start_4x4,
            col_start_4x4 + block_width_4x4, num_unit_rows, num_unit_cols,
            source_mv, source_ref_frame, motion_vector_info);
      }
    } else {
      assert(pc_tree->partitioning == PARTITION_NONE || square_size_4x4 == 1);
      source_ref_frame[0] = mi[0]->ref_frame[0];
      source_ref_frame[1] = mi[0]->ref_frame[1];
      if (square_size_4x4 == 1) {
        const int sub8x8_row = row_start_4x4 % 2;
        const int sub8x8_col = col_start_4x4 % 2;
        const int sub8x8_idx = sub8x8_row * 2 + sub8x8_col;
        source_mv[0] = &mi[0]->bmi[sub8x8_idx].as_mv[0].as_mv;
        source_mv[1] = &mi[0]->bmi[sub8x8_idx].as_mv[1].as_mv;
      } else {
        source_mv[0] = &mi[0]->mv[0].as_mv;
        source_mv[1] = &mi[0]->mv[1].as_mv;
      }
      assign_motion_vector_info(block_width_4x4, block_height_4x4,
                                row_start_4x4, col_start_4x4, num_unit_rows,
                                num_unit_cols, source_mv, source_ref_frame,
                                motion_vector_info);
    }
    return;
  }

  // recursively traverse partition tree when partition is split.
  assert(pc_tree->partitioning == PARTITION_SPLIT);
  store_superblock_info(pc_tree->split[0], mi_grid_visible, mi_stride,
                        subblock_square_size_4x4, num_unit_rows, num_unit_cols,
                        row_start_4x4, col_start_4x4, partition_info,
                        motion_vector_info);
  store_superblock_info(pc_tree->split[1], mi_grid_visible, mi_stride,
                        subblock_square_size_4x4, num_unit_rows, num_unit_cols,
                        row_start_4x4, col_start_4x4 + subblock_square_size_4x4,
                        partition_info, motion_vector_info);
  store_superblock_info(pc_tree->split[2], mi_grid_visible, mi_stride,
                        subblock_square_size_4x4, num_unit_rows, num_unit_cols,
                        row_start_4x4 + subblock_square_size_4x4, col_start_4x4,
                        partition_info, motion_vector_info);
  store_superblock_info(pc_tree->split[3], mi_grid_visible, mi_stride,
                        subblock_square_size_4x4, num_unit_rows, num_unit_cols,
                        row_start_4x4 + subblock_square_size_4x4,
                        col_start_4x4 + subblock_square_size_4x4,
                        partition_info, motion_vector_info);
}
#endif  // CONFIG_RATE_CTRL

#if !CONFIG_REALTIME_ONLY
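// Recursive RD-based partition search. Partition types are evaluated in the
// order NONE -> SPLIT -> HORZ -> VERT, with best_rdc threaded through as a
// running threshold so later candidates can terminate early. Returns nonzero
// if this subtree found (and encoded) a best-so-far RD cost.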
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static int rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
                             TileDataEnc *tile_data, TOKENEXTRA **tp,
                             int mi_row, int mi_col, BLOCK_SIZE bsize,
                             RD_COST *rd_cost, RD_COST best_rdc,
                             PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  const VP9EncoderConfig *const oxcf = &cpi->oxcf;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *const ctx = &pc_tree->none;
  int i;
  const int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  BLOCK_SIZE subsize;
  RD_COST this_rdc, sum_rdc;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  INTERP_FILTER pred_interp_filter;

  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
  const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;

  BLOCK_SIZE min_size = x->min_partition_size;
  BLOCK_SIZE max_size = x->max_partition_size;
  int must_split = 0;

#if CONFIG_FP_MB_STATS
  unsigned int src_diff_var = UINT_MAX;
  int none_complexity = 0;
#endif

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed =
      !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
  int partition_vert_allowed =
      !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;

  int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_thr.dist;
  int rate_breakout_thr = cpi->sf.partition_search_breakout_thr.rate;

  int should_encode_sb = 0;

  // Ref frames picked in the [i_th] quarter subblock during square partition
  // RD search. It may be used to prune ref frame selection of rect partitions.
  uint8_t ref_frames_used[4] = { 0, 0, 0, 0 };

  int partition_mul = x->cb_rdmult;

  (void)*tp_orig;

  assert(num_8x8_blocks_wide_lookup[bsize] ==
         num_8x8_blocks_high_lookup[bsize]);

  dist_breakout_thr >>=
      8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);

  rate_breakout_thr *= num_pels_log2_lookup[bsize];

  vp9_rd_cost_init(&this_rdc);
  vp9_rd_cost_init(&sum_rdc);

  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);

  if (oxcf->tuning == VP8_TUNE_SSIM) {
    set_ssim_rdmult(cpi, x, bsize, mi_row, mi_col, &partition_mul);
  }
  vp9_rd_cost_update(partition_mul, x->rddiv, &best_rdc);

  if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode != NO_AQ &&
      cpi->oxcf.aq_mode != LOOKAHEAD_AQ)
    x->mb_energy = vp9_block_energy(cpi, x, bsize);

  if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
    int cb_partition_search_ctrl =
        ((pc_tree->index == 0 || pc_tree->index == 3) +
         get_chessboard_index(cm->current_video_frame)) &
        0x1;

    if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
      set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
  }

  // Get sub block energy range
  if (bsize >= BLOCK_16X16) {
    int min_energy, max_energy;
    vp9_get_sub_block_energy(cpi, x, mi_row, mi_col, bsize, &min_energy,
                             &max_energy);
    must_split = (min_energy < -3) && (max_energy - min_energy > 2);
  }

  // Determine partition types in search according to the speed features.
  // The threshold set here has to be of square block size.
  if (cpi->sf.auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= max_size);
    partition_horz_allowed &=
        ((bsize <= max_size && bsize > min_size) || force_horz_split);
    partition_vert_allowed &=
        ((bsize <= max_size && bsize > min_size) || force_vert_split);
    do_split &= bsize > min_size;
  }

  if (cpi->sf.use_square_partition_only &&
      (bsize > cpi->sf.use_square_only_thresh_high ||
       bsize < cpi->sf.use_square_only_thresh_low)) {
    if (cpi->use_svc) {
      if (!vp9_active_h_edge(cpi, mi_row, mi_step) || x->e_mbd.lossless)
        partition_horz_allowed &= force_horz_split;
      if (!vp9_active_v_edge(cpi, mi_row, mi_step) || x->e_mbd.lossless)
        partition_vert_allowed &= force_vert_split;
    } else {
      partition_horz_allowed &= force_horz_split;
      partition_vert_allowed &= force_vert_split;
    }
  }

  save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);

#if CONFIG_FP_MB_STATS
  if (cpi->use_fp_mb_stats) {
    set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
    src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row,
                                                  mi_col, bsize);
  }
#endif

#if CONFIG_FP_MB_STATS
  // Decide whether we shall split directly and skip searching NONE by using
  // the first pass block statistics
  if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
      partition_none_allowed && src_diff_var > 4 &&
      cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
    int mb_row = mi_row >> 1;
    int mb_col = mi_col >> 1;
    int mb_row_end =
        VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
    int mb_col_end =
        VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
    int r, c;

    // compute a complexity measure, basically measure inconsistency of motion
    // vectors obtained from the first pass in the current block
    for (r = mb_row; r < mb_row_end; r++) {
      for (c = mb_col; c < mb_col_end; c++) {
        const int mb_index = r * cm->mb_cols + c;

        MOTION_DIRECTION this_mv;
        MOTION_DIRECTION right_mv;
        MOTION_DIRECTION bottom_mv;

        this_mv =
            get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);

        // to its right
        if (c != mb_col_end - 1) {
          right_mv = get_motion_direction_fp(
              cpi->twopass.this_frame_mb_stats[mb_index + 1]);
          none_complexity += get_motion_inconsistency(this_mv, right_mv);
        }

        // to its bottom
        if (r != mb_row_end - 1) {
          bottom_mv = get_motion_direction_fp(
              cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
          none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
        }

        // do not count its left and top neighbors to avoid double counting
      }
    }

    if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
      partition_none_allowed = 0;
    }
  }
#endif

  pc_tree->partitioning = PARTITION_NONE;

  if (cpi->sf.rd_ml_partition.var_pruning && !frame_is_intra_only(cm)) {
    const int do_rd_ml_partition_var_pruning =
        partition_none_allowed && do_split &&
        mi_row + num_8x8_blocks_high_lookup[bsize] <= cm->mi_rows &&
        mi_col + num_8x8_blocks_wide_lookup[bsize] <= cm->mi_cols;
    if (do_rd_ml_partition_var_pruning) {
      ml_predict_var_rd_paritioning(cpi, x, pc_tree, bsize, mi_row, mi_col,
                                    &partition_none_allowed, &do_split);
    } else {
      vp9_zero(pc_tree->mv);
    }
    if (bsize > BLOCK_8X8) {  // Store MV result as reference for subblocks.
      for (i = 0; i < 4; ++i) pc_tree->split[i]->mv = pc_tree->mv;
    }
  }

  // PARTITION_NONE
  if (partition_none_allowed) {
    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, ctx,
                     best_rdc.rate, best_rdc.dist);
    ctx->rdcost = this_rdc.rdcost;
    if (this_rdc.rate != INT_MAX) {
      if (cpi->sf.prune_ref_frame_for_rect_partitions) {
        const int ref1 = ctx->mic.ref_frame[0];
        const int ref2 = ctx->mic.ref_frame[1];
        for (i = 0; i < 4; ++i) {
          ref_frames_used[i] |= (1 << ref1);
          if (ref2 > 0) ref_frames_used[i] |= (1 << ref2);
        }
      }
      if (bsize >= BLOCK_8X8) {
        this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
        vp9_rd_cost_update(partition_mul, x->rddiv, &this_rdc);
      }

      if (this_rdc.rdcost < best_rdc.rdcost) {
        MODE_INFO *mi = xd->mi[0];

        best_rdc = this_rdc;
        should_encode_sb = 1;
        if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;

        if (cpi->sf.rd_ml_partition.search_early_termination) {
          // Currently, the machine-learning based partition search early
          // termination is only used while bsize is 16x16, 32x32 or 64x64,
          // VPXMIN(cm->width, cm->height) >= 480, and speed = 0.
          if (!x->e_mbd.lossless &&
              !segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP) &&
              ctx->mic.mode >= INTRA_MODES && bsize >= BLOCK_16X16) {
            if (ml_pruning_partition(cm, xd, ctx, mi_row, mi_col, bsize)) {
              do_split = 0;
              do_rect = 0;
            }
          }
        }

        if ((do_split || do_rect) && !x->e_mbd.lossless && ctx->skippable) {
          const int use_ml_based_breakout =
              cpi->sf.rd_ml_partition.search_breakout && cm->base_qindex >= 100;
          if (use_ml_based_breakout) {
            if (ml_predict_breakout(cpi, bsize, x, &this_rdc)) {
              do_split = 0;
              do_rect = 0;
            }
          }

          if (!cpi->sf.rd_ml_partition.search_early_termination) {
            if ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
                (best_rdc.dist < dist_breakout_thr &&
                 best_rdc.rate < rate_breakout_thr)) {
              do_split = 0;
              do_rect = 0;
            }
          }
        }
      }

#if CONFIG_FP_MB_STATS
      // Check if every 16x16 first pass block statistics has zero
      // motion and the corresponding first pass residue is small enough.
      // If that is the case, check the difference variance between the
      // current frame and the last frame. If the variance is small enough,
      // stop further splitting in RD optimization
      if (cpi->use_fp_mb_stats && do_split != 0 &&
          cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
        int mb_row = mi_row >> 1;
        int mb_col = mi_col >> 1;
        int mb_row_end =
            VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
        int mb_col_end =
            VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
        int r, c;

        int skip = 1;
        for (r = mb_row; r < mb_row_end; r++) {
          for (c = mb_col; c < mb_col_end; c++) {
            const int mb_index = r * cm->mb_cols + c;
            if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
                  FPMB_MOTION_ZERO_MASK) ||
                !(cpi->twopass.this_frame_mb_stats[mb_index] &
                  FPMB_ERROR_SMALL_MASK)) {
              skip = 0;
              break;
            }
          }
          if (skip == 0) {
            break;
          }
        }

        if (skip) {
          if (src_diff_var == UINT_MAX) {
            set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
            src_diff_var = get_sby_perpixel_diff_variance(
                cpi, &x->plane[0].src, mi_row, mi_col, bsize);
          }
          if (src_diff_var < 8) {
            do_split = 0;
            do_rect = 0;
          }
        }
      }
#endif
    }

    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  } else {
    vp9_zero(ctx->pred_mv);
    ctx->mic.interp_filter = EIGHTTAP;
  }

  // store estimated motion vector
  store_pred_mv(x, ctx);

  // If the interp_filter is marked as SWITCHABLE_FILTERS, it was for an
  // intra block and used for context purposes.
  if (ctx->mic.interp_filter == SWITCHABLE_FILTERS) {
    pred_interp_filter = EIGHTTAP;
  } else {
    pred_interp_filter = ctx->mic.interp_filter;
  }

  // PARTITION_SPLIT
  // TODO(jingning): use the motion vectors given by the above search as
  // the starting point of motion search in the following partition type check.
  pc_tree->split[0]->none.rdcost = 0;
  pc_tree->split[1]->none.rdcost = 0;
  pc_tree->split[2]->none.rdcost = 0;
  pc_tree->split[3]->none.rdcost = 0;
  if (do_split || must_split) {
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    load_pred_mv(x, ctx);
    if (bsize == BLOCK_8X8) {
      i = 4;
      if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
        pc_tree->leaf_split[0]->pred_interp_filter = pred_interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                       pc_tree->leaf_split[0], best_rdc.rate, best_rdc.dist);
      if (sum_rdc.rate == INT_MAX) {
        sum_rdc.rdcost = INT64_MAX;
      } else {
        if (cpi->sf.prune_ref_frame_for_rect_partitions) {
          const int ref1 = pc_tree->leaf_split[0]->mic.ref_frame[0];
          const int ref2 = pc_tree->leaf_split[0]->mic.ref_frame[1];
          for (i = 0; i < 4; ++i) {
            ref_frames_used[i] |= (1 << ref1);
            if (ref2 > 0) ref_frames_used[i] |= (1 << ref2);
          }
        }
      }
    } else {
      for (i = 0; (i < 4) && ((sum_rdc.rdcost < best_rdc.rdcost) || must_split);
           ++i) {
        const int x_idx = (i & 1) * mi_step;
        const int y_idx = (i >> 1) * mi_step;
        int found_best_rd = 0;
        RD_COST best_rdc_split;
        vp9_rd_cost_reset(&best_rdc_split);

        if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX) {
          // A must split test here increases the number of sub
          // partitions but hurts metrics results quite a bit,
          // so this extra test is commented out pending
          // further tests on whether it adds much in terms of
          // visual quality.
          // (must_split) ? best_rdc.rate
          //              : best_rdc.rate - sum_rdc.rate,
          // (must_split) ? best_rdc.dist
          //              : best_rdc.dist - sum_rdc.dist,
          best_rdc_split.rate = best_rdc.rate - sum_rdc.rate;
          best_rdc_split.dist = best_rdc.dist - sum_rdc.dist;
        }

        if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
          continue;

        pc_tree->split[i]->index = i;
        if (cpi->sf.prune_ref_frame_for_rect_partitions)
          pc_tree->split[i]->none.rate = INT_MAX;
        found_best_rd = rd_pick_partition(
            cpi, td, tile_data, tp, mi_row + y_idx, mi_col + x_idx, subsize,
            &this_rdc, best_rdc_split, pc_tree->split[i]);

        if (found_best_rd == 0) {
          sum_rdc.rdcost = INT64_MAX;
          break;
        } else {
          if (cpi->sf.prune_ref_frame_for_rect_partitions &&
              pc_tree->split[i]->none.rate != INT_MAX) {
            const int ref1 = pc_tree->split[i]->none.mic.ref_frame[0];
            const int ref2 = pc_tree->split[i]->none.mic.ref_frame[1];
            ref_frames_used[i] |= (1 << ref1);
            if (ref2 > 0) ref_frames_used[i] |= (1 << ref2);
          }
          sum_rdc.rate += this_rdc.rate;
          sum_rdc.dist += this_rdc.dist;
          vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
        }
      }
    }

    if (((sum_rdc.rdcost < best_rdc.rdcost) || must_split) && i == 4) {
      sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);

      if ((sum_rdc.rdcost < best_rdc.rdcost) ||
          (must_split && (sum_rdc.dist < best_rdc.dist))) {
        best_rdc = sum_rdc;
        should_encode_sb = 1;
        pc_tree->partitioning = PARTITION_SPLIT;

        // Rate and distortion based partition search termination clause.
        if (!cpi->sf.rd_ml_partition.search_early_termination &&
            !x->e_mbd.lossless &&
            ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
             (best_rdc.dist < dist_breakout_thr &&
              best_rdc.rate < rate_breakout_thr))) {
          do_rect = 0;
        }
      }
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check &&
          (bsize > cpi->sf.use_square_only_thresh_high ||
           best_rdc.dist < dist_breakout_thr))
        do_rect &= !partition_none_allowed;
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  pc_tree->horizontal[0].skip_ref_frame_mask = 0;
  pc_tree->horizontal[1].skip_ref_frame_mask = 0;
  pc_tree->vertical[0].skip_ref_frame_mask = 0;
  pc_tree->vertical[1].skip_ref_frame_mask = 0;
  if (cpi->sf.prune_ref_frame_for_rect_partitions) {
    uint8_t used_frames;
    used_frames = ref_frames_used[0] | ref_frames_used[1];
    if (used_frames) {
      pc_tree->horizontal[0].skip_ref_frame_mask = ~used_frames & 0xff;
    }
    used_frames = ref_frames_used[2] | ref_frames_used[3];
    if (used_frames) {
      pc_tree->horizontal[1].skip_ref_frame_mask = ~used_frames & 0xff;
    }
    used_frames = ref_frames_used[0] | ref_frames_used[2];
    if (used_frames) {
      pc_tree->vertical[0].skip_ref_frame_mask = ~used_frames & 0xff;
    }
    used_frames = ref_frames_used[1] | ref_frames_used[3];
    if (used_frames) {
      pc_tree->vertical[1].skip_ref_frame_mask = ~used_frames & 0xff;
    }
  }

  {
    const int do_ml_rect_partition_pruning =
        !frame_is_intra_only(cm) && !force_horz_split && !force_vert_split &&
        (partition_horz_allowed || partition_vert_allowed) && bsize > BLOCK_8X8;
    if (do_ml_rect_partition_pruning) {
      ml_prune_rect_partition(cpi, x, bsize, pc_tree, &partition_horz_allowed,
                              &partition_vert_allowed, best_rdc.rdcost);
    }
  }

  // PARTITION_HORZ
  if (partition_horz_allowed &&
      (do_rect || vp9_active_h_edge(cpi, mi_row, mi_step))) {
    const int part_mode_rate = cpi->partition_cost[pl][PARTITION_HORZ];
    subsize = get_subsize(bsize, PARTITION_HORZ);
    load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->horizontal[0].pred_interp_filter = pred_interp_filter;
    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                     &pc_tree->horizontal[0], best_rdc.rate - part_mode_rate,
                     best_rdc.dist);
    if (sum_rdc.rdcost < INT64_MAX) {
      sum_rdc.rate += part_mode_rate;
      vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
    }

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
        bsize > BLOCK_8X8) {
      PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
      update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->horizontal[1].pred_interp_filter = pred_interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc,
                       subsize, &pc_tree->horizontal[1],
                       best_rdc.rate - sum_rdc.rate,
                       best_rdc.dist - sum_rdc.dist);
      if (this_rdc.rate == INT_MAX) {
        sum_rdc.rdcost = INT64_MAX;
      } else {
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      should_encode_sb = 1;
      pc_tree->partitioning = PARTITION_HORZ;

      if (cpi->sf.less_rectangular_check &&
          bsize > cpi->sf.use_square_only_thresh_high)
        do_rect = 0;
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // PARTITION_VERT
  if (partition_vert_allowed &&
      (do_rect || vp9_active_v_edge(cpi, mi_col, mi_step))) {
    const int part_mode_rate = cpi->partition_cost[pl][PARTITION_VERT];
    subsize = get_subsize(bsize, PARTITION_VERT);
    load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->vertical[0].pred_interp_filter = pred_interp_filter;
    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                     &pc_tree->vertical[0], best_rdc.rate - part_mode_rate,
                     best_rdc.dist);
    if (sum_rdc.rdcost < INT64_MAX) {
      sum_rdc.rate += part_mode_rate;
      vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
    }

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
        bsize > BLOCK_8X8) {
      update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
                        &pc_tree->vertical[0]);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->vertical[1].pred_interp_filter = pred_interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc,
                       subsize, &pc_tree->vertical[1],
                       best_rdc.rate - sum_rdc.rate,
                       best_rdc.dist - sum_rdc.dist);
      if (this_rdc.rate == INT_MAX) {
        sum_rdc.rdcost = INT64_MAX;
      } else {
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      should_encode_sb = 1;
      pc_tree->partitioning = PARTITION_VERT;
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  *rd_cost = best_rdc;

  if (should_encode_sb && pc_tree->index != 3) {
    int output_enabled = (bsize == BLOCK_64X64);
    encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
              pc_tree);
#if CONFIG_RATE_CTRL
    // Store partition, motion vector of the superblock.
    if (output_enabled) {
      const int num_unit_rows = get_num_unit_4x4(cpi->frame_info.frame_height);
      const int num_unit_cols = get_num_unit_4x4(cpi->frame_info.frame_width);
      store_superblock_info(pc_tree, cm->mi_grid_visible, cm->mi_stride,
                            num_4x4_blocks_wide_lookup[BLOCK_64X64],
                            num_unit_rows, num_unit_cols, mi_row << 1,
                            mi_col << 1, cpi->partition_info,
                            cpi->motion_vector_info);
    }
#endif  // CONFIG_RATE_CTRL
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rdc.rate < INT_MAX);
    assert(best_rdc.dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }

  return should_encode_sb;
}
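
// Encode one 64x64 superblock row of a tile with the RD partition search. The
// row_mt_sync read/write hooks keep multi-threaded row encoding in lockstep
// with the row above.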

static void encode_rd_sb_row(VP9_COMP *cpi, ThreadData *td,
                             TileDataEnc *tile_data, int mi_row,
                             TOKENEXTRA **tp) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int mi_col_start = tile_info->mi_col_start;
  const int mi_col_end = tile_info->mi_col_end;
  int mi_col;
  const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
  const int num_sb_cols =
      get_num_cols(tile_data->tile_info, MI_BLOCK_SIZE_LOG2);
  int sb_col_in_tile;

  // Initialize the left context for the new SB row
  memset(&xd->left_context, 0, sizeof(xd->left_context));
  memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = mi_col_start, sb_col_in_tile = 0; mi_col < mi_col_end;
       mi_col += MI_BLOCK_SIZE, sb_col_in_tile++) {
    const struct segmentation *const seg = &cm->seg;
    int dummy_rate;
    int64_t dummy_dist;
    RD_COST dummy_rdc;
    int i;
    int seg_skip = 0;
    int orig_rdmult = cpi->rd.RDMULT;

    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;

    vp9_rd_cost_reset(&dummy_rdc);
    (*(cpi->row_mt_sync_read_ptr))(&tile_data->row_mt_sync, sb_row,
                                   sb_col_in_tile);

    if (sf->adaptive_pred_interp_filter) {
      for (i = 0; i < 64; ++i) td->leaf_tree[i].pred_interp_filter = SWITCHABLE;

      for (i = 0; i < 64; ++i) {
        td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
      }
    }

    for (i = 0; i < MAX_REF_FRAMES; ++i) {
      x->pred_mv[i].row = INT16_MAX;
      x->pred_mv[i].col = INT16_MAX;
    }
    td->pc_root->index = 0;

    if (seg->enabled) {
      const uint8_t *const map =
          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
      int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
      seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
    }

    x->source_variance = UINT_MAX;

    x->cb_rdmult = orig_rdmult;

    if (sf->partition_search_type == FIXED_PARTITION || seg_skip) {
      const BLOCK_SIZE bsize =
          seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
      set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
      set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
                       &dummy_rate, &dummy_dist, 1, td->pc_root);
    } else if (cpi->partition_search_skippable_frame) {
      BLOCK_SIZE bsize;
      set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
      bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
      set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
                       &dummy_rate, &dummy_dist, 1, td->pc_root);
    } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
               cm->frame_type != KEY_FRAME) {
      choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
                       &dummy_rate, &dummy_dist, 1, td->pc_root);
    } else {
      if (cpi->twopass.gf_group.index > 0 && cpi->sf.enable_tpl_model) {
        int dr =
            get_rdmult_delta(cpi, BLOCK_64X64, mi_row, mi_col, orig_rdmult);
        x->cb_rdmult = dr;
      }

      if (cpi->oxcf.aq_mode == PERCEPTUAL_AQ && cm->show_frame) {
        x->segment_id = wiener_var_segment(cpi, BLOCK_64X64, mi_row, mi_col);
        x->cb_rdmult = vp9_compute_rd_mult(
            cpi, vp9_get_qindex(&cm->seg, x->segment_id, cm->base_qindex));
      }

      // If required set upper and lower partition size limits
      if (sf->auto_min_max_partition_size) {
        set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
        rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
                                &x->min_partition_size, &x->max_partition_size);
      }
      td->pc_root->none.rdcost = 0;
      rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
                        &dummy_rdc, dummy_rdc, td->pc_root);
    }
    (*(cpi->row_mt_sync_write_ptr))(&tile_data->row_mt_sync, sb_row,
                                    sb_col_in_tile, num_sb_cols);
  }
}
#endif  // !CONFIG_REALTIME_ONLY

static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->td.mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);

  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);

  vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(xd->above_context[0], 0,
         sizeof(*xd->above_context[0]) * 2 * aligned_mi_cols * MAX_MB_PLANE);
  memset(xd->above_seg_context, 0,
         sizeof(*xd->above_seg_context) * aligned_mi_cols);
}

static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) +
            !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}

static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
  int mi_row, mi_col;
  const int mis = cm->mi_stride;
  MODE_INFO **mi_ptr = cm->mi_grid_visible;

  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
      if (mi_ptr[mi_col]->tx_size > max_tx_size)
        mi_ptr[mi_col]->tx_size = max_tx_size;
    }
  }
}

static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return KEY_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return GOLDEN_FRAME;
  else
    return LAST_FRAME;
}

static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
  if (xd->lossless) return ONLY_4X4;
  if (cpi->common.frame_type == KEY_FRAME && cpi->sf.use_nonrd_pick_mode)
    return ALLOW_16X16;
  if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
    return ALLOW_32X32;
  else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
           cpi->sf.tx_size_search_method == USE_TX_8X8)
    return TX_MODE_SELECT;
  else
    return cpi->common.tx_mode;
}
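
// The hybrid_* helpers below are used on the non-RD (real-time) path: they
// fall back to the full RD intra search for small blocks or keyframe-like
// content, and otherwise use the fast non-RD mode picks.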
static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
                                     RD_COST *rd_cost, BLOCK_SIZE bsize,
                                     PICK_MODE_CONTEXT *ctx) {
  if (!cpi->sf.nonrd_keyframe && bsize < BLOCK_16X16)
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
  else
    vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
}

static void hybrid_search_svc_baseiskey(VP9_COMP *cpi, MACROBLOCK *const x,
                                        RD_COST *rd_cost, BLOCK_SIZE bsize,
                                        PICK_MODE_CONTEXT *ctx,
                                        TileDataEnc *tile_data, int mi_row,
                                        int mi_col) {
  if (!cpi->sf.nonrd_keyframe && bsize <= BLOCK_8X8) {
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
  } else {
    if (cpi->svc.disable_inter_layer_pred == INTER_LAYER_PRED_OFF)
      vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
    else if (bsize >= BLOCK_8X8)
      vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize,
                          ctx);
    else
      vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col, rd_cost, bsize, ctx);
  }
}

static void hybrid_search_scene_change(VP9_COMP *cpi, MACROBLOCK *const x,
                                       RD_COST *rd_cost, BLOCK_SIZE bsize,
                                       PICK_MODE_CONTEXT *ctx,
                                       TileDataEnc *tile_data, int mi_row,
                                       int mi_col) {
  if (!cpi->sf.nonrd_keyframe && bsize <= BLOCK_8X8) {
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
  } else {
    vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize, ctx);
  }
}

static void nonrd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data,
                                MACROBLOCK *const x, int mi_row, int mi_col,
                                RD_COST *rd_cost, BLOCK_SIZE bsize,
                                PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  BLOCK_SIZE bs = VPXMAX(bsize, BLOCK_8X8);  // processing unit block size
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bs];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bs];
  int plane;

  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);

  set_segment_index(cpi, x, mi_row, mi_col, bsize, 0);

  mi = xd->mi[0];
  mi->sb_type = bsize;

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    struct macroblockd_plane *pd = &xd->plane[plane];
    memcpy(a + num_4x4_blocks_wide * plane, pd->above_context,
           (sizeof(a[0]) * num_4x4_blocks_wide) >> pd->subsampling_x);
    memcpy(l + num_4x4_blocks_high * plane, pd->left_context,
           (sizeof(l[0]) * num_4x4_blocks_high) >> pd->subsampling_y);
  }

  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
    if (cyclic_refresh_segment_id_boosted(mi->segment_id))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);

  if (frame_is_intra_only(cm))
    hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
  else if (cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)
    hybrid_search_svc_baseiskey(cpi, x, rd_cost, bsize, ctx, tile_data, mi_row,
                                mi_col);
  else if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
    set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
  else if (bsize >= BLOCK_8X8) {
    if (cpi->rc.hybrid_intra_scene_change)
      hybrid_search_scene_change(cpi, x, rd_cost, bsize, ctx, tile_data, mi_row,
                                 mi_col);
    else
      vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize,
                          ctx);
  } else {
    vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col, rd_cost, bsize, ctx);
  }

  duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    struct macroblockd_plane *pd = &xd->plane[plane];
    memcpy(pd->above_context, a + num_4x4_blocks_wide * plane,
           (sizeof(a[0]) * num_4x4_blocks_wide) >> pd->subsampling_x);
    memcpy(pd->left_context, l + num_4x4_blocks_high * plane,
           (sizeof(l[0]) * num_4x4_blocks_high) >> pd->subsampling_y);
  }

  if (rd_cost->rate == INT_MAX) vp9_rd_cost_reset(rd_cost);

  ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}

static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
  MACROBLOCKD *xd = &x->e_mbd;
  int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition = pc_tree->partitioning;
  BLOCK_SIZE subsize = get_subsize(bsize, partition);

  assert(bsize >= BLOCK_8X8);

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  switch (partition) {
    case PARTITION_NONE:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->none.mic;
      *(x->mbmi_ext) = pc_tree->none.mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      break;
    case PARTITION_VERT:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->vertical[0].mic;
      *(x->mbmi_ext) = pc_tree->vertical[0].mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);

      if (mi_col + hbs < cm->mi_cols) {
        set_mode_info_offsets(cm, x, xd, mi_row, mi_col + hbs);
        *(xd->mi[0]) = pc_tree->vertical[1].mic;
        *(x->mbmi_ext) = pc_tree->vertical[1].mbmi_ext;
        duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
      }
      break;
    case PARTITION_HORZ:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->horizontal[0].mic;
      *(x->mbmi_ext) = pc_tree->horizontal[0].mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
      if (mi_row + hbs < cm->mi_rows) {
        set_mode_info_offsets(cm, x, xd, mi_row + hbs, mi_col);
        *(xd->mi[0]) = pc_tree->horizontal[1].mic;
        *(x->mbmi_ext) = pc_tree->horizontal[1].mbmi_ext;
        duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
      }
      break;
    case PARTITION_SPLIT: {
      fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
      fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
                        pc_tree->split[1]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
                        pc_tree->split[2]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
                        pc_tree->split[3]);
      break;
    }
    default: break;
  }
}

// Reset the prediction pixel ready flag recursively.
static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
  pc_tree->none.pred_pixel_ready = 0;
  pc_tree->horizontal[0].pred_pixel_ready = 0;
  pc_tree->horizontal[1].pred_pixel_ready = 0;
  pc_tree->vertical[0].pred_pixel_ready = 0;
  pc_tree->vertical[1].pred_pixel_ready = 0;

  if (bsize > BLOCK_8X8) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
    int i;
    for (i = 0; i < 4; ++i) pred_pixel_ready_reset(pc_tree->split[i], subsize);
  }
}
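
// Variance-based partition prediction for the non-RD path. Returns
// PARTITION_SPLIT or PARTITION_NONE when the model is confident, and -1
// (undecided) otherwise. Unlike the RD-path variant above, it reuses the
// estimated prediction in x->est_pred instead of running a motion search.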
#define FEATURES 6
#define LABELS 2
static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
                                      BLOCK_SIZE bsize, int mi_row,
                                      int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  const NN_CONFIG *nn_config = NULL;

  switch (bsize) {
    case BLOCK_64X64: nn_config = &vp9_var_part_nnconfig_64; break;
    case BLOCK_32X32: nn_config = &vp9_var_part_nnconfig_32; break;
    case BLOCK_16X16: nn_config = &vp9_var_part_nnconfig_16; break;
    case BLOCK_8X8: break;
    default: assert(0 && "Unexpected block size."); return -1;
  }

  if (!nn_config) return -1;

  vpx_clear_system_state();

  {
    const float thresh = cpi->oxcf.speed <= 5 ? 1.25f : 0.0f;
    float features[FEATURES] = { 0.0f };
    const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
    int feature_idx = 0;
    float score[LABELS];

    features[feature_idx++] = logf((float)(dc_q * dc_q) / 256.0f + 1.0f);
    vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
    {
      const int bs = 4 * num_4x4_blocks_wide_lookup[bsize];
      const BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
      const int sb_offset_row = 8 * (mi_row & 7);
      const int sb_offset_col = 8 * (mi_col & 7);
      const uint8_t *pred = x->est_pred + sb_offset_row * 64 + sb_offset_col;
      const uint8_t *src = x->plane[0].src.buf;
      const int src_stride = x->plane[0].src.stride;
      const int pred_stride = 64;
      unsigned int sse;
      int i;
      // Variance of whole block.
      const unsigned int var =
          cpi->fn_ptr[bsize].vf(src, src_stride, pred, pred_stride, &sse);
      const float factor = (var == 0) ? 1.0f : (1.0f / (float)var);

      features[feature_idx++] = logf((float)var + 1.0f);
      for (i = 0; i < 4; ++i) {
        const int x_idx = (i & 1) * bs / 2;
        const int y_idx = (i >> 1) * bs / 2;
        const int src_offset = y_idx * src_stride + x_idx;
        const int pred_offset = y_idx * pred_stride + x_idx;
        // Variance of quarter block.
        const unsigned int sub_var =
            cpi->fn_ptr[subsize].vf(src + src_offset, src_stride,
                                    pred + pred_offset, pred_stride, &sse);
        const float var_ratio = (var == 0) ? 1.0f : factor * (float)sub_var;
        features[feature_idx++] = var_ratio;
      }
    }

    assert(feature_idx == FEATURES);
    nn_predict(features, nn_config, score);
    if (score[0] > thresh) return PARTITION_SPLIT;
    if (score[0] < -thresh) return PARTITION_NONE;
    return -1;
  }
}
#undef FEATURES
#undef LABELS

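// Recursive partition search for the non-RD path. Mirrors rd_pick_partition()
// above but uses nonrd_pick_sb_modes() for mode decisions; at speed >= 6 the
// rectangular partition checks are skipped entirely.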
5051 static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
5052 TileDataEnc *tile_data, TOKENEXTRA **tp,
5053 int mi_row, int mi_col, BLOCK_SIZE bsize,
5054 RD_COST *rd_cost, int do_recon,
5055 int64_t best_rd, PC_TREE *pc_tree) {
5056 const SPEED_FEATURES *const sf = &cpi->sf;
5057 VP9_COMMON *const cm = &cpi->common;
5058 TileInfo *const tile_info = &tile_data->tile_info;
5059 MACROBLOCK *const x = &td->mb;
5060 MACROBLOCKD *const xd = &x->e_mbd;
5061 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
5062 TOKENEXTRA *tp_orig = *tp;
5063 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
5065 BLOCK_SIZE subsize = bsize;
5066 RD_COST this_rdc, sum_rdc, best_rdc;
5067 int do_split = bsize >= BLOCK_8X8;
5069 // Override skipping rectangular partition operations for edge blocks
5070 const int force_horz_split = (mi_row + ms >= cm->mi_rows);
5071 const int force_vert_split = (mi_col + ms >= cm->mi_cols);
5072 const int xss = x->e_mbd.plane[1].subsampling_x;
5073 const int yss = x->e_mbd.plane[1].subsampling_y;
5075 int partition_none_allowed = !force_horz_split && !force_vert_split;
5076 int partition_horz_allowed =
5077 !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
5078 int partition_vert_allowed =
5079 !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
5080 const int use_ml_based_partitioning =
5081 sf->partition_search_type == ML_BASED_PARTITION;
5085 // Avoid checking for rectangular partitions for speed >= 6.
5086 if (cpi->oxcf.speed >= 6) do_rect = 0;
5088 assert(num_8x8_blocks_wide_lookup[bsize] ==
5089 num_8x8_blocks_high_lookup[bsize]);
5091 vp9_rd_cost_init(&sum_rdc);
5092 vp9_rd_cost_reset(&best_rdc);
5093 best_rdc.rdcost = best_rd;
// Determine which partition types to search according to the speed features.
// The thresholds set here have to be square block sizes.
5097 if (sf->auto_min_max_partition_size) {
5098 partition_none_allowed &=
5099 (bsize <= x->max_partition_size && bsize >= x->min_partition_size);
5100 partition_horz_allowed &=
5101 ((bsize <= x->max_partition_size && bsize > x->min_partition_size) ||
5103 partition_vert_allowed &=
5104 ((bsize <= x->max_partition_size && bsize > x->min_partition_size) ||
5106 do_split &= bsize > x->min_partition_size;
5108 if (sf->use_square_partition_only) {
5109 partition_horz_allowed &= force_horz_split;
5110 partition_vert_allowed &= force_vert_split;
5113 if (use_ml_based_partitioning) {
5114 if (partition_none_allowed || do_split) do_rect = 0;
5115 if (partition_none_allowed && do_split) {
5116 const int ml_predicted_partition =
ml_predict_var_partitioning(cpi, x, bsize, mi_row, mi_col);
5118 if (ml_predicted_partition == PARTITION_NONE) do_split = 0;
5119 if (ml_predicted_partition == PARTITION_SPLIT) partition_none_allowed = 0;
5123 if (!partition_none_allowed && !do_split) do_rect = 1;
5125 ctx->pred_pixel_ready =
5126 !(partition_vert_allowed || partition_horz_allowed || do_split);
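// pred_pixel_ready marks that the prediction produced during the mode
// search will still be valid at encode time; encode_superblock skips
// vp9_build_inter_predictors_sby when reuse_inter_pred_sby and this flag
// are both set. It is only safe here when no horz/vert/split evaluation
// will run afterwards and overwrite those pixels.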
5129 if (partition_none_allowed) {
5130 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize,
5132 ctx->mic = *xd->mi[0];
5133 ctx->mbmi_ext = *x->mbmi_ext;
5134 ctx->skip_txfm[0] = x->skip_txfm[0];
5135 ctx->skip = x->skip;
5137 if (this_rdc.rate != INT_MAX) {
5138 const int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
5139 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
this_rdc.rdcost =
    RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
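// RDCOST folds the two objectives into one comparable number: a fixed-point
// weighted sum of rate (scaled by x->rdmult) and distortion (scaled by
// x->rddiv), lower being better. The partition signaling cost is added to
// the rate first so that the overhead of each tree shape is accounted for.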
5142 if (this_rdc.rdcost < best_rdc.rdcost) {
5143 best_rdc = this_rdc;
5144 if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
5146 if (!use_ml_based_partitioning) {
5147 int64_t dist_breakout_thr = sf->partition_search_breakout_thr.dist;
5148 int64_t rate_breakout_thr = sf->partition_search_breakout_thr.rate;
5149 dist_breakout_thr >>=
5150 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
5151 rate_breakout_thr *= num_pels_log2_lookup[bsize];
5152 if (!x->e_mbd.lossless && this_rdc.rate < rate_breakout_thr &&
5153 this_rdc.dist < dist_breakout_thr) {
5162 // store estimated motion vector
5163 store_pred_mv(x, ctx);
5167 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
5168 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
5169 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
5170 subsize = get_subsize(bsize, PARTITION_SPLIT);
5171 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
5172 const int x_idx = (i & 1) * ms;
5173 const int y_idx = (i >> 1) * ms;
5175 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
5177 load_pred_mv(x, ctx);
5178 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
5179 mi_col + x_idx, subsize, &this_rdc, 0,
5180 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
5182 if (this_rdc.rate == INT_MAX) {
5183 vp9_rd_cost_reset(&sum_rdc);
5185 sum_rdc.rate += this_rdc.rate;
5186 sum_rdc.dist += this_rdc.dist;
5187 sum_rdc.rdcost += this_rdc.rdcost;
5191 if (sum_rdc.rdcost < best_rdc.rdcost) {
5193 pc_tree->partitioning = PARTITION_SPLIT;
5195 // skip rectangular partition test when larger block size
5196 // gives better rd cost
5197 if (sf->less_rectangular_check) do_rect &= !partition_none_allowed;
5202 if (partition_horz_allowed && do_rect) {
5203 subsize = get_subsize(bsize, PARTITION_HORZ);
5204 load_pred_mv(x, ctx);
5205 pc_tree->horizontal[0].pred_pixel_ready = 1;
5206 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
5207 &pc_tree->horizontal[0]);
5209 pc_tree->horizontal[0].mic = *xd->mi[0];
5210 pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
5211 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
5212 pc_tree->horizontal[0].skip = x->skip;
5214 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
5215 load_pred_mv(x, ctx);
5216 pc_tree->horizontal[1].pred_pixel_ready = 1;
5217 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col, &this_rdc,
5218 subsize, &pc_tree->horizontal[1]);
5220 pc_tree->horizontal[1].mic = *xd->mi[0];
5221 pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
5222 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
5223 pc_tree->horizontal[1].skip = x->skip;
5225 if (this_rdc.rate == INT_MAX) {
5226 vp9_rd_cost_reset(&sum_rdc);
5228 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
5229 this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
5230 sum_rdc.rate += this_rdc.rate;
5231 sum_rdc.dist += this_rdc.dist;
sum_rdc.rdcost =
    RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
5237 if (sum_rdc.rdcost < best_rdc.rdcost) {
5239 pc_tree->partitioning = PARTITION_HORZ;
5241 pred_pixel_ready_reset(pc_tree, bsize);
5246 if (partition_vert_allowed && do_rect) {
5247 subsize = get_subsize(bsize, PARTITION_VERT);
5248 load_pred_mv(x, ctx);
5249 pc_tree->vertical[0].pred_pixel_ready = 1;
5250 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
5251 &pc_tree->vertical[0]);
5252 pc_tree->vertical[0].mic = *xd->mi[0];
5253 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
5254 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
5255 pc_tree->vertical[0].skip = x->skip;
5257 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
5258 load_pred_mv(x, ctx);
5259 pc_tree->vertical[1].pred_pixel_ready = 1;
5260 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms, &this_rdc,
5261 subsize, &pc_tree->vertical[1]);
5262 pc_tree->vertical[1].mic = *xd->mi[0];
5263 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
5264 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
5265 pc_tree->vertical[1].skip = x->skip;
5267 if (this_rdc.rate == INT_MAX) {
5268 vp9_rd_cost_reset(&sum_rdc);
5270 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
5271 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
5272 sum_rdc.rate += this_rdc.rate;
5273 sum_rdc.dist += this_rdc.dist;
sum_rdc.rdcost =
    RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
5279 if (sum_rdc.rdcost < best_rdc.rdcost) {
5281 pc_tree->partitioning = PARTITION_VERT;
5283 pred_pixel_ready_reset(pc_tree, bsize);
5287 *rd_cost = best_rdc;
5289 if (best_rdc.rate == INT_MAX) {
5290 vp9_rd_cost_reset(rd_cost);
5294 // update mode info array
5295 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, pc_tree);
5297 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
5298 int output_enabled = (bsize == BLOCK_64X64);
5299 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
5303 if (bsize == BLOCK_64X64 && do_recon) {
5304 assert(tp_orig < *tp);
5305 assert(best_rdc.rate < INT_MAX);
5306 assert(best_rdc.dist < INT64_MAX);
5308 assert(tp_orig == *tp);
5312 static void nonrd_select_partition(VP9_COMP *cpi, ThreadData *td,
5313 TileDataEnc *tile_data, MODE_INFO **mi,
5314 TOKENEXTRA **tp, int mi_row, int mi_col,
5315 BLOCK_SIZE bsize, int output_enabled,
5316 RD_COST *rd_cost, PC_TREE *pc_tree) {
5317 VP9_COMMON *const cm = &cpi->common;
5318 TileInfo *const tile_info = &tile_data->tile_info;
5319 MACROBLOCK *const x = &td->mb;
5320 MACROBLOCKD *const xd = &x->e_mbd;
5321 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
5322 const int mis = cm->mi_stride;
5323 PARTITION_TYPE partition;
5326 BLOCK_SIZE subsize_ref =
5327 (cpi->sf.adapt_partition_source_sad) ? BLOCK_8X8 : BLOCK_16X16;
5329 vp9_rd_cost_reset(&this_rdc);
5330 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
5332 subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
5333 partition = partition_lookup[bsl][subsize];
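// The mi grid already holds a partitioning (from choose_partitioning in the
// caller); partition_lookup maps the sb_type stored at this node's top-left
// 8x8 unit back to the PARTITION_* type that produced it at size bsize.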
5335 if (bsize == BLOCK_32X32 && subsize == BLOCK_32X32) {
5336 x->max_partition_size = BLOCK_32X32;
5337 x->min_partition_size = BLOCK_16X16;
5338 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
5339 0, INT64_MAX, pc_tree);
5340 } else if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
5341 subsize >= subsize_ref) {
5342 x->max_partition_size = BLOCK_32X32;
5343 x->min_partition_size = BLOCK_8X8;
5344 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
5345 0, INT64_MAX, pc_tree);
5346 } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
5347 x->max_partition_size = BLOCK_16X16;
5348 x->min_partition_size = BLOCK_8X8;
5349 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
5350 0, INT64_MAX, pc_tree);
5352 switch (partition) {
5353 case PARTITION_NONE:
5354 pc_tree->none.pred_pixel_ready = 1;
5355 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
5357 pc_tree->none.mic = *xd->mi[0];
5358 pc_tree->none.mbmi_ext = *x->mbmi_ext;
5359 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
5360 pc_tree->none.skip = x->skip;
5362 case PARTITION_VERT:
5363 pc_tree->vertical[0].pred_pixel_ready = 1;
5364 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
5365 &pc_tree->vertical[0]);
5366 pc_tree->vertical[0].mic = *xd->mi[0];
5367 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
5368 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
5369 pc_tree->vertical[0].skip = x->skip;
5370 if (mi_col + hbs < cm->mi_cols) {
5371 pc_tree->vertical[1].pred_pixel_ready = 1;
5372 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
5373 &this_rdc, subsize, &pc_tree->vertical[1]);
5374 pc_tree->vertical[1].mic = *xd->mi[0];
5375 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
5376 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
5377 pc_tree->vertical[1].skip = x->skip;
5378 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5379 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5380 rd_cost->rate += this_rdc.rate;
5381 rd_cost->dist += this_rdc.dist;
5385 case PARTITION_HORZ:
5386 pc_tree->horizontal[0].pred_pixel_ready = 1;
5387 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
5388 &pc_tree->horizontal[0]);
5389 pc_tree->horizontal[0].mic = *xd->mi[0];
5390 pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
5391 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
5392 pc_tree->horizontal[0].skip = x->skip;
5393 if (mi_row + hbs < cm->mi_rows) {
5394 pc_tree->horizontal[1].pred_pixel_ready = 1;
5395 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
5396 &this_rdc, subsize, &pc_tree->horizontal[1]);
5397 pc_tree->horizontal[1].mic = *xd->mi[0];
5398 pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
5399 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
5400 pc_tree->horizontal[1].skip = x->skip;
5401 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5402 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5403 rd_cost->rate += this_rdc.rate;
5404 rd_cost->dist += this_rdc.dist;
5409 assert(partition == PARTITION_SPLIT);
5410 subsize = get_subsize(bsize, PARTITION_SPLIT);
5411 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5412 subsize, output_enabled, rd_cost,
5414 nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp, mi_row,
5415 mi_col + hbs, subsize, output_enabled, &this_rdc,
5417 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5418 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5419 rd_cost->rate += this_rdc.rate;
5420 rd_cost->dist += this_rdc.dist;
5422 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis, tp,
5423 mi_row + hbs, mi_col, subsize, output_enabled,
5424 &this_rdc, pc_tree->split[2]);
5425 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5426 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5427 rd_cost->rate += this_rdc.rate;
5428 rd_cost->dist += this_rdc.dist;
5430 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
5431 mi_row + hbs, mi_col + hbs, subsize,
5432 output_enabled, &this_rdc, pc_tree->split[3]);
5433 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5434 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5435 rd_cost->rate += this_rdc.rate;
5436 rd_cost->dist += this_rdc.dist;
5442 if (bsize == BLOCK_64X64 && output_enabled)
5443 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
5446 static void nonrd_use_partition(VP9_COMP *cpi, ThreadData *td,
5447 TileDataEnc *tile_data, MODE_INFO **mi,
5448 TOKENEXTRA **tp, int mi_row, int mi_col,
5449 BLOCK_SIZE bsize, int output_enabled,
5450 RD_COST *dummy_cost, PC_TREE *pc_tree) {
5451 VP9_COMMON *const cm = &cpi->common;
5452 TileInfo *tile_info = &tile_data->tile_info;
5453 MACROBLOCK *const x = &td->mb;
5454 MACROBLOCKD *const xd = &x->e_mbd;
5455 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
5456 const int mis = cm->mi_stride;
5457 PARTITION_TYPE partition;
5460 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
5462 subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
5463 partition = partition_lookup[bsl][subsize];
5465 if (output_enabled && bsize != BLOCK_4X4) {
5466 int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
5467 td->counts->partition[ctx][partition]++;
5470 switch (partition) {
5471 case PARTITION_NONE:
5472 pc_tree->none.pred_pixel_ready = 1;
5473 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
5474 subsize, &pc_tree->none);
5475 pc_tree->none.mic = *xd->mi[0];
5476 pc_tree->none.mbmi_ext = *x->mbmi_ext;
5477 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
5478 pc_tree->none.skip = x->skip;
5479 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
5480 subsize, &pc_tree->none);
5482 case PARTITION_VERT:
5483 pc_tree->vertical[0].pred_pixel_ready = 1;
5484 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
5485 subsize, &pc_tree->vertical[0]);
5486 pc_tree->vertical[0].mic = *xd->mi[0];
5487 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
5488 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
5489 pc_tree->vertical[0].skip = x->skip;
5490 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
5491 subsize, &pc_tree->vertical[0]);
5492 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
5493 pc_tree->vertical[1].pred_pixel_ready = 1;
5494 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, dummy_cost,
5495 subsize, &pc_tree->vertical[1]);
5496 pc_tree->vertical[1].mic = *xd->mi[0];
5497 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
5498 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
5499 pc_tree->vertical[1].skip = x->skip;
5500 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
5501 output_enabled, subsize, &pc_tree->vertical[1]);
5504 case PARTITION_HORZ:
5505 pc_tree->horizontal[0].pred_pixel_ready = 1;
5506 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
5507 subsize, &pc_tree->horizontal[0]);
5508 pc_tree->horizontal[0].mic = *xd->mi[0];
5509 pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
5510 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
5511 pc_tree->horizontal[0].skip = x->skip;
5512 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
5513 subsize, &pc_tree->horizontal[0]);
5515 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
5516 pc_tree->horizontal[1].pred_pixel_ready = 1;
5517 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, dummy_cost,
5518 subsize, &pc_tree->horizontal[1]);
5519 pc_tree->horizontal[1].mic = *xd->mi[0];
5520 pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
5521 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
5522 pc_tree->horizontal[1].skip = x->skip;
5523 encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
5524 output_enabled, subsize, &pc_tree->horizontal[1]);
5528 assert(partition == PARTITION_SPLIT);
5529 subsize = get_subsize(bsize, PARTITION_SPLIT);
5530 if (bsize == BLOCK_8X8) {
5531 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
5532 subsize, pc_tree->leaf_split[0]);
5533 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
5534 subsize, pc_tree->leaf_split[0]);
5536 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, subsize,
5537 output_enabled, dummy_cost, pc_tree->split[0]);
5538 nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp, mi_row,
5539 mi_col + hbs, subsize, output_enabled, dummy_cost,
5541 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
5542 mi_row + hbs, mi_col, subsize, output_enabled,
5543 dummy_cost, pc_tree->split[2]);
5544 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
5545 mi_row + hbs, mi_col + hbs, subsize, output_enabled,
5546 dummy_cost, pc_tree->split[3]);
5551 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
5552 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
// Get a prediction (stored in x->est_pred) for the whole 64x64 superblock.
5556 static void get_estimated_pred(VP9_COMP *cpi, const TileInfo *const tile,
5557 MACROBLOCK *x, int mi_row, int mi_col) {
5558 VP9_COMMON *const cm = &cpi->common;
5559 const int is_key_frame = frame_is_intra_only(cm);
5560 MACROBLOCKD *xd = &x->e_mbd;
5562 set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
5564 if (!is_key_frame) {
5565 MODE_INFO *mi = xd->mi[0];
5566 YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
5567 const YV12_BUFFER_CONFIG *yv12_g = NULL;
5568 const BLOCK_SIZE bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 +
5569 (mi_row + 4 < cm->mi_rows);
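// BLOCK_32X32, BLOCK_32X64, BLOCK_64X32 and BLOCK_64X64 are consecutive
// BLOCK_SIZE values, so this arithmetic selects the largest SAD block size
// that still fits in the frame: +2 widens to 64 when the right half of the
// superblock is inside the frame, +1 likewise extends the height.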
5570 unsigned int y_sad_g, y_sad_thr;
5571 unsigned int y_sad = UINT_MAX;
5573 assert(yv12 != NULL);
5575 if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id) ||
5576 cpi->svc.use_gf_temporal_ref_current_layer) {
5577 // For now, GOLDEN will not be used for non-zero spatial layers, since
5578 // it may not be a temporal reference.
5579 yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
5582 // Only compute y_sad_g (sad for golden reference) for speed < 8.
5583 if (cpi->oxcf.speed < 8 && yv12_g && yv12_g != yv12 &&
5584 (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
5585 vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
5586 &cm->frame_refs[GOLDEN_FRAME - 1].sf);
5587 y_sad_g = cpi->fn_ptr[bsize].sdf(
5588 x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
5589 xd->plane[0].pre[0].stride);
5594 if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
5595 cpi->rc.is_src_frame_alt_ref) {
5596 yv12 = get_ref_frame_buffer(cpi, ALTREF_FRAME);
5597 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
5598 &cm->frame_refs[ALTREF_FRAME - 1].sf);
5599 mi->ref_frame[0] = ALTREF_FRAME;
5602 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
5603 &cm->frame_refs[LAST_FRAME - 1].sf);
5604 mi->ref_frame[0] = LAST_FRAME;
5606 mi->ref_frame[1] = NONE;
5607 mi->sb_type = BLOCK_64X64;
5608 mi->mv[0].as_int = 0;
5609 mi->interp_filter = BILINEAR;
5612 const MV dummy_mv = { 0, 0 };
5613 y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col,
5615 x->sb_use_mv_part = 1;
5616 x->sb_mvcol_part = mi->mv[0].as_mv.col;
5617 x->sb_mvrow_part = mi->mv[0].as_mv.row;
// Pick the reference frame for partitioning, biasing LAST when y_sad_g
// and y_sad are close and short_circuit_low_temp_var is on.
5622 y_sad_thr = cpi->sf.short_circuit_low_temp_var ? (y_sad * 7) >> 3 : y_sad;
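// Example: with the bias on, GOLDEN is picked only if
// y_sad_g < (7 * y_sad) >> 3, i.e. golden must beat LAST by more than
// 12.5%; with the bias off it is a plain y_sad_g < y_sad comparison.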
5623 if (y_sad_g < y_sad_thr) {
5624 vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
5625 &cm->frame_refs[GOLDEN_FRAME - 1].sf);
5626 mi->ref_frame[0] = GOLDEN_FRAME;
5627 mi->mv[0].as_int = 0;
5629 x->pred_mv[LAST_FRAME] = mi->mv[0].as_mv;
5632 set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
5633 xd->plane[0].dst.buf = x->est_pred;
5634 xd->plane[0].dst.stride = 64;
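// Redirect the luma dst buffer to the 64x64 scratch area x->est_pred so
// that the inter prediction below is written there instead of into the
// frame's reconstruction buffer.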
5635 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
5637 #if CONFIG_VP9_HIGHBITDEPTH
5639 case 8: memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0])); break;
5641 memset(x->est_pred, 128 * 4, 64 * 64 * sizeof(x->est_pred[0]));
5644 memset(x->est_pred, 128 * 16, 64 * 64 * sizeof(x->est_pred[0]));
5648 memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0]));
5649 #endif // CONFIG_VP9_HIGHBITDEPTH
5653 static void encode_nonrd_sb_row(VP9_COMP *cpi, ThreadData *td,
5654 TileDataEnc *tile_data, int mi_row,
5656 SPEED_FEATURES *const sf = &cpi->sf;
5657 VP9_COMMON *const cm = &cpi->common;
5658 TileInfo *const tile_info = &tile_data->tile_info;
5659 MACROBLOCK *const x = &td->mb;
5660 MACROBLOCKD *const xd = &x->e_mbd;
5661 const int mi_col_start = tile_info->mi_col_start;
5662 const int mi_col_end = tile_info->mi_col_end;
5664 const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
5665 const int num_sb_cols =
5666 get_num_cols(tile_data->tile_info, MI_BLOCK_SIZE_LOG2);
5669 // Initialize the left context for the new SB row
5670 memset(&xd->left_context, 0, sizeof(xd->left_context));
5671 memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
5673 // Code each SB in the row
5674 for (mi_col = mi_col_start, sb_col_in_tile = 0; mi_col < mi_col_end;
5675 mi_col += MI_BLOCK_SIZE, ++sb_col_in_tile) {
5676 const struct segmentation *const seg = &cm->seg;
5678 const int idx_str = cm->mi_stride * mi_row + mi_col;
5679 MODE_INFO **mi = cm->mi_grid_visible + idx_str;
5680 PARTITION_SEARCH_TYPE partition_search_type = sf->partition_search_type;
5681 BLOCK_SIZE bsize = BLOCK_64X64;
5685 (*(cpi->row_mt_sync_read_ptr))(&tile_data->row_mt_sync, sb_row,
5688 if (cpi->use_skin_detection) {
5689 vp9_compute_skin_sb(cpi, BLOCK_16X16, mi_row, mi_col);
5692 x->source_variance = UINT_MAX;
5693 for (i = 0; i < MAX_REF_FRAMES; ++i) {
5694 x->pred_mv[i].row = INT16_MAX;
5695 x->pred_mv[i].col = INT16_MAX;
5697 vp9_rd_cost_init(&dummy_rdc);
5698 x->color_sensitivity[0] = 0;
5699 x->color_sensitivity[1] = 0;
5701 x->skip_low_source_sad = 0;
5702 x->lowvar_highsumdiff = 0;
5703 x->content_state_sb = 0;
5704 x->zero_temp_sad_source = 0;
5705 x->sb_use_mv_part = 0;
5706 x->sb_mvcol_part = 0;
5707 x->sb_mvrow_part = 0;
5708 x->sb_pickmode_part = 0;
5709 x->arf_frame_usage = 0;
5710 x->lastgolden_frame_usage = 0;
5713 const uint8_t *const map =
5714 seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
5715 int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
5716 seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
5718 partition_search_type = FIXED_PARTITION;
5722 if (cpi->compute_source_sad_onepass && cpi->sf.use_source_sad) {
5723 int shift = cpi->Source->y_stride * (mi_row << 3) + (mi_col << 3);
5724 int sb_offset2 = ((cm->mi_cols + 7) >> 3) * (mi_row >> 3) + (mi_col >> 3);
5725 int64_t source_sad = avg_source_sad(cpi, x, shift, sb_offset2);
5726 if (sf->adapt_partition_source_sad &&
5727 (cpi->oxcf.rc_mode == VPX_VBR && !cpi->rc.is_src_frame_alt_ref &&
5728 source_sad > sf->adapt_partition_thresh &&
5729 (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)))
5730 partition_search_type = REFERENCE_PARTITION;
5733 // Set the partition type of the 64X64 block
5734 switch (partition_search_type) {
5735 case VAR_BASED_PARTITION:
5736 // TODO(jingning, marpan): The mode decision and encoding process
5737 // support both intra and inter sub8x8 block coding for RTC mode.
5738 // Tune the thresholds accordingly to use sub8x8 block coding for
5739 // coding performance improvement.
5740 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
5741 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5742 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
5744 case ML_BASED_PARTITION:
5745 get_estimated_pred(cpi, tile_info, x, mi_row, mi_col);
5746 x->max_partition_size = BLOCK_64X64;
5747 x->min_partition_size = BLOCK_8X8;
5748 x->sb_pickmode_part = 1;
5749 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
5750 BLOCK_64X64, &dummy_rdc, 1, INT64_MAX,
5753 case SOURCE_VAR_BASED_PARTITION:
5754 set_source_var_based_partition(cpi, tile_info, x, mi, mi_row, mi_col);
5755 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5756 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
5758 case FIXED_PARTITION:
5759 if (!seg_skip) bsize = sf->always_this_block_size;
5760 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
5761 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5762 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
5765 assert(partition_search_type == REFERENCE_PARTITION);
5766 x->sb_pickmode_part = 1;
5767 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
5768 // Use nonrd_pick_partition on scene-cut for VBR mode.
5769 // nonrd_pick_partition does not support 4x4 partition, so avoid it
5770 // on key frame for now.
5771 if ((cpi->oxcf.rc_mode == VPX_VBR && cpi->rc.high_source_sad &&
5772 cpi->oxcf.speed < 6 && !frame_is_intra_only(cm) &&
5773 (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
// Use lower max_partition_size for low resolutions.
5775 if (cm->width <= 352 && cm->height <= 288)
5776 x->max_partition_size = BLOCK_32X32;
5778 x->max_partition_size = BLOCK_64X64;
5779 x->min_partition_size = BLOCK_8X8;
5780 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
5781 BLOCK_64X64, &dummy_rdc, 1, INT64_MAX,
5784 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
// TODO(marpan): Seems like nonrd_select_partition does not support
// 4x4 partitions. Since 4x4 is used on key frames, use this switch
// for now.
5788 if (frame_is_intra_only(cm))
5789 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5790 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
5792 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5793 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
// Update ref_frame usage for inter frames if this group is an ARF group.
5800 if (!cpi->rc.is_src_frame_alt_ref && !cpi->refresh_golden_frame &&
5801 !cpi->refresh_alt_ref_frame && cpi->rc.alt_ref_gf_group &&
5802 cpi->sf.use_altref_onepass) {
5803 int sboffset = ((cm->mi_cols + 7) >> 3) * (mi_row >> 3) + (mi_col >> 3);
5804 if (cpi->count_arf_frame_usage != NULL)
5805 cpi->count_arf_frame_usage[sboffset] = x->arf_frame_usage;
5806 if (cpi->count_lastgolden_frame_usage != NULL)
5807 cpi->count_lastgolden_frame_usage[sboffset] = x->lastgolden_frame_usage;
5810 (*(cpi->row_mt_sync_write_ptr))(&tile_data->row_mt_sync, sb_row,
5811 sb_col_in_tile, num_sb_cols);
// End of the RTC (non-RD) encoding path.
5816 static INLINE uint32_t variance(const diff *const d) {
5817 return d->sse - (uint32_t)(((int64_t)d->sum * d->sum) >> 8);
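// For a 16x16 block (256 samples) the population variance is
//   var = sse - sum * sum / 256,
// which the >> 8 implements; the int64_t cast keeps sum * sum from
// overflowing before the shift.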
5820 #if CONFIG_VP9_HIGHBITDEPTH
5821 static INLINE uint32_t variance_highbd(diff *const d) {
5822 const int64_t var = (int64_t)d->sse - (((int64_t)d->sum * d->sum) >> 8);
5823 return (var >= 0) ? (uint32_t)var : 0;
5825 #endif // CONFIG_VP9_HIGHBITDEPTH
5827 static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
5828 const SPEED_FEATURES *const sf = &cpi->sf;
5829 const VP9_COMMON *const cm = &cpi->common;
5831 const uint8_t *src = cpi->Source->y_buffer;
5832 const uint8_t *last_src = cpi->Last_Source->y_buffer;
5833 const int src_stride = cpi->Source->y_stride;
5834 const int last_stride = cpi->Last_Source->y_stride;
5836 // Pick cutoff threshold
5837 const int cutoff = (VPXMIN(cm->width, cm->height) >= 720)
5838 ? (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100)
5839 : (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
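// cutoff is a percentage of the frame's macroblocks (a larger share is
// tolerated at 720p and above). The histogram pass below only derives a
// variance threshold when fewer than cutoff 16x16 blocks land in the
// highest-variance bin; otherwise source_var_thresh stays 0.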
5840 DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]);
5841 diff *var16 = cpi->source_diff_var;
5846 memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
5848 for (i = 0; i < cm->mb_rows; i++) {
5849 for (j = 0; j < cm->mb_cols; j++) {
5850 #if CONFIG_VP9_HIGHBITDEPTH
5851 if (cm->use_highbitdepth) {
5852 switch (cm->bit_depth) {
5854 vpx_highbd_8_get16x16var(src, src_stride, last_src, last_stride,
5855 &var16->sse, &var16->sum);
5856 var16->var = variance(var16);
5859 vpx_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
5860 &var16->sse, &var16->sum);
5861 var16->var = variance_highbd(var16);
5864 assert(cm->bit_depth == VPX_BITS_12);
5865 vpx_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
5866 &var16->sse, &var16->sum);
5867 var16->var = variance_highbd(var16);
5871 vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse,
5873 var16->var = variance(var16);
5876 vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse,
5878 var16->var = variance(var16);
5879 #endif // CONFIG_VP9_HIGHBITDEPTH
5881 if (var16->var >= VAR_HIST_MAX_BG_VAR)
5882 hist[VAR_HIST_BINS - 1]++;
5884 hist[var16->var / VAR_HIST_FACTOR]++;
5891 src = src - cm->mb_cols * 16 + 16 * src_stride;
5892 last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
5895 cpi->source_var_thresh = 0;
5897 if (hist[VAR_HIST_BINS - 1] < cutoff) {
5898 for (i = 0; i < VAR_HIST_BINS - 1; i++) {
5902 cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
5908 return sf->search_type_check_frequency;
5911 static void source_var_based_partition_search_method(VP9_COMP *cpi) {
5912 VP9_COMMON *const cm = &cpi->common;
5913 SPEED_FEATURES *const sf = &cpi->sf;
5915 if (cm->frame_type == KEY_FRAME) {
5916 // For key frame, use SEARCH_PARTITION.
5917 sf->partition_search_type = SEARCH_PARTITION;
5918 } else if (cm->intra_only) {
5919 sf->partition_search_type = FIXED_PARTITION;
5921 if (cm->last_width != cm->width || cm->last_height != cm->height) {
5922 if (cpi->source_diff_var) vpx_free(cpi->source_diff_var);
5924 CHECK_MEM_ERROR(cm, cpi->source_diff_var,
5925 vpx_calloc(cm->MBs, sizeof(diff)));
5928 if (!cpi->frames_till_next_var_check)
5929 cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);
5931 if (cpi->frames_till_next_var_check > 0) {
5932 sf->partition_search_type = FIXED_PARTITION;
5933 cpi->frames_till_next_var_check--;
5938 static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) {
5939 unsigned int intra_count = 0, inter_count = 0;
5942 for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
5943 intra_count += td->counts->intra_inter[j][0];
5944 inter_count += td->counts->intra_inter[j][1];
5947 return (intra_count << 2) < inter_count && cm->frame_type != KEY_FRAME &&
5951 void vp9_init_tile_data(VP9_COMP *cpi) {
5952 VP9_COMMON *const cm = &cpi->common;
5953 const int tile_cols = 1 << cm->log2_tile_cols;
5954 const int tile_rows = 1 << cm->log2_tile_rows;
5955 int tile_col, tile_row;
5956 TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
5957 TOKENLIST *tplist = cpi->tplist[0][0];
5959 int tplist_count = 0;
5961 if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
5962 if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
5965 vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
5966 cpi->allocated_tiles = tile_cols * tile_rows;
5968 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
5969 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
5970 TileDataEnc *tile_data =
5971 &cpi->tile_data[tile_row * tile_cols + tile_col];
5973 for (i = 0; i < BLOCK_SIZES; ++i) {
5974 for (j = 0; j < MAX_MODES; ++j) {
5975 tile_data->thresh_freq_fact[i][j] = RD_THRESH_INIT_FACT;
5976 #if CONFIG_CONSISTENT_RECODE
5977 tile_data->thresh_freq_fact_prev[i][j] = RD_THRESH_INIT_FACT;
5979 tile_data->mode_map[i][j] = j;
5982 #if CONFIG_MULTITHREAD
5983 tile_data->row_base_thresh_freq_fact = NULL;
5988 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
5989 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
5990 TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
5991 TileInfo *tile_info = &this_tile->tile_info;
5992 if (cpi->sf.adaptive_rd_thresh_row_mt &&
5993 this_tile->row_base_thresh_freq_fact == NULL)
5994 vp9_row_mt_alloc_rd_thresh(cpi, this_tile);
5995 vp9_tile_init(tile_info, cm, tile_row, tile_col);
5997 cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
5998 pre_tok = cpi->tile_tok[tile_row][tile_col];
5999 tile_tok = allocated_tokens(*tile_info);
6001 cpi->tplist[tile_row][tile_col] = tplist + tplist_count;
6002 tplist = cpi->tplist[tile_row][tile_col];
6003 tplist_count = get_num_vert_units(*tile_info, MI_BLOCK_SIZE_LOG2);
6008 void vp9_encode_sb_row(VP9_COMP *cpi, ThreadData *td, int tile_row,
6009 int tile_col, int mi_row) {
6010 VP9_COMMON *const cm = &cpi->common;
6011 const int tile_cols = 1 << cm->log2_tile_cols;
6012 TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
6013 const TileInfo *const tile_info = &this_tile->tile_info;
6014 TOKENEXTRA *tok = NULL;
6016 int tile_mb_cols = (tile_info->mi_col_end - tile_info->mi_col_start + 1) >> 1;
6018 tile_sb_row = mi_cols_aligned_to_sb(mi_row - tile_info->mi_row_start) >>
6020 get_start_tok(cpi, tile_row, tile_col, mi_row, &tok);
6021 cpi->tplist[tile_row][tile_col][tile_sb_row].start = tok;
6023 if (cpi->sf.use_nonrd_pick_mode)
6024 encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
6025 #if !CONFIG_REALTIME_ONLY
6027 encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
6030 cpi->tplist[tile_row][tile_col][tile_sb_row].stop = tok;
6031 cpi->tplist[tile_row][tile_col][tile_sb_row].count =
6032 (unsigned int)(cpi->tplist[tile_row][tile_col][tile_sb_row].stop -
6033 cpi->tplist[tile_row][tile_col][tile_sb_row].start);
6034 assert(tok - cpi->tplist[tile_row][tile_col][tile_sb_row].start <=
6035 get_token_alloc(MI_BLOCK_SIZE >> 1, tile_mb_cols));
6040 void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td, int tile_row,
6042 VP9_COMMON *const cm = &cpi->common;
6043 const int tile_cols = 1 << cm->log2_tile_cols;
6044 TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
6045 const TileInfo *const tile_info = &this_tile->tile_info;
6046 const int mi_row_start = tile_info->mi_row_start;
6047 const int mi_row_end = tile_info->mi_row_end;
6050 for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += MI_BLOCK_SIZE)
6051 vp9_encode_sb_row(cpi, td, tile_row, tile_col, mi_row);
6054 static void encode_tiles(VP9_COMP *cpi) {
6055 VP9_COMMON *const cm = &cpi->common;
6056 const int tile_cols = 1 << cm->log2_tile_cols;
6057 const int tile_rows = 1 << cm->log2_tile_rows;
6058 int tile_col, tile_row;
6060 vp9_init_tile_data(cpi);
6062 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
6063 for (tile_col = 0; tile_col < tile_cols; ++tile_col)
6064 vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
6067 #if CONFIG_FP_MB_STATS
6068 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
6069 VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
6070 uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
6071 cm->current_video_frame * cm->MBs * sizeof(uint8_t);
6073 if (mb_stats_in > firstpass_mb_stats->mb_stats_end) return EOF;
6075 *this_frame_mb_stats = mb_stats_in;
6081 static int compare_kmeans_data(const void *a, const void *b) {
6082 if (((const KMEANS_DATA *)a)->value > ((const KMEANS_DATA *)b)->value) {
6084 } else if (((const KMEANS_DATA *)a)->value <
6085 ((const KMEANS_DATA *)b)->value) {
6092 static void compute_boundary_ls(const double *ctr_ls, int k,
6093 double *boundary_ls) {
6094 // boundary_ls[j] is the upper bound of data centered at ctr_ls[j]
6096 for (j = 0; j < k - 1; ++j) {
6097 boundary_ls[j] = (ctr_ls[j] + ctr_ls[j + 1]) / 2.;
6099 boundary_ls[k - 1] = DBL_MAX;
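// Illustration: with k = 3 and ctr_ls = {10.0, 20.0, 40.0} this yields
// boundary_ls = {15.0, 30.0, DBL_MAX}, so group j covers values in
// [boundary_ls[j - 1], boundary_ls[j]).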
6102 int vp9_get_group_idx(double value, double *boundary_ls, int k) {
6104 while (value >= boundary_ls[group_idx]) {
6106 if (group_idx == k - 1) {
6113 void vp9_kmeans(double *ctr_ls, double *boundary_ls, int *count_ls, int k,
6114 KMEANS_DATA *arr, int size) {
6118 double sum[MAX_KMEANS_GROUPS];
6119 int count[MAX_KMEANS_GROUPS];
6121 vpx_clear_system_state();
6123 assert(k >= 2 && k <= MAX_KMEANS_GROUPS);
6125 qsort(arr, size, sizeof(*arr), compare_kmeans_data);
6127 // initialize the center points
6128 for (j = 0; j < k; ++j) {
6129 ctr_ls[j] = arr[(size * (2 * j + 1)) / (2 * k)].value;
6132 for (itr = 0; itr < 10; ++itr) {
6133 compute_boundary_ls(ctr_ls, k, boundary_ls);
6134 for (i = 0; i < MAX_KMEANS_GROUPS; ++i) {
6139 // Both the data and centers are sorted in ascending order.
6140 // As each data point is processed in order, its corresponding group index
6141 // can only increase. So we only need to reset the group index to zero here.
6143 for (i = 0; i < size; ++i) {
6144 while (arr[i].value >= boundary_ls[group_idx]) {
6145 // place samples into clusters
6147 if (group_idx == k - 1) {
6151 sum[group_idx] += arr[i].value;
6155 for (group_idx = 0; group_idx < k; ++group_idx) {
6156 if (count[group_idx] > 0)
6157 ctr_ls[group_idx] = sum[group_idx] / count[group_idx];
6160 count[group_idx] = 0;
6164 // compute group_idx, boundary_ls and count_ls
6165 for (j = 0; j < k; ++j) {
6168 compute_boundary_ls(ctr_ls, k, boundary_ls);
6170 for (i = 0; i < size; ++i) {
6171 while (arr[i].value >= boundary_ls[group_idx]) {
6173 if (group_idx == k - 1) {
6177 arr[i].group_idx = group_idx;
6178 ++count_ls[group_idx];
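// vp9_kmeans above is Lloyd's algorithm specialized for sorted 1-D data:
// centers are seeded at evenly spaced quantiles of the sorted array, and
// since data and centers both stay in ascending order, every assignment
// pass is a single linear sweep with a monotonically advancing group
// index. The iteration count is capped at 10 rather than run to
// convergence.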
6182 static void encode_frame_internal(VP9_COMP *cpi) {
6183 SPEED_FEATURES *const sf = &cpi->sf;
6184 ThreadData *const td = &cpi->td;
6185 MACROBLOCK *const x = &td->mb;
6186 VP9_COMMON *const cm = &cpi->common;
6187 MACROBLOCKD *const xd = &x->e_mbd;
6188 const int gf_group_index = cpi->twopass.gf_group.index;
6190 xd->mi = cm->mi_grid_visible;
6192 vp9_zero(*td->counts);
6193 vp9_zero(cpi->td.rd_counts);
6195 xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 &&
6196 cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
6198 #if CONFIG_VP9_HIGHBITDEPTH
6199 if (cm->use_highbitdepth)
6200 x->fwd_txfm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4;
6202 x->fwd_txfm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
6203 x->highbd_inv_txfm_add =
6204 xd->lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add;
6206 x->fwd_txfm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
6207 #endif // CONFIG_VP9_HIGHBITDEPTH
6208 x->inv_txfm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
6209 #if CONFIG_CONSISTENT_RECODE
6210 x->optimize = sf->optimize_coefficients == 1 && cpi->oxcf.pass != 1;
6212 if (xd->lossless) x->optimize = 0;
6213 x->sharpness = cpi->oxcf.sharpness;
6214 x->adjust_rdmult_by_segment = (cpi->oxcf.aq_mode == VARIANCE_AQ);
6216 cm->tx_mode = select_tx_mode(cpi, xd);
6218 vp9_frame_init_quantizer(cpi);
6220 vp9_initialize_rd_consts(cpi);
6221 vp9_initialize_me_consts(cpi, x, cm->base_qindex);
6222 init_encode_frame_mb_context(cpi);
6223 cm->use_prev_frame_mvs =
6224 !cm->error_resilient_mode && cm->width == cm->last_width &&
6225 cm->height == cm->last_height && !cm->intra_only && cm->last_show_frame;
6226 // Special case: set prev_mi to NULL when the previous mode info
6227 // context cannot be used.
6229 cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;
6231 x->quant_fp = cpi->sf.use_quant_fp;
6232 vp9_zero(x->skip_txfm);
6233 if (sf->use_nonrd_pick_mode) {
// Initialize internal buffer pointers for rtc coding, where non-RD
// mode decision is used and hence no buffer pointer swap is needed.
6237 struct macroblock_plane *const p = x->plane;
6238 struct macroblockd_plane *const pd = xd->plane;
6239 PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none;
6241 for (i = 0; i < MAX_MB_PLANE; ++i) {
6242 p[i].coeff = ctx->coeff_pbuf[i][0];
6243 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
6244 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
6245 p[i].eobs = ctx->eobs_pbuf[i][0];
6247 vp9_zero(x->zcoeff_blk);
6249 if (cm->frame_type != KEY_FRAME && cpi->rc.frames_since_golden == 0 &&
6250 !(cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) &&
6252 cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);
6254 if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
6255 source_var_based_partition_search_method(cpi);
6256 } else if (gf_group_index && gf_group_index < MAX_ARF_GOP_SIZE &&
6257 cpi->sf.enable_tpl_model) {
6258 TplDepFrame *tpl_frame = &cpi->tpl_stats[cpi->twopass.gf_group.index];
6259 TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
6261 int tpl_stride = tpl_frame->stride;
6262 int64_t intra_cost_base = 0;
6263 int64_t mc_dep_cost_base = 0;
6266 for (row = 0; row < cm->mi_rows && tpl_frame->is_valid; ++row) {
6267 for (col = 0; col < cm->mi_cols; ++col) {
6268 TplDepStats *this_stats = &tpl_stats[row * tpl_stride + col];
6269 intra_cost_base += this_stats->intra_cost;
6270 mc_dep_cost_base += this_stats->mc_dep_cost;
6274 vpx_clear_system_state();
6276 if (tpl_frame->is_valid)
6277 cpi->rd.r0 = (double)intra_cost_base / mc_dep_cost_base;
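// r0 is the ratio of the frame's aggregate intra cost to its aggregate
// motion-compensated dependency cost from the TPL stats; presumably the
// later rd-mult adjustment uses a small r0 (cost mostly propagated to
// other frames) to spend bits where they help future frames.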
6280 // Frame segmentation
6281 if (cpi->oxcf.aq_mode == PERCEPTUAL_AQ) build_kmeans_segmentation(cpi);
6284 struct vpx_usec_timer emr_timer;
6285 vpx_usec_timer_start(&emr_timer);
6287 #if CONFIG_FP_MB_STATS
6288 if (cpi->use_fp_mb_stats) {
6289 input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
6290 &cpi->twopass.this_frame_mb_stats);
6295 cpi->row_mt_sync_read_ptr = vp9_row_mt_sync_read_dummy;
6296 cpi->row_mt_sync_write_ptr = vp9_row_mt_sync_write_dummy;
// If allowed, encode tiles in parallel, with one thread handling one
// tile, when row-based multi-threading is disabled.
6299 if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
6300 vp9_encode_tiles_mt(cpi);
6304 cpi->row_mt_sync_read_ptr = vp9_row_mt_sync_read;
6305 cpi->row_mt_sync_write_ptr = vp9_row_mt_sync_write;
6306 vp9_encode_tiles_row_mt(cpi);
6309 vpx_usec_timer_mark(&emr_timer);
6310 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
6313 sf->skip_encode_frame =
6314 sf->skip_encode_sb ? get_skip_encode_frame(cm, td) : 0;
6317 // Keep record of the total distortion this time around for future use
6318 cpi->last_frame_distortion = cpi->frame_distortion;
6322 static INTERP_FILTER get_interp_filter(
6323 const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
6324 if (!is_alt_ref && threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
6325 threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
6326 threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
6327 return EIGHTTAP_SMOOTH;
6328 } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
6329 threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
6330 return EIGHTTAP_SHARP;
6331 } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
6338 static int compute_frame_aq_offset(struct VP9_COMP *cpi) {
6339 VP9_COMMON *const cm = &cpi->common;
6340 MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
6341 struct segmentation *const seg = &cm->seg;
6349 for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
6350 MODE_INFO **mi_8x8 = mi_8x8_ptr;
6351 for (mi_col = 0; mi_col < cm->mi_cols; mi_col++, mi_8x8++) {
6352 segment_id = mi_8x8[0]->segment_id;
6353 qdelta_index = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
6354 sum_delta += qdelta_index;
6357 mi_8x8_ptr += cm->mi_stride;
6360 return sum_delta / (cm->mi_rows * cm->mi_cols);
6363 #if CONFIG_CONSISTENT_RECODE
6364 static void restore_encode_params(VP9_COMP *cpi) {
6365 VP9_COMMON *const cm = &cpi->common;
6366 const int tile_cols = 1 << cm->log2_tile_cols;
6367 const int tile_rows = 1 << cm->log2_tile_rows;
6368 int tile_col, tile_row;
6370 RD_OPT *rd_opt = &cpi->rd;
6371 for (i = 0; i < MAX_REF_FRAMES; i++) {
6372 for (j = 0; j < REFERENCE_MODES; j++)
6373 rd_opt->prediction_type_threshes[i][j] =
6374 rd_opt->prediction_type_threshes_prev[i][j];
6376 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; j++)
6377 rd_opt->filter_threshes[i][j] = rd_opt->filter_threshes_prev[i][j];
6380 if (cpi->tile_data != NULL) {
6381 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
6382 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
6383 TileDataEnc *tile_data =
6384 &cpi->tile_data[tile_row * tile_cols + tile_col];
6385 for (i = 0; i < BLOCK_SIZES; ++i) {
6386 for (j = 0; j < MAX_MODES; ++j) {
6387 tile_data->thresh_freq_fact[i][j] =
6388 tile_data->thresh_freq_fact_prev[i][j];
6394 cm->interp_filter = cpi->sf.default_interp_filter;
6398 void vp9_encode_frame(VP9_COMP *cpi) {
6399 VP9_COMMON *const cm = &cpi->common;
6401 #if CONFIG_CONSISTENT_RECODE
6402 restore_encode_params(cpi);
6405 #if CONFIG_MISMATCH_DEBUG
6406 mismatch_reset_frame(MAX_MB_PLANE);
6409 // In the longer term the encoder should be generalized to match the
6410 // decoder such that we allow compound where one of the 3 buffers has a
6411 // different sign bias and that buffer is then the fixed ref. However, this
6412 // requires further work in the rd loop. For now the only supported encoder
// side behavior is where the ALT ref buffer has opposite sign bias to
// the other two.
6415 if (!frame_is_intra_only(cm)) {
6416 if (vp9_compound_reference_allowed(cm)) {
6417 cpi->allow_comp_inter_inter = 1;
6418 vp9_setup_compound_reference_mode(cm);
6420 cpi->allow_comp_inter_inter = 0;
6424 if (cpi->sf.frame_parameter_update) {
6426 RD_OPT *const rd_opt = &cpi->rd;
6427 FRAME_COUNTS *counts = cpi->td.counts;
6428 RD_COUNTS *const rdc = &cpi->td.rd_counts;
6430 // This code does a single RD pass over the whole frame assuming
6431 // either compound, single or hybrid prediction as per whatever has
6432 // worked best for that type of frame in the past.
6433 // It also predicts whether another coding mode would have worked
6434 // better than this coding mode. If that is the case, it remembers
6435 // that for subsequent frames.
6436 // It also does the same analysis for transform size selection.
6437 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
6438 int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
6439 int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
6440 const int is_alt_ref = frame_type == ALTREF_FRAME;
6442 /* prediction (compound, single or hybrid) mode selection */
6443 if (is_alt_ref || !cpi->allow_comp_inter_inter)
6444 cm->reference_mode = SINGLE_REFERENCE;
6445 else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
6446 mode_thrs[COMPOUND_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT] &&
6447 check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
6448 cm->reference_mode = COMPOUND_REFERENCE;
6449 else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
6450 cm->reference_mode = SINGLE_REFERENCE;
6452 cm->reference_mode = REFERENCE_MODE_SELECT;
6454 if (cm->interp_filter == SWITCHABLE)
6455 cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);
6457 encode_frame_internal(cpi);
6459 for (i = 0; i < REFERENCE_MODES; ++i)
6460 mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;
6462 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
6463 filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;
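// Both threshold arrays are decaying averages per frame type: each new
// per-MB diff observation is averaged 50/50 with the stored value, so a
// past frame's influence halves with every update of that frame type.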
6465 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
6466 int single_count_zero = 0;
6467 int comp_count_zero = 0;
6469 for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
6470 single_count_zero += counts->comp_inter[i][0];
6471 comp_count_zero += counts->comp_inter[i][1];
6474 if (comp_count_zero == 0) {
6475 cm->reference_mode = SINGLE_REFERENCE;
6476 vp9_zero(counts->comp_inter);
6477 } else if (single_count_zero == 0) {
6478 cm->reference_mode = COMPOUND_REFERENCE;
6479 vp9_zero(counts->comp_inter);
6483 if (cm->tx_mode == TX_MODE_SELECT) {
6485 int count8x8_lp = 0, count8x8_8x8p = 0;
6486 int count16x16_16x16p = 0, count16x16_lp = 0;
6489 for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
6490 count4x4 += counts->tx.p32x32[i][TX_4X4];
6491 count4x4 += counts->tx.p16x16[i][TX_4X4];
6492 count4x4 += counts->tx.p8x8[i][TX_4X4];
6494 count8x8_lp += counts->tx.p32x32[i][TX_8X8];
6495 count8x8_lp += counts->tx.p16x16[i][TX_8X8];
6496 count8x8_8x8p += counts->tx.p8x8[i][TX_8X8];
6498 count16x16_16x16p += counts->tx.p16x16[i][TX_16X16];
6499 count16x16_lp += counts->tx.p32x32[i][TX_16X16];
6500 count32x32 += counts->tx.p32x32[i][TX_32X32];
6502 if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
6504 cm->tx_mode = ALLOW_8X8;
6505 reset_skip_tx_size(cm, TX_8X8);
6506 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
6507 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
6508 cm->tx_mode = ONLY_4X4;
6509 reset_skip_tx_size(cm, TX_4X4);
6510 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
6511 cm->tx_mode = ALLOW_32X32;
6512 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
6513 cm->tx_mode = ALLOW_16X16;
6514 reset_skip_tx_size(cm, TX_16X16);
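// The transform-size counts collapse TX_MODE_SELECT into a cheaper fixed
// mode when some sizes were never picked: e.g. if no 4x4 and no reduced
// ("lp", smaller-than-maximum) 8x8/16x16 choices occurred, ALLOW_32X32
// signals the same decisions without per-block tx_size syntax.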
6518 FRAME_COUNTS *counts = cpi->td.counts;
6519 cm->reference_mode = SINGLE_REFERENCE;
6520 if (cpi->allow_comp_inter_inter && cpi->sf.use_compound_nonrd_pickmode &&
6521 cpi->rc.alt_ref_gf_group && !cpi->rc.is_src_frame_alt_ref &&
6522 cm->frame_type != KEY_FRAME)
6523 cm->reference_mode = REFERENCE_MODE_SELECT;
6525 encode_frame_internal(cpi);
6527 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
6528 int single_count_zero = 0;
6529 int comp_count_zero = 0;
6531 for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
6532 single_count_zero += counts->comp_inter[i][0];
6533 comp_count_zero += counts->comp_inter[i][1];
6535 if (comp_count_zero == 0) {
6536 cm->reference_mode = SINGLE_REFERENCE;
6537 vp9_zero(counts->comp_inter);
6538 } else if (single_count_zero == 0) {
6539 cm->reference_mode = COMPOUND_REFERENCE;
6540 vp9_zero(counts->comp_inter);
// If segmented AQ is enabled, compute the average AQ weighting.
6546 if (cm->seg.enabled && (cpi->oxcf.aq_mode != NO_AQ) &&
6547 (cm->seg.update_map || cm->seg.update_data)) {
6548 cm->seg.aq_av_offset = compute_frame_aq_offset(cpi);
6552 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
6553 const PREDICTION_MODE y_mode = mi->mode;
6554 const PREDICTION_MODE uv_mode = mi->uv_mode;
6555 const BLOCK_SIZE bsize = mi->sb_type;
6557 if (bsize < BLOCK_8X8) {
6559 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
6560 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
6561 for (idy = 0; idy < 2; idy += num_4x4_h)
6562 for (idx = 0; idx < 2; idx += num_4x4_w)
6563 ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
6565 ++counts->y_mode[size_group_lookup[bsize]][y_mode];
6568 ++counts->uv_mode[y_mode][uv_mode];
6571 static void update_zeromv_cnt(VP9_COMP *const cpi, const MODE_INFO *const mi,
6572 int mi_row, int mi_col, BLOCK_SIZE bsize) {
6573 const VP9_COMMON *const cm = &cpi->common;
6574 MV mv = mi->mv[0].as_mv;
6575 const int bw = num_8x8_blocks_wide_lookup[bsize];
6576 const int bh = num_8x8_blocks_high_lookup[bsize];
6577 const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
6578 const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
6579 const int block_index = mi_row * cm->mi_cols + mi_col;
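// Walk the 8x8 mi units covered by this block, clipped at the frame edge,
// and saturate-count consecutive near-zero-motion hits for LAST_FRAME
// inter blocks: |mv| < 8 in 1/8-pel units is under one full pixel, and
// the counter caps at 255.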
6581 for (y = 0; y < ymis; y++)
6582 for (x = 0; x < xmis; x++) {
6583 int map_offset = block_index + y * cm->mi_cols + x;
6584 if (mi->ref_frame[0] == LAST_FRAME && is_inter_block(mi) &&
6585 mi->segment_id <= CR_SEGMENT_ID_BOOST2) {
6586 if (abs(mv.row) < 8 && abs(mv.col) < 8) {
6587 if (cpi->consec_zero_mv[map_offset] < 255)
6588 cpi->consec_zero_mv[map_offset]++;
6590 cpi->consec_zero_mv[map_offset] = 0;
6596 static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
6597 int output_enabled, int mi_row, int mi_col,
6598 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
6599 VP9_COMMON *const cm = &cpi->common;
6600 MACROBLOCK *const x = &td->mb;
6601 MACROBLOCKD *const xd = &x->e_mbd;
6602 MODE_INFO *mi = xd->mi[0];
6603 const int seg_skip =
6604 segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP);
6605 x->skip_recode = !x->select_tx_size && mi->sb_type >= BLOCK_8X8 &&
6606 cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
6607 cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
6608 cpi->sf.allow_skip_recode;
6610 if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
6611 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
6613 x->skip_optimize = ctx->is_coded;
6615 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
6616 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
6617 x->q_index < QIDX_SKIP_THRESH);
6619 if (x->skip_encode) return;
6621 if (!is_inter_block(mi)) {
6623 #if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
6624 if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
6625 (xd->above_mi == NULL || xd->left_mi == NULL) &&
6626 need_top_left[mi->uv_mode])
6628 #endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
6630 for (plane = 0; plane < MAX_MB_PLANE; ++plane)
6631 vp9_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane, 1);
6632 if (output_enabled) sum_intra_stats(td->counts, mi);
6633 vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip,
6634 VPXMAX(bsize, BLOCK_8X8));
6637 const int is_compound = has_second_ref(mi);
6638 set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
6639 for (ref = 0; ref < 1 + is_compound; ++ref) {
6640 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mi->ref_frame[ref]);
6641 assert(cfg != NULL);
6642 vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
6643 &xd->block_refs[ref]->sf);
6645 if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
6646 vp9_build_inter_predictors_sby(xd, mi_row, mi_col,
6647 VPXMAX(bsize, BLOCK_8X8));
6649 vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col,
6650 VPXMAX(bsize, BLOCK_8X8));
6652 #if CONFIG_MISMATCH_DEBUG
6653 if (output_enabled) {
6655 for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
6656 const struct macroblockd_plane *pd = &xd->plane[plane];
6657 int pixel_c, pixel_r;
6658 const BLOCK_SIZE plane_bsize =
6659 get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), &xd->plane[plane]);
6660 const int bw = get_block_width(plane_bsize);
6661 const int bh = get_block_height(plane_bsize);
6662 mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, 0, 0,
6663 pd->subsampling_x, pd->subsampling_y);
6665 mismatch_record_block_pre(pd->dst.buf, pd->dst.stride, plane, pixel_c,
6667 xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
6672 vp9_encode_sb(x, VPXMAX(bsize, BLOCK_8X8), mi_row, mi_col, output_enabled);
6673 vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip,
6674 VPXMAX(bsize, BLOCK_8X8));
6681 if (output_enabled) {
6682 if (cm->tx_mode == TX_MODE_SELECT && mi->sb_type >= BLOCK_8X8 &&
6683 !(is_inter_block(mi) && mi->skip)) {
6684 ++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd),
6685 &td->counts->tx)[mi->tx_size];
6687 // The new intra coding scheme requires no change of transform size
6688 if (is_inter_block(mi)) {
6689 mi->tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
6690 max_txsize_lookup[bsize]);
6692 mi->tx_size = (bsize >= BLOCK_8X8) ? mi->tx_size : TX_4X4;
6696 ++td->counts->tx.tx_totals[mi->tx_size];
6697 ++td->counts->tx.tx_totals[get_uv_tx_size(mi, &xd->plane[1])];
6698 if (cm->seg.enabled && cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
6699 vp9_cyclic_refresh_update_sb_postencode(cpi, mi, mi_row, mi_col, bsize);
6700 if (cpi->oxcf.pass == 0 && cpi->svc.temporal_layer_id == 0 &&
6703 !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
6704 cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1)))
6705 update_zeromv_cnt(cpi, mi, mi_row, mi_col, bsize);