/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <string.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_ports/system_state.h"

#if CONFIG_MISMATCH_DEBUG
#include "vpx_util/vpx_debug_util.h"
#endif  // CONFIG_MISMATCH_DEBUG

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#if !CONFIG_REALTIME_ONLY
#include "vp9/encoder/vp9_aq_360.h"
#include "vp9/encoder/vp9_aq_complexity.h"
#endif
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#if !CONFIG_REALTIME_ONLY
#include "vp9/encoder/vp9_aq_variance.h"
#endif
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_multi_thread.h"
#include "vp9/encoder/vp9_partition_models.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"

static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                              int output_enabled, int mi_row, int mi_col,
                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);

// This is used as a reference when computing the source variance for the
// purpose of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};

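// Note on the constant-reference trick above: the fn_ptr[bs].vf() variance
// kernels compute the variance of (src - ref). Since variance is invariant
// to adding a constant, diffing the source against this all-128 block yields
// the variance of the source itself; only the intermediate sum of
// differences is offset by 128 per pixel. The high-bitdepth tables below
// scale 128 by 4 (10-bit) and by 16 (12-bit) so the same mid-gray level is
// used at each bit depth.
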
#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};

static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
};

static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH

unsigned int vp9_get_sby_variance(VP9_COMP *cpi, const struct buf_2d *ref,
                                  BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var =
      cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP9_VAR_OFFS, 0, &sse);
  return var;
}

#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_variance(VP9_COMP *cpi, const struct buf_2d *ref,
                                       BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), 0,
                               &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), 0,
                               &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), 0,
                               &sse);
      break;
  }
  return var;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  return ROUND_POWER_OF_TWO(vp9_get_sby_variance(cpi, ref, bs),
                            num_pels_log2_lookup[bs]);
}

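// Worked example: for BLOCK_64X64, num_pels_log2_lookup[bs] is 12
// (64 * 64 = 4096 pixels), so the result is (var + 2048) >> 12, i.e. the
// block variance divided by the pixel count, rounded to nearest.
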
#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_perpixel_variance(VP9_COMP *cpi,
                                                const struct buf_2d *ref,
                                                BLOCK_SIZE bs, int bd) {
  return (unsigned int)ROUND64_POWER_OF_TWO(
      (int64_t)vp9_high_get_sby_variance(cpi, ref, bs, bd),
      num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static void set_segment_index(VP9_COMP *cpi, MACROBLOCK *const x, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, int segment_index) {
  VP9_COMMON *const cm = &cpi->common;
  const struct segmentation *const seg = &cm->seg;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];

  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  const uint8_t *const map =
      seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;

  // Initialize the segmentation index as 0.
  mi->segment_id = 0;

  // Skip the rest if AQ mode is disabled.
  if (!seg->enabled) return;

  switch (aq_mode) {
    case CYCLIC_REFRESH_AQ:
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
      break;
#if !CONFIG_REALTIME_ONLY
    case VARIANCE_AQ:
      if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
          cpi->force_update_segmentation ||
          (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
        int min_energy;
        int max_energy;
        // Get sub block energy range.
        if (bsize >= BLOCK_32X32) {
          vp9_get_sub_block_energy(cpi, x, mi_row, mi_col, bsize, &min_energy,
                                   &max_energy);
        } else {
          min_energy = bsize <= BLOCK_16X16 ? x->mb_energy
                                            : vp9_block_energy(cpi, x, bsize);
        }
        mi->segment_id = vp9_vaq_segment_id(min_energy);
      } else {
        mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
      }
      break;
    case EQUATOR360_AQ:
      if (cm->frame_type == KEY_FRAME || cpi->force_update_segmentation)
        mi->segment_id = vp9_360aq_segment_id(mi_row, cm->mi_rows);
      else
        mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
      break;
#endif
    case LOOKAHEAD_AQ:
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
      break;
    case PSNR_AQ: mi->segment_id = segment_index; break;
    case PERCEPTUAL_AQ: mi->segment_id = x->segment_id; break;
    default: break;
  }

  // Set segment index from ROI map if it's enabled.
  if (cpi->roi.enabled)
    mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);

  vp9_init_plane_quantizers(cpi, x);
}

// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCK *const x,
                                         MACROBLOCKD *const xd, int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
  x->mbmi_ext = x->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}

static void set_ssim_rdmult(VP9_COMP *const cpi, MACROBLOCK *const x,
                            const BLOCK_SIZE bsize, const int mi_row,
                            const int mi_col, int *const rdmult) {
  const VP9_COMMON *const cm = &cpi->common;

  const int bsize_base = BLOCK_16X16;
  const int num_8x8_w = num_8x8_blocks_wide_lookup[bsize_base];
  const int num_8x8_h = num_8x8_blocks_high_lookup[bsize_base];
  const int num_cols = (cm->mi_cols + num_8x8_w - 1) / num_8x8_w;
  const int num_rows = (cm->mi_rows + num_8x8_h - 1) / num_8x8_h;
  const int num_bcols =
      (num_8x8_blocks_wide_lookup[bsize] + num_8x8_w - 1) / num_8x8_w;
  const int num_brows =
      (num_8x8_blocks_high_lookup[bsize] + num_8x8_h - 1) / num_8x8_h;
  int row, col;
  double num_of_mi = 0.0;
  double geom_mean_of_scale = 0.0;

  assert(cpi->oxcf.tuning == VP8_TUNE_SSIM);
  for (row = mi_row / num_8x8_w;
       row < num_rows && row < mi_row / num_8x8_w + num_brows; ++row) {
    for (col = mi_col / num_8x8_h;
         col < num_cols && col < mi_col / num_8x8_h + num_bcols; ++col) {
      const int index = row * num_cols + col;
      geom_mean_of_scale += log(cpi->mi_ssim_rdmult_scaling_factors[index]);
      num_of_mi += 1.0;
    }
  }
  geom_mean_of_scale = exp(geom_mean_of_scale / num_of_mi);

  *rdmult = (int)((double)(*rdmult) * geom_mean_of_scale);
  *rdmult = VPXMAX(*rdmult, 0);
  set_error_per_bit(x, *rdmult);
  vpx_clear_system_state();
}

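// The rdmult update above is a geometric mean: exp(mean(log(s_i))) over the
// per-16x16 SSIM scaling factors s_i covered by the block. For example,
// factors of 0.5 and 2.0 average out to 1.0, so balanced up/down scaling
// leaves the rate-distortion multiplier unchanged.
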
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const VP9EncoderConfig *const oxcf = &cpi->oxcf;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  MvLimits *const mv_limits = &x->mv_limits;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, x, xd, mi_row, mi_col);

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond the range do not produce a new/different prediction block.
  mv_limits->row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  mv_limits->col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  mv_limits->row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  mv_limits->col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
                 cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;
  if (oxcf->tuning == VP8_TUNE_SSIM) {
    set_ssim_rdmult(cpi, x, bsize, mi_row, mi_col, &x->rdmult);
  }

  // required by vp9_append_sub8x8_mvs_for_idx() and vp9_find_best_ref_mvs()
  xd->tile = *tile;
}

static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width =
      VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
  const int block_height =
      VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
  const int mi_stride = xd->mi_stride;
  MODE_INFO *const src_mi = xd->mi[0];
  int i, j;

  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) xd->mi[j * mi_stride + i] = src_mi;
}

static void set_block_size(VP9_COMP *const cpi, MACROBLOCK *const x,
                           MACROBLOCKD *const xd, int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col);
    xd->mi[0]->sb_type = bsize;
  }
}

// This struct is used for computing variance in choose_partitioning(), where
// the max number of samples within a superblock is 16x16 (with 4x4 avg). Even
// in high bitdepth, uint32_t is enough for sum_square_error
// (2^12 * 2^12 * 16 * 16 = 2^32).
typedef struct {
  uint32_t sum_square_error;
  int32_t sum_error;
  int log2_count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    default: {
      v4x4 *vt = (v4x4 *)data;
      assert(bsize == BLOCK_4X4);
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++) node->split[i] = &vt->split[i];
      break;
    }
  }
}

// Set variance values given sum square error, sum error, count.
static void fill_variance(uint32_t s2, int32_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}

static void get_variance(var *v) {
  v->variance =
      (int)(256 * (v->sum_square_error -
                   (uint32_t)(((int64_t)v->sum_error * v->sum_error) >>
                              v->log2_count)) >>
            v->log2_count);
}

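// This evaluates 256 * (sum_square_error - sum_error^2 / n) / n with
// n = 2^log2_count, i.e. 256 times the sample variance E[d^2] - (E[d])^2 of
// the accumulated differences. The factor of 256 preserves precision
// through the integer divisions at small sample counts.
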
static void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}

static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  memset(&node, 0, sizeof(node));
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}

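// Combination layout: split[0..3] are the four quadrants in raster order
// (top-left, top-right, bottom-left, bottom-right). horz[0]/horz[1] are the
// top/bottom halves, vert[0]/vert[1] the left/right halves, and none is the
// whole block. Each sum_2_variances() call doubles the sample count, which
// is why log2_count increments by one per level.
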
static int set_vt_partitioning(VP9_COMP *cpi, MACROBLOCK *const x,
                               MACROBLOCKD *const xd, void *data,
                               BLOCK_SIZE bsize, int mi_row, int mi_col,
                               int64_t threshold, BLOCK_SIZE bsize_min,
                               int force_split) {
  VP9_COMMON *const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  if (force_split == 1) return 0;

  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_min) {
    // Variance already computed to set the force_split.
    if (frame_is_intra_only(cm)) get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    // Variance already computed to set the force_split.
    if (frame_is_intra_only(cm)) get_variance(&vt.part_variances->none);
    // For key frame: take split for bsize above 32X32 or very high variance.
    if (frame_is_intra_only(cm) &&
        (bsize > BLOCK_32X32 ||
         vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }
    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }

    return 0;
  }
  return 0;
}

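// Return contract, as used by choose_partitioning() below: 1 means a
// partition (none, vert, or horz) was selected and written at this level;
// 0 means the variance was too high (or force_split was set) and the caller
// must recurse into the four sub-blocks.
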
static int64_t scale_part_thresh_sumdiff(int64_t threshold_base, int speed,
                                         int width, int height,
                                         int content_state) {
  if (speed >= 8) {
    if (width <= 640 && height <= 480)
      return (5 * threshold_base) >> 2;
    else if ((content_state == kLowSadLowSumdiff) ||
             (content_state == kHighSadLowSumdiff) ||
             (content_state == kLowVarHighSumdiff))
      return (5 * threshold_base) >> 2;
  } else if (speed == 7) {
    if ((content_state == kLowSadLowSumdiff) ||
        (content_state == kHighSadLowSumdiff) ||
        (content_state == kLowVarHighSumdiff)) {
      return (5 * threshold_base) >> 2;
    }
  }
  return threshold_base;
}

// Set the variance split thresholds for the following block sizes:
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frame.
static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q,
                               int content_state) {
  VP9_COMMON *const cm = &cpi->common;
  const int is_key_frame = frame_is_intra_only(cm);
  const int threshold_multiplier =
      is_key_frame ? 20 : cpi->sf.variance_part_thresh_mult;
  int64_t threshold_base =
      (int64_t)(threshold_multiplier * cpi->y_dequant[q][1]);

  if (is_key_frame) {
    thresholds[0] = threshold_base;
    thresholds[1] = threshold_base >> 2;
    thresholds[2] = threshold_base >> 2;
    thresholds[3] = threshold_base << 2;
  } else {
    // Increase base variance threshold based on estimated noise level.
    if (cpi->noise_estimate.enabled && cm->width >= 640 && cm->height >= 480) {
      NOISE_LEVEL noise_level =
          vp9_noise_estimate_extract_level(&cpi->noise_estimate);
      if (noise_level == kHigh)
        threshold_base = 3 * threshold_base;
      else if (noise_level == kMedium)
        threshold_base = threshold_base << 1;
      else if (noise_level < kLow)
        threshold_base = (7 * threshold_base) >> 3;
    }
#if CONFIG_VP9_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
        cpi->oxcf.speed > 5 && cpi->denoiser.denoising_level >= kDenLow)
      threshold_base =
          vp9_scale_part_thresh(threshold_base, cpi->denoiser.denoising_level,
                                content_state, cpi->svc.temporal_layer_id);
    else
      threshold_base =
          scale_part_thresh_sumdiff(threshold_base, cpi->oxcf.speed, cm->width,
                                    cm->height, content_state);
#else
    // Increase base variance threshold based on content_state/sum_diff level.
    threshold_base = scale_part_thresh_sumdiff(
        threshold_base, cpi->oxcf.speed, cm->width, cm->height, content_state);
#endif
    thresholds[0] = threshold_base;
    thresholds[2] = threshold_base << cpi->oxcf.speed;
    if (cm->width >= 1280 && cm->height >= 720 && cpi->oxcf.speed < 7)
      thresholds[2] = thresholds[2] << 1;
    if (cm->width <= 352 && cm->height <= 288) {
      thresholds[0] = threshold_base >> 3;
      thresholds[1] = threshold_base >> 1;
      thresholds[2] = threshold_base << 3;
      if (cpi->rc.avg_frame_qindex[INTER_FRAME] > 220)
        thresholds[2] = thresholds[2] << 2;
      else if (cpi->rc.avg_frame_qindex[INTER_FRAME] > 200)
        thresholds[2] = thresholds[2] << 1;
    } else if (cm->width < 1280 && cm->height < 720) {
      thresholds[1] = (5 * threshold_base) >> 2;
    } else if (cm->width < 1920 && cm->height < 1080) {
      thresholds[1] = threshold_base << 1;
    } else {
      thresholds[1] = (5 * threshold_base) >> 1;
    }
    if (cpi->sf.disable_16x16part_nonkey) thresholds[2] = INT64_MAX;
  }
}

void vp9_set_variance_partition_thresholds(VP9_COMP *cpi, int q,
                                           int content_state) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int is_key_frame = frame_is_intra_only(cm);
  if (sf->partition_search_type != VAR_BASED_PARTITION &&
      sf->partition_search_type != REFERENCE_PARTITION) {
    return;
  } else {
    set_vbp_thresholds(cpi, cpi->vbp_thresholds, q, content_state);
    // The thresholds below are not changed locally.
    if (is_key_frame) {
      cpi->vbp_threshold_sad = 0;
      cpi->vbp_threshold_copy = 0;
      cpi->vbp_bsize_min = BLOCK_8X8;
    } else {
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_sad = 10;
      else
        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000
                                     ? (cpi->y_dequant[q][1] << 1)
                                     : 1000;
      cpi->vbp_bsize_min = BLOCK_16X16;
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_copy = 4000;
      else if (cm->width <= 640 && cm->height <= 360)
        cpi->vbp_threshold_copy = 8000;
      else
        cpi->vbp_threshold_copy = (cpi->y_dequant[q][1] << 3) > 8000
                                      ? (cpi->y_dequant[q][1] << 3)
                                      : 8000;
      if (cpi->rc.high_source_sad ||
          (cpi->use_svc && cpi->svc.high_source_sad_superframe)) {
        cpi->vbp_threshold_sad = 0;
        cpi->vbp_threshold_copy = 0;
      }
    }
    cpi->vbp_threshold_minmax = 15 + (q >> 3);
  }
}

// Compute the minmax over the 8x8 subblocks.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                              int dp, int x16_idx, int y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                              int highbd_flag,
#endif
                              int pixels_wide, int pixels_high) {
  int k;
  int minmax_max = 0;
  int minmax_min = 255;
  // Loop over the 4 8x8 subblocks.
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    int min = 0;
    int max = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                              d + y8_idx * dp + x8_idx, dp, &min, &max);
      } else {
        vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx,
                       dp, &min, &max);
      }
#else
      vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp,
                     &min, &max);
#endif
      if ((max - min) > minmax_max) minmax_max = (max - min);
      if ((max - min) < minmax_min) minmax_min = (max - min);
    }
  }
  return (minmax_max - minmax_min);
}

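// The returned value is the spread between the largest and smallest per-8x8
// (max - min) range of the absolute source/prediction difference inside the
// 16x16 block. A large spread means one sub-block is much busier than
// another, which choose_partitioning() treats as a cue to force an 8x8 split
// even when the average variance looks moderate.
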
static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x8_idx, int y8_idx, v8x8 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide, int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x4_idx = x8_idx + ((k & 1) << 2);
    int y4_idx = y8_idx + ((k >> 1) << 2);
    unsigned int sse = 0;
    int sum = 0;
    if (x4_idx < pixels_wide && y4_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      } else {
        s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      }
#else
      s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
      if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}

static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x16_idx, int y16_idx, v16x16 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide, int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    unsigned int sse = 0;
    int sum = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      } else {
        s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      }
#else
      s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
      if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}

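// Each leaf filled above holds a single sample: the difference between the
// source and reference 8x8 (or 4x4 in the function above) block averages,
// with a fixed d_avg of 128 on key frames. With log2_count = 0 and
// sse = sum * sum, a leaf by itself carries zero variance; meaningful
// variances only appear once fill_variance_tree() aggregates several leaves.
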
// Check if most of the superblock is skin content, and if so, force split to
// 32x32, and set x->sb_is_skin for use in mode selection.
static int skin_sb_split(VP9_COMP *cpi, const int low_res, int mi_row,
                         int mi_col, int *force_split) {
  VP9_COMMON *const cm = &cpi->common;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth) return 0;
#endif
  // Avoid checking superblocks on/near boundary and avoid low resolutions.
  // Note superblock may still pick 64X64 if y_sad is very small
  // (i.e., y_sad < cpi->vbp_threshold_sad) below. For now leave this as is.
  if (!low_res && (mi_col >= 8 && mi_col + 8 < cm->mi_cols && mi_row >= 8 &&
                   mi_row + 8 < cm->mi_rows)) {
    int num_16x16_skin = 0;
    int num_16x16_nonskin = 0;
    const int block_index = mi_row * cm->mi_cols + mi_col;
    const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
    const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64];
    const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
    const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
    // Loop through the 16x16 sub-blocks.
    int i, j;
    for (i = 0; i < ymis; i += 2) {
      for (j = 0; j < xmis; j += 2) {
        int bl_index = block_index + i * cm->mi_cols + j;
        int is_skin = cpi->skin_map[bl_index];
        num_16x16_skin += is_skin;
        num_16x16_nonskin += (1 - is_skin);
        if (num_16x16_nonskin > 3) {
          // Exit loop if at least 4 of the 16x16 blocks are not skin.
          i = ymis;
          break;
        }
      }
    }
    if (num_16x16_skin > 12) {
      *force_split = 1;
      return 1;
    }
  }
  return 0;
}

static void set_low_temp_var_flag(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                  v64x64 *vt, int64_t thresholds[],
                                  MV_REFERENCE_FRAME ref_frame_partition,
                                  int mi_col, int mi_row) {
  int i, j;
  VP9_COMMON *const cm = &cpi->common;
  const int mv_thr = cm->width > 640 ? 8 : 4;
  // Check temporal variance for bsize >= 16x16, if LAST_FRAME was selected and
  // int_pro mv is small. If the temporal variance is small, set the flag
  // variance_low for the block. The variance threshold can be adjusted: the
  // higher, the more aggressive.
  if (ref_frame_partition == LAST_FRAME &&
      (cpi->sf.short_circuit_low_temp_var == 1 ||
       (xd->mi[0]->mv[0].as_mv.col < mv_thr &&
        xd->mi[0]->mv[0].as_mv.col > -mv_thr &&
        xd->mi[0]->mv[0].as_mv.row < mv_thr &&
        xd->mi[0]->mv[0].as_mv.row > -mv_thr))) {
    if (xd->mi[0]->sb_type == BLOCK_64X64) {
      if ((vt->part_variances).none.variance < (thresholds[0] >> 1))
        x->variance_low[0] = 1;
    } else if (xd->mi[0]->sb_type == BLOCK_64X32) {
      for (i = 0; i < 2; i++) {
        if (vt->part_variances.horz[i].variance < (thresholds[0] >> 2))
          x->variance_low[i + 1] = 1;
      }
    } else if (xd->mi[0]->sb_type == BLOCK_32X64) {
      for (i = 0; i < 2; i++) {
        if (vt->part_variances.vert[i].variance < (thresholds[0] >> 2))
          x->variance_low[i + 3] = 1;
      }
    } else {
      for (i = 0; i < 4; i++) {
        const int idx[4][2] = { { 0, 0 }, { 0, 4 }, { 4, 0 }, { 4, 4 } };
        const int idx_str =
            cm->mi_stride * (mi_row + idx[i][0]) + mi_col + idx[i][1];
        MODE_INFO **this_mi = cm->mi_grid_visible + idx_str;

        if (cm->mi_cols <= mi_col + idx[i][1] ||
            cm->mi_rows <= mi_row + idx[i][0])
          continue;

        if ((*this_mi)->sb_type == BLOCK_32X32) {
          int64_t threshold_32x32 = (cpi->sf.short_circuit_low_temp_var == 1 ||
                                     cpi->sf.short_circuit_low_temp_var == 3)
                                        ? ((5 * thresholds[1]) >> 3)
                                        : (thresholds[1] >> 1);
          if (vt->split[i].part_variances.none.variance < threshold_32x32)
            x->variance_low[i + 5] = 1;
        } else if (cpi->sf.short_circuit_low_temp_var >= 2) {
          // For 32x16 and 16x32 blocks, the flag is set on each 16x16 block
          // inside.
          if ((*this_mi)->sb_type == BLOCK_16X16 ||
              (*this_mi)->sb_type == BLOCK_32X16 ||
              (*this_mi)->sb_type == BLOCK_16X32) {
            for (j = 0; j < 4; j++) {
              if (vt->split[i].split[j].part_variances.none.variance <
                  (thresholds[2] >> 8))
                x->variance_low[(i << 2) + j + 9] = 1;
            }
          }
        }
      }
    }
  }
}

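// x->variance_low[] layout used above (25 flags per 64x64 superblock):
// index 0 covers the whole 64x64, 1-2 the two 64x32 halves, 3-4 the two
// 32x64 halves, 5-8 the four 32x32 quadrants, and 9-24 the sixteen 16x16
// blocks. copy_partitioning() below copies all 25 flags from the previous
// frame, which is why prev_variance_low is indexed in units of 25.
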
static void copy_partitioning_helper(VP9_COMP *cpi, MACROBLOCK *x,
                                     MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                     int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  BLOCK_SIZE *prev_part = cpi->prev_partition;
  int start_pos = mi_row * cm->mi_stride + mi_col;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) >> 2;
  BLOCK_SIZE subsize;
  PARTITION_TYPE partition;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  partition = partition_lookup[bsl][prev_part[start_pos]];
  subsize = get_subsize(bsize, partition);

  if (subsize < BLOCK_8X8) {
    set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
        break;
      case PARTITION_HORZ:
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row + bs, mi_col, subsize);
        break;
      case PARTITION_VERT:
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row, mi_col + bs, subsize);
        break;
      default:
        assert(partition == PARTITION_SPLIT);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row, mi_col);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row + bs, mi_col);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row, mi_col + bs);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row + bs, mi_col + bs);
        break;
    }
  }
}

static int copy_partitioning(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                             int mi_row, int mi_col, int segment_id,
                             int sb_offset) {
  int svc_copy_allowed = 1;
  int frames_since_key_thresh = 1;
  if (cpi->use_svc) {
    // For SVC, don't allow copy if base spatial layer is key frame, or if
    // frame is not a temporal enhancement layer frame.
    int layer = LAYER_IDS_TO_IDX(0, cpi->svc.temporal_layer_id,
                                 cpi->svc.number_temporal_layers);
    const LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer];
    if (lc->is_key_frame || !cpi->svc.non_reference_frame) svc_copy_allowed = 0;
    frames_since_key_thresh = cpi->svc.number_spatial_layers << 1;
  }
  if (cpi->rc.frames_since_key > frames_since_key_thresh && svc_copy_allowed &&
      !cpi->resize_pending && segment_id == CR_SEGMENT_ID_BASE &&
      cpi->prev_segment_id[sb_offset] == CR_SEGMENT_ID_BASE &&
      cpi->copied_frame_cnt[sb_offset] < cpi->max_copied_frame) {
    if (cpi->prev_partition != NULL) {
      copy_partitioning_helper(cpi, x, xd, BLOCK_64X64, mi_row, mi_col);
      cpi->copied_frame_cnt[sb_offset] += 1;
      memcpy(x->variance_low, &(cpi->prev_variance_low[sb_offset * 25]),
             sizeof(x->variance_low));
      return 1;
    }
  }
  return 0;
}

static int scale_partitioning_svc(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int mi_row_high, int mi_col_high) {
  VP9_COMMON *const cm = &cpi->common;
  SVC *const svc = &cpi->svc;
  BLOCK_SIZE *prev_part = svc->prev_partition_svc;
  // Variables with _high are for higher resolution.
  int bsize_high = 0;
  int subsize_high = 0;
  const int bsl_high = b_width_log2_lookup[bsize];
  const int bs_high = (1 << bsl_high) >> 2;
  const int has_rows = (mi_row_high + bs_high) < cm->mi_rows;
  const int has_cols = (mi_col_high + bs_high) < cm->mi_cols;

  const int row_boundary_block_scale_factor[BLOCK_SIZES] = { 13, 13, 13, 1, 0,
                                                             1,  1,  0,  1, 1,
                                                             0,  1,  0 };
  const int col_boundary_block_scale_factor[BLOCK_SIZES] = { 13, 13, 13, 2, 2,
                                                             0,  2,  2,  0, 2,
                                                             2,  0,  0 };
  int start_pos;
  BLOCK_SIZE bsize_low;
  PARTITION_TYPE partition_high;

  if (mi_row_high >= cm->mi_rows || mi_col_high >= cm->mi_cols) return 0;
  if (mi_row >= svc->mi_rows[svc->spatial_layer_id - 1] ||
      mi_col >= svc->mi_cols[svc->spatial_layer_id - 1])
    return 0;

  // Find corresponding (mi_col/mi_row) block down-scaled by 2x2.
  start_pos = mi_row * (svc->mi_stride[svc->spatial_layer_id - 1]) + mi_col;
  bsize_low = prev_part[start_pos];
  // The block size is too big for boundaries. Do variance-based partitioning.
  if ((!has_rows || !has_cols) && bsize_low > BLOCK_16X16) return 1;

  // For reference frames: return 1 (do variance-based partitioning) if the
  // superblock is not low source sad and lower-resolution bsize is below
  // 32x32.
  if (!cpi->svc.non_reference_frame && !x->skip_low_source_sad &&
      bsize_low < BLOCK_32X32)
    return 1;

  // Scale up block size by 2x2. Force 64x64 for size larger than 32x32.
  if (bsize_low < BLOCK_32X32) {
    bsize_high = bsize_low + 3;
  } else if (bsize_low >= BLOCK_32X32) {
    bsize_high = BLOCK_64X64;
  }
  // Scale up blocks on boundary.
  if (!has_cols && has_rows) {
    bsize_high = bsize_low + row_boundary_block_scale_factor[bsize_low];
  } else if (has_cols && !has_rows) {
    bsize_high = bsize_low + col_boundary_block_scale_factor[bsize_low];
  } else if (!has_cols && !has_rows) {
    bsize_high = bsize_low;
  }

  partition_high = partition_lookup[bsl_high][bsize_high];
  subsize_high = get_subsize(bsize, partition_high);

  if (subsize_high < BLOCK_8X8) {
    set_block_size(cpi, x, xd, mi_row_high, mi_col_high, bsize_high);
  } else {
    const int bsl = b_width_log2_lookup[bsize];
    const int bs = (1 << bsl) >> 2;
    switch (partition_high) {
      case PARTITION_NONE:
        set_block_size(cpi, x, xd, mi_row_high, mi_col_high, bsize_high);
        break;
      case PARTITION_HORZ:
        set_block_size(cpi, x, xd, mi_row_high, mi_col_high, subsize_high);
        if (subsize_high < BLOCK_64X64)
          set_block_size(cpi, x, xd, mi_row_high + bs_high, mi_col_high,
                         subsize_high);
        break;
      case PARTITION_VERT:
        set_block_size(cpi, x, xd, mi_row_high, mi_col_high, subsize_high);
        if (subsize_high < BLOCK_64X64)
          set_block_size(cpi, x, xd, mi_row_high, mi_col_high + bs_high,
                         subsize_high);
        break;
      default:
        assert(partition_high == PARTITION_SPLIT);
        if (scale_partitioning_svc(cpi, x, xd, subsize_high, mi_row, mi_col,
                                   mi_row_high, mi_col_high))
          return 1;
        if (scale_partitioning_svc(cpi, x, xd, subsize_high, mi_row + (bs >> 1),
                                   mi_col, mi_row_high + bs_high, mi_col_high))
          return 1;
        if (scale_partitioning_svc(cpi, x, xd, subsize_high, mi_row,
                                   mi_col + (bs >> 1), mi_row_high,
                                   mi_col_high + bs_high))
          return 1;
        if (scale_partitioning_svc(cpi, x, xd, subsize_high, mi_row + (bs >> 1),
                                   mi_col + (bs >> 1), mi_row_high + bs_high,
                                   mi_col_high + bs_high))
          return 1;
        break;
    }
  }

  return 0;
}

static void update_partition_svc(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row,
                                 int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  BLOCK_SIZE *prev_part = cpi->svc.prev_partition_svc;
  int start_pos = mi_row * cm->mi_stride + mi_col;
  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) >> 2;
  BLOCK_SIZE subsize;
  PARTITION_TYPE partition;
  const MODE_INFO *mi = NULL;
  int xx, yy;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  mi = cm->mi_grid_visible[start_pos];
  partition = partition_lookup[bsl][mi->sb_type];
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    prev_part[start_pos] = bsize;
  } else {
    switch (partition) {
      case PARTITION_NONE:
        prev_part[start_pos] = bsize;
        if (bsize == BLOCK_64X64) {
          for (xx = 0; xx < 8; xx += 4)
            for (yy = 0; yy < 8; yy += 4) {
              if ((mi_row + xx < cm->mi_rows) && (mi_col + yy < cm->mi_cols))
                prev_part[start_pos + xx * cm->mi_stride + yy] = bsize;
            }
        }
        break;
      case PARTITION_HORZ:
        prev_part[start_pos] = subsize;
        if (mi_row + bs < cm->mi_rows)
          prev_part[start_pos + bs * cm->mi_stride] = subsize;
        break;
      case PARTITION_VERT:
        prev_part[start_pos] = subsize;
        if (mi_col + bs < cm->mi_cols) prev_part[start_pos + bs] = subsize;
        break;
      default:
        assert(partition == PARTITION_SPLIT);
        update_partition_svc(cpi, subsize, mi_row, mi_col);
        update_partition_svc(cpi, subsize, mi_row + bs, mi_col);
        update_partition_svc(cpi, subsize, mi_row, mi_col + bs);
        update_partition_svc(cpi, subsize, mi_row + bs, mi_col + bs);
        break;
    }
  }
}

static void update_prev_partition_helper(VP9_COMP *cpi, BLOCK_SIZE bsize,
                                         int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  BLOCK_SIZE *prev_part = cpi->prev_partition;
  int start_pos = mi_row * cm->mi_stride + mi_col;
  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) >> 2;
  BLOCK_SIZE subsize;
  PARTITION_TYPE partition;
  const MODE_INFO *mi = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  mi = cm->mi_grid_visible[start_pos];
  partition = partition_lookup[bsl][mi->sb_type];
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    prev_part[start_pos] = bsize;
  } else {
    switch (partition) {
      case PARTITION_NONE: prev_part[start_pos] = bsize; break;
      case PARTITION_HORZ:
        prev_part[start_pos] = subsize;
        if (mi_row + bs < cm->mi_rows)
          prev_part[start_pos + bs * cm->mi_stride] = subsize;
        break;
      case PARTITION_VERT:
        prev_part[start_pos] = subsize;
        if (mi_col + bs < cm->mi_cols) prev_part[start_pos + bs] = subsize;
        break;
      default:
        assert(partition == PARTITION_SPLIT);
        update_prev_partition_helper(cpi, subsize, mi_row, mi_col);
        update_prev_partition_helper(cpi, subsize, mi_row + bs, mi_col);
        update_prev_partition_helper(cpi, subsize, mi_row, mi_col + bs);
        update_prev_partition_helper(cpi, subsize, mi_row + bs, mi_col + bs);
        break;
    }
  }
}

static void update_prev_partition(VP9_COMP *cpi, MACROBLOCK *x, int segment_id,
                                  int mi_row, int mi_col, int sb_offset) {
  update_prev_partition_helper(cpi, BLOCK_64X64, mi_row, mi_col);
  cpi->prev_segment_id[sb_offset] = segment_id;
  memcpy(&(cpi->prev_variance_low[sb_offset * 25]), x->variance_low,
         sizeof(x->variance_low));
  // Reset the counter for copy partitioning.
  cpi->copied_frame_cnt[sb_offset] = 0;
}

static void chroma_check(VP9_COMP *cpi, MACROBLOCK *x, int bsize,
                         unsigned int y_sad, int is_key_frame,
                         int scene_change_detected) {
  int i;
  MACROBLOCKD *xd = &x->e_mbd;
  int shift = 2;

  if (is_key_frame) return;

  // For speed > 8, avoid the chroma check if y_sad is above threshold.
  if (cpi->oxcf.speed > 8) {
    if (y_sad > cpi->vbp_thresholds[1] &&
        (!cpi->noise_estimate.enabled ||
         vp9_noise_estimate_extract_level(&cpi->noise_estimate) < kMedium))
      return;
  }

  if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && scene_change_detected)
    shift = 5;

  for (i = 1; i <= 2; ++i) {
    unsigned int uv_sad = UINT_MAX;
    struct macroblock_plane *p = &x->plane[i];
    struct macroblockd_plane *pd = &xd->plane[i];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);

    if (bs != BLOCK_INVALID)
      uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf,
                                   pd->dst.stride);

    // TODO(marpan): Investigate if we should lower this threshold if
    // superblock is detected as skin.
    x->color_sensitivity[i - 1] = uv_sad > (y_sad >> shift);
  }
}

static uint64_t avg_source_sad(VP9_COMP *cpi, MACROBLOCK *x, int shift,
                               int sb_offset) {
  unsigned int tmp_sse;
  uint64_t tmp_sad;
  unsigned int tmp_variance;
  const BLOCK_SIZE bsize = BLOCK_64X64;
  uint8_t *src_y = cpi->Source->y_buffer;
  int src_ystride = cpi->Source->y_stride;
  uint8_t *last_src_y = cpi->Last_Source->y_buffer;
  int last_src_ystride = cpi->Last_Source->y_stride;
  uint64_t avg_source_sad_threshold = 10000;
  uint64_t avg_source_sad_threshold2 = 12000;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cpi->common.use_highbitdepth) return 0;
#endif
  src_y += shift;
  last_src_y += shift;
  tmp_sad =
      cpi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y, last_src_ystride);
  tmp_variance = vpx_variance64x64(src_y, src_ystride, last_src_y,
                                   last_src_ystride, &tmp_sse);
  // Note: tmp_sse - tmp_variance = ((sum * sum) >> 12)
  if (tmp_sad < avg_source_sad_threshold)
    x->content_state_sb = ((tmp_sse - tmp_variance) < 25) ? kLowSadLowSumdiff
                                                          : kLowSadHighSumdiff;
  else
    x->content_state_sb = ((tmp_sse - tmp_variance) < 25) ? kHighSadLowSumdiff
                                                          : kHighSadHighSumdiff;
  // Detect large lighting change.
  if (cpi->oxcf.content != VP9E_CONTENT_SCREEN &&
      cpi->oxcf.rc_mode == VPX_CBR && tmp_variance < (tmp_sse >> 3) &&
      (tmp_sse - tmp_variance) > 10000)
    x->content_state_sb = kLowVarHighSumdiff;
  else if (tmp_sad > (avg_source_sad_threshold << 1))
    x->content_state_sb = kVeryHighSad;

  if (cpi->content_state_sb_fd != NULL) {
    if (tmp_sad < avg_source_sad_threshold2) {
      // Cap the increment to 255.
      if (cpi->content_state_sb_fd[sb_offset] < 255)
        cpi->content_state_sb_fd[sb_offset]++;
    } else {
      cpi->content_state_sb_fd[sb_offset] = 0;
    }
  }
  if (tmp_sad == 0) x->zero_temp_sad_source = 1;
  return tmp_sad;
}

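// About the identity noted above: vpx_variance64x64() returns
// variance = sse - ((sum * sum) >> 12) for the 4096-pixel block, so
// tmp_sse - tmp_variance recovers (sum * sum) >> 12, a squared measure of
// the mean frame difference. A small value means the two frames differ
// mostly by zero-mean noise, while a large value combined with low variance
// suggests a uniform shift such as a lighting change, which is exactly the
// kLowVarHighSumdiff test above.
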
// This function chooses partitioning based on the variance between source and
// reconstructed last, where variance is computed for down-sampled inputs.
static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                               MACROBLOCK *x, int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int i, j, k, m;
  v64x64 vt;
  v16x16 *vt2 = NULL;
  int force_split[21];
  int avg_32x32;
  int max_var_32x32 = 0;
  int min_var_32x32 = INT_MAX;
  int var_32x32;
  int avg_16x16[4];
  int maxvar_16x16[4];
  int minvar_16x16[4];
  int64_t threshold_4x4avg;
  NOISE_LEVEL noise_level = kLow;
  int content_state = 0;
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int compute_minmax_variance = 1;
  unsigned int y_sad = UINT_MAX;
  BLOCK_SIZE bsize = BLOCK_64X64;
  // Ref frame used in partitioning.
  MV_REFERENCE_FRAME ref_frame_partition = LAST_FRAME;
  int pixels_wide = 64, pixels_high = 64;
  int64_t thresholds[4] = { cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
                            cpi->vbp_thresholds[2], cpi->vbp_thresholds[3] };
  int scene_change_detected =
      cpi->rc.high_source_sad ||
      (cpi->use_svc && cpi->svc.high_source_sad_superframe);
  int force_64_split = scene_change_detected ||
                       (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
                        cpi->compute_source_sad_onepass &&
                        cpi->sf.use_source_sad && !x->zero_temp_sad_source);

  // For the variance computation under SVC mode, we treat the frame as key if
  // the reference (base layer frame) is key frame (i.e., is_key_frame == 1).
  int is_key_frame =
      (frame_is_intra_only(cm) ||
       (is_one_pass_cbr_svc(cpi) &&
        cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame));
  // Always use 4x4 partition for key frame.
  const int use_4x4_partition = frame_is_intra_only(cm);
  const int low_res = (cm->width <= 352 && cm->height <= 288);
  int variance4x4downsample[16];
  int segment_id;
  int sb_offset = (cm->mi_stride >> 3) * (mi_row >> 3) + (mi_col >> 3);

  // For SVC: check if LAST frame is NULL or if the resolution of LAST is
  // different than the current frame resolution, and if so, treat this frame
  // as a key frame, for the purpose of the superblock partitioning.
  // LAST == NULL can happen in some cases where enhancement spatial layers are
  // enabled dynamically in the stream and the only reference is the spatial
  // reference (GOLDEN).
  if (cpi->use_svc) {
    const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, LAST_FRAME);
    if (ref == NULL || ref->y_crop_height != cm->height ||
        ref->y_crop_width != cm->width)
      is_key_frame = 1;
  }

  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
  set_segment_index(cpi, x, mi_row, mi_col, BLOCK_64X64, 0);
  segment_id = xd->mi[0]->segment_id;

  if (cpi->oxcf.speed >= 8 || (cpi->use_svc && cpi->svc.non_reference_frame))
    compute_minmax_variance = 0;

  memset(x->variance_low, 0, sizeof(x->variance_low));

  if (cpi->sf.use_source_sad && !is_key_frame) {
    int sb_offset2 = ((cm->mi_cols + 7) >> 3) * (mi_row >> 3) + (mi_col >> 3);
    content_state = x->content_state_sb;
    x->skip_low_source_sad = (content_state == kLowSadLowSumdiff ||
                              content_state == kLowSadHighSumdiff)
                                 ? 1
                                 : 0;
    x->lowvar_highsumdiff = (content_state == kLowVarHighSumdiff) ? 1 : 0;
    if (cpi->content_state_sb_fd != NULL)
      x->last_sb_high_content = cpi->content_state_sb_fd[sb_offset2];

    // For SVC on top spatial layer: use/scale the partition from
    // the lower spatial resolution if svc_use_lowres_part is enabled.
    if (cpi->sf.svc_use_lowres_part &&
        cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1 &&
        cpi->svc.prev_partition_svc != NULL && content_state != kVeryHighSad) {
      if (!scale_partitioning_svc(cpi, x, xd, BLOCK_64X64, mi_row >> 1,
                                  mi_col >> 1, mi_row, mi_col)) {
        if (cpi->sf.copy_partition_flag) {
          update_prev_partition(cpi, x, segment_id, mi_row, mi_col, sb_offset);
        }
        chroma_check(cpi, x, bsize, y_sad, is_key_frame,
                     scene_change_detected);
        return 0;
      }
    }
    // If source_sad is low copy the partition without computing the y_sad.
    if (x->skip_low_source_sad && cpi->sf.copy_partition_flag &&
        !force_64_split &&
        copy_partitioning(cpi, x, xd, mi_row, mi_col, segment_id, sb_offset)) {
      x->sb_use_mv_part = 1;
      if (cpi->sf.svc_use_lowres_part &&
          cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
        update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col);
      chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
      return 0;
    }
  }

  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
      cyclic_refresh_segment_id_boosted(segment_id)) {
    int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
    set_vbp_thresholds(cpi, thresholds, q, content_state);
  } else {
    set_vbp_thresholds(cpi, thresholds, cm->base_qindex, content_state);
  }
  // Decrease 32x32 split threshold for screen on base layer, for scene
  // change/high motion frames.
  if (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
      cpi->svc.spatial_layer_id == 0 && force_64_split)
    thresholds[1] = 3 * thresholds[1] >> 2;

  // For non-keyframes, disable 4x4 average for low resolution when speed >= 8.
  threshold_4x4avg = (cpi->oxcf.speed < 8) ? thresholds[1] << 1 : INT64_MAX;

  if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
  // 5-20 for the 16x16 blocks.
  force_split[0] = force_64_split;

  if (!is_key_frame) {
    // In the case of spatial/temporal scalable coding, the assumption here is
    // that the temporal reference frame will always be of type LAST_FRAME.
    // TODO(marpan): If that assumption is broken, we need to revisit this code.
    MODE_INFO *mi = xd->mi[0];
    YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);

    const YV12_BUFFER_CONFIG *yv12_g = NULL;
    unsigned int y_sad_g, y_sad_thr, y_sad_last;
    bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 +
            (mi_row + 4 < cm->mi_rows);

    assert(yv12 != NULL);

    if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id) ||
        cpi->svc.use_gf_temporal_ref_current_layer) {
      // For now, GOLDEN will not be used for non-zero spatial layers, since
      // it may not be a temporal reference.
      yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    }

    // Only compute y_sad_g (sad for golden reference) for speed < 8.
    if (cpi->oxcf.speed < 8 && yv12_g && yv12_g != yv12 &&
        (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(
          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
          xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }

    if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
        cpi->rc.is_src_frame_alt_ref) {
      yv12 = get_ref_frame_buffer(cpi, ALTREF_FRAME);
      vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                           &cm->frame_refs[ALTREF_FRAME - 1].sf);
      mi->ref_frame[0] = ALTREF_FRAME;
      y_sad_g = UINT_MAX;
    } else {
      vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                           &cm->frame_refs[LAST_FRAME - 1].sf);
      mi->ref_frame[0] = LAST_FRAME;
    }
    mi->ref_frame[1] = NONE;
    mi->sb_type = BLOCK_64X64;
    mi->mv[0].as_int = 0;
    mi->interp_filter = BILINEAR;

    if (cpi->oxcf.speed >= 8 && !low_res &&
        x->content_state_sb != kVeryHighSad) {
      y_sad = cpi->fn_ptr[bsize].sdf(
          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
          xd->plane[0].pre[0].stride);
    } else {
      const MV dummy_mv = { 0, 0 };
      y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col,
                                            &dummy_mv);
      x->sb_use_mv_part = 1;
      x->sb_mvcol_part = mi->mv[0].as_mv.col;
      x->sb_mvrow_part = mi->mv[0].as_mv.row;
      if (cpi->oxcf.content == VP9E_CONTENT_SCREEN &&
          cpi->svc.spatial_layer_id == cpi->svc.first_spatial_layer_to_encode &&
          cpi->svc.high_num_blocks_with_motion && !x->zero_temp_sad_source &&
          cm->width > 640 && cm->height > 480) {
        // Disable split below 16x16 block size when scroll motion (horz or
        // vert) is detected.
        // TODO(marpan/jianj): Improve this condition: issue is that search
        // range is hard-coded/limited in vp9_int_pro_motion_estimation() so
        // scroll motion may not be detected here.
        if (((abs(x->sb_mvrow_part) >= 48 && abs(x->sb_mvcol_part) <= 8) ||
             (abs(x->sb_mvcol_part) >= 48 && abs(x->sb_mvrow_part) <= 8)) &&
            y_sad < 100000) {
          compute_minmax_variance = 0;
          thresholds[2] = INT64_MAX;
        }
      }
    }

    y_sad_last = y_sad;
    // Pick ref frame for partitioning, bias last frame when y_sad_g and y_sad
    // are close if short_circuit_low_temp_var is on.
    y_sad_thr = cpi->sf.short_circuit_low_temp_var ? (y_sad * 7) >> 3 : y_sad;
    if (y_sad_g < y_sad_thr) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mi->ref_frame[0] = GOLDEN_FRAME;
      mi->mv[0].as_int = 0;
      y_sad = y_sad_g;
      ref_frame_partition = GOLDEN_FRAME;
    } else {
      x->pred_mv[LAST_FRAME] = mi->mv[0].as_mv;
      ref_frame_partition = LAST_FRAME;
    }

    set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);

    if (cpi->use_skin_detection)
      x->sb_is_skin = skin_sb_split(cpi, low_res, mi_row, mi_col, force_split);

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;

    // If the y_sad is very small, take 64x64 as partition and exit.
    // Don't check on boosted segment for now, as 64x64 is suppressed there.
    if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) {
      const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
      const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
      if (mi_col + block_width / 2 < cm->mi_cols &&
          mi_row + block_height / 2 < cm->mi_rows) {
        set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64);
        x->variance_low[0] = 1;
        chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
        if (cpi->sf.svc_use_lowres_part &&
            cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
          update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col);
        if (cpi->sf.copy_partition_flag) {
          update_prev_partition(cpi, x, segment_id, mi_row, mi_col, sb_offset);
        }
        return 0;
      }
    }

    // If the y_sad is small enough, copy the partition of the superblock in
    // the last frame to current frame only if the last frame is not a
    // keyframe. Stop the copy every cpi->max_copied_frame to refresh the
    // partition. TODO(jianj): tune the threshold.
    if (cpi->sf.copy_partition_flag && y_sad_last < cpi->vbp_threshold_copy &&
        copy_partitioning(cpi, x, xd, mi_row, mi_col, segment_id, sb_offset)) {
      chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
      if (cpi->sf.svc_use_lowres_part &&
          cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
        update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col);
      return 0;
    }
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break;
        case 12: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); break;
        case 8:
        default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  if (low_res && threshold_4x4avg < INT64_MAX)
    CHECK_MEM_ERROR(cm, vt2, vpx_calloc(16, sizeof(*vt2)));
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    const int i2 = i << 2;
    force_split[i + 1] = 0;
    avg_16x16[i] = 0;
    maxvar_16x16[i] = 0;
    minvar_16x16[i] = INT_MAX;
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      const int split_index = 5 + i2 + j;
      v16x16 *vst = &vt.split[i].split[j];
      force_split[split_index] = 0;
      variance4x4downsample[i2 + j] = 0;
      if (!is_key_frame) {
        fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
#if CONFIG_VP9_HIGHBITDEPTH
                             xd->cur_buf->flags,
#endif
                             pixels_wide, pixels_high, is_key_frame);
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
        get_variance(&vt.split[i].split[j].part_variances.none);
        avg_16x16[i] += vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance <
            minvar_16x16[i])
          minvar_16x16[i] = vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance >
            maxvar_16x16[i])
          maxvar_16x16[i] = vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance >
            thresholds[2]) {
          // 16X16 variance is above threshold for split, so force split to 8x8
          // for this 16x16 block (this also forces splits for upper levels).
          force_split[split_index] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        } else if (compute_minmax_variance &&
                   vt.split[i].split[j].part_variances.none.variance >
                       thresholds[1] &&
                   !cyclic_refresh_segment_id_boosted(segment_id)) {
          // We have some nominal amount of 16x16 variance (based on average),
          // compute the minmax over the 8x8 sub-blocks, and if above
          // threshold, force split to 8x8 block for this 16x16 block.
          int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                                          xd->cur_buf->flags,
#endif
                                          pixels_wide, pixels_high);
          int thresh_minmax = (int)cpi->vbp_threshold_minmax;
          if (x->content_state_sb == kVeryHighSad)
            thresh_minmax = thresh_minmax << 1;
          if (minmax > thresh_minmax) {
            force_split[split_index] = 1;
            force_split[i + 1] = 1;
            force_split[0] = 1;
          }
        }
      }
      if (is_key_frame ||
          (low_res && vt.split[i].split[j].part_variances.none.variance >
                          threshold_4x4avg)) {
        force_split[split_index] = 0;
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
          v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
          fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
#if CONFIG_VP9_HIGHBITDEPTH
                               xd->cur_buf->flags,
#endif
                               pixels_wide, pixels_high, is_key_frame);
        }
      }
    }
  }
  if (cpi->noise_estimate.enabled)
    noise_level = vp9_noise_estimate_extract_level(&cpi->noise_estimate);
  // Fill the rest of the variance tree by summing split partition values.
  avg_32x32 = 0;
  for (i = 0; i < 4; i++) {
    const int i2 = i << 2;
    for (j = 0; j < 4; j++) {
      if (variance4x4downsample[i2 + j] == 1) {
        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : &vt.split[i].split[j];
        for (m = 0; m < 4; m++) fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
        fill_variance_tree(vtemp, BLOCK_16X16);
        // If variance of this 16x16 block is above the threshold, force block
        // to split. This also forces a split on the upper levels.
        get_variance(&vtemp->part_variances.none);
        if (vtemp->part_variances.none.variance > thresholds[2]) {
          force_split[5 + i2 + j] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        }
      }
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
    // If variance of this 32x32 block is above the threshold, or if it's above
    // (some threshold of) the average variance over the sub-16x16 blocks, then
    // force this block to split. This also forces a split on the upper
    // (64x64) level.
    if (!force_split[i + 1]) {
      get_variance(&vt.split[i].part_variances.none);
      var_32x32 = vt.split[i].part_variances.none.variance;
      max_var_32x32 = VPXMAX(var_32x32, max_var_32x32);
      min_var_32x32 = VPXMIN(var_32x32, min_var_32x32);
      if (vt.split[i].part_variances.none.variance > thresholds[1] ||
          (!is_key_frame &&
           vt.split[i].part_variances.none.variance > (thresholds[1] >> 1) &&
           vt.split[i].part_variances.none.variance > (avg_16x16[i] >> 1))) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      } else if (!is_key_frame && noise_level < kLow && cm->height <= 360 &&
                 (maxvar_16x16[i] - minvar_16x16[i]) > (thresholds[1] >> 1) &&
                 maxvar_16x16[i] > thresholds[1]) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      }
      avg_32x32 += var_32x32;
    }
  }
1672 if (!force_split[0]) {
1673 fill_variance_tree(&vt, BLOCK_64X64);
1674 get_variance(&vt.part_variances.none);
1675 // If variance of this 64x64 block is above (some threshold of) the average
1676 // variance over the sub-32x32 blocks, then force this block to split.
1677 // Only checking this for noise level >= medium for now.
1678 if (!is_key_frame && noise_level >= kMedium &&
1679 vt.part_variances.none.variance > (9 * avg_32x32) >> 5)
1681 // Else if the maximum 32x32 variance minus the minimum 32x32 variance in
1682 // a 64x64 block is greater than threshold and the maximum 32x32 variance is
1683 // above a minimum threshold, then force the split of a 64x64 block.
1684 // Only check this for low noise.
1685 else if (!is_key_frame && noise_level < kMedium &&
1686 (max_var_32x32 - min_var_32x32) > 3 * (thresholds[0] >> 3) &&
1687 max_var_32x32 > thresholds[0] >> 1)
1691 // Now go through the entire structure, splitting every block size until
1692 // we get to one that's got a variance lower than our threshold.
1693 if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
1694 !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
1695 thresholds[0], BLOCK_16X16, force_split[0])) {
1696 for (i = 0; i < 4; ++i) {
1697 const int x32_idx = ((i & 1) << 2);
1698 const int y32_idx = ((i >> 1) << 2);
1699 const int i2 = i << 2;
1700 if (!set_vt_partitioning(cpi, x, xd, &vt.split[i], BLOCK_32X32,
1701 (mi_row + y32_idx), (mi_col + x32_idx),
1702 thresholds[1], BLOCK_16X16,
1703 force_split[i + 1])) {
1704 for (j = 0; j < 4; ++j) {
1705 const int x16_idx = ((j & 1) << 1);
1706 const int y16_idx = ((j >> 1) << 1);
1707 // For inter frames: if variance4x4downsample[] == 1 for this 16x16
1708 // block, then the variance is based on 4x4 down-sampling, so use vt2
1709 // in set_vt_partitioning(), otherwise use vt.
1710 v16x16 *vtemp = (!is_key_frame && variance4x4downsample[i2 + j] == 1)
1712 : &vt.split[i].split[j];
1713 if (!set_vt_partitioning(
1714 cpi, x, xd, vtemp, BLOCK_16X16, mi_row + y32_idx + y16_idx,
1715 mi_col + x32_idx + x16_idx, thresholds[2], cpi->vbp_bsize_min,
1716 force_split[5 + i2 + j])) {
1717 for (k = 0; k < 4; ++k) {
1718 const int x8_idx = (k & 1);
1719 const int y8_idx = (k >> 1);
1720 if (use_4x4_partition) {
1721 if (!set_vt_partitioning(cpi, x, xd, &vtemp->split[k],
1723 mi_row + y32_idx + y16_idx + y8_idx,
1724 mi_col + x32_idx + x16_idx + x8_idx,
1725 thresholds[3], BLOCK_8X8, 0)) {
1727 cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
1728 (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_4X4);
1732 cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
1733 (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_8X8);
1742 if (!frame_is_intra_only(cm) && cpi->sf.copy_partition_flag) {
1743 update_prev_partition(cpi, x, segment_id, mi_row, mi_col, sb_offset);
1746 if (!frame_is_intra_only(cm) && cpi->sf.svc_use_lowres_part &&
1747 cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 2)
1748 update_partition_svc(cpi, BLOCK_64X64, mi_row, mi_col);
1750 if (cpi->sf.short_circuit_low_temp_var) {
1751 set_low_temp_var_flag(cpi, x, xd, &vt, thresholds, ref_frame_partition,
1755 chroma_check(cpi, x, bsize, y_sad, is_key_frame, scene_change_detected);
1756 if (vt2) vpx_free(vt2);
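// [Editor's note, added in editing] Layout of the force_split[] flags used
// throughout this function (21 entries, inferred from the indexing above):
//   force_split[0]             : the 64x64 root
//   force_split[1 + i]         : 32x32 block i, i = 0..3
//   force_split[5 + 4 * i + j] : 16x16 block j inside 32x32 block i
// A flag set at the 16x16 level is propagated to its 32x32 parent and to the
// 64x64 root, which is what "this also forces splits for upper levels"
// refers to.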
1760 #if !CONFIG_REALTIME_ONLY
1761 static void update_state(VP9_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
1762 int mi_row, int mi_col, BLOCK_SIZE bsize,
1763 int output_enabled) {
1765 VP9_COMMON *const cm = &cpi->common;
1766 RD_COUNTS *const rdc = &td->rd_counts;
1767 MACROBLOCK *const x = &td->mb;
1768 MACROBLOCKD *const xd = &x->e_mbd;
1769 struct macroblock_plane *const p = x->plane;
1770 struct macroblockd_plane *const pd = xd->plane;
1771 MODE_INFO *mi = &ctx->mic;
1772 MODE_INFO *const xdmi = xd->mi[0];
1773 MODE_INFO *mi_addr = xd->mi[0];
1774 const struct segmentation *const seg = &cm->seg;
1775 const int bw = num_8x8_blocks_wide_lookup[mi->sb_type];
1776 const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
1777 const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
1778 const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
1779 MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
1782 const int mis = cm->mi_stride;
1783 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
1784 const int mi_height = num_8x8_blocks_high_lookup[bsize];
1787 assert(mi->sb_type == bsize);
1790 *x->mbmi_ext = ctx->mbmi_ext;
1792 // If segmentation is in use:
1794 // For in-frame complexity AQ, copy the segment id from the segment map.
1795 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
1796 const uint8_t *const map =
1797 seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
1798 mi_addr->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
1800 // Else for cyclic refresh mode update the segment map, set the segment id
1801 // and then update the quantizer.
1802 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
1803 cpi->cyclic_refresh->content_mode) {
1804 vp9_cyclic_refresh_update_segment(cpi, xd->mi[0], mi_row, mi_col, bsize,
1805 ctx->rate, ctx->dist, x->skip, p);
1809 max_plane = is_inter_block(xdmi) ? MAX_MB_PLANE : 1;
1810 for (i = 0; i < max_plane; ++i) {
1811 p[i].coeff = ctx->coeff_pbuf[i][1];
1812 p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
1813 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
1814 p[i].eobs = ctx->eobs_pbuf[i][1];
1817 for (i = max_plane; i < MAX_MB_PLANE; ++i) {
1818 p[i].coeff = ctx->coeff_pbuf[i][2];
1819 p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
1820 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
1821 p[i].eobs = ctx->eobs_pbuf[i][2];
1824 // Restore the coding context of the MB to the one that was in place
1825 // when the mode was picked for it.
1826 for (y = 0; y < mi_height; y++)
1827 for (x_idx = 0; x_idx < mi_width; x_idx++)
1828 if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
1829 (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
1830 xd->mi[x_idx + y * mis] = mi_addr;
1833 if (cpi->oxcf.aq_mode != NO_AQ) vp9_init_plane_quantizers(cpi, x);
1835 if (is_inter_block(xdmi) && xdmi->sb_type < BLOCK_8X8) {
1836 xdmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
1837 xdmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
1840 x->skip = ctx->skip;
1841 memcpy(x->zcoeff_blk[xdmi->tx_size], ctx->zcoeff_blk,
1842 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
1844 if (!output_enabled) return;
1846 #if CONFIG_INTERNAL_STATS
1847 if (frame_is_intra_only(cm)) {
1848 static const int kf_mode_index[] = {
1849 THR_DC /*DC_PRED*/, THR_V_PRED /*V_PRED*/,
1850 THR_H_PRED /*H_PRED*/, THR_D45_PRED /*D45_PRED*/,
1851 THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
1852 THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
1853 THR_D63_PRED /*D63_PRED*/, THR_TM /*TM_PRED*/,
1855 ++cpi->mode_chosen_counts[kf_mode_index[xdmi->mode]];
1857 // Note how often each mode is chosen as best.
1858 ++cpi->mode_chosen_counts[ctx->best_mode_index];
1861 if (!frame_is_intra_only(cm)) {
1862 if (is_inter_block(xdmi)) {
1863 vp9_update_mv_count(td);
1865 if (cm->interp_filter == SWITCHABLE) {
1866 const int ctx = get_pred_context_switchable_interp(xd);
1867 ++td->counts->switchable_interp[ctx][xdmi->interp_filter];
1871 rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
1872 rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
1873 rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
1875 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
1876 rdc->filter_diff[i] += ctx->best_filter_diff[i];
1879 for (h = 0; h < y_mis; ++h) {
1880 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
1881 for (w = 0; w < x_mis; ++w) {
1882 MV_REF *const mv = frame_mv + w;
1883 mv->ref_frame[0] = mi->ref_frame[0];
1884 mv->ref_frame[1] = mi->ref_frame[1];
1885 mv->mv[0].as_int = mi->mv[0].as_int;
1886 mv->mv[1].as_int = mi->mv[1].as_int;
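// [Editor's note, added in editing] The loop above writes the block's
// reference frames and MVs into cm->cur_frame->mvs, a frame-wide grid with
// one MV_REF per 8x8 MI unit and a row stride of cm->mi_cols. x_mis/y_mis
// clamp the copy at the right/bottom frame borders so that units outside
// the image are never touched; the same pattern appears in
// update_state_rt() below.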
1890 #endif // !CONFIG_REALTIME_ONLY
1892 void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
1893 int mi_row, int mi_col) {
1894 uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
1895 const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
1898 // Set current frame pointer.
1899 x->e_mbd.cur_buf = src;
1901 for (i = 0; i < MAX_MB_PLANE; i++)
1902 setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
1903 NULL, x->e_mbd.plane[i].subsampling_x,
1904 x->e_mbd.plane[i].subsampling_y);
1907 static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
1908 INTERP_FILTER interp_filter,
1909 RD_COST *rd_cost, BLOCK_SIZE bsize) {
1910 MACROBLOCKD *const xd = &x->e_mbd;
1911 MODE_INFO *const mi = xd->mi[0];
1912 INTERP_FILTER filter_ref;
1914 filter_ref = get_pred_context_switchable_interp(xd);
1915 if (interp_filter == BILINEAR)
1916 filter_ref = BILINEAR;
1917 else if (filter_ref == SWITCHABLE_FILTERS)
1918 filter_ref = EIGHTTAP;
1920 mi->sb_type = bsize;
1923 VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[tx_mode]);
1925 mi->uv_mode = DC_PRED;
1926 mi->ref_frame[0] = LAST_FRAME;
1927 mi->ref_frame[1] = NONE;
1928 mi->mv[0].as_int = 0;
1929 mi->interp_filter = filter_ref;
1931 xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
1934 vp9_rd_cost_init(rd_cost);
1937 #if !CONFIG_REALTIME_ONLY
1938 static void set_segment_rdmult(VP9_COMP *const cpi, MACROBLOCK *const x,
1939 int mi_row, int mi_col, BLOCK_SIZE bsize,
1941 VP9_COMMON *const cm = &cpi->common;
1942 const VP9EncoderConfig *const oxcf = &cpi->oxcf;
1943 const uint8_t *const map =
1944 cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
1946 vp9_init_plane_quantizers(cpi, x);
1947 vpx_clear_system_state();
1949 if (aq_mode == NO_AQ || aq_mode == PSNR_AQ) {
1950 if (cpi->sf.enable_tpl_model) x->rdmult = x->cb_rdmult;
1951 } else if (aq_mode == PERCEPTUAL_AQ) {
1952 x->rdmult = x->cb_rdmult;
1953 } else if (aq_mode == CYCLIC_REFRESH_AQ) {
1954 // If segment is boosted, use rdmult for that segment.
1955 if (cyclic_refresh_segment_id_boosted(
1956 get_segment_id(cm, map, bsize, mi_row, mi_col)))
1957 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
1959 x->rdmult = vp9_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
1962 if (oxcf->tuning == VP8_TUNE_SSIM) {
1963 set_ssim_rdmult(cpi, x, bsize, mi_row, mi_col, &x->rdmult);
1967 static void rd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data,
1968 MACROBLOCK *const x, int mi_row, int mi_col,
1969 RD_COST *rd_cost, BLOCK_SIZE bsize,
1970 PICK_MODE_CONTEXT *ctx, int rate_in_best_rd,
1971 int64_t dist_in_best_rd) {
1972 VP9_COMMON *const cm = &cpi->common;
1973 TileInfo *const tile_info = &tile_data->tile_info;
1974 MACROBLOCKD *const xd = &x->e_mbd;
1976 struct macroblock_plane *const p = x->plane;
1977 struct macroblockd_plane *const pd = xd->plane;
1978 const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
1980 int64_t best_rd = INT64_MAX;
1982 vpx_clear_system_state();
1984 // Use the lower precision, but faster, 32x32 fdct for mode selection.
1985 x->use_lp32x32fdct = 1;
1987 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
1989 mi->sb_type = bsize;
1991 for (i = 0; i < MAX_MB_PLANE; ++i) {
1992 p[i].coeff = ctx->coeff_pbuf[i][0];
1993 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
1994 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
1995 p[i].eobs = ctx->eobs_pbuf[i][0];
1999 ctx->pred_pixel_ready = 0;
2002 // Set to zero to make sure we do not use the previously encoded frame stats.
2005 #if CONFIG_VP9_HIGHBITDEPTH
2006 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2007 x->source_variance = vp9_high_get_sby_perpixel_variance(
2008 cpi, &x->plane[0].src, bsize, xd->bd);
2010 x->source_variance =
2011 vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
2014 x->source_variance =
2015 vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
2016 #endif // CONFIG_VP9_HIGHBITDEPTH
2018 // Save rdmult before it might be changed, so it can be restored later.
2019 orig_rdmult = x->rdmult;
2021 if ((cpi->sf.tx_domain_thresh > 0.0) || (cpi->sf.quant_opt_thresh > 0.0)) {
2022 double logvar = vp9_log_block_var(cpi, x, bsize);
2023 // Check block complexity as part of the decision on using pixel or
2024 // transform domain distortion in rd tests.
2025 x->block_tx_domain = cpi->sf.allow_txfm_domain_distortion &&
2026 (logvar >= cpi->sf.tx_domain_thresh);
2028 // Check block complexity as part of the decision on using quantized
2029 // coefficient optimisation inside the rd loop.
2030 x->block_qcoeff_opt =
2031 cpi->sf.allow_quant_coeff_opt && (logvar <= cpi->sf.quant_opt_thresh);
2033 x->block_tx_domain = cpi->sf.allow_txfm_domain_distortion;
2034 x->block_qcoeff_opt = cpi->sf.allow_quant_coeff_opt;
2037 set_segment_index(cpi, x, mi_row, mi_col, bsize, 0);
2038 set_segment_rdmult(cpi, x, mi_row, mi_col, bsize, aq_mode);
2039 if (rate_in_best_rd < INT_MAX && dist_in_best_rd < INT64_MAX) {
2040 best_rd = vp9_calculate_rd_cost(x->rdmult, x->rddiv, rate_in_best_rd,
2044 // Find best coding mode & reconstruct the MB so it is available
2045 // as a predictor for MBs that follow in the SB
2046 if (frame_is_intra_only(cm)) {
2047 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
2049 if (bsize >= BLOCK_8X8) {
2050 if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
2051 vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
2054 vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
2055 bsize, ctx, best_rd);
2057 vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
2058 bsize, ctx, best_rd);
2062 // Examine the resulting rate and for AQ mode 2 make a segment choice.
2063 if ((rd_cost->rate != INT_MAX) && (aq_mode == COMPLEXITY_AQ) &&
2064 (bsize >= BLOCK_16X16) &&
2065 (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
2066 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
2067 vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
2070 // TODO(jingning) The rate-distortion optimization flow needs to be
2071 // refactored to provide proper exit/return handling.
2072 if (rd_cost->rate == INT_MAX || rd_cost->dist == INT64_MAX)
2073 rd_cost->rdcost = INT64_MAX;
2075 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
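// [Editor's note, added in editing] RDCOST() evaluates the Lagrangian cost
// J = lambda * rate + dist in fixed point: x->rdmult scales the rate term
// and x->rddiv shifts the distortion term onto the same scale (see the
// RDCOST macro in vp9/encoder/vp9_rd.h for the exact rounding). A larger
// rdmult therefore biases mode decisions towards fewer bits at the price of
// more distortion.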
2077 x->rdmult = orig_rdmult;
2079 ctx->rate = rd_cost->rate;
2080 ctx->dist = rd_cost->dist;
2082 #endif // !CONFIG_REALTIME_ONLY
2084 static void update_stats(VP9_COMMON *cm, ThreadData *td) {
2085 const MACROBLOCK *x = &td->mb;
2086 const MACROBLOCKD *const xd = &x->e_mbd;
2087 const MODE_INFO *const mi = xd->mi[0];
2088 const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2089 const BLOCK_SIZE bsize = mi->sb_type;
2091 if (!frame_is_intra_only(cm)) {
2092 FRAME_COUNTS *const counts = td->counts;
2093 const int inter_block = is_inter_block(mi);
2094 const int seg_ref_active =
2095 segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_REF_FRAME);
2096 if (!seg_ref_active) {
2097 counts->intra_inter[get_intra_inter_context(xd)][inter_block]++;
2098 // If the segment reference feature is enabled, we have only a single
2099 // reference frame allowed for the segment, so exclude it from
2100 // the reference frame counts used to work out probabilities.
2102 const MV_REFERENCE_FRAME ref0 = mi->ref_frame[0];
2103 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2104 counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
2105 [has_second_ref(mi)]++;
2107 if (has_second_ref(mi)) {
2108 const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
2109 const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
2110 const int bit = mi->ref_frame[!idx] == cm->comp_var_ref[1];
2111 counts->comp_ref[ctx][bit]++;
2113 counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
2114 [ref0 != LAST_FRAME]++;
2115 if (ref0 != LAST_FRAME)
2116 counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
2117 [ref0 != GOLDEN_FRAME]++;
2122 !segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) {
2123 const int mode_ctx = mbmi_ext->mode_context[mi->ref_frame[0]];
2124 if (bsize >= BLOCK_8X8) {
2125 const PREDICTION_MODE mode = mi->mode;
2126 ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
2128 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
2129 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
2131 for (idy = 0; idy < 2; idy += num_4x4_h) {
2132 for (idx = 0; idx < 2; idx += num_4x4_w) {
2133 const int j = idy * 2 + idx;
2134 const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
2135 ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
2143 #if !CONFIG_REALTIME_ONLY
2144 static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
2145 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
2146 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
2147 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
2149 MACROBLOCKD *const xd = &x->e_mbd;
2151 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
2152 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
2153 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2154 int mi_height = num_8x8_blocks_high_lookup[bsize];
2155 for (p = 0; p < MAX_MB_PLANE; p++) {
2156 memcpy(xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
2157 a + num_4x4_blocks_wide * p,
2158 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
2159 xd->plane[p].subsampling_x);
2160 memcpy(xd->left_context[p] +
2161 ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
2162 l + num_4x4_blocks_high * p,
2163 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
2164 xd->plane[p].subsampling_y);
2166 memcpy(xd->above_seg_context + mi_col, sa,
2167 sizeof(*xd->above_seg_context) * mi_width);
2168 memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
2169 sizeof(xd->left_seg_context[0]) * mi_height);
2172 static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
2173 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
2174 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
2175 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
2177 const MACROBLOCKD *const xd = &x->e_mbd;
2179 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
2180 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
2181 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2182 int mi_height = num_8x8_blocks_high_lookup[bsize];
2184 // Buffer the above/left context information of the block in search.
2185 for (p = 0; p < MAX_MB_PLANE; ++p) {
2186 memcpy(a + num_4x4_blocks_wide * p,
2187 xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
2188 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
2189 xd->plane[p].subsampling_x);
2190 memcpy(l + num_4x4_blocks_high * p,
2191 xd->left_context[p] +
2192 ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
2193 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
2194 xd->plane[p].subsampling_y);
2196 memcpy(sa, xd->above_seg_context + mi_col,
2197 sizeof(*xd->above_seg_context) * mi_width);
2198 memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
2199 sizeof(xd->left_seg_context[0]) * mi_height);
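// [Editor's note, added in editing] save_context()/restore_context() are
// used as a pair around trial encodes: rd_use_partition() below snapshots
// the entropy and partition contexts before evaluating a candidate
// partitioning and restores them afterwards, so that competing candidates
// for the same block are all rated from identical context state.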
2202 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, ThreadData *td,
2203 TOKENEXTRA **tp, int mi_row, int mi_col,
2204 int output_enabled, BLOCK_SIZE bsize,
2205 PICK_MODE_CONTEXT *ctx) {
2206 MACROBLOCK *const x = &td->mb;
2207 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
2209 if (cpi->sf.enable_tpl_model &&
2210 (cpi->oxcf.aq_mode == NO_AQ || cpi->oxcf.aq_mode == PERCEPTUAL_AQ)) {
2211 const VP9EncoderConfig *const oxcf = &cpi->oxcf;
2212 x->rdmult = x->cb_rdmult;
2213 if (oxcf->tuning == VP8_TUNE_SSIM) {
2214 set_ssim_rdmult(cpi, x, bsize, mi_row, mi_col, &x->rdmult);
2218 update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
2219 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
2221 if (output_enabled) {
2222 update_stats(&cpi->common, td);
2224 (*tp)->token = EOSB_TOKEN;
2229 static void encode_sb(VP9_COMP *cpi, ThreadData *td, const TileInfo *const tile,
2230 TOKENEXTRA **tp, int mi_row, int mi_col,
2231 int output_enabled, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
2232 VP9_COMMON *const cm = &cpi->common;
2233 MACROBLOCK *const x = &td->mb;
2234 MACROBLOCKD *const xd = &x->e_mbd;
2236 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
2238 PARTITION_TYPE partition;
2239 BLOCK_SIZE subsize = bsize;
2241 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2243 if (bsize >= BLOCK_8X8) {
2244 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
2245 subsize = get_subsize(bsize, pc_tree->partitioning);
2248 subsize = BLOCK_4X4;
2251 partition = partition_lookup[bsl][subsize];
2252 if (output_enabled && bsize != BLOCK_4X4)
2253 td->counts->partition[ctx][partition]++;
2255 switch (partition) {
2256 case PARTITION_NONE:
2257 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
2260 case PARTITION_VERT:
2261 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
2262 &pc_tree->vertical[0]);
2263 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
2264 encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
2265 subsize, &pc_tree->vertical[1]);
2268 case PARTITION_HORZ:
2269 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
2270 &pc_tree->horizontal[0]);
2271 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
2272 encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
2273 subsize, &pc_tree->horizontal[1]);
2277 assert(partition == PARTITION_SPLIT);
2278 if (bsize == BLOCK_8X8) {
2279 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
2280 pc_tree->leaf_split[0]);
2282 encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2284 encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
2285 subsize, pc_tree->split[1]);
2286 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
2287 subsize, pc_tree->split[2]);
2288 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
2289 subsize, pc_tree->split[3]);
2294 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
2295 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
2297 #endif // !CONFIG_REALTIME_ONLY
2299 // Check to see if the given partition size is allowed for a specified number
2300 // of 8x8 block rows and columns remaining in the image.
2301 // If not, then return the largest allowed partition size.
2302 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
2303 int cols_left, int *bh, int *bw) {
2304 if (rows_left <= 0 || cols_left <= 0) {
2305 return VPXMIN(bsize, BLOCK_8X8);
2307 for (; bsize > 0; bsize -= 3) {
2308 *bh = num_8x8_blocks_high_lookup[bsize];
2309 *bw = num_8x8_blocks_wide_lookup[bsize];
2310 if ((*bh <= rows_left) && (*bw <= cols_left)) {
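// [Editor's note, added in editing] The "bsize -= 3" step above relies on
// the BLOCK_SIZE enum ordering, in which each square size is separated from
// the next smaller square by the two rectangular sizes in between, so
// BLOCK_64X64 -> BLOCK_32X32 -> BLOCK_16X16 -> BLOCK_8X8 -> BLOCK_4X4 are
// each exactly three enum values apart.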
2318 static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, int bh_in,
2319 int bw_in, int row8x8_remaining,
2320 int col8x8_remaining, BLOCK_SIZE bsize,
2321 MODE_INFO **mi_8x8) {
2324 for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
2326 for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
2327 const int index = r * mis + c;
2328 mi_8x8[index] = mi + index;
2329 mi_8x8[index]->sb_type = find_partition_size(
2330 bsize, row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
2335 // This function attempts to set all mode info entries in a given SB64
2336 // to the same block partition size.
2337 // However, at the bottom and right borders of the image the requested size
2338 // may not be allowed, in which case this code attempts to choose the largest
2339 // allowable partition.
2340 static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
2341 MODE_INFO **mi_8x8, int mi_row, int mi_col,
2343 VP9_COMMON *const cm = &cpi->common;
2344 const int mis = cm->mi_stride;
2345 const int row8x8_remaining = tile->mi_row_end - mi_row;
2346 const int col8x8_remaining = tile->mi_col_end - mi_col;
2347 int block_row, block_col;
2348 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
2349 int bh = num_8x8_blocks_high_lookup[bsize];
2350 int bw = num_8x8_blocks_wide_lookup[bsize];
2352 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
2354 // Apply the requested partition size to the SB64 if it is all "in image"
2355 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
2356 (row8x8_remaining >= MI_BLOCK_SIZE)) {
2357 for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
2358 for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
2359 int index = block_row * mis + block_col;
2360 mi_8x8[index] = mi_upper_left + index;
2361 mi_8x8[index]->sb_type = bsize;
2365 // Else this is a partial SB64.
2366 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
2367 col8x8_remaining, bsize, mi_8x8);
2371 static const struct {
2374 } coord_lookup[16] = {
2397 static void set_source_var_based_partition(VP9_COMP *cpi,
2398 const TileInfo *const tile,
2399 MACROBLOCK *const x,
2400 MODE_INFO **mi_8x8, int mi_row,
2402 VP9_COMMON *const cm = &cpi->common;
2403 const int mis = cm->mi_stride;
2404 const int row8x8_remaining = tile->mi_row_end - mi_row;
2405 const int col8x8_remaining = tile->mi_col_end - mi_col;
2406 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
2408 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
2410 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
2413 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
2414 (row8x8_remaining >= MI_BLOCK_SIZE)) {
2418 const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
2419 int is_larger_better = 0;
2421 unsigned int thr = cpi->source_var_thresh;
2423 memset(d32, 0, 4 * sizeof(diff));
2425 for (i = 0; i < 4; i++) {
2428 for (j = 0; j < 4; j++) {
2429 int b_mi_row = coord_lookup[i * 4 + j].row;
2430 int b_mi_col = coord_lookup[i * 4 + j].col;
2431 int boffset = b_mi_row / 2 * cm->mb_cols + b_mi_col / 2;
2433 d16[j] = cpi->source_diff_var + offset + boffset;
2435 index = b_mi_row * mis + b_mi_col;
2436 mi_8x8[index] = mi_upper_left + index;
2437 mi_8x8[index]->sb_type = BLOCK_16X16;
2439 // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
2440 // size to further improve quality.
2443 is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
2444 (d16[2]->var < thr) && (d16[3]->var < thr);
2446 // Use 32x32 partition
2447 if (is_larger_better) {
2450 for (j = 0; j < 4; j++) {
2451 d32[i].sse += d16[j]->sse;
2452 d32[i].sum += d16[j]->sum;
2456 (unsigned int)(d32[i].sse -
2457 (unsigned int)(((int64_t)d32[i].sum * d32[i].sum) >>
2460 index = coord_lookup[i * 4].row * mis + coord_lookup[i * 4].col;
2461 mi_8x8[index] = mi_upper_left + index;
2462 mi_8x8[index]->sb_type = BLOCK_32X32;
2466 if (use32x32 == 4) {
2468 is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
2469 (d32[2].var < thr) && (d32[3].var < thr);
2471 // Use 64x64 partition
2472 if (is_larger_better) {
2473 mi_8x8[0] = mi_upper_left;
2474 mi_8x8[0]->sb_type = BLOCK_64X64;
2477 } else { // partial in-image SB64
2478 int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
2479 int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
2480 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
2481 col8x8_remaining, BLOCK_16X16, mi_8x8);
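// [Editor's note, added in editing] The d32[i].var computation above is the
// usual variance identity var = sse - sum^2 / N applied to the accumulated
// pixel sums; for a 32x32 block N = 1024, so the division is the shift whose
// amount falls on a line elided here. A minimal sketch of the same identity,
// with hypothetical accumulator values:
#if 0
static unsigned int block_variance_sketch(void) {
  const int64_t sum = 131072;    /* e.g. mean pixel value 128 over 1024 px */
  const int64_t sse = 17039360;  /* sum of squared pixel values */
  /* var = sse - sum^2 / N with N = 32 * 32 = 1024, i.e. >> 10. */
  return (unsigned int)(sse - ((sum * sum) >> 10)); /* = 262144 */
}
#endif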
2485 static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
2486 PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
2488 VP9_COMMON *const cm = &cpi->common;
2489 MACROBLOCK *const x = &td->mb;
2490 MACROBLOCKD *const xd = &x->e_mbd;
2491 MODE_INFO *const mi = xd->mi[0];
2492 struct macroblock_plane *const p = x->plane;
2493 const struct segmentation *const seg = &cm->seg;
2494 const int bw = num_8x8_blocks_wide_lookup[mi->sb_type];
2495 const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
2496 const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
2497 const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
2499 *(xd->mi[0]) = ctx->mic;
2500 *(x->mbmi_ext) = ctx->mbmi_ext;
2502 if (seg->enabled && (cpi->oxcf.aq_mode != NO_AQ || cpi->roi.enabled)) {
2503 // Setting segmentation map for cyclic_refresh.
2504 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
2505 cpi->cyclic_refresh->content_mode) {
2506 vp9_cyclic_refresh_update_segment(cpi, mi, mi_row, mi_col, bsize,
2507 ctx->rate, ctx->dist, x->skip, p);
2509 const uint8_t *const map =
2510 seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
2511 mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
2513 vp9_init_plane_quantizers(cpi, x);
2516 if (is_inter_block(mi)) {
2517 vp9_update_mv_count(td);
2518 if (cm->interp_filter == SWITCHABLE) {
2519 const int pred_ctx = get_pred_context_switchable_interp(xd);
2520 ++td->counts->switchable_interp[pred_ctx][mi->interp_filter];
2523 if (mi->sb_type < BLOCK_8X8) {
2524 mi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
2525 mi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
2529 if (cm->use_prev_frame_mvs || !cm->error_resilient_mode ||
2530 (cpi->svc.use_base_mv && cpi->svc.number_spatial_layers > 1 &&
2531 cpi->svc.spatial_layer_id != cpi->svc.number_spatial_layers - 1)) {
2532 MV_REF *const frame_mvs =
2533 cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
2536 for (h = 0; h < y_mis; ++h) {
2537 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
2538 for (w = 0; w < x_mis; ++w) {
2539 MV_REF *const mv = frame_mv + w;
2540 mv->ref_frame[0] = mi->ref_frame[0];
2541 mv->ref_frame[1] = mi->ref_frame[1];
2542 mv->mv[0].as_int = mi->mv[0].as_int;
2543 mv->mv[1].as_int = mi->mv[1].as_int;
2548 x->skip = ctx->skip;
2549 x->skip_txfm[0] = (mi->segment_id || xd->lossless) ? 0 : ctx->skip_txfm[0];
2552 static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
2553 const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
2554 int mi_col, int output_enabled, BLOCK_SIZE bsize,
2555 PICK_MODE_CONTEXT *ctx) {
2556 MACROBLOCK *const x = &td->mb;
2557 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
2558 update_state_rt(cpi, td, ctx, mi_row, mi_col, bsize);
2560 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
2561 update_stats(&cpi->common, td);
2563 (*tp)->token = EOSB_TOKEN;
2567 static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
2568 const TileInfo *const tile, TOKENEXTRA **tp,
2569 int mi_row, int mi_col, int output_enabled,
2570 BLOCK_SIZE bsize, PC_TREE *pc_tree) {
2571 VP9_COMMON *const cm = &cpi->common;
2572 MACROBLOCK *const x = &td->mb;
2573 MACROBLOCKD *const xd = &x->e_mbd;
2575 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
2577 PARTITION_TYPE partition;
2580 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2582 if (bsize >= BLOCK_8X8) {
2583 const int idx_str = xd->mi_stride * mi_row + mi_col;
2584 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
2585 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
2586 subsize = mi_8x8[0]->sb_type;
2589 subsize = BLOCK_4X4;
2592 partition = partition_lookup[bsl][subsize];
2593 if (output_enabled && bsize != BLOCK_4X4)
2594 td->counts->partition[ctx][partition]++;
2596 switch (partition) {
2597 case PARTITION_NONE:
2598 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2601 case PARTITION_VERT:
2602 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2603 &pc_tree->vertical[0]);
2604 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
2605 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
2606 subsize, &pc_tree->vertical[1]);
2609 case PARTITION_HORZ:
2610 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2611 &pc_tree->horizontal[0]);
2612 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
2613 encode_b_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
2614 subsize, &pc_tree->horizontal[1]);
2618 assert(partition == PARTITION_SPLIT);
2619 subsize = get_subsize(bsize, PARTITION_SPLIT);
2620 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2622 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
2623 subsize, pc_tree->split[1]);
2624 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
2625 subsize, pc_tree->split[2]);
2626 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
2627 output_enabled, subsize, pc_tree->split[3]);
2631 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
2632 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
2635 #if !CONFIG_REALTIME_ONLY
2636 static void rd_use_partition(VP9_COMP *cpi, ThreadData *td,
2637 TileDataEnc *tile_data, MODE_INFO **mi_8x8,
2638 TOKENEXTRA **tp, int mi_row, int mi_col,
2639 BLOCK_SIZE bsize, int *rate, int64_t *dist,
2640 int do_recon, PC_TREE *pc_tree) {
2641 VP9_COMMON *const cm = &cpi->common;
2642 TileInfo *const tile_info = &tile_data->tile_info;
2643 MACROBLOCK *const x = &td->mb;
2644 MACROBLOCKD *const xd = &x->e_mbd;
2645 const int mis = cm->mi_stride;
2646 const int bsl = b_width_log2_lookup[bsize];
2647 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
2648 const int bss = (1 << bsl) / 4;
2650 PARTITION_TYPE partition = PARTITION_NONE;
2652 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2653 PARTITION_CONTEXT sl[8], sa[8];
2654 RD_COST last_part_rdc, none_rdc, chosen_rdc;
2655 BLOCK_SIZE sub_subsize = BLOCK_4X4;
2656 int splits_below = 0;
2657 BLOCK_SIZE bs_type = mi_8x8[0]->sb_type;
2658 int do_partition_search = 1;
2659 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2661 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2663 assert(num_4x4_blocks_wide_lookup[bsize] ==
2664 num_4x4_blocks_high_lookup[bsize]);
2666 vp9_rd_cost_reset(&last_part_rdc);
2667 vp9_rd_cost_reset(&none_rdc);
2668 vp9_rd_cost_reset(&chosen_rdc);
2670 partition = partition_lookup[bsl][bs_type];
2671 subsize = get_subsize(bsize, partition);
2673 pc_tree->partitioning = partition;
2674 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2676 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode != NO_AQ) {
2677 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2678 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2681 if (do_partition_search &&
2682 cpi->sf.partition_search_type == SEARCH_PARTITION &&
2683 cpi->sf.adjust_partitioning_from_last_frame) {
2684 // Check if any of the sub blocks are further split.
2685 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
2686 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
2688 for (i = 0; i < 4; i++) {
2689 int jj = i >> 1, ii = i & 0x01;
2690 MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
2691 if (this_mi && this_mi->sb_type >= sub_subsize) {
2697 // If the partition is not none, try none unless each of the 4 splits is split
2699 if (partition != PARTITION_NONE && !splits_below &&
2700 mi_row + (mi_step >> 1) < cm->mi_rows &&
2701 mi_col + (mi_step >> 1) < cm->mi_cols) {
2702 pc_tree->partitioning = PARTITION_NONE;
2703 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, ctx,
2704 INT_MAX, INT64_MAX);
2706 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2708 if (none_rdc.rate < INT_MAX) {
2709 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2711 RDCOST(x->rdmult, x->rddiv, none_rdc.rate, none_rdc.dist);
2714 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2715 mi_8x8[0]->sb_type = bs_type;
2716 pc_tree->partitioning = partition;
2720 switch (partition) {
2721 case PARTITION_NONE:
2722 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, bsize,
2723 ctx, INT_MAX, INT64_MAX);
2725 case PARTITION_HORZ:
2726 pc_tree->horizontal[0].skip_ref_frame_mask = 0;
2727 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2728 subsize, &pc_tree->horizontal[0], INT_MAX, INT64_MAX);
2729 if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
2730 mi_row + (mi_step >> 1) < cm->mi_rows) {
2732 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2733 vp9_rd_cost_init(&tmp_rdc);
2734 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2735 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2736 pc_tree->horizontal[1].skip_ref_frame_mask = 0;
2737 rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col,
2738 &tmp_rdc, subsize, &pc_tree->horizontal[1], INT_MAX,
2740 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2741 vp9_rd_cost_reset(&last_part_rdc);
2744 last_part_rdc.rate += tmp_rdc.rate;
2745 last_part_rdc.dist += tmp_rdc.dist;
2746 last_part_rdc.rdcost += tmp_rdc.rdcost;
2749 case PARTITION_VERT:
2750 pc_tree->vertical[0].skip_ref_frame_mask = 0;
2751 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2752 subsize, &pc_tree->vertical[0], INT_MAX, INT64_MAX);
2753 if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
2754 mi_col + (mi_step >> 1) < cm->mi_cols) {
2756 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
2757 vp9_rd_cost_init(&tmp_rdc);
2758 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2759 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2760 pc_tree->vertical[bsize > BLOCK_8X8].skip_ref_frame_mask = 0;
2762 cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
2763 subsize, &pc_tree->vertical[bsize > BLOCK_8X8], INT_MAX, INT64_MAX);
2764 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2765 vp9_rd_cost_reset(&last_part_rdc);
2768 last_part_rdc.rate += tmp_rdc.rate;
2769 last_part_rdc.dist += tmp_rdc.dist;
2770 last_part_rdc.rdcost += tmp_rdc.rdcost;
2774 assert(partition == PARTITION_SPLIT);
2775 if (bsize == BLOCK_8X8) {
2776 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2777 subsize, pc_tree->leaf_split[0], INT_MAX, INT64_MAX);
2780 last_part_rdc.rate = 0;
2781 last_part_rdc.dist = 0;
2782 last_part_rdc.rdcost = 0;
2783 for (i = 0; i < 4; i++) {
2784 int x_idx = (i & 1) * (mi_step >> 1);
2785 int y_idx = (i >> 1) * (mi_step >> 1);
2786 int jj = i >> 1, ii = i & 0x01;
2788 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
2791 vp9_rd_cost_init(&tmp_rdc);
2792 rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss,
2793 tp, mi_row + y_idx, mi_col + x_idx, subsize,
2794 &tmp_rdc.rate, &tmp_rdc.dist, i != 3,
2796 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2797 vp9_rd_cost_reset(&last_part_rdc);
2800 last_part_rdc.rate += tmp_rdc.rate;
2801 last_part_rdc.dist += tmp_rdc.dist;
2806 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2807 if (last_part_rdc.rate < INT_MAX) {
2808 last_part_rdc.rate += cpi->partition_cost[pl][partition];
2809 last_part_rdc.rdcost =
2810 RDCOST(x->rdmult, x->rddiv, last_part_rdc.rate, last_part_rdc.dist);
2813 if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame &&
2814 cpi->sf.partition_search_type == SEARCH_PARTITION &&
2815 partition != PARTITION_SPLIT && bsize > BLOCK_8X8 &&
2816 (mi_row + mi_step < cm->mi_rows ||
2817 mi_row + (mi_step >> 1) == cm->mi_rows) &&
2818 (mi_col + mi_step < cm->mi_cols ||
2819 mi_col + (mi_step >> 1) == cm->mi_cols)) {
2820 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
2821 chosen_rdc.rate = 0;
2822 chosen_rdc.dist = 0;
2823 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2824 pc_tree->partitioning = PARTITION_SPLIT;
2827 for (i = 0; i < 4; i++) {
2828 int x_idx = (i & 1) * (mi_step >> 1);
2829 int y_idx = (i >> 1) * (mi_step >> 1);
2831 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2832 PARTITION_CONTEXT sl[8], sa[8];
2834 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
2837 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2838 pc_tree->split[i]->partitioning = PARTITION_NONE;
2839 rd_pick_sb_modes(cpi, tile_data, x, mi_row + y_idx, mi_col + x_idx,
2840 &tmp_rdc, split_subsize, &pc_tree->split[i]->none,
2841 INT_MAX, INT64_MAX);
2843 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2845 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2846 vp9_rd_cost_reset(&chosen_rdc);
2850 chosen_rdc.rate += tmp_rdc.rate;
2851 chosen_rdc.dist += tmp_rdc.dist;
2854 encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
2855 split_subsize, pc_tree->split[i]);
2857 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
2859 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2861 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2862 if (chosen_rdc.rate < INT_MAX) {
2863 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2865 RDCOST(x->rdmult, x->rddiv, chosen_rdc.rate, chosen_rdc.dist);
2869 // If last_part is better set the partitioning to that.
2870 if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
2871 mi_8x8[0]->sb_type = bsize;
2872 if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition;
2873 chosen_rdc = last_part_rdc;
2875 // If none was better set the partitioning to that.
2876 if (none_rdc.rdcost < chosen_rdc.rdcost) {
2877 if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
2878 chosen_rdc = none_rdc;
2881 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2883 // We must have chosen a partitioning and encoding or we'll fail later on.
2884 // No other opportunities for success.
2885 if (bsize == BLOCK_64X64)
2886 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
2889 int output_enabled = (bsize == BLOCK_64X64);
2890 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
2894 *rate = chosen_rdc.rate;
2895 *dist = chosen_rdc.dist;
2898 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
2899 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2900 BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, BLOCK_16X16,
2901 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16
2904 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
2905 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
2906 BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
2907 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64
2910 // Look at all the mode_info entries for blocks that are part of this
2911 // partition and find the min and max values for sb_type.
2912 // At the moment this is designed to work on a 64x64 SB but could be
2913 // adjusted to use a size parameter.
2915 // The min and max are assumed to have been initialized prior to calling this
2916 // function so repeat calls can accumulate a min and max of more than one sb64.
2917 static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
2918 BLOCK_SIZE *min_block_size,
2919 BLOCK_SIZE *max_block_size,
2920 int bs_hist[BLOCK_SIZES]) {
2921 int sb_width_in_blocks = MI_BLOCK_SIZE;
2922 int sb_height_in_blocks = MI_BLOCK_SIZE;
2926 // Check the sb_type for each block that belongs to this region.
2927 for (i = 0; i < sb_height_in_blocks; ++i) {
2928 for (j = 0; j < sb_width_in_blocks; ++j) {
2929 MODE_INFO *mi = mi_8x8[index + j];
2930 BLOCK_SIZE sb_type = mi ? mi->sb_type : 0;
2932 *min_block_size = VPXMIN(*min_block_size, sb_type);
2933 *max_block_size = VPXMAX(*max_block_size, sb_type);
2935 index += xd->mi_stride;
2939 // Next square block size less than or equal to the current block size.
2940 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
2941 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8,
2942 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
2943 BLOCK_32X32, BLOCK_32X32, BLOCK_64X64
2946 // Look at neighboring blocks and set a min and max partition size based on them.
2948 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2949 MACROBLOCKD *const xd, int mi_row,
2950 int mi_col, BLOCK_SIZE *min_block_size,
2951 BLOCK_SIZE *max_block_size) {
2952 VP9_COMMON *const cm = &cpi->common;
2953 MODE_INFO **mi = xd->mi;
2954 const int left_in_image = !!xd->left_mi;
2955 const int above_in_image = !!xd->above_mi;
2956 const int row8x8_remaining = tile->mi_row_end - mi_row;
2957 const int col8x8_remaining = tile->mi_col_end - mi_col;
2959 BLOCK_SIZE min_size = BLOCK_4X4;
2960 BLOCK_SIZE max_size = BLOCK_64X64;
2961 int bs_hist[BLOCK_SIZES] = { 0 };
2963 // Trap case where we do not have a prediction.
2964 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
2965 // Default "min to max" and "max to min"
2966 min_size = BLOCK_64X64;
2967 max_size = BLOCK_4X4;
2969 // NOTE: each call to get_sb_partition_size_range() uses the previously
2970 // passed-in values for min and max as a starting point.
2971 // Find the min and max partition used in the previous frame at this location.
2972 if (cm->frame_type != KEY_FRAME) {
2973 MODE_INFO **prev_mi =
2974 &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
2975 get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
2977 // Find the min and max partition sizes used in the left SB64
2978 if (left_in_image) {
2979 MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
2980 get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
2983 // Find the min and max partition sizes used in the above SB64.
2984 if (above_in_image) {
2985 MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
2986 get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
2990 // Adjust observed min and max for "relaxed" auto partition case.
2991 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
2992 min_size = min_partition_size[min_size];
2993 max_size = max_partition_size[max_size];
2997 // Check border cases where max and min from neighbors may not be legal.
2998 max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
3000 // Test for blocks at the edge of the active image.
3001 // This may be the actual edge of the image or where there are formatting bars.
3003 if (vp9_active_edge_sb(cpi, mi_row, mi_col)) {
3004 min_size = BLOCK_4X4;
3007 VPXMIN(cpi->sf.rd_auto_partition_min_limit, VPXMIN(min_size, max_size));
3010 // When use_square_partition_only is true, make sure at least one square
3011 // partition is allowed by selecting the next smaller square size as the minimum.
3013 if (cpi->sf.use_square_partition_only &&
3014 next_square_size[max_size] < min_size) {
3015 min_size = next_square_size[max_size];
3018 *min_block_size = min_size;
3019 *max_block_size = max_size;
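// [Editor's hypothetical example, added in editing] Suppose the co-located
// SB64 of the previous frame used sizes 32x32..64x64, the left SB64
// 16x16..32x32 and the above SB64 8x8..32x32. The accumulated range is then
// [BLOCK_8X8, BLOCK_64X64]; with RELAXED_NEIGHBORING_MIN_MAX the lookup
// tables widen this to [BLOCK_4X4, BLOCK_64X64], after which the result is
// clamped against the image border and rd_auto_partition_min_limit.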
3022 // TODO(jingning) refactor functions setting partition search range
3023 static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, int mi_row,
3024 int mi_col, BLOCK_SIZE bsize,
3025 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
3026 int mi_width = num_8x8_blocks_wide_lookup[bsize];
3027 int mi_height = num_8x8_blocks_high_lookup[bsize];
3031 const int idx_str = cm->mi_stride * mi_row + mi_col;
3032 MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
3033 BLOCK_SIZE bs, min_size, max_size;
3035 min_size = BLOCK_64X64;
3036 max_size = BLOCK_4X4;
3039 for (idy = 0; idy < mi_height; ++idy) {
3040 for (idx = 0; idx < mi_width; ++idx) {
3041 mi = prev_mi[idy * cm->mi_stride + idx];
3042 bs = mi ? mi->sb_type : bsize;
3043 min_size = VPXMIN(min_size, bs);
3044 max_size = VPXMAX(max_size, bs);
3050 for (idy = 0; idy < mi_height; ++idy) {
3051 mi = xd->mi[idy * cm->mi_stride - 1];
3052 bs = mi ? mi->sb_type : bsize;
3053 min_size = VPXMIN(min_size, bs);
3054 max_size = VPXMAX(max_size, bs);
3059 for (idx = 0; idx < mi_width; ++idx) {
3060 mi = xd->mi[idx - cm->mi_stride];
3061 bs = mi ? mi->sb_type : bsize;
3062 min_size = VPXMIN(min_size, bs);
3063 max_size = VPXMAX(max_size, bs);
3067 if (min_size == max_size) {
3068 min_size = min_partition_size[min_size];
3069 max_size = max_partition_size[max_size];
3075 #endif // !CONFIG_REALTIME_ONLY
3077 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
3078 memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
3081 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
3082 memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
3085 // Calculate prediction based on the given input features and neural net config.
3086 // Assume there are no more than NN_MAX_NODES_PER_LAYER nodes in each hidden layer.
3088 static void nn_predict(const float *features, const NN_CONFIG *nn_config,
3090 int num_input_nodes = nn_config->num_inputs;
3092 float buf[2][NN_MAX_NODES_PER_LAYER];
3093 const float *input_nodes = features;
3095 // Propagate hidden layers.
3096 const int num_layers = nn_config->num_hidden_layers;
3098 assert(num_layers <= NN_MAX_HIDDEN_LAYERS);
3099 for (layer = 0; layer < num_layers; ++layer) {
3100 const float *weights = nn_config->weights[layer];
3101 const float *bias = nn_config->bias[layer];
3102 float *output_nodes = buf[buf_index];
3103 const int num_output_nodes = nn_config->num_hidden_nodes[layer];
3104 assert(num_output_nodes < NN_MAX_NODES_PER_LAYER);
3105 for (node = 0; node < num_output_nodes; ++node) {
3107 for (i = 0; i < num_input_nodes; ++i) val += weights[i] * input_nodes[i];
3109 // ReLU as activation function.
3110 val = VPXMAX(val, 0.0f);
3111 output_nodes[node] = val;
3112 weights += num_input_nodes;
3114 num_input_nodes = num_output_nodes;
3115 input_nodes = output_nodes;
3116 buf_index = 1 - buf_index;
3119 // Final output layer.
3121 const float *weights = nn_config->weights[num_layers];
3122 for (node = 0; node < nn_config->num_outputs; ++node) {
3123 const float *bias = nn_config->bias[num_layers];
3125 for (i = 0; i < num_input_nodes; ++i) val += weights[i] * input_nodes[i];
3126 output[node] = val + bias[node];
3127 weights += num_input_nodes;
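// [Editor's example, added in editing] A minimal sketch of driving
// nn_predict() by hand. Only the NN_CONFIG fields read above are set; the
// exact struct definition lives in vp9_partition_models.h, so treat the
// initialization style here as an assumption.
#if 0
static void nn_predict_sketch(void) {
  /* One hidden layer: 2 inputs -> 2 ReLU nodes -> 1 output. Per-node weight
   * rows are stored back to back, matching "weights += num_input_nodes". */
  static const float hidden_weights[4] = { 1.0f, -1.0f,   /* node 0 */
                                           0.5f, 0.5f };  /* node 1 */
  static const float hidden_bias[2] = { 0.0f, 0.0f };
  static const float out_weights[2] = { 1.0f, 2.0f };
  static const float out_bias[1] = { -0.5f };
  const float in[2] = { 3.0f, 1.0f };
  float out[1];
  NN_CONFIG cfg;
  cfg.num_inputs = 2;
  cfg.num_hidden_layers = 1;
  cfg.num_hidden_nodes[0] = 2;
  cfg.weights[0] = hidden_weights; /* hidden layer */
  cfg.bias[0] = hidden_bias;
  cfg.weights[1] = out_weights; /* final layer sits at index num_layers */
  cfg.bias[1] = out_bias;
  cfg.num_outputs = 1;
  nn_predict(in, &cfg, out);
  /* Hidden: relu(3 - 1) = 2, relu(1.5 + 0.5) = 2.
   * Output: 1 * 2 + 2 * 2 - 0.5 = 5.5. */
}
#endif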
3132 #if !CONFIG_REALTIME_ONLY
3134 // Machine-learning based partition search early termination.
3135 // Return 1 to skip split and rect partitions.
3136 static int ml_pruning_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd,
3137 PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
3140 abs(ctx->mic.mv[0].as_mv.col) + abs(ctx->mic.mv[0].as_mv.row);
3141 const int left_in_image = !!xd->left_mi;
3142 const int above_in_image = !!xd->above_mi;
3143 MODE_INFO **prev_mi =
3144 &cm->prev_mi_grid_visible[mi_col + cm->mi_stride * mi_row];
3145 int above_par = 0; // above_partitioning
3146 int left_par = 0; // left_partitioning
3147 int last_par = 0; // last_partitioning
3150 BLOCK_SIZE context_size;
3151 const NN_CONFIG *nn_config = NULL;
3152 const float *mean, *sd, *linear_weights;
3153 float nn_score, linear_score;
3154 float features[FEATURES];
3156 assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]);
3157 vpx_clear_system_state();
3162 nn_config = &vp9_partition_nnconfig_64x64;
3166 nn_config = &vp9_partition_nnconfig_32x32;
3170 nn_config = &vp9_partition_nnconfig_16x16;
3172 default: assert(0 && "Unexpected block size."); return 0;
3175 if (above_in_image) {
3176 context_size = xd->above_mi->sb_type;
3177 if (context_size < bsize)
3179 else if (context_size == bsize)
3183 if (left_in_image) {
3184 context_size = xd->left_mi->sb_type;
3185 if (context_size < bsize)
3187 else if (context_size == bsize)
3192 context_size = prev_mi[0]->sb_type;
3193 if (context_size < bsize)
3195 else if (context_size == bsize)
3199 mean = &vp9_partition_feature_mean[offset];
3200 sd = &vp9_partition_feature_std[offset];
3201 features[0] = ((float)ctx->rate - mean[0]) / sd[0];
3202 features[1] = ((float)ctx->dist - mean[1]) / sd[1];
3203 features[2] = ((float)mag_mv / 2 - mean[2]) * sd[2];
3204 features[3] = ((float)(left_par + above_par) / 2 - mean[3]) * sd[3];
3205 features[4] = ((float)ctx->sum_y_eobs - mean[4]) / sd[4];
3206 features[5] = ((float)cm->base_qindex - mean[5]) * sd[5];
3207 features[6] = ((float)last_par - mean[6]) * sd[6];
3209 // Predict using linear model.
3210 linear_weights = &vp9_partition_linear_weights[offset];
3211 linear_score = linear_weights[FEATURES];
3212 for (i = 0; i < FEATURES; ++i)
3213 linear_score += linear_weights[i] * features[i];
3214 if (linear_score > 0.1f) return 0;
3216 // Predict using neural net model.
3217 nn_predict(features, nn_config, &nn_score);
3219 if (linear_score < -0.0f && nn_score < 0.1f) return 1;
3220 if (nn_score < -0.0f && linear_score < 0.1f) return 1;
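// [Editor's note, added in editing] Early termination above requires the
// two predictors to agree: one model must score negative and the other must
// stay below a small positive margin (0.1f) before split/rect search is
// skipped, so the pruning backs off whenever the linear and neural scores
// disagree.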
3226 // ML-based partition search breakout.
3227 static int ml_predict_breakout(VP9_COMP *const cpi, BLOCK_SIZE bsize,
3228 const MACROBLOCK *const x,
3229 const RD_COST *const rd_cost) {
3230 DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 };
3231 const VP9_COMMON *const cm = &cpi->common;
3232 float features[FEATURES];
3233 const float *linear_weights = NULL; // Linear model weights.
3234 float linear_score = 0.0f;
3235 const int qindex = cm->base_qindex;
3236 const int q_ctx = qindex >= 200 ? 0 : (qindex >= 150 ? 1 : 2);
3237 const int is_720p_or_larger = VPXMIN(cm->width, cm->height) >= 720;
3238 const int resolution_ctx = is_720p_or_larger ? 1 : 0;
3242 linear_weights = vp9_partition_breakout_weights_64[resolution_ctx][q_ctx];
3245 linear_weights = vp9_partition_breakout_weights_32[resolution_ctx][q_ctx];
3248 linear_weights = vp9_partition_breakout_weights_16[resolution_ctx][q_ctx];
3251 linear_weights = vp9_partition_breakout_weights_8[resolution_ctx][q_ctx];
3253 default: assert(0 && "Unexpected block size."); return 0;
3255 if (!linear_weights) return 0;
3257 { // Generate feature values.
3258 #if CONFIG_VP9_HIGHBITDEPTH
3260 vp9_ac_quant(cm->base_qindex, 0, cm->bit_depth) >> (x->e_mbd.bd - 8);
3262 const int ac_q = vp9_ac_quant(qindex, 0, cm->bit_depth);
3263 #endif // CONFIG_VP9_HIGHBITDEPTH
3264 const int num_pels_log2 = num_pels_log2_lookup[bsize];
3265 int feature_index = 0;
3266 unsigned int var, sse;
3267 float rate_f, dist_f;
3269 #if CONFIG_VP9_HIGHBITDEPTH
3270 if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
3272 vp9_high_get_sby_variance(cpi, &x->plane[0].src, bsize, x->e_mbd.bd);
3274 var = cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
3275 vp9_64_zeros, 0, &sse);
3278 var = cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
3279 vp9_64_zeros, 0, &sse);
3281 var = var >> num_pels_log2;
3283 vpx_clear_system_state();
3285 rate_f = (float)VPXMIN(rd_cost->rate, INT_MAX);
3286 dist_f = (float)(VPXMIN(rd_cost->dist, INT_MAX) >> num_pels_log2);
3288 ((float)x->rdmult / 128.0f / 512.0f / (float)(1 << num_pels_log2)) *
3291 features[feature_index++] = rate_f;
3292 features[feature_index++] = dist_f;
3293 features[feature_index++] = (float)var;
3294 features[feature_index++] = (float)ac_q;
3295 assert(feature_index == FEATURES);
3298 { // Calculate the output score.
3300 linear_score = linear_weights[FEATURES];
3301 for (i = 0; i < FEATURES; ++i)
3302 linear_score += linear_weights[i] * features[i];
3305 return linear_score >= cpi->sf.rd_ml_partition.search_breakout_thresh[q_ctx];
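// [Editor's note, added in editing] Both linear models in this file store
// the bias as the last element of the weight array, i.e.
// score = w[FEATURES] + sum_i w[i] * f[i]. A hypothetical helper mirroring
// that in-line pattern:
#if 0
static float linear_score_sketch(const float *w, const float *f, int n) {
  /* w holds n per-feature weights followed by the bias at w[n]. */
  float score = w[n];
  int i;
  for (i = 0; i < n; ++i) score += w[i] * f[i];
  return score;
}
#endif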
3311 static void ml_prune_rect_partition(VP9_COMP *const cpi, MACROBLOCK *const x,
3313 const PC_TREE *const pc_tree,
3314 int *allow_horz, int *allow_vert,
3316 const NN_CONFIG *nn_config = NULL;
3317 float score[LABELS] = {
3324 if (ref_rd <= 0 || ref_rd > 1000000000) return;
3327 case BLOCK_8X8: break;
3329 nn_config = &vp9_rect_part_nnconfig_16;
3330 thresh = cpi->sf.rd_ml_partition.prune_rect_thresh[1];
3333 nn_config = &vp9_rect_part_nnconfig_32;
3334 thresh = cpi->sf.rd_ml_partition.prune_rect_thresh[2];
3337 nn_config = &vp9_rect_part_nnconfig_64;
3338 thresh = cpi->sf.rd_ml_partition.prune_rect_thresh[3];
3340 default: assert(0 && "Unexpected block size."); return;
3342 if (!nn_config || thresh < 0) return;
3344 // Feature extraction and model score calculation.
3346 const VP9_COMMON *const cm = &cpi->common;
3347 #if CONFIG_VP9_HIGHBITDEPTH
3349 vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) >> (x->e_mbd.bd - 8);
3351 const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
3352 #endif // CONFIG_VP9_HIGHBITDEPTH
3353 const int bs = 4 * num_4x4_blocks_wide_lookup[bsize];
3354 int feature_index = 0;
3355 float features[FEATURES];
3357 features[feature_index++] = logf((float)dc_q + 1.0f);
3358 features[feature_index++] =
3359 (float)(pc_tree->partitioning == PARTITION_NONE);
3360 features[feature_index++] = logf((float)ref_rd / bs / bs + 1.0f);
3363 const float norm_factor = 1.0f / ((float)ref_rd + 1.0f);
3364 const int64_t none_rdcost = pc_tree->none.rdcost;
3365 float rd_ratio = 2.0f;
3366 if (none_rdcost > 0 && none_rdcost < 1000000000)
3367 rd_ratio = (float)none_rdcost * norm_factor;
3368 features[feature_index++] = VPXMIN(rd_ratio, 2.0f);
3370 for (i = 0; i < 4; ++i) {
3371 const int64_t this_rd = pc_tree->split[i]->none.rdcost;
3372 const int rd_valid = this_rd > 0 && this_rd < 1000000000;
3373 // Ratio between sub-block RD and whole block RD.
3374 features[feature_index++] =
3375 rd_valid ? (float)this_rd * norm_factor : 1.0f;
3379 assert(feature_index == FEATURES);
3380 nn_predict(features, nn_config, score);
3383 // Make decisions based on the model score.
3385 int max_score = -1000;
3386 int horz = 0, vert = 0;
3387 int int_score[LABELS];
3388 for (i = 0; i < LABELS; ++i) {
3389 int_score[i] = (int)(100 * score[i]);
3390 max_score = VPXMAX(int_score[i], max_score);
3392 thresh = max_score - thresh;
3393 for (i = 0; i < LABELS; ++i) {
3394 if (int_score[i] >= thresh) {
3395 if ((i >> 0) & 1) horz = 1;
3396 if ((i >> 1) & 1) vert = 1;
3399 *allow_horz = *allow_horz && horz;
3400 *allow_vert = *allow_vert && vert;
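// [Editor's note, added in editing] The rectangular-partition labels above
// are bit-coded: bit 0 of a label index means "allow horizontal" and bit 1
// "allow vertical", so the classes enumerate {neither, horz only, vert only,
// both}. A class survives when its integer score is within `thresh` of the
// best-scoring class.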
// Perform fast and coarse motion search for the given block. This is a
// pre-processing step for the ML based partition search speedup.
static void simple_motion_search(const VP9_COMP *const cpi, MACROBLOCK *const x,
                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
                                 MV ref_mv, MV_REFERENCE_FRAME ref,
                                 uint8_t *const pred_buf) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  const YV12_BUFFER_CONFIG *const yv12 = get_ref_frame_buffer(cpi, ref);
  const int step_param = 1;
  const MvLimits tmp_mv_limits = x->mv_limits;
  const SEARCH_METHODS search_method = NSTEP;
  const int sadpb = x->sadperbit16;
  MV ref_mv_full = { ref_mv.row >> 3, ref_mv.col >> 3 };
  MV best_mv = { 0, 0 };
  int cost_list[5];

  assert(yv12 != NULL);
  vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                       &cm->frame_refs[ref - 1].sf);
  mi->ref_frame[0] = ref;
  mi->ref_frame[1] = NONE;
  mi->sb_type = bsize;
  vp9_set_mv_search_range(&x->mv_limits, &ref_mv);
  vp9_full_pixel_search(cpi, x, bsize, &ref_mv_full, step_param, search_method,
                        sadpb, cond_cost_list(cpi, cost_list), &ref_mv,
                        &best_mv, 0, 0);
  best_mv.row *= 8;
  best_mv.col *= 8;
  x->mv_limits = tmp_mv_limits;
  mi->mv[0].as_mv = best_mv;

  set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
  xd->plane[0].dst.buf = pred_buf;
  xd->plane[0].dst.stride = 64;
  vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
}
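// Usage sketch: callers pass a 64x64 prediction buffer (stride 64) and a
// starting MV in 1/8-pel units; on return, xd->mi[0]->mv[0] holds the best
// full-pel result scaled back to 1/8-pel units and pred_buf holds the single
// reference prediction. A hedged example (start_mv is illustrative):
//
//   DECLARE_ALIGNED(16, uint8_t, pred_buf[64 * 64]);
//   MV start_mv = { 0, 0 };
//   vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
//   simple_motion_search(cpi, x, BLOCK_64X64, mi_row, mi_col, start_mv,
//                        LAST_FRAME, pred_buf);
//   // x->e_mbd.mi[0]->mv[0].as_mv now holds the coarse MV (1/8-pel units).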
// Use a neural net model to prune partition-none and partition-split search.
// Features used: QP; spatial block size contexts; variance of prediction
// residue after simple_motion_search.
#define FEATURES 12
static void ml_predict_var_rd_paritioning(const VP9_COMP *const cpi,
                                          MACROBLOCK *const x,
                                          PC_TREE *const pc_tree,
                                          BLOCK_SIZE bsize, int mi_row,
                                          int mi_col, int *none, int *split) {
  const VP9_COMMON *const cm = &cpi->common;
  const NN_CONFIG *nn_config = NULL;
#if CONFIG_VP9_HIGHBITDEPTH
  MACROBLOCKD *xd = &x->e_mbd;
  DECLARE_ALIGNED(16, uint8_t, pred_buffer[64 * 64 * 2]);
  uint8_t *const pred_buf = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
                                ? (CONVERT_TO_BYTEPTR(pred_buffer))
                                : pred_buffer;
#else
  DECLARE_ALIGNED(16, uint8_t, pred_buffer[64 * 64]);
  uint8_t *const pred_buf = pred_buffer;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  const int speed = cpi->oxcf.speed;
  float thresh = 0.0f;

  switch (bsize) {
    case BLOCK_64X64:
      nn_config = &vp9_part_split_nnconfig_64;
      thresh = speed > 0 ? 2.8f : 3.0f;
      break;
    case BLOCK_32X32:
      nn_config = &vp9_part_split_nnconfig_32;
      thresh = speed > 0 ? 3.5f : 3.0f;
      break;
    case BLOCK_16X16:
      nn_config = &vp9_part_split_nnconfig_16;
      thresh = speed > 0 ? 3.8f : 4.0f;
      break;
    case BLOCK_8X8:
      nn_config = &vp9_part_split_nnconfig_8;
      if (cm->width >= 720 && cm->height >= 720)
        thresh = speed > 0 ? 2.5f : 2.0f;
      else
        thresh = speed > 0 ? 3.8f : 2.0f;
      break;
    default: assert(0 && "Unexpected block size."); return;
  }

  if (!nn_config) return;

  // Do a simple single motion search to find a prediction for current block.
  // The variance of the residue will be used as input features.
  {
    MV ref_mv;
    const MV_REFERENCE_FRAME ref =
        cpi->rc.is_src_frame_alt_ref ? ALTREF_FRAME : LAST_FRAME;
    // If bsize is 64x64, use zero MV as reference; otherwise, use MV result
    // of previous (larger) block as reference.
    if (bsize == BLOCK_64X64)
      ref_mv.row = ref_mv.col = 0;
    else
      ref_mv = pc_tree->mv;
    vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
    simple_motion_search(cpi, x, bsize, mi_row, mi_col, ref_mv, ref, pred_buf);
    pc_tree->mv = x->e_mbd.mi[0]->mv[0].as_mv;
  }

  vpx_clear_system_state();

  {
    float features[FEATURES] = { 0.0f };
#if CONFIG_VP9_HIGHBITDEPTH
    const int dc_q =
        vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) >> (xd->bd - 8);
#else
    const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    int feature_idx = 0;
    float score;

    // Generate model input features.
    features[feature_idx++] = logf((float)dc_q + 1.0f);

    // Get the variance of the residue as input features.
    {
      const int bs = 4 * num_4x4_blocks_wide_lookup[bsize];
      const BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
      const uint8_t *pred = pred_buf;
      const uint8_t *src = x->plane[0].src.buf;
      const int src_stride = x->plane[0].src.stride;
      const int pred_stride = 64;
      unsigned int sse;
      int i;
      // Variance of whole block.
      const unsigned int var =
          cpi->fn_ptr[bsize].vf(src, src_stride, pred, pred_stride, &sse);
      const float factor = (var == 0) ? 1.0f : (1.0f / (float)var);
      const MACROBLOCKD *const xd = &x->e_mbd;
      const int has_above = !!xd->above_mi;
      const int has_left = !!xd->left_mi;
      const BLOCK_SIZE above_bsize = has_above ? xd->above_mi->sb_type : bsize;
      const BLOCK_SIZE left_bsize = has_left ? xd->left_mi->sb_type : bsize;

      features[feature_idx++] = (float)has_above;
      features[feature_idx++] = (float)b_width_log2_lookup[above_bsize];
      features[feature_idx++] = (float)b_height_log2_lookup[above_bsize];
      features[feature_idx++] = (float)has_left;
      features[feature_idx++] = (float)b_width_log2_lookup[left_bsize];
      features[feature_idx++] = (float)b_height_log2_lookup[left_bsize];
      features[feature_idx++] = logf((float)var + 1.0f);
      for (i = 0; i < 4; ++i) {
        const int x_idx = (i & 1) * bs / 2;
        const int y_idx = (i >> 1) * bs / 2;
        const int src_offset = y_idx * src_stride + x_idx;
        const int pred_offset = y_idx * pred_stride + x_idx;
        // Variance of quarter block.
        const unsigned int sub_var =
            cpi->fn_ptr[subsize].vf(src + src_offset, src_stride,
                                    pred + pred_offset, pred_stride, &sse);
        const float var_ratio = (var == 0) ? 1.0f : factor * (float)sub_var;
        features[feature_idx++] = var_ratio;
      }
    }
    assert(feature_idx == FEATURES);

    // Feed the features into the model to get the confidence score.
    nn_predict(features, nn_config, &score);

    // Higher score means that the model has higher confidence that the split
    // partition is better than the non-split partition. So if the score is
    // high enough, we skip the partition-none search; if the score is
    // low enough, we skip the split partition search.
    if (score > thresh) *none = 0;
    if (score < -thresh) *split = 0;
  }
}
#undef FEATURES
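// The none/split decision above hinges on how unevenly the prediction residue
// is distributed over the four quadrants. A hedged, self-contained sketch of
// that normalization (illustrative helper): each output is roughly
// quarter_var[i] / whole_var, so a flat profile suggests a homogeneous block
// (favor NONE) while a skewed profile suggests SPLIT.
static INLINE void quarter_var_ratios(const unsigned int quarter_var[4],
                                      unsigned int whole_var,
                                      float ratios[4]) {
  const float factor = (whole_var == 0) ? 1.0f : 1.0f / (float)whole_var;
  int i;
  for (i = 0; i < 4; ++i)
    ratios[i] = (whole_var == 0) ? 1.0f : factor * (float)quarter_var[i];
}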
#endif  // !CONFIG_REALTIME_ONLY
static double log_wiener_var(int64_t wiener_variance) {
  return log(1.0 + wiener_variance) / log(2.0);
}
static void build_kmeans_segmentation(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;
  BLOCK_SIZE bsize = BLOCK_64X64;
  KMEANS_DATA *kmeans_data;

  vp9_disable_segmentation(&cm->seg);
  if (cm->show_frame) {
    int mi_row, mi_col;
    cpi->kmeans_data_size = 0;
    cpi->kmeans_ctr_num = 8;

    for (mi_row = 0; mi_row < cm->mi_rows; mi_row += MI_BLOCK_SIZE) {
      for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
        int mb_row_start = mi_row >> 1;
        int mb_col_start = mi_col >> 1;
        int mb_row_end = VPXMIN(
            (mi_row + num_8x8_blocks_high_lookup[bsize]) >> 1, cm->mb_rows);
        int mb_col_end = VPXMIN(
            (mi_col + num_8x8_blocks_wide_lookup[bsize]) >> 1, cm->mb_cols);
        int row, col;
        int64_t wiener_variance = 0;

        for (row = mb_row_start; row < mb_row_end; ++row)
          for (col = mb_col_start; col < mb_col_end; ++col)
            wiener_variance += cpi->mb_wiener_variance[row * cm->mb_cols + col];

        wiener_variance /=
            (mb_row_end - mb_row_start) * (mb_col_end - mb_col_start);

#if CONFIG_MULTITHREAD
        pthread_mutex_lock(&cpi->kmeans_mutex);
#endif  // CONFIG_MULTITHREAD

        kmeans_data = &cpi->kmeans_data_arr[cpi->kmeans_data_size++];
        kmeans_data->value = log_wiener_var(wiener_variance);
        kmeans_data->pos = mi_row * cpi->kmeans_data_stride + mi_col;
#if CONFIG_MULTITHREAD
        pthread_mutex_unlock(&cpi->kmeans_mutex);
#endif  // CONFIG_MULTITHREAD
      }
    }

    vp9_kmeans(cpi->kmeans_ctr_ls, cpi->kmeans_boundary_ls,
               cpi->kmeans_count_ls, cpi->kmeans_ctr_num, cpi->kmeans_data_arr,
               cpi->kmeans_data_size);

    vp9_perceptual_aq_mode_setup(cpi, &cm->seg);
  }
}
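// vp9_kmeans above clusters one scalar per 64x64 block (the log Wiener
// variance) into kmeans_ctr_num groups. A hedged, self-contained sketch of
// 1-D k-means with the same shape of outputs (centroids plus the midpoint
// boundaries used later for group lookup); kmeans_1d_sketch and its fixed
// 10-iteration budget are illustrative, not the vp9_kmeans implementation.
static INLINE void kmeans_1d_sketch(double *ctrs, double *boundaries, int k,
                                    const double *data, int n) {
  int i, j, iter;
  double lo = data[0], hi = data[0];
  assert(k >= 2 && k <= 16 && n > 0);
  for (i = 1; i < n; ++i) {
    if (data[i] < lo) lo = data[i];
    if (data[i] > hi) hi = data[i];
  }
  // Spread the initial centroids evenly over the data range; for 1-D data
  // they stay ordered, so midpoints remain valid boundaries.
  for (j = 0; j < k; ++j) ctrs[j] = lo + (hi - lo) * (j + 0.5) / k;
  for (iter = 0; iter < 10; ++iter) {
    double sum[16] = { 0.0 };
    int count[16] = { 0 };
    for (i = 0; i < n; ++i) {
      int best = 0;
      for (j = 1; j < k; ++j)
        if (fabs(data[i] - ctrs[j]) < fabs(data[i] - ctrs[best])) best = j;
      sum[best] += data[i];
      ++count[best];
    }
    for (j = 0; j < k; ++j)
      if (count[j] > 0) ctrs[j] = sum[j] / count[j];
  }
  // Boundaries are the midpoints between adjacent centroids.
  for (j = 0; j + 1 < k; ++j) boundaries[j] = 0.5 * (ctrs[j] + ctrs[j + 1]);
}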
#if !CONFIG_REALTIME_ONLY
static int wiener_var_segment(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row,
                              int mi_col) {
  VP9_COMMON *cm = &cpi->common;
  int mb_row_start = mi_row >> 1;
  int mb_col_start = mi_col >> 1;
  int mb_row_end =
      VPXMIN((mi_row + num_8x8_blocks_high_lookup[bsize]) >> 1, cm->mb_rows);
  int mb_col_end =
      VPXMIN((mi_col + num_8x8_blocks_wide_lookup[bsize]) >> 1, cm->mb_cols);
  int row, col, idx;
  int64_t wiener_variance = 0;
  int segment_id;
  int8_t seg_hist[MAX_SEGMENTS] = { 0 };
  int8_t max_count = 0, max_index = -1;

  vpx_clear_system_state();

  assert(cpi->norm_wiener_variance > 0);

  for (row = mb_row_start; row < mb_row_end; ++row) {
    for (col = mb_col_start; col < mb_col_end; ++col) {
      wiener_variance = cpi->mb_wiener_variance[row * cm->mb_cols + col];
      segment_id =
          vp9_get_group_idx(log_wiener_var(wiener_variance),
                            cpi->kmeans_boundary_ls, cpi->kmeans_ctr_num);
      ++seg_hist[segment_id];
    }
  }

  for (idx = 0; idx < cpi->kmeans_ctr_num; ++idx) {
    if (seg_hist[idx] > max_count) {
      max_count = seg_hist[idx];
      max_index = idx;
    }
  }

  assert(max_index >= 0);
  segment_id = max_index;

  return segment_id;
}
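// Coordinate note for the function above: mode-info (mi) units are 8x8 luma
// pixels while macroblock (mb) units are 16x16, hence the >> 1 conversions.
// The segment id is then a majority vote over the per-MB histogram; a hedged,
// self-contained sketch of that vote (illustrative helper):
static INLINE int majority_segment_sketch(const int8_t *seg_hist,
                                          int num_segments) {
  int idx, best = 0;
  for (idx = 1; idx < num_segments; ++idx)
    if (seg_hist[idx] > seg_hist[best]) best = idx;
  return best;
}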
static int get_rdmult_delta(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row,
                            int mi_col, int orig_rdmult) {
  const int gf_group_index = cpi->twopass.gf_group.index;
  int64_t intra_cost = 0;
  int64_t mc_dep_cost = 0;
  int mi_wide = num_8x8_blocks_wide_lookup[bsize];
  int mi_high = num_8x8_blocks_high_lookup[bsize];
  int row, col;
  int dr = 0;

  double r0, rk, beta;

  TplDepFrame *tpl_frame;
  TplDepStats *tpl_stats;
  int tpl_stride;

  if (gf_group_index >= MAX_ARF_GOP_SIZE) return orig_rdmult;
  tpl_frame = &cpi->tpl_stats[gf_group_index];

  if (tpl_frame->is_valid == 0) return orig_rdmult;
  tpl_stats = tpl_frame->tpl_stats_ptr;
  tpl_stride = tpl_frame->stride;

  if (cpi->twopass.gf_group.layer_depth[gf_group_index] > 1) return orig_rdmult;

  for (row = mi_row; row < mi_row + mi_high; ++row) {
    for (col = mi_col; col < mi_col + mi_wide; ++col) {
      TplDepStats *this_stats = &tpl_stats[row * tpl_stride + col];

      if (row >= cpi->common.mi_rows || col >= cpi->common.mi_cols) continue;

      intra_cost += this_stats->intra_cost;
      mc_dep_cost += this_stats->mc_dep_cost;
    }
  }

  vpx_clear_system_state();

  r0 = cpi->rd.r0;
  rk = (double)intra_cost / mc_dep_cost;
  beta = r0 / rk;
  dr = vp9_get_adaptive_rdmult(cpi, beta);

  dr = VPXMIN(dr, orig_rdmult * 3 / 2);
  dr = VPXMAX(dr, orig_rdmult * 1 / 2);

  dr = VPXMAX(1, dr);

  return dr;
}
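// The scaling above follows the TPL model: rk is this block's ratio of intra
// cost to motion-compensated dependency cost, r0 is the same ratio at frame
// level, and beta = r0 / rk boosts blocks whose reconstruction quality
// propagates into many future frames. A hedged arithmetic sketch of the final
// clamping (illustrative helper; the adaptation itself is
// vp9_get_adaptive_rdmult):
static INLINE int clamp_adapted_rdmult_sketch(int adapted, int orig_rdmult) {
  if (adapted > orig_rdmult * 3 / 2) adapted = orig_rdmult * 3 / 2;
  if (adapted < orig_rdmult / 2) adapted = orig_rdmult / 2;
  return adapted > 1 ? adapted : 1;  // Keep the multiplier positive.
}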
#endif  // !CONFIG_REALTIME_ONLY
#if CONFIG_RATE_CTRL
static void assign_partition_info(
    const int row_start_4x4, const int col_start_4x4, const int block_width_4x4,
    const int block_height_4x4, const int num_unit_rows,
    const int num_unit_cols, PARTITION_INFO *partition_info) {
  int i, j;
  for (i = 0; i < block_height_4x4; ++i) {
    for (j = 0; j < block_width_4x4; ++j) {
      const int row_4x4 = row_start_4x4 + i;
      const int col_4x4 = col_start_4x4 + j;
      const int unit_index = row_4x4 * num_unit_cols + col_4x4;
      if (row_4x4 >= num_unit_rows || col_4x4 >= num_unit_cols) continue;
      partition_info[unit_index].row = row_4x4 << 2;
      partition_info[unit_index].column = col_4x4 << 2;
      partition_info[unit_index].row_start = row_start_4x4 << 2;
      partition_info[unit_index].column_start = col_start_4x4 << 2;
      partition_info[unit_index].width = block_width_4x4 << 2;
      partition_info[unit_index].height = block_height_4x4 << 2;
    }
  }
}
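// Units here are 4x4 luma blocks, so << 2 converts unit coordinates to pixel
// coordinates and row_4x4 * num_unit_cols + col_4x4 linearizes the 2-D grid.
// Worked example (assuming a 1920x1080 frame): num_unit_cols = 1920 / 4 = 480,
// so the unit covering pixel (row 8, col 16) has row_4x4 = 2, col_4x4 = 4 and
// unit_index = 2 * 480 + 4 = 964.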
static void assign_motion_vector_info(const int block_width_4x4,
                                      const int block_height_4x4,
                                      const int row_start_4x4,
                                      const int col_start_4x4,
                                      const int num_unit_rows,
                                      const int num_unit_cols, MV *source_mv[2],
                                      MV_REFERENCE_FRAME source_ref_frame[2],
                                      MOTION_VECTOR_INFO *motion_vector_info) {
  int i, j;
  for (i = 0; i < block_height_4x4; ++i) {
    for (j = 0; j < block_width_4x4; ++j) {
      const int row_4x4 = row_start_4x4 + i;
      const int col_4x4 = col_start_4x4 + j;
      const int unit_index = row_4x4 * num_unit_cols + col_4x4;
      if (row_4x4 >= num_unit_rows || col_4x4 >= num_unit_cols) continue;
      if (source_ref_frame[1] == NONE) {
        assert(source_mv[1]->row == 0 && source_mv[1]->col == 0);
      }
      motion_vector_info[unit_index].ref_frame[0] = source_ref_frame[0];
      motion_vector_info[unit_index].ref_frame[1] = source_ref_frame[1];
      motion_vector_info[unit_index].mv[0].as_mv.row = source_mv[0]->row;
      motion_vector_info[unit_index].mv[0].as_mv.col = source_mv[0]->col;
      motion_vector_info[unit_index].mv[1].as_mv.row = source_mv[1]->row;
      motion_vector_info[unit_index].mv[1].as_mv.col = source_mv[1]->col;
    }
  }
}
static void store_superblock_info(
    const PC_TREE *const pc_tree, MODE_INFO **mi_grid_visible,
    const int mi_stride, const int square_size_4x4, const int num_unit_rows,
    const int num_unit_cols, const int row_start_4x4, const int col_start_4x4,
    PARTITION_INFO *partition_info, MOTION_VECTOR_INFO *motion_vector_info) {
  const int subblock_square_size_4x4 = square_size_4x4 >> 1;
  if (row_start_4x4 >= num_unit_rows || col_start_4x4 >= num_unit_cols) return;
  assert(pc_tree->partitioning != PARTITION_INVALID);
  // End node, no split.
  if (pc_tree->partitioning == PARTITION_NONE ||
      pc_tree->partitioning == PARTITION_HORZ ||
      pc_tree->partitioning == PARTITION_VERT || square_size_4x4 == 1) {
    const int mi_row = row_start_4x4 >> 1;
    const int mi_col = col_start_4x4 >> 1;
    const int mi_idx = mi_stride * mi_row + mi_col;
    MODE_INFO **mi = mi_grid_visible + mi_idx;
    MV *source_mv[2];
    MV_REFERENCE_FRAME source_ref_frame[2];

    // partition info
    const int block_width_4x4 = (pc_tree->partitioning == PARTITION_VERT)
                                    ? square_size_4x4 >> 1
                                    : square_size_4x4;
    const int block_height_4x4 = (pc_tree->partitioning == PARTITION_HORZ)
                                     ? square_size_4x4 >> 1
                                     : square_size_4x4;
    assign_partition_info(row_start_4x4, col_start_4x4, block_width_4x4,
                          block_height_4x4, num_unit_rows, num_unit_cols,
                          partition_info);
    if (pc_tree->partitioning == PARTITION_VERT) {
      assign_partition_info(row_start_4x4, col_start_4x4 + block_width_4x4,
                            block_width_4x4, block_height_4x4, num_unit_rows,
                            num_unit_cols, partition_info);
    } else if (pc_tree->partitioning == PARTITION_HORZ) {
      assign_partition_info(row_start_4x4 + block_height_4x4, col_start_4x4,
                            block_width_4x4, block_height_4x4, num_unit_rows,
                            num_unit_cols, partition_info);
    }

    // motion vector info
    if (pc_tree->partitioning == PARTITION_HORZ) {
      int is_valid_second_rectangle = 0;
      assert(square_size_4x4 > 1);
      // First rectangle.
      source_ref_frame[0] = mi[0]->ref_frame[0];
      source_ref_frame[1] = mi[0]->ref_frame[1];
      source_mv[0] = &mi[0]->mv[0].as_mv;
      source_mv[1] = &mi[0]->mv[1].as_mv;
      assign_motion_vector_info(block_width_4x4, block_height_4x4,
                                row_start_4x4, col_start_4x4, num_unit_rows,
                                num_unit_cols, source_mv, source_ref_frame,
                                motion_vector_info);
      // Second rectangle.
      if (square_size_4x4 == 2) {
        is_valid_second_rectangle = 1;
        source_ref_frame[0] = mi[0]->ref_frame[0];
        source_ref_frame[1] = mi[0]->ref_frame[1];
        source_mv[0] = &mi[0]->bmi[2].as_mv[0].as_mv;
        source_mv[1] = &mi[0]->bmi[2].as_mv[1].as_mv;
      } else {
        const int mi_row_2 = mi_row + (block_height_4x4 >> 1);
        const int mi_col_2 = mi_col;
        if (mi_row_2 * 2 < num_unit_rows && mi_col_2 * 2 < num_unit_cols) {
          const int mi_idx_2 = mi_stride * mi_row_2 + mi_col_2;
          is_valid_second_rectangle = 1;
          mi = mi_grid_visible + mi_idx_2;
          source_ref_frame[0] = mi[0]->ref_frame[0];
          source_ref_frame[1] = mi[0]->ref_frame[1];
          source_mv[0] = &mi[0]->mv[0].as_mv;
          source_mv[1] = &mi[0]->mv[1].as_mv;
        }
      }
      if (is_valid_second_rectangle) {
        assign_motion_vector_info(
            block_width_4x4, block_height_4x4, row_start_4x4 + block_height_4x4,
            col_start_4x4, num_unit_rows, num_unit_cols, source_mv,
            source_ref_frame, motion_vector_info);
      }
    } else if (pc_tree->partitioning == PARTITION_VERT) {
      int is_valid_second_rectangle = 0;
      assert(square_size_4x4 > 1);
      // First rectangle.
      source_ref_frame[0] = mi[0]->ref_frame[0];
      source_ref_frame[1] = mi[0]->ref_frame[1];
      source_mv[0] = &mi[0]->mv[0].as_mv;
      source_mv[1] = &mi[0]->mv[1].as_mv;
      assign_motion_vector_info(block_width_4x4, block_height_4x4,
                                row_start_4x4, col_start_4x4, num_unit_rows,
                                num_unit_cols, source_mv, source_ref_frame,
                                motion_vector_info);
      // Second rectangle.
      if (square_size_4x4 == 2) {
        is_valid_second_rectangle = 1;
        source_ref_frame[0] = mi[0]->ref_frame[0];
        source_ref_frame[1] = mi[0]->ref_frame[1];
        source_mv[0] = &mi[0]->bmi[1].as_mv[0].as_mv;
        source_mv[1] = &mi[0]->bmi[1].as_mv[1].as_mv;
      } else {
        const int mi_row_2 = mi_row;
        const int mi_col_2 = mi_col + (block_width_4x4 >> 1);
        if (mi_row_2 * 2 < num_unit_rows && mi_col_2 * 2 < num_unit_cols) {
          const int mi_idx_2 = mi_stride * mi_row_2 + mi_col_2;
          is_valid_second_rectangle = 1;
          mi = mi_grid_visible + mi_idx_2;
          source_ref_frame[0] = mi[0]->ref_frame[0];
          source_ref_frame[1] = mi[0]->ref_frame[1];
          source_mv[0] = &mi[0]->mv[0].as_mv;
          source_mv[1] = &mi[0]->mv[1].as_mv;
        }
      }
      if (is_valid_second_rectangle) {
        assign_motion_vector_info(
            block_width_4x4, block_height_4x4, row_start_4x4,
            col_start_4x4 + block_width_4x4, num_unit_rows, num_unit_cols,
            source_mv, source_ref_frame, motion_vector_info);
      }
    } else {
      assert(pc_tree->partitioning == PARTITION_NONE || square_size_4x4 == 1);
      source_ref_frame[0] = mi[0]->ref_frame[0];
      source_ref_frame[1] = mi[0]->ref_frame[1];
      if (square_size_4x4 == 1) {
        const int sub8x8_row = row_start_4x4 % 2;
        const int sub8x8_col = col_start_4x4 % 2;
        const int sub8x8_idx = sub8x8_row * 2 + sub8x8_col;
        source_mv[0] = &mi[0]->bmi[sub8x8_idx].as_mv[0].as_mv;
        source_mv[1] = &mi[0]->bmi[sub8x8_idx].as_mv[1].as_mv;
      } else {
        source_mv[0] = &mi[0]->mv[0].as_mv;
        source_mv[1] = &mi[0]->mv[1].as_mv;
      }
      assign_motion_vector_info(block_width_4x4, block_height_4x4,
                                row_start_4x4, col_start_4x4, num_unit_rows,
                                num_unit_cols, source_mv, source_ref_frame,
                                motion_vector_info);
    }

    return;
  }
  // recursively traverse partition tree when partition is split.
  assert(pc_tree->partitioning == PARTITION_SPLIT);
  store_superblock_info(pc_tree->split[0], mi_grid_visible, mi_stride,
                        subblock_square_size_4x4, num_unit_rows, num_unit_cols,
                        row_start_4x4, col_start_4x4, partition_info,
                        motion_vector_info);
  store_superblock_info(pc_tree->split[1], mi_grid_visible, mi_stride,
                        subblock_square_size_4x4, num_unit_rows, num_unit_cols,
                        row_start_4x4, col_start_4x4 + subblock_square_size_4x4,
                        partition_info, motion_vector_info);
  store_superblock_info(pc_tree->split[2], mi_grid_visible, mi_stride,
                        subblock_square_size_4x4, num_unit_rows, num_unit_cols,
                        row_start_4x4 + subblock_square_size_4x4, col_start_4x4,
                        partition_info, motion_vector_info);
  store_superblock_info(pc_tree->split[3], mi_grid_visible, mi_stride,
                        subblock_square_size_4x4, num_unit_rows, num_unit_cols,
                        row_start_4x4 + subblock_square_size_4x4,
                        col_start_4x4 + subblock_square_size_4x4,
                        partition_info, motion_vector_info);
}
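// The four recursive calls above visit the quadrants in raster order with
// unit offsets (0, 0), (0, half), (half, 0) and (half, half), where
// half = subblock_square_size_4x4. Worked example: a 64x64 superblock enters
// with square_size_4x4 = 16 and recurses into four 32x32 quadrants with
// half = 8, bottoming out at square_size_4x4 == 1 (a single 4x4 sub8x8 unit).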
#endif  // CONFIG_RATE_CTRL
#if !CONFIG_REALTIME_ONLY
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static int rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
                             TileDataEnc *tile_data, TOKENEXTRA **tp,
                             int mi_row, int mi_col, BLOCK_SIZE bsize,
                             RD_COST *rd_cost, RD_COST best_rdc,
                             PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  const VP9EncoderConfig *const oxcf = &cpi->oxcf;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *const ctx = &pc_tree->none;
  int i;
  const int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  BLOCK_SIZE subsize;
  RD_COST this_rdc, sum_rdc;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  INTERP_FILTER pred_interp_filter;

  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
  const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;

  BLOCK_SIZE min_size = x->min_partition_size;
  BLOCK_SIZE max_size = x->max_partition_size;

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed =
      !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
  int partition_vert_allowed =
      !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;

  int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_thr.dist;
  int rate_breakout_thr = cpi->sf.partition_search_breakout_thr.rate;
  int must_split = 0;
  int should_encode_sb = 0;

  // Ref frames picked in the [i_th] quarter subblock during square partition
  // RD search. It may be used to prune ref frame selection of rect partitions.
  uint8_t ref_frames_used[4] = { 0, 0, 0, 0 };

  int partition_mul = x->cb_rdmult;

  assert(num_8x8_blocks_wide_lookup[bsize] ==
         num_8x8_blocks_high_lookup[bsize]);

  dist_breakout_thr >>=
      8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);

  rate_breakout_thr *= num_pels_log2_lookup[bsize];

  vp9_rd_cost_init(&this_rdc);
  vp9_rd_cost_init(&sum_rdc);

  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);

  if (oxcf->tuning == VP8_TUNE_SSIM) {
    set_ssim_rdmult(cpi, x, bsize, mi_row, mi_col, &partition_mul);
  }
  vp9_rd_cost_update(partition_mul, x->rddiv, &best_rdc);

  if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode != NO_AQ &&
      cpi->oxcf.aq_mode != LOOKAHEAD_AQ)
    x->mb_energy = vp9_block_energy(cpi, x, bsize);

  if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
    int cb_partition_search_ctrl =
        ((pc_tree->index == 0 || pc_tree->index == 3) +
         get_chessboard_index(cm->current_video_frame)) &
        0x1;

    if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
      set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
  }

  // Get sub block energy range
  if (bsize >= BLOCK_16X16) {
    int min_energy, max_energy;
    vp9_get_sub_block_energy(cpi, x, mi_row, mi_col, bsize, &min_energy,
                             &max_energy);
    must_split = (min_energy < -3) && (max_energy - min_energy > 2);
  }

  // Determine partition types in search according to the speed features.
  // The threshold set here has to be of square block size.
  if (cpi->sf.auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= max_size);
    partition_horz_allowed &=
        ((bsize <= max_size && bsize > min_size) || force_horz_split);
    partition_vert_allowed &=
        ((bsize <= max_size && bsize > min_size) || force_vert_split);
    do_split &= bsize > min_size;
  }

  if (cpi->sf.use_square_partition_only &&
      (bsize > cpi->sf.use_square_only_thresh_high ||
       bsize < cpi->sf.use_square_only_thresh_low)) {
    if (cpi->use_svc) {
      if (!vp9_active_h_edge(cpi, mi_row, mi_step) || x->e_mbd.lossless)
        partition_horz_allowed &= force_horz_split;
      if (!vp9_active_v_edge(cpi, mi_row, mi_step) || x->e_mbd.lossless)
        partition_vert_allowed &= force_vert_split;
    } else {
      partition_horz_allowed &= force_horz_split;
      partition_vert_allowed &= force_vert_split;
    }
  }

  save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);

  pc_tree->partitioning = PARTITION_NONE;

  if (cpi->sf.rd_ml_partition.var_pruning && !frame_is_intra_only(cm)) {
    const int do_rd_ml_partition_var_pruning =
        partition_none_allowed && do_split &&
        mi_row + num_8x8_blocks_high_lookup[bsize] <= cm->mi_rows &&
        mi_col + num_8x8_blocks_wide_lookup[bsize] <= cm->mi_cols;
    if (do_rd_ml_partition_var_pruning) {
      ml_predict_var_rd_paritioning(cpi, x, pc_tree, bsize, mi_row, mi_col,
                                    &partition_none_allowed, &do_split);
    } else {
      vp9_zero(pc_tree->mv);
    }
    if (bsize > BLOCK_8X8) {  // Store MV result as reference for subblocks.
      for (i = 0; i < 4; ++i) pc_tree->split[i]->mv = pc_tree->mv;
    }
  }
  // PARTITION_NONE
  if (partition_none_allowed) {
    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, ctx,
                     best_rdc.rate, best_rdc.dist);
    ctx->rdcost = this_rdc.rdcost;
    if (this_rdc.rate != INT_MAX) {
      if (cpi->sf.prune_ref_frame_for_rect_partitions) {
        const int ref1 = ctx->mic.ref_frame[0];
        const int ref2 = ctx->mic.ref_frame[1];
        for (i = 0; i < 4; ++i) {
          ref_frames_used[i] |= (1 << ref1);
          if (ref2 > 0) ref_frames_used[i] |= (1 << ref2);
        }
      }
      if (bsize >= BLOCK_8X8) {
        this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
        vp9_rd_cost_update(partition_mul, x->rddiv, &this_rdc);
      }

      if (this_rdc.rdcost < best_rdc.rdcost) {
        MODE_INFO *mi = xd->mi[0];

        best_rdc = this_rdc;
        should_encode_sb = 1;
        if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;

        if (cpi->sf.rd_ml_partition.search_early_termination) {
          // Currently, the machine-learning based partition search early
          // termination is only used while bsize is 16x16, 32x32 or 64x64,
          // VPXMIN(cm->width, cm->height) >= 480, and speed = 0.
          if (!x->e_mbd.lossless &&
              !segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP) &&
              ctx->mic.mode >= INTRA_MODES && bsize >= BLOCK_16X16) {
            if (ml_pruning_partition(cm, xd, ctx, mi_row, mi_col, bsize)) {
              do_split = 0;
              do_rect = 0;
            }
          }
        }

        if ((do_split || do_rect) && !x->e_mbd.lossless && ctx->skippable) {
          const int use_ml_based_breakout =
              cpi->sf.rd_ml_partition.search_breakout && cm->base_qindex >= 100;
          if (use_ml_based_breakout) {
            if (ml_predict_breakout(cpi, bsize, x, &this_rdc)) {
              do_split = 0;
              do_rect = 0;
            }
          } else {
            if (!cpi->sf.rd_ml_partition.search_early_termination) {
              if ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
                  (best_rdc.dist < dist_breakout_thr &&
                   best_rdc.rate < rate_breakout_thr)) {
                do_split = 0;
                do_rect = 0;
              }
            }
          }
        }
      }
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  } else {
    vp9_zero(ctx->pred_mv);
    ctx->mic.interp_filter = EIGHTTAP;
  }

  // store estimated motion vector
  store_pred_mv(x, ctx);

  // If the interp_filter is marked as SWITCHABLE_FILTERS, it was for an
  // intra block and used for context purposes.
  if (ctx->mic.interp_filter == SWITCHABLE_FILTERS) {
    pred_interp_filter = EIGHTTAP;
  } else {
    pred_interp_filter = ctx->mic.interp_filter;
  }
  // PARTITION_SPLIT
  // TODO(jingning): use the motion vectors given by the above search as
  // the starting point of motion search in the following partition type check.
  pc_tree->split[0]->none.rdcost = 0;
  pc_tree->split[1]->none.rdcost = 0;
  pc_tree->split[2]->none.rdcost = 0;
  pc_tree->split[3]->none.rdcost = 0;
  if (do_split || must_split) {
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    load_pred_mv(x, ctx);
    if (bsize == BLOCK_8X8) {
      i = 4;
      if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
        pc_tree->leaf_split[0]->pred_interp_filter = pred_interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                       pc_tree->leaf_split[0], best_rdc.rate, best_rdc.dist);
      if (sum_rdc.rate == INT_MAX) {
        sum_rdc.rdcost = INT64_MAX;
      } else {
        if (cpi->sf.prune_ref_frame_for_rect_partitions) {
          const int ref1 = pc_tree->leaf_split[0]->mic.ref_frame[0];
          const int ref2 = pc_tree->leaf_split[0]->mic.ref_frame[1];
          for (i = 0; i < 4; ++i) {
            ref_frames_used[i] |= (1 << ref1);
            if (ref2 > 0) ref_frames_used[i] |= (1 << ref2);
          }
        }
      }
    } else {
      for (i = 0; (i < 4) && ((sum_rdc.rdcost < best_rdc.rdcost) || must_split);
           ++i) {
        const int x_idx = (i & 1) * mi_step;
        const int y_idx = (i >> 1) * mi_step;
        int found_best_rd = 0;
        RD_COST best_rdc_split;
        vp9_rd_cost_reset(&best_rdc_split);

        if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX) {
          // A must split test here increases the number of sub
          // partitions but hurts metrics results quite a bit,
          // so this extra test is commented out pending
          // further tests on whether it adds much in terms of
          // visual quality.
          // (must_split) ? best_rdc.rate
          //              : best_rdc.rate - sum_rdc.rate,
          // (must_split) ? best_rdc.dist
          //              : best_rdc.dist - sum_rdc.dist,
          best_rdc_split.rate = best_rdc.rate - sum_rdc.rate;
          best_rdc_split.dist = best_rdc.dist - sum_rdc.dist;
        }

        if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
          continue;

        pc_tree->split[i]->index = i;
        if (cpi->sf.prune_ref_frame_for_rect_partitions)
          pc_tree->split[i]->none.rate = INT_MAX;
        found_best_rd = rd_pick_partition(
            cpi, td, tile_data, tp, mi_row + y_idx, mi_col + x_idx, subsize,
            &this_rdc, best_rdc_split, pc_tree->split[i]);

        if (found_best_rd == 0) {
          sum_rdc.rdcost = INT64_MAX;
          break;
        } else {
          if (cpi->sf.prune_ref_frame_for_rect_partitions &&
              pc_tree->split[i]->none.rate != INT_MAX) {
            const int ref1 = pc_tree->split[i]->none.mic.ref_frame[0];
            const int ref2 = pc_tree->split[i]->none.mic.ref_frame[1];
            ref_frames_used[i] |= (1 << ref1);
            if (ref2 > 0) ref_frames_used[i] |= (1 << ref2);
          }
          sum_rdc.rate += this_rdc.rate;
          sum_rdc.dist += this_rdc.dist;
          vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
        }
      }
    }

    if (((sum_rdc.rdcost < best_rdc.rdcost) || must_split) && i == 4) {
      sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);

      if ((sum_rdc.rdcost < best_rdc.rdcost) ||
          (must_split && (sum_rdc.dist < best_rdc.dist))) {
        best_rdc = sum_rdc;
        should_encode_sb = 1;
        pc_tree->partitioning = PARTITION_SPLIT;

        // Rate and distortion based partition search termination clause.
        if (!cpi->sf.rd_ml_partition.search_early_termination &&
            !x->e_mbd.lossless &&
            ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
             (best_rdc.dist < dist_breakout_thr &&
              best_rdc.rate < rate_breakout_thr))) {
          do_rect = 0;
        }
      }
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check &&
          (bsize > cpi->sf.use_square_only_thresh_high ||
           best_rdc.dist < dist_breakout_thr))
        do_rect &= !partition_none_allowed;
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  pc_tree->horizontal[0].skip_ref_frame_mask = 0;
  pc_tree->horizontal[1].skip_ref_frame_mask = 0;
  pc_tree->vertical[0].skip_ref_frame_mask = 0;
  pc_tree->vertical[1].skip_ref_frame_mask = 0;
  if (cpi->sf.prune_ref_frame_for_rect_partitions) {
    uint8_t used_frames;
    used_frames = ref_frames_used[0] | ref_frames_used[1];
    if (used_frames) {
      pc_tree->horizontal[0].skip_ref_frame_mask = ~used_frames & 0xff;
    }
    used_frames = ref_frames_used[2] | ref_frames_used[3];
    if (used_frames) {
      pc_tree->horizontal[1].skip_ref_frame_mask = ~used_frames & 0xff;
    }
    used_frames = ref_frames_used[0] | ref_frames_used[2];
    if (used_frames) {
      pc_tree->vertical[0].skip_ref_frame_mask = ~used_frames & 0xff;
    }
    used_frames = ref_frames_used[1] | ref_frames_used[3];
    if (used_frames) {
      pc_tree->vertical[1].skip_ref_frame_mask = ~used_frames & 0xff;
    }
  }

  {
    const int do_ml_rect_partition_pruning =
        !frame_is_intra_only(cm) && !force_horz_split && !force_vert_split &&
        (partition_horz_allowed || partition_vert_allowed) && bsize > BLOCK_8X8;
    if (do_ml_rect_partition_pruning) {
      ml_prune_rect_partition(cpi, x, bsize, pc_tree, &partition_horz_allowed,
                              &partition_vert_allowed, best_rdc.rdcost);
    }
  }
  // PARTITION_HORZ
  if (partition_horz_allowed &&
      (do_rect || vp9_active_h_edge(cpi, mi_row, mi_step))) {
    const int part_mode_rate = cpi->partition_cost[pl][PARTITION_HORZ];
    subsize = get_subsize(bsize, PARTITION_HORZ);
    load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->horizontal[0].pred_interp_filter = pred_interp_filter;
    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                     &pc_tree->horizontal[0], best_rdc.rate - part_mode_rate,
                     best_rdc.dist);
    if (sum_rdc.rdcost < INT64_MAX) {
      sum_rdc.rate += part_mode_rate;
      vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
    }

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
        bsize > BLOCK_8X8) {
      PICK_MODE_CONTEXT *hctx = &pc_tree->horizontal[0];
      update_state(cpi, td, hctx, mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, hctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->horizontal[1].pred_interp_filter = pred_interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc,
                       subsize, &pc_tree->horizontal[1],
                       best_rdc.rate - sum_rdc.rate,
                       best_rdc.dist - sum_rdc.dist);
      if (this_rdc.rate == INT_MAX) {
        sum_rdc.rdcost = INT64_MAX;
      } else {
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      should_encode_sb = 1;
      pc_tree->partitioning = PARTITION_HORZ;

      if (cpi->sf.less_rectangular_check &&
          bsize > cpi->sf.use_square_only_thresh_high)
        do_rect = 0;
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // PARTITION_VERT
  if (partition_vert_allowed &&
      (do_rect || vp9_active_v_edge(cpi, mi_col, mi_step))) {
    const int part_mode_rate = cpi->partition_cost[pl][PARTITION_VERT];
    subsize = get_subsize(bsize, PARTITION_VERT);
    load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->vertical[0].pred_interp_filter = pred_interp_filter;
    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                     &pc_tree->vertical[0], best_rdc.rate - part_mode_rate,
                     best_rdc.dist);
    if (sum_rdc.rdcost < INT64_MAX) {
      sum_rdc.rate += part_mode_rate;
      vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
    }

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
        bsize > BLOCK_8X8) {
      update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
                        &pc_tree->vertical[0]);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->vertical[1].pred_interp_filter = pred_interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc,
                       subsize, &pc_tree->vertical[1],
                       best_rdc.rate - sum_rdc.rate,
                       best_rdc.dist - sum_rdc.dist);
      if (this_rdc.rate == INT_MAX) {
        sum_rdc.rdcost = INT64_MAX;
      } else {
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        vp9_rd_cost_update(partition_mul, x->rddiv, &sum_rdc);
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      should_encode_sb = 1;
      pc_tree->partitioning = PARTITION_VERT;
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  *rd_cost = best_rdc;

  if (should_encode_sb && pc_tree->index != 3) {
    int output_enabled = (bsize == BLOCK_64X64);
    encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
              pc_tree);
#if CONFIG_RATE_CTRL
    if (oxcf->use_simple_encode_api) {
      // Store partition, motion vector of the superblock.
      if (output_enabled) {
        const int num_unit_rows =
            get_num_unit_4x4(cpi->frame_info.frame_height);
        const int num_unit_cols = get_num_unit_4x4(cpi->frame_info.frame_width);
        store_superblock_info(pc_tree, cm->mi_grid_visible, cm->mi_stride,
                              num_4x4_blocks_wide_lookup[BLOCK_64X64],
                              num_unit_rows, num_unit_cols, mi_row << 1,
                              mi_col << 1, cpi->partition_info,
                              cpi->motion_vector_info);
      }
    }
#endif  // CONFIG_RATE_CTRL
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rdc.rate < INT_MAX);
    assert(best_rdc.dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }

  return should_encode_sb;
}
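// Bookkeeping pattern shared by every candidate above (NONE, SPLIT, HORZ,
// VERT): charge the candidate its partition-mode signaling rate, recompute the
// combined cost, and adopt it only on strict improvement. A hedged,
// self-contained sketch with a simplified lambda * rate + dist cost model
// (the encoder's own combination is vp9_rd_cost_update / RDCOST):
typedef struct {
  int rate;
  int64_t dist;
  int64_t rdcost;
} RD_COST_SKETCH;

static INLINE int keep_if_better_sketch(RD_COST_SKETCH *cand,
                                        int part_mode_rate, int64_t lambda,
                                        RD_COST_SKETCH *best) {
  cand->rate += part_mode_rate;
  cand->rdcost = lambda * cand->rate + cand->dist;  // Simplified cost model.
  if (cand->rdcost < best->rdcost) {
    *best = *cand;  // The caller also records pc_tree->partitioning.
    return 1;
  }
  return 0;
}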
static void encode_rd_sb_row(VP9_COMP *cpi, ThreadData *td,
                             TileDataEnc *tile_data, int mi_row,
                             TOKENEXTRA **tp) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int mi_col_start = tile_info->mi_col_start;
  const int mi_col_end = tile_info->mi_col_end;
  int mi_col;
  const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
  const int num_sb_cols =
      get_num_cols(tile_data->tile_info, MI_BLOCK_SIZE_LOG2);
  int sb_col_in_tile;

  // Initialize the left context for the new SB row
  memset(&xd->left_context, 0, sizeof(xd->left_context));
  memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = mi_col_start, sb_col_in_tile = 0; mi_col < mi_col_end;
       mi_col += MI_BLOCK_SIZE, sb_col_in_tile++) {
    const struct segmentation *const seg = &cm->seg;
    int dummy_rate;
    int64_t dummy_dist;
    RD_COST dummy_rdc;
    int i;
    int seg_skip = 0;
    int orig_rdmult = cpi->rd.RDMULT;

    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;

    vp9_rd_cost_reset(&dummy_rdc);
    (*(cpi->row_mt_sync_read_ptr))(&tile_data->row_mt_sync, sb_row,
                                   sb_col_in_tile);

    if (sf->adaptive_pred_interp_filter) {
      for (i = 0; i < 64; ++i) td->leaf_tree[i].pred_interp_filter = SWITCHABLE;

      for (i = 0; i < 64; ++i) {
        td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
      }
    }

    for (i = 0; i < MAX_REF_FRAMES; ++i) {
      x->pred_mv[i].row = INT16_MAX;
      x->pred_mv[i].col = INT16_MAX;
    }
    td->pc_root->index = 0;

    if (seg->enabled) {
      const uint8_t *const map =
          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
      int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
      seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
    }

    x->source_variance = UINT_MAX;

    x->cb_rdmult = orig_rdmult;

    if (sf->partition_search_type == FIXED_PARTITION || seg_skip) {
      const BLOCK_SIZE bsize =
          seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
      set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
      set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
                       &dummy_rate, &dummy_dist, 1, td->pc_root);
    } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
               cm->frame_type != KEY_FRAME) {
      choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
                       &dummy_rate, &dummy_dist, 1, td->pc_root);
    } else {
      if (cpi->twopass.gf_group.index > 0 && cpi->sf.enable_tpl_model) {
        int dr =
            get_rdmult_delta(cpi, BLOCK_64X64, mi_row, mi_col, orig_rdmult);
        x->cb_rdmult = dr;
      }

      if (cpi->oxcf.aq_mode == PERCEPTUAL_AQ && cm->show_frame) {
        x->segment_id = wiener_var_segment(cpi, BLOCK_64X64, mi_row, mi_col);
        x->cb_rdmult = vp9_compute_rd_mult(
            cpi, vp9_get_qindex(&cm->seg, x->segment_id, cm->base_qindex));
      }

      // If required set upper and lower partition size limits
      if (sf->auto_min_max_partition_size) {
        set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
        rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
                                &x->min_partition_size, &x->max_partition_size);
      }
      td->pc_root->none.rdcost = 0;
      rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
                        &dummy_rdc, dummy_rdc, td->pc_root);
    }
    (*(cpi->row_mt_sync_write_ptr))(&tile_data->row_mt_sync, sb_row,
                                    sb_col_in_tile, num_sb_cols);
  }
}
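// Row-level multithreading above follows a read-before/write-after protocol:
// before encoding superblock (sb_row, sb_col) the worker blocks until the row
// above has advanced past that column, and after encoding it publishes its own
// progress so the row below may proceed. Hedged outline (illustrative names):
//
//   row_mt_sync_read(sync, sb_row, sb_col);   // Wait on row sb_row - 1.
//   encode_one_superblock(sb_row, sb_col);
//   row_mt_sync_write(sync, sb_row, sb_col, num_sb_cols);  // Wake waiters.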
#endif  // !CONFIG_REALTIME_ONLY
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->td.mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);

  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);

  vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(xd->above_context[0], 0,
         sizeof(*xd->above_context[0]) * 2 * aligned_mi_cols * MAX_MB_PLANE);
  memset(xd->above_seg_context, 0,
         sizeof(*xd->above_seg_context) * aligned_mi_cols);
}

static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) +
            !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}

static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
  int mi_row, mi_col;
  const int mis = cm->mi_stride;
  MODE_INFO **mi_ptr = cm->mi_grid_visible;

  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
      if (mi_ptr[mi_col]->tx_size > max_tx_size)
        mi_ptr[mi_col]->tx_size = max_tx_size;
    }
  }
}

static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return INTRA_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return GOLDEN_FRAME;
  else
    return LAST_FRAME;
}

static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
  if (xd->lossless) return ONLY_4X4;
  if (cpi->common.frame_type == KEY_FRAME && cpi->sf.use_nonrd_pick_mode)
    return ALLOW_16X16;
  if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
    return ALLOW_32X32;
  else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
           cpi->sf.tx_size_search_method == USE_TX_8X8)
    return TX_MODE_SELECT;
  else
    return cpi->common.tx_mode;
}
static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
                                     RD_COST *rd_cost, BLOCK_SIZE bsize,
                                     PICK_MODE_CONTEXT *ctx) {
  if (!cpi->sf.nonrd_keyframe && bsize < BLOCK_16X16)
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
  else
    vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
}

static void hybrid_search_svc_baseiskey(VP9_COMP *cpi, MACROBLOCK *const x,
                                        RD_COST *rd_cost, BLOCK_SIZE bsize,
                                        PICK_MODE_CONTEXT *ctx,
                                        TileDataEnc *tile_data, int mi_row,
                                        int mi_col) {
  if (!cpi->sf.nonrd_keyframe && bsize <= BLOCK_8X8) {
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
  } else {
    if (cpi->svc.disable_inter_layer_pred == INTER_LAYER_PRED_OFF)
      vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
    else if (bsize >= BLOCK_8X8)
      vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize,
                          ctx);
    else
      vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col, rd_cost, bsize, ctx);
  }
}

static void hybrid_search_scene_change(VP9_COMP *cpi, MACROBLOCK *const x,
                                       RD_COST *rd_cost, BLOCK_SIZE bsize,
                                       PICK_MODE_CONTEXT *ctx,
                                       TileDataEnc *tile_data, int mi_row,
                                       int mi_col) {
  if (!cpi->sf.nonrd_keyframe && bsize <= BLOCK_8X8) {
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
  } else {
    vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize, ctx);
  }
}
static void nonrd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data,
                                MACROBLOCK *const x, int mi_row, int mi_col,
                                RD_COST *rd_cost, BLOCK_SIZE bsize,
                                PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  BLOCK_SIZE bs = VPXMAX(bsize, BLOCK_8X8);  // processing unit block size
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bs];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bs];
  int plane;

  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);

  set_segment_index(cpi, x, mi_row, mi_col, bsize, 0);

  mi = xd->mi[0];
  mi->sb_type = bsize;

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    struct macroblockd_plane *pd = &xd->plane[plane];
    memcpy(a + num_4x4_blocks_wide * plane, pd->above_context,
           (sizeof(a[0]) * num_4x4_blocks_wide) >> pd->subsampling_x);
    memcpy(l + num_4x4_blocks_high * plane, pd->left_context,
           (sizeof(l[0]) * num_4x4_blocks_high) >> pd->subsampling_y);
  }

  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
    if (cyclic_refresh_segment_id_boosted(mi->segment_id))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);

  if (frame_is_intra_only(cm))
    hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
  else if (cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)
    hybrid_search_svc_baseiskey(cpi, x, rd_cost, bsize, ctx, tile_data, mi_row,
                                mi_col);
  else if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
    set_mode_info_seg_skip(x, cm->tx_mode, cm->interp_filter, rd_cost, bsize);
  else if (bsize >= BLOCK_8X8) {
    if (cpi->rc.hybrid_intra_scene_change)
      hybrid_search_scene_change(cpi, x, rd_cost, bsize, ctx, tile_data, mi_row,
                                 mi_col);
    else
      vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize,
                          ctx);
  } else {
    vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col, rd_cost, bsize, ctx);
  }

  duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    struct macroblockd_plane *pd = &xd->plane[plane];
    memcpy(pd->above_context, a + num_4x4_blocks_wide * plane,
           (sizeof(a[0]) * num_4x4_blocks_wide) >> pd->subsampling_x);
    memcpy(pd->left_context, l + num_4x4_blocks_high * plane,
           (sizeof(l[0]) * num_4x4_blocks_high) >> pd->subsampling_y);
  }

  if (rd_cost->rate == INT_MAX) vp9_rd_cost_reset(rd_cost);

  ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}
static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
  MACROBLOCKD *xd = &x->e_mbd;
  int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition = pc_tree->partitioning;
  BLOCK_SIZE subsize = get_subsize(bsize, partition);

  assert(bsize >= BLOCK_8X8);

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  switch (partition) {
    case PARTITION_NONE:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->none.mic;
      *(x->mbmi_ext) = pc_tree->none.mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      break;
    case PARTITION_VERT:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->vertical[0].mic;
      *(x->mbmi_ext) = pc_tree->vertical[0].mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);

      if (mi_col + hbs < cm->mi_cols) {
        set_mode_info_offsets(cm, x, xd, mi_row, mi_col + hbs);
        *(xd->mi[0]) = pc_tree->vertical[1].mic;
        *(x->mbmi_ext) = pc_tree->vertical[1].mbmi_ext;
        duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
      }
      break;
    case PARTITION_HORZ:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->horizontal[0].mic;
      *(x->mbmi_ext) = pc_tree->horizontal[0].mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
      if (mi_row + hbs < cm->mi_rows) {
        set_mode_info_offsets(cm, x, xd, mi_row + hbs, mi_col);
        *(xd->mi[0]) = pc_tree->horizontal[1].mic;
        *(x->mbmi_ext) = pc_tree->horizontal[1].mbmi_ext;
        duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
      }
      break;
    case PARTITION_SPLIT: {
      fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
      fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
                        pc_tree->split[1]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
                        pc_tree->split[2]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
                        pc_tree->split[3]);
      break;
    }
    default: break;
  }
}

// Reset the prediction pixel ready flag recursively.
static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
  pc_tree->none.pred_pixel_ready = 0;
  pc_tree->horizontal[0].pred_pixel_ready = 0;
  pc_tree->horizontal[1].pred_pixel_ready = 0;
  pc_tree->vertical[0].pred_pixel_ready = 0;
  pc_tree->vertical[1].pred_pixel_ready = 0;

  if (bsize > BLOCK_8X8) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
    int i;
    for (i = 0; i < 4; ++i) pred_pixel_ready_reset(pc_tree->split[i], subsize);
  }
}
#define FEATURES 6
#define LABELS 1
static int ml_predict_var_paritioning(VP9_COMP *cpi, MACROBLOCK *x,
                                      BLOCK_SIZE bsize, int mi_row,
                                      int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  const NN_CONFIG *nn_config = NULL;

  switch (bsize) {
    case BLOCK_64X64: nn_config = &vp9_var_part_nnconfig_64; break;
    case BLOCK_32X32: nn_config = &vp9_var_part_nnconfig_32; break;
    case BLOCK_16X16: nn_config = &vp9_var_part_nnconfig_16; break;
    case BLOCK_8X8: break;
    default: assert(0 && "Unexpected block size."); return -1;
  }

  if (!nn_config) return -1;

  vpx_clear_system_state();

  {
    const float thresh = cpi->oxcf.speed <= 5 ? 1.25f : 0.0f;
    float features[FEATURES] = { 0.0f };
    const int dc_q = vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth);
    int feature_idx = 0;
    float score[LABELS];

    features[feature_idx++] = logf((float)(dc_q * dc_q) / 256.0f + 1.0f);
    vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
    {
      const int bs = 4 * num_4x4_blocks_wide_lookup[bsize];
      const BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
      const int sb_offset_row = 8 * (mi_row & 7);
      const int sb_offset_col = 8 * (mi_col & 7);
      const uint8_t *pred = x->est_pred + sb_offset_row * 64 + sb_offset_col;
      const uint8_t *src = x->plane[0].src.buf;
      const int src_stride = x->plane[0].src.stride;
      const int pred_stride = 64;
      unsigned int sse;
      int i;
      // Variance of whole block.
      const unsigned int var =
          cpi->fn_ptr[bsize].vf(src, src_stride, pred, pred_stride, &sse);
      const float factor = (var == 0) ? 1.0f : (1.0f / (float)var);

      features[feature_idx++] = logf((float)var + 1.0f);
      for (i = 0; i < 4; ++i) {
        const int x_idx = (i & 1) * bs / 2;
        const int y_idx = (i >> 1) * bs / 2;
        const int src_offset = y_idx * src_stride + x_idx;
        const int pred_offset = y_idx * pred_stride + x_idx;
        // Variance of quarter block.
        const unsigned int sub_var =
            cpi->fn_ptr[subsize].vf(src + src_offset, src_stride,
                                    pred + pred_offset, pred_stride, &sse);
        const float var_ratio = (var == 0) ? 1.0f : factor * (float)sub_var;
        features[feature_idx++] = var_ratio;
      }
    }

    assert(feature_idx == FEATURES);
    nn_predict(features, nn_config, score);
    if (score[0] > thresh) return PARTITION_SPLIT;
    if (score[0] < -thresh) return PARTITION_NONE;
  }

  return -1;
}
#undef FEATURES
#undef LABELS
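// Usage sketch: the return value is a three-way decision consumed by
// nonrd_pick_partition below; -1 means "no opinion, keep both options".
// Hedged example mirroring the caller's logic:
//
//   const int part = ml_predict_var_paritioning(cpi, x, bsize, mi_row, mi_col);
//   if (part == PARTITION_NONE) do_split = 0;
//   else if (part == PARTITION_SPLIT) partition_none_allowed = 0;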
4859 static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
4860 TileDataEnc *tile_data, TOKENEXTRA **tp,
4861 int mi_row, int mi_col, BLOCK_SIZE bsize,
4862 RD_COST *rd_cost, int do_recon,
4863 int64_t best_rd, PC_TREE *pc_tree) {
4864 const SPEED_FEATURES *const sf = &cpi->sf;
4865 VP9_COMMON *const cm = &cpi->common;
4866 TileInfo *const tile_info = &tile_data->tile_info;
4867 MACROBLOCK *const x = &td->mb;
4868 MACROBLOCKD *const xd = &x->e_mbd;
4869 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
4870 TOKENEXTRA *tp_orig = *tp;
4871 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
4873 BLOCK_SIZE subsize = bsize;
4874 RD_COST this_rdc, sum_rdc, best_rdc;
4875 int do_split = bsize >= BLOCK_8X8;
4877 // Override skipping rectangular partition operations for edge blocks
4878 const int force_horz_split = (mi_row + ms >= cm->mi_rows);
4879 const int force_vert_split = (mi_col + ms >= cm->mi_cols);
4880 const int xss = x->e_mbd.plane[1].subsampling_x;
4881 const int yss = x->e_mbd.plane[1].subsampling_y;
4883 int partition_none_allowed = !force_horz_split && !force_vert_split;
4884 int partition_horz_allowed =
4885 !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
4886 int partition_vert_allowed =
4887 !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
4888 const int use_ml_based_partitioning =
4889 sf->partition_search_type == ML_BASED_PARTITION;
4893 // Avoid checking for rectangular partitions for speed >= 5.
4894 if (cpi->oxcf.speed >= 5) do_rect = 0;
4896 assert(num_8x8_blocks_wide_lookup[bsize] ==
4897 num_8x8_blocks_high_lookup[bsize]);
4899 vp9_rd_cost_init(&sum_rdc);
4900 vp9_rd_cost_reset(&best_rdc);
4901 best_rdc.rdcost = best_rd;
4903 // Determine partition types in search according to the speed features.
4904 // The threshold set here has to be of square block size.
4905 if (sf->auto_min_max_partition_size) {
4906 partition_none_allowed &=
4907 (bsize <= x->max_partition_size && bsize >= x->min_partition_size);
4908 partition_horz_allowed &=
4909 ((bsize <= x->max_partition_size && bsize > x->min_partition_size) ||
4911 partition_vert_allowed &=
4912 ((bsize <= x->max_partition_size && bsize > x->min_partition_size) ||
4914 do_split &= bsize > x->min_partition_size;
4916 if (sf->use_square_partition_only) {
4917 partition_horz_allowed &= force_horz_split;
4918 partition_vert_allowed &= force_vert_split;
4921 if (use_ml_based_partitioning) {
4922 if (partition_none_allowed || do_split) do_rect = 0;
4923 if (partition_none_allowed && do_split) {
4924 const int ml_predicted_partition =
4925 ml_predict_var_paritioning(cpi, x, bsize, mi_row, mi_col);
4926 if (ml_predicted_partition == PARTITION_NONE) do_split = 0;
4927 if (ml_predicted_partition == PARTITION_SPLIT) partition_none_allowed = 0;
4931 if (!partition_none_allowed && !do_split) do_rect = 1;
4933 ctx->pred_pixel_ready =
4934 !(partition_vert_allowed || partition_horz_allowed || do_split);
4937 if (partition_none_allowed) {
4938 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize,
4940 ctx->mic = *xd->mi[0];
4941 ctx->mbmi_ext = *x->mbmi_ext;
4942 ctx->skip_txfm[0] = x->skip_txfm[0];
4943 ctx->skip = x->skip;
4945 if (this_rdc.rate != INT_MAX) {
4946 const int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
4947 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
4949 RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
4950 if (this_rdc.rdcost < best_rdc.rdcost) {
4951 best_rdc = this_rdc;
4952 if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
4954 if (!use_ml_based_partitioning) {
4955 int64_t dist_breakout_thr = sf->partition_search_breakout_thr.dist;
4956 int64_t rate_breakout_thr = sf->partition_search_breakout_thr.rate;
4957 dist_breakout_thr >>=
4958 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
4959 rate_breakout_thr *= num_pels_log2_lookup[bsize];
4960 if (!x->e_mbd.lossless && this_rdc.rate < rate_breakout_thr &&
4961 this_rdc.dist < dist_breakout_thr) {
4970 // store estimated motion vector
4971 store_pred_mv(x, ctx);
4975 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
4976 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
4977 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
4978 subsize = get_subsize(bsize, PARTITION_SPLIT);
4979 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
4980 const int x_idx = (i & 1) * ms;
4981 const int y_idx = (i >> 1) * ms;
4983 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
4985 load_pred_mv(x, ctx);
4986 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
4987 mi_col + x_idx, subsize, &this_rdc, 0,
4988 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
4990 if (this_rdc.rate == INT_MAX) {
4991 vp9_rd_cost_reset(&sum_rdc);
4993 sum_rdc.rate += this_rdc.rate;
4994 sum_rdc.dist += this_rdc.dist;
4995 sum_rdc.rdcost += this_rdc.rdcost;
4999 if (sum_rdc.rdcost < best_rdc.rdcost) {
5001 pc_tree->partitioning = PARTITION_SPLIT;
5003 // skip rectangular partition test when larger block size
5004 // gives better rd cost
5005 if (sf->less_rectangular_check) do_rect &= !partition_none_allowed;
5010 if (partition_horz_allowed && do_rect) {
5011 subsize = get_subsize(bsize, PARTITION_HORZ);
5012 load_pred_mv(x, ctx);
5013 pc_tree->horizontal[0].pred_pixel_ready = 1;
5014 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
5015 &pc_tree->horizontal[0]);
5017 pc_tree->horizontal[0].mic = *xd->mi[0];
5018 pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
5019 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
5020 pc_tree->horizontal[0].skip = x->skip;
5022 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
5023 load_pred_mv(x, ctx);
5024 pc_tree->horizontal[1].pred_pixel_ready = 1;
5025 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col, &this_rdc,
5026 subsize, &pc_tree->horizontal[1]);
5028 pc_tree->horizontal[1].mic = *xd->mi[0];
5029 pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
5030 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
5031 pc_tree->horizontal[1].skip = x->skip;
      if (this_rdc.rate == INT_MAX) {
        vp9_rd_cost_reset(&sum_rdc);
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
5037 this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
5038 sum_rdc.rate += this_rdc.rate;
5039 sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost =
            RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
      }
    }
    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      pc_tree->partitioning = PARTITION_HORZ;
    } else {
      pred_pixel_ready_reset(pc_tree, bsize);
    }
  }
5054 if (partition_vert_allowed && do_rect) {
5055 subsize = get_subsize(bsize, PARTITION_VERT);
5056 load_pred_mv(x, ctx);
5057 pc_tree->vertical[0].pred_pixel_ready = 1;
5058 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
5059 &pc_tree->vertical[0]);
5060 pc_tree->vertical[0].mic = *xd->mi[0];
5061 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
5062 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
5063 pc_tree->vertical[0].skip = x->skip;
5065 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
5066 load_pred_mv(x, ctx);
5067 pc_tree->vertical[1].pred_pixel_ready = 1;
5068 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms, &this_rdc,
5069 subsize, &pc_tree->vertical[1]);
5070 pc_tree->vertical[1].mic = *xd->mi[0];
5071 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
5072 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
5073 pc_tree->vertical[1].skip = x->skip;
      if (this_rdc.rate == INT_MAX) {
        vp9_rd_cost_reset(&sum_rdc);
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
5079 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
5080 sum_rdc.rate += this_rdc.rate;
5081 sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost =
            RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
      }
    }
    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      pc_tree->partitioning = PARTITION_VERT;
    } else {
      pred_pixel_ready_reset(pc_tree, bsize);
    }
  }
5095 *rd_cost = best_rdc;
5097 if (best_rdc.rate == INT_MAX) {
5098 vp9_rd_cost_reset(rd_cost);
5102 // update mode info array
5103 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, pc_tree);
5105 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
5106 int output_enabled = (bsize == BLOCK_64X64);
    encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
                 pc_tree);
5111 if (bsize == BLOCK_64X64 && do_recon) {
5112 assert(tp_orig < *tp);
5113 assert(best_rdc.rate < INT_MAX);
    assert(best_rdc.dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}
5120 static void nonrd_select_partition(VP9_COMP *cpi, ThreadData *td,
5121 TileDataEnc *tile_data, MODE_INFO **mi,
5122 TOKENEXTRA **tp, int mi_row, int mi_col,
5123 BLOCK_SIZE bsize, int output_enabled,
5124 RD_COST *rd_cost, PC_TREE *pc_tree) {
5125 VP9_COMMON *const cm = &cpi->common;
5126 TileInfo *const tile_info = &tile_data->tile_info;
5127 MACROBLOCK *const x = &td->mb;
5128 MACROBLOCKD *const xd = &x->e_mbd;
5129 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
5130 const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  RD_COST this_rdc;
5134 BLOCK_SIZE subsize_ref =
5135 (cpi->sf.adapt_partition_source_sad) ? BLOCK_8X8 : BLOCK_16X16;
5137 vp9_rd_cost_reset(&this_rdc);
5138 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
5140 subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
5141 partition = partition_lookup[bsl][subsize];
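  /* Editor's illustration of what partition_lookup inverts: given the
   * parent bsize and the block size actually coded at this node, recover
   * the partition. A minimal sketch for a 32x32 parent (hypothetical
   * helper, not encoder code):
   *
   *   static PARTITION_TYPE partition_of_32x32(BLOCK_SIZE coded) {
   *     if (coded == BLOCK_32X32) return PARTITION_NONE;
   *     if (coded == BLOCK_32X16) return PARTITION_HORZ;
   *     if (coded == BLOCK_16X32) return PARTITION_VERT;
   *     return PARTITION_SPLIT;  // any smaller coded size implies a split
   *   }
   */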
5143 if (bsize == BLOCK_32X32 && subsize == BLOCK_32X32) {
5144 x->max_partition_size = BLOCK_32X32;
5145 x->min_partition_size = BLOCK_16X16;
5146 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
5147 0, INT64_MAX, pc_tree);
5148 } else if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
5149 subsize >= subsize_ref) {
5150 x->max_partition_size = BLOCK_32X32;
5151 x->min_partition_size = BLOCK_8X8;
5152 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
5153 0, INT64_MAX, pc_tree);
5154 } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
5155 x->max_partition_size = BLOCK_16X16;
5156 x->min_partition_size = BLOCK_8X8;
5157 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
                         0, INT64_MAX, pc_tree);
  } else {
    switch (partition) {
5161 case PARTITION_NONE:
5162 pc_tree->none.pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
                            &pc_tree->none);
        pc_tree->none.mic = *xd->mi[0];
5166 pc_tree->none.mbmi_ext = *x->mbmi_ext;
5167 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
        pc_tree->none.skip = x->skip;
        break;
      case PARTITION_VERT:
5171 pc_tree->vertical[0].pred_pixel_ready = 1;
5172 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
5173 &pc_tree->vertical[0]);
5174 pc_tree->vertical[0].mic = *xd->mi[0];
5175 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
5176 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
5177 pc_tree->vertical[0].skip = x->skip;
5178 if (mi_col + hbs < cm->mi_cols) {
5179 pc_tree->vertical[1].pred_pixel_ready = 1;
5180 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
5181 &this_rdc, subsize, &pc_tree->vertical[1]);
5182 pc_tree->vertical[1].mic = *xd->mi[0];
5183 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
5184 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
5185 pc_tree->vertical[1].skip = x->skip;
5186 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5187 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5188 rd_cost->rate += this_rdc.rate;
            rd_cost->dist += this_rdc.dist;
          }
        }
        break;
      case PARTITION_HORZ:
5194 pc_tree->horizontal[0].pred_pixel_ready = 1;
5195 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
5196 &pc_tree->horizontal[0]);
5197 pc_tree->horizontal[0].mic = *xd->mi[0];
5198 pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
5199 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
5200 pc_tree->horizontal[0].skip = x->skip;
5201 if (mi_row + hbs < cm->mi_rows) {
5202 pc_tree->horizontal[1].pred_pixel_ready = 1;
5203 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
5204 &this_rdc, subsize, &pc_tree->horizontal[1]);
5205 pc_tree->horizontal[1].mic = *xd->mi[0];
5206 pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
5207 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
5208 pc_tree->horizontal[1].skip = x->skip;
5209 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5210 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5211 rd_cost->rate += this_rdc.rate;
            rd_cost->dist += this_rdc.dist;
          }
        }
        break;
      default:
        assert(partition == PARTITION_SPLIT);
5218 subsize = get_subsize(bsize, PARTITION_SPLIT);
5219 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                               subsize, output_enabled, rd_cost,
                               pc_tree->split[0]);
5222 nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp, mi_row,
                               mi_col + hbs, subsize, output_enabled, &this_rdc,
                               pc_tree->split[1]);
5225 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5226 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5227 rd_cost->rate += this_rdc.rate;
5228 rd_cost->dist += this_rdc.dist;
5230 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis, tp,
5231 mi_row + hbs, mi_col, subsize, output_enabled,
5232 &this_rdc, pc_tree->split[2]);
5233 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5234 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5235 rd_cost->rate += this_rdc.rate;
5236 rd_cost->dist += this_rdc.dist;
5238 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
5239 mi_row + hbs, mi_col + hbs, subsize,
5240 output_enabled, &this_rdc, pc_tree->split[3]);
5241 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
5242 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
5243 rd_cost->rate += this_rdc.rate;
          rd_cost->dist += this_rdc.dist;
        }
        break;
    }
  }

  if (bsize == BLOCK_64X64 && output_enabled)
5251 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
5254 static void nonrd_use_partition(VP9_COMP *cpi, ThreadData *td,
5255 TileDataEnc *tile_data, MODE_INFO **mi,
5256 TOKENEXTRA **tp, int mi_row, int mi_col,
5257 BLOCK_SIZE bsize, int output_enabled,
5258 RD_COST *dummy_cost, PC_TREE *pc_tree) {
5259 VP9_COMMON *const cm = &cpi->common;
5260 TileInfo *tile_info = &tile_data->tile_info;
5261 MACROBLOCK *const x = &td->mb;
5262 MACROBLOCKD *const xd = &x->e_mbd;
5263 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
5264 const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
5268 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
5270 subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
5271 partition = partition_lookup[bsl][subsize];
5273 if (output_enabled && bsize != BLOCK_4X4) {
5274 int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
5275 td->counts->partition[ctx][partition]++;
5278 switch (partition) {
5279 case PARTITION_NONE:
5280 pc_tree->none.pred_pixel_ready = 1;
5281 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
5282 subsize, &pc_tree->none);
5283 pc_tree->none.mic = *xd->mi[0];
5284 pc_tree->none.mbmi_ext = *x->mbmi_ext;
5285 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
5286 pc_tree->none.skip = x->skip;
5287 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
5288 subsize, &pc_tree->none);
      break;
    case PARTITION_VERT:
5291 pc_tree->vertical[0].pred_pixel_ready = 1;
5292 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
5293 subsize, &pc_tree->vertical[0]);
5294 pc_tree->vertical[0].mic = *xd->mi[0];
5295 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
5296 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
5297 pc_tree->vertical[0].skip = x->skip;
5298 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
5299 subsize, &pc_tree->vertical[0]);
5300 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
5301 pc_tree->vertical[1].pred_pixel_ready = 1;
5302 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, dummy_cost,
5303 subsize, &pc_tree->vertical[1]);
5304 pc_tree->vertical[1].mic = *xd->mi[0];
5305 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
5306 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
5307 pc_tree->vertical[1].skip = x->skip;
5308 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
5309 output_enabled, subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
5313 pc_tree->horizontal[0].pred_pixel_ready = 1;
5314 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
5315 subsize, &pc_tree->horizontal[0]);
5316 pc_tree->horizontal[0].mic = *xd->mi[0];
5317 pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
5318 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
5319 pc_tree->horizontal[0].skip = x->skip;
5320 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
5321 subsize, &pc_tree->horizontal[0]);
5323 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
5324 pc_tree->horizontal[1].pred_pixel_ready = 1;
5325 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, dummy_cost,
5326 subsize, &pc_tree->horizontal[1]);
5327 pc_tree->horizontal[1].mic = *xd->mi[0];
5328 pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
5329 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
5330 pc_tree->horizontal[1].skip = x->skip;
5331 encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
5332 output_enabled, subsize, &pc_tree->horizontal[1]);
      }
      break;
    default:
      assert(partition == PARTITION_SPLIT);
5337 subsize = get_subsize(bsize, PARTITION_SPLIT);
5338 if (bsize == BLOCK_8X8) {
5339 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
5340 subsize, pc_tree->leaf_split[0]);
5341 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
5342 subsize, pc_tree->leaf_split[0]);
      } else {
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, subsize,
5345 output_enabled, dummy_cost, pc_tree->split[0]);
5346 nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp, mi_row,
                            mi_col + hbs, subsize, output_enabled, dummy_cost,
                            pc_tree->split[1]);
5349 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
5350 mi_row + hbs, mi_col, subsize, output_enabled,
5351 dummy_cost, pc_tree->split[2]);
5352 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
5353 mi_row + hbs, mi_col + hbs, subsize, output_enabled,
5354 dummy_cost, pc_tree->split[3]);
      }
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
5360 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

// Get a prediction (stored in x->est_pred) for the whole 64x64 superblock.
5364 static void get_estimated_pred(VP9_COMP *cpi, const TileInfo *const tile,
5365 MACROBLOCK *x, int mi_row, int mi_col) {
5366 VP9_COMMON *const cm = &cpi->common;
5367 const int is_key_frame = frame_is_intra_only(cm);
5368 MACROBLOCKD *xd = &x->e_mbd;
5370 set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
5372 if (!is_key_frame) {
5373 MODE_INFO *mi = xd->mi[0];
5374 YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
5375 const YV12_BUFFER_CONFIG *yv12_g = NULL;
5376 const BLOCK_SIZE bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 +
5377 (mi_row + 4 < cm->mi_rows);
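    /* Editor's note: this arithmetic leans on the BLOCK_SIZE enum running
     * ..., BLOCK_32X32, BLOCK_32X64, BLOCK_64X32, BLOCK_64X64. Having 4
     * more mi columns available adds 2 (width grows to 64); 4 more mi rows
     * adds 1 (height grows to 64):
     *   interior superblock:  BLOCK_32X32 + 2 + 1 == BLOCK_64X64
     *   right frame edge:     BLOCK_32X32 + 0 + 1 == BLOCK_32X64
     *   bottom frame edge:    BLOCK_32X32 + 2 + 0 == BLOCK_64X32
     *   bottom-right corner:  BLOCK_32X32 (plain 32x32)
     */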
5378 unsigned int y_sad_g, y_sad_thr;
5379 unsigned int y_sad = UINT_MAX;
5381 assert(yv12 != NULL);
5383 if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id) ||
5384 cpi->svc.use_gf_temporal_ref_current_layer) {
5385 // For now, GOLDEN will not be used for non-zero spatial layers, since
5386 // it may not be a temporal reference.
5387 yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
5390 // Only compute y_sad_g (sad for golden reference) for speed < 8.
5391 if (cpi->oxcf.speed < 8 && yv12_g && yv12_g != yv12 &&
5392 (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
5393 vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
5394 &cm->frame_refs[GOLDEN_FRAME - 1].sf);
5395 y_sad_g = cpi->fn_ptr[bsize].sdf(
5396 x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
5397 xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }

    if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
5403 cpi->rc.is_src_frame_alt_ref) {
5404 yv12 = get_ref_frame_buffer(cpi, ALTREF_FRAME);
5405 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
5406 &cm->frame_refs[ALTREF_FRAME - 1].sf);
5407 mi->ref_frame[0] = ALTREF_FRAME;
    } else {
      vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
5411 &cm->frame_refs[LAST_FRAME - 1].sf);
5412 mi->ref_frame[0] = LAST_FRAME;
5414 mi->ref_frame[1] = NONE;
5415 mi->sb_type = BLOCK_64X64;
5416 mi->mv[0].as_int = 0;
5417 mi->interp_filter = BILINEAR;
5420 const MV dummy_mv = { 0, 0 };
      y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col,
                                            &dummy_mv);
5423 x->sb_use_mv_part = 1;
5424 x->sb_mvcol_part = mi->mv[0].as_mv.col;
5425 x->sb_mvrow_part = mi->mv[0].as_mv.row;
5428 // Pick ref frame for partitioning, bias last frame when y_sad_g and y_sad
5429 // are close if short_circuit_low_temp_var is on.
5430 y_sad_thr = cpi->sf.short_circuit_low_temp_var ? (y_sad * 7) >> 3 : y_sad;
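    /* Editor's worked example (hypothetical SAD values): with
     * short_circuit_low_temp_var on and y_sad == 800 against LAST, the
     * threshold is (800 * 7) >> 3 == 700, so GOLDEN is only taken when
     * y_sad_g < 700, i.e. it must beat LAST by more than 1/8 before the
     * partitioning reference switches away from the last frame. */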
5431 if (y_sad_g < y_sad_thr) {
5432 vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
5433 &cm->frame_refs[GOLDEN_FRAME - 1].sf);
5434 mi->ref_frame[0] = GOLDEN_FRAME;
5435 mi->mv[0].as_int = 0;
    } else {
      x->pred_mv[LAST_FRAME] = mi->mv[0].as_mv;
    }
5440 set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
5441 xd->plane[0].dst.buf = x->est_pred;
5442 xd->plane[0].dst.stride = 64;
5443 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
  } else {
#if CONFIG_VP9_HIGHBITDEPTH
    switch (xd->bd) {
      case 8: memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0])); break;
      case 10: memset(x->est_pred, 128 * 4, 64 * 64 * sizeof(x->est_pred[0])); break;
      case 12: memset(x->est_pred, 128 * 16, 64 * 64 * sizeof(x->est_pred[0])); break;
    }
#else
    memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0]));
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }
}
5461 static void encode_nonrd_sb_row(VP9_COMP *cpi, ThreadData *td,
                                TileDataEnc *tile_data, int mi_row,
                                TOKENEXTRA **tp) {
5464 SPEED_FEATURES *const sf = &cpi->sf;
5465 VP9_COMMON *const cm = &cpi->common;
5466 TileInfo *const tile_info = &tile_data->tile_info;
5467 MACROBLOCK *const x = &td->mb;
5468 MACROBLOCKD *const xd = &x->e_mbd;
5469 const int mi_col_start = tile_info->mi_col_start;
5470 const int mi_col_end = tile_info->mi_col_end;
5472 const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
5473 const int num_sb_cols =
5474 get_num_cols(tile_data->tile_info, MI_BLOCK_SIZE_LOG2);
5477 // Initialize the left context for the new SB row
5478 memset(&xd->left_context, 0, sizeof(xd->left_context));
5479 memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
5481 // Code each SB in the row
5482 for (mi_col = mi_col_start, sb_col_in_tile = 0; mi_col < mi_col_end;
5483 mi_col += MI_BLOCK_SIZE, ++sb_col_in_tile) {
5484 const struct segmentation *const seg = &cm->seg;
5486 const int idx_str = cm->mi_stride * mi_row + mi_col;
5487 MODE_INFO **mi = cm->mi_grid_visible + idx_str;
5488 PARTITION_SEARCH_TYPE partition_search_type = sf->partition_search_type;
5489 BLOCK_SIZE bsize = BLOCK_64X64;
    (*(cpi->row_mt_sync_read_ptr))(&tile_data->row_mt_sync, sb_row,
                                   sb_col_in_tile);
5496 if (cpi->use_skin_detection) {
5497 vp9_compute_skin_sb(cpi, BLOCK_16X16, mi_row, mi_col);
5500 x->source_variance = UINT_MAX;
5501 for (i = 0; i < MAX_REF_FRAMES; ++i) {
5502 x->pred_mv[i].row = INT16_MAX;
5503 x->pred_mv[i].col = INT16_MAX;
5505 vp9_rd_cost_init(&dummy_rdc);
5506 x->color_sensitivity[0] = 0;
5507 x->color_sensitivity[1] = 0;
5509 x->skip_low_source_sad = 0;
5510 x->lowvar_highsumdiff = 0;
5511 x->content_state_sb = 0;
5512 x->zero_temp_sad_source = 0;
5513 x->sb_use_mv_part = 0;
5514 x->sb_mvcol_part = 0;
5515 x->sb_mvrow_part = 0;
5516 x->sb_pickmode_part = 0;
5517 x->arf_frame_usage = 0;
5518 x->lastgolden_frame_usage = 0;
5520 if (cpi->compute_source_sad_onepass && cpi->sf.use_source_sad) {
5521 int shift = cpi->Source->y_stride * (mi_row << 3) + (mi_col << 3);
5522 int sb_offset2 = ((cm->mi_cols + 7) >> 3) * (mi_row >> 3) + (mi_col >> 3);
5523 int64_t source_sad = avg_source_sad(cpi, x, shift, sb_offset2);
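      /* Editor's note: (cm->mi_cols + 7) >> 3 is the frame width in 64x64
       * superblocks (8 mi units each, rounded up), so sb_offset2 is this
       * superblock's raster index. Hypothetical example: a 1280-wide frame
       * has mi_cols == 160 and 20 SB columns, so the SB at mi_row 16,
       * mi_col 24 gets index 20 * (16 >> 3) + (24 >> 3) == 43. */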
5524 if (sf->adapt_partition_source_sad &&
5525 (cpi->oxcf.rc_mode == VPX_VBR && !cpi->rc.is_src_frame_alt_ref &&
5526 source_sad > sf->adapt_partition_thresh &&
5527 (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)))
5528 partition_search_type = REFERENCE_PARTITION;
5532 const uint8_t *const map =
5533 seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
5534 int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
5535 seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
5537 if (cpi->roi.enabled && cpi->roi.skip[BACKGROUND_SEG_SKIP_ID] &&
5538 cpi->rc.frames_since_key > FRAMES_NO_SKIPPING_AFTER_KEY &&
5539 x->content_state_sb > kLowSadLowSumdiff) {
      // For ROI with skip, force segment = 0 (no skip) over whole
      // superblock to avoid artifacts if temporal change in source_sad is
      // not small.
      int xi, yi;
      const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
5545 const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64];
5546 const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
5547 const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
5548 const int block_index = mi_row * cm->mi_cols + mi_col;
5549 set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
5550 for (yi = 0; yi < ymis; yi++)
5551 for (xi = 0; xi < xmis; xi++) {
5552 int map_offset = block_index + yi * cm->mi_cols + xi;
5553 cpi->segmentation_map[map_offset] = 0;
5555 set_segment_index(cpi, x, mi_row, mi_col, BLOCK_64X64, 0);
5559 partition_search_type = FIXED_PARTITION;
5563 // Set the partition type of the 64X64 block
5564 switch (partition_search_type) {
5565 case VAR_BASED_PARTITION:
5566 // TODO(jingning, marpan): The mode decision and encoding process
5567 // support both intra and inter sub8x8 block coding for RTC mode.
5568 // Tune the thresholds accordingly to use sub8x8 block coding for
5569 // coding performance improvement.
5570 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
5571 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5572 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case ML_BASED_PARTITION:
5575 get_estimated_pred(cpi, tile_info, x, mi_row, mi_col);
5576 x->max_partition_size = BLOCK_64X64;
5577 x->min_partition_size = BLOCK_8X8;
5578 x->sb_pickmode_part = 1;
5579 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
                             BLOCK_64X64, &dummy_rdc, 1, INT64_MAX,
                             td->pc_root);
        break;
5583 case SOURCE_VAR_BASED_PARTITION:
5584 set_source_var_based_partition(cpi, tile_info, x, mi, mi_row, mi_col);
5585 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5586 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case FIXED_PARTITION:
5589 if (!seg_skip) bsize = sf->always_this_block_size;
5590 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
5591 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5592 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      default:
        assert(partition_search_type == REFERENCE_PARTITION);
5596 x->sb_pickmode_part = 1;
5597 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
5598 // Use nonrd_pick_partition on scene-cut for VBR mode.
5599 // nonrd_pick_partition does not support 4x4 partition, so avoid it
5600 // on key frame for now.
5601 if ((cpi->oxcf.rc_mode == VPX_VBR && cpi->rc.high_source_sad &&
5602 cpi->oxcf.speed < 6 && !frame_is_intra_only(cm) &&
5603 (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
          // Use lower max_partition_size for low resolutions.
5605 if (cm->width <= 352 && cm->height <= 288)
5606 x->max_partition_size = BLOCK_32X32;
          else
            x->max_partition_size = BLOCK_64X64;
5609 x->min_partition_size = BLOCK_8X8;
5610 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
                               BLOCK_64X64, &dummy_rdc, 1, INT64_MAX,
                               td->pc_root);
        } else {
5614 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
5615 // TODO(marpan): Seems like nonrd_select_partition does not support
          // 4x4 partition. Since 4x4 is used on key frame, use this switch
          // for now.
5618 if (frame_is_intra_only(cm))
5619 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5620 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
          else
            nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
5623 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        }
        break;
    }

    // Update ref_frame usage for inter frame if this group is ARF group.
5630 if (!cpi->rc.is_src_frame_alt_ref && !cpi->refresh_golden_frame &&
5631 !cpi->refresh_alt_ref_frame && cpi->rc.alt_ref_gf_group &&
5632 cpi->sf.use_altref_onepass) {
5633 int sboffset = ((cm->mi_cols + 7) >> 3) * (mi_row >> 3) + (mi_col >> 3);
5634 if (cpi->count_arf_frame_usage != NULL)
5635 cpi->count_arf_frame_usage[sboffset] = x->arf_frame_usage;
5636 if (cpi->count_lastgolden_frame_usage != NULL)
5637 cpi->count_lastgolden_frame_usage[sboffset] = x->lastgolden_frame_usage;
5640 (*(cpi->row_mt_sync_write_ptr))(&tile_data->row_mt_sync, sb_row,
5641 sb_col_in_tile, num_sb_cols);
  }
}
// end RTC play code
5646 static INLINE uint32_t variance(const diff *const d) {
  return d->sse - (uint32_t)(((int64_t)d->sum * d->sum) >> 8);
}
5650 #if CONFIG_VP9_HIGHBITDEPTH
5651 static INLINE uint32_t variance_highbd(diff *const d) {
5652 const int64_t var = (int64_t)d->sse - (((int64_t)d->sum * d->sum) >> 8);
  return (var >= 0) ? (uint32_t)var : 0;
}
5655 #endif // CONFIG_VP9_HIGHBITDEPTH
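/* Editor's gloss on the two helpers above: each diff covers a 16x16 block
 * (256 samples), so the population variance is sse - sum * sum / 256,
 * hence the >> 8. Quick check with hypothetical values sum == 512,
 * sse == 2048: 2048 - (512 * 512 >> 8) == 2048 - 1024 == 1024. The
 * high-bit-depth variant keeps the subtraction in int64_t and clamps at
 * zero, since larger sample values can otherwise make the truncated
 * uint32_t arithmetic wrap. */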
5657 static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
5658 const SPEED_FEATURES *const sf = &cpi->sf;
5659 const VP9_COMMON *const cm = &cpi->common;
5661 const uint8_t *src = cpi->Source->y_buffer;
5662 const uint8_t *last_src = cpi->Last_Source->y_buffer;
5663 const int src_stride = cpi->Source->y_stride;
5664 const int last_stride = cpi->Last_Source->y_stride;
5666 // Pick cutoff threshold
5667 const int cutoff = (VPXMIN(cm->width, cm->height) >= 720)
5668 ? (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100)
5669 : (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
5670 DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]);
5671 diff *var16 = cpi->source_diff_var;
5676 memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
5678 for (i = 0; i < cm->mb_rows; i++) {
5679 for (j = 0; j < cm->mb_cols; j++) {
5680 #if CONFIG_VP9_HIGHBITDEPTH
5681 if (cm->use_highbitdepth) {
5682 switch (cm->bit_depth) {
          case VPX_BITS_8:
            vpx_highbd_8_get16x16var(src, src_stride, last_src, last_stride,
5685 &var16->sse, &var16->sum);
5686 var16->var = variance(var16);
            break;
          case VPX_BITS_10:
            vpx_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
5690 &var16->sse, &var16->sum);
5691 var16->var = variance_highbd(var16);
            break;
          default:
            assert(cm->bit_depth == VPX_BITS_12);
5695 vpx_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
5696 &var16->sse, &var16->sum);
5697 var16->var = variance_highbd(var16);
            break;
        }
      } else {
        vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse,
                        &var16->sum);
5703 var16->var = variance(var16);
      }
#else
      vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse,
                      &var16->sum);
5708 var16->var = variance(var16);
5709 #endif // CONFIG_VP9_HIGHBITDEPTH
5711 if (var16->var >= VAR_HIST_MAX_BG_VAR)
5712 hist[VAR_HIST_BINS - 1]++;
      else
        hist[var16->var / VAR_HIST_FACTOR]++;
      src += 16;
      last_src += 16;
      ++var16;
    }

    src = src - cm->mb_cols * 16 + 16 * src_stride;
5722 last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
5725 cpi->source_var_thresh = 0;
5727 if (hist[VAR_HIST_BINS - 1] < cutoff) {
5728 for (i = 0; i < VAR_HIST_BINS - 1; i++) {
5732 cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
  return sf->search_type_check_frequency;
}
5741 static void source_var_based_partition_search_method(VP9_COMP *cpi) {
5742 VP9_COMMON *const cm = &cpi->common;
5743 SPEED_FEATURES *const sf = &cpi->sf;
5745 if (cm->frame_type == KEY_FRAME) {
5746 // For key frame, use SEARCH_PARTITION.
5747 sf->partition_search_type = SEARCH_PARTITION;
5748 } else if (cm->intra_only) {
5749 sf->partition_search_type = FIXED_PARTITION;
  } else {
    if (cm->last_width != cm->width || cm->last_height != cm->height) {
5752 if (cpi->source_diff_var) vpx_free(cpi->source_diff_var);
5754 CHECK_MEM_ERROR(cm, cpi->source_diff_var,
5755 vpx_calloc(cm->MBs, sizeof(diff)));
5758 if (!cpi->frames_till_next_var_check)
5759 cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);
5761 if (cpi->frames_till_next_var_check > 0) {
5762 sf->partition_search_type = FIXED_PARTITION;
5763 cpi->frames_till_next_var_check--;
5768 static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) {
5769 unsigned int intra_count = 0, inter_count = 0;
  int j;

  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
5773 intra_count += td->counts->intra_inter[j][0];
5774 inter_count += td->counts->intra_inter[j][1];
  return (intra_count << 2) < inter_count && cm->frame_type != KEY_FRAME &&
         cm->show_frame;
}
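/* Editor's note: (intra_count << 2) < inter_count is just
 * intra_count < inter_count / 4, so skip-encode is only considered when
 * under ~20% of the intra/inter decisions were intra. Hypothetical
 * counts: 100 intra vs 500 inter qualifies (400 < 500); 100 vs 300 does
 * not (400 >= 300). */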
5781 void vp9_init_tile_data(VP9_COMP *cpi) {
5782 VP9_COMMON *const cm = &cpi->common;
5783 const int tile_cols = 1 << cm->log2_tile_cols;
5784 const int tile_rows = 1 << cm->log2_tile_rows;
5785 int tile_col, tile_row;
5786 TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
5787 TOKENLIST *tplist = cpi->tplist[0][0];
  int tile_tok = 0;
  int tplist_count = 0;
5791 if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
5792 if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
    CHECK_MEM_ERROR(cm, cpi->tile_data,
                    vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
5796 cpi->allocated_tiles = tile_cols * tile_rows;
5798 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
5799 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
5800 TileDataEnc *tile_data =
5801 &cpi->tile_data[tile_row * tile_cols + tile_col];
        int i, j;
        for (i = 0; i < BLOCK_SIZES; ++i) {
5804 for (j = 0; j < MAX_MODES; ++j) {
5805 tile_data->thresh_freq_fact[i][j] = RD_THRESH_INIT_FACT;
5806 #if CONFIG_RATE_CTRL
5807 if (cpi->oxcf.use_simple_encode_api) {
5808 tile_data->thresh_freq_fact_prev[i][j] = RD_THRESH_INIT_FACT;
5810 #endif // CONFIG_RATE_CTRL
5811 #if CONFIG_CONSISTENT_RECODE
5812 tile_data->thresh_freq_fact_prev[i][j] = RD_THRESH_INIT_FACT;
5813 #endif // CONFIG_CONSISTENT_RECODE
5814 tile_data->mode_map[i][j] = j;
          }
        }
#if CONFIG_MULTITHREAD
5818 tile_data->row_base_thresh_freq_fact = NULL;
#endif  // CONFIG_MULTITHREAD
      }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
5824 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
5825 TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
5826 TileInfo *tile_info = &this_tile->tile_info;
5827 if (cpi->sf.adaptive_rd_thresh_row_mt &&
5828 this_tile->row_base_thresh_freq_fact == NULL)
5829 vp9_row_mt_alloc_rd_thresh(cpi, this_tile);
5830 vp9_tile_init(tile_info, cm, tile_row, tile_col);
5832 cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
5833 pre_tok = cpi->tile_tok[tile_row][tile_col];
5834 tile_tok = allocated_tokens(*tile_info);
5836 cpi->tplist[tile_row][tile_col] = tplist + tplist_count;
5837 tplist = cpi->tplist[tile_row][tile_col];
5838 tplist_count = get_num_vert_units(*tile_info, MI_BLOCK_SIZE_LOG2);
5843 void vp9_encode_sb_row(VP9_COMP *cpi, ThreadData *td, int tile_row,
5844 int tile_col, int mi_row) {
5845 VP9_COMMON *const cm = &cpi->common;
5846 const int tile_cols = 1 << cm->log2_tile_cols;
5847 TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
5848 const TileInfo *const tile_info = &this_tile->tile_info;
5849 TOKENEXTRA *tok = NULL;
  int tile_sb_row;
  int tile_mb_cols = (tile_info->mi_col_end - tile_info->mi_col_start + 1) >> 1;
  tile_sb_row = mi_cols_aligned_to_sb(mi_row - tile_info->mi_row_start) >>
                MI_BLOCK_SIZE_LOG2;
5855 get_start_tok(cpi, tile_row, tile_col, mi_row, &tok);
5856 cpi->tplist[tile_row][tile_col][tile_sb_row].start = tok;
5858 if (cpi->sf.use_nonrd_pick_mode)
5859 encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
5860 #if !CONFIG_REALTIME_ONLY
  else
    encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
#endif  // !CONFIG_REALTIME_ONLY
5865 cpi->tplist[tile_row][tile_col][tile_sb_row].stop = tok;
5866 cpi->tplist[tile_row][tile_col][tile_sb_row].count =
5867 (unsigned int)(cpi->tplist[tile_row][tile_col][tile_sb_row].stop -
5868 cpi->tplist[tile_row][tile_col][tile_sb_row].start);
5869 assert(tok - cpi->tplist[tile_row][tile_col][tile_sb_row].start <=
5870 get_token_alloc(MI_BLOCK_SIZE >> 1, tile_mb_cols));
void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td, int tile_row,
                     int tile_col) {
5877 VP9_COMMON *const cm = &cpi->common;
5878 const int tile_cols = 1 << cm->log2_tile_cols;
5879 TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
5880 const TileInfo *const tile_info = &this_tile->tile_info;
5881 const int mi_row_start = tile_info->mi_row_start;
5882 const int mi_row_end = tile_info->mi_row_end;
  int mi_row;

  for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += MI_BLOCK_SIZE)
5886 vp9_encode_sb_row(cpi, td, tile_row, tile_col, mi_row);
5889 static void encode_tiles(VP9_COMP *cpi) {
5890 VP9_COMMON *const cm = &cpi->common;
5891 const int tile_cols = 1 << cm->log2_tile_cols;
5892 const int tile_rows = 1 << cm->log2_tile_rows;
5893 int tile_col, tile_row;
5895 vp9_init_tile_data(cpi);
5897 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
5898 for (tile_col = 0; tile_col < tile_cols; ++tile_col)
5899 vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
5902 static int compare_kmeans_data(const void *a, const void *b) {
  if (((const KMEANS_DATA *)a)->value > ((const KMEANS_DATA *)b)->value) {
    return 1;
  } else if (((const KMEANS_DATA *)a)->value <
             ((const KMEANS_DATA *)b)->value) {
    return -1;
  }
  return 0;
}
5913 static void compute_boundary_ls(const double *ctr_ls, int k,
5914 double *boundary_ls) {
5915 // boundary_ls[j] is the upper bound of data centered at ctr_ls[j]
  int j;
  for (j = 0; j < k - 1; ++j) {
5918 boundary_ls[j] = (ctr_ls[j] + ctr_ls[j + 1]) / 2.;
  }
  boundary_ls[k - 1] = DBL_MAX;
}
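/* Editor's worked example (hypothetical numbers): with k == 3 and sorted
 * centers ctr_ls == {10.0, 20.0, 40.0}, the boundaries become
 * {15.0, 30.0, DBL_MAX}; a value v belongs to group j when
 * boundary_ls[j - 1] <= v < boundary_ls[j], with group k - 1 open-ended. */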
5923 int vp9_get_group_idx(double value, double *boundary_ls, int k) {
  int group_idx = 0;
  while (value >= boundary_ls[group_idx]) {
    ++group_idx;
    if (group_idx == k - 1) break;
  }
  return group_idx;
}
5934 void vp9_kmeans(double *ctr_ls, double *boundary_ls, int *count_ls, int k,
5935 KMEANS_DATA *arr, int size) {
  int i, j;
  int itr;
  int group_idx;
  double sum[MAX_KMEANS_GROUPS];
5940 int count[MAX_KMEANS_GROUPS];
5942 vpx_clear_system_state();
5944 assert(k >= 2 && k <= MAX_KMEANS_GROUPS);
5946 qsort(arr, size, sizeof(*arr), compare_kmeans_data);
5948 // initialize the center points
5949 for (j = 0; j < k; ++j) {
5950 ctr_ls[j] = arr[(size * (2 * j + 1)) / (2 * k)].value;
5953 for (itr = 0; itr < 10; ++itr) {
5954 compute_boundary_ls(ctr_ls, k, boundary_ls);
5955 for (i = 0; i < MAX_KMEANS_GROUPS; ++i) {
      sum[i] = 0;
      count[i] = 0;
    }

    // Both the data and centers are sorted in ascending order.
5961 // As each data point is processed in order, its corresponding group index
5962 // can only increase. So we only need to reset the group index to zero here.
    group_idx = 0;
    for (i = 0; i < size; ++i) {
5965 while (arr[i].value >= boundary_ls[group_idx]) {
        // place samples into clusters
        ++group_idx;
        if (group_idx == k - 1) break;
      }
      sum[group_idx] += arr[i].value;
      ++count[group_idx];
    }
5976 for (group_idx = 0; group_idx < k; ++group_idx) {
5977 if (count[group_idx] > 0)
5978 ctr_ls[group_idx] = sum[group_idx] / count[group_idx];
      sum[group_idx] = 0;
      count[group_idx] = 0;
    }
  }
5985 // compute group_idx, boundary_ls and count_ls
  for (j = 0; j < k; ++j) {
    count_ls[j] = 0;
  }
  compute_boundary_ls(ctr_ls, k, boundary_ls);
  group_idx = 0;
5991 for (i = 0; i < size; ++i) {
5992 while (arr[i].value >= boundary_ls[group_idx]) {
      ++group_idx;
      if (group_idx == k - 1) break;
    }
5998 arr[i].group_idx = group_idx;
    ++count_ls[group_idx];
  }
}
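/* Editor's usage sketch for vp9_kmeans() (array size and k are
 * hypothetical; the caller is expected to be the perceptual-AQ
 * segmentation code):
 *
 *   double ctr_ls[MAX_KMEANS_GROUPS], boundary_ls[MAX_KMEANS_GROUPS];
 *   int count_ls[MAX_KMEANS_GROUPS];
 *   KMEANS_DATA arr[256];
 *   // ... fill arr[i].value with one statistic per block ...
 *   vp9_kmeans(ctr_ls, boundary_ls, count_ls, 4, arr, 256);
 *   // arr[] is now sorted and labeled: arr[i].group_idx gives the
 *   // cluster, count_ls[] the cluster sizes, and
 *   // vp9_get_group_idx(v, boundary_ls, 4) classifies a new value v.
 */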
6003 static void encode_frame_internal(VP9_COMP *cpi) {
6004 SPEED_FEATURES *const sf = &cpi->sf;
6005 ThreadData *const td = &cpi->td;
6006 MACROBLOCK *const x = &td->mb;
6007 VP9_COMMON *const cm = &cpi->common;
6008 MACROBLOCKD *const xd = &x->e_mbd;
6009 const int gf_group_index = cpi->twopass.gf_group.index;
6011 xd->mi = cm->mi_grid_visible;
6013 vp9_zero(*td->counts);
6014 vp9_zero(cpi->td.rd_counts);
6016 xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 &&
6017 cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
6019 #if CONFIG_VP9_HIGHBITDEPTH
6020 if (cm->use_highbitdepth)
6021 x->fwd_txfm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4;
  else
    x->fwd_txfm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
6024 x->highbd_inv_txfm_add =
6025 xd->lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add;
#else
  x->fwd_txfm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
6028 #endif // CONFIG_VP9_HIGHBITDEPTH
6029 x->inv_txfm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
6030 #if CONFIG_CONSISTENT_RECODE
6031 x->optimize = sf->optimize_coefficients == 1 && cpi->oxcf.pass != 1;
6033 if (xd->lossless) x->optimize = 0;
6034 x->sharpness = cpi->oxcf.sharpness;
6035 x->adjust_rdmult_by_segment = (cpi->oxcf.aq_mode == VARIANCE_AQ);
6037 cm->tx_mode = select_tx_mode(cpi, xd);
6039 vp9_frame_init_quantizer(cpi);
6041 vp9_initialize_rd_consts(cpi);
6042 vp9_initialize_me_consts(cpi, x, cm->base_qindex);
6043 init_encode_frame_mb_context(cpi);
6044 cm->use_prev_frame_mvs =
6045 !cm->error_resilient_mode && cm->width == cm->last_width &&
6046 cm->height == cm->last_height && !cm->intra_only && cm->last_show_frame;
6047 // Special case: set prev_mi to NULL when the previous mode info
6048 // context cannot be used.
  cm->prev_mi =
      cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;
6052 x->quant_fp = cpi->sf.use_quant_fp;
6053 vp9_zero(x->skip_txfm);
6054 if (sf->use_nonrd_pick_mode) {
6055 // Initialize internal buffer pointers for rtc coding, where non-RD
6056 // mode decision is used and hence no buffer pointer swap needed.
    int i;
    struct macroblock_plane *const p = x->plane;
6059 struct macroblockd_plane *const pd = xd->plane;
6060 PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none;
6062 for (i = 0; i < MAX_MB_PLANE; ++i) {
6063 p[i].coeff = ctx->coeff_pbuf[i][0];
6064 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
6065 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
6066 p[i].eobs = ctx->eobs_pbuf[i][0];
6068 vp9_zero(x->zcoeff_blk);
6070 if (cm->frame_type != KEY_FRAME && cpi->rc.frames_since_golden == 0 &&
6071 !(cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) &&
        !cpi->use_svc)
      cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);
6075 if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
6076 source_var_based_partition_search_method(cpi);
6077 } else if (gf_group_index && gf_group_index < MAX_ARF_GOP_SIZE &&
6078 cpi->sf.enable_tpl_model) {
6079 TplDepFrame *tpl_frame = &cpi->tpl_stats[cpi->twopass.gf_group.index];
6080 TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
6082 int tpl_stride = tpl_frame->stride;
6083 int64_t intra_cost_base = 0;
6084 int64_t mc_dep_cost_base = 0;
    int row, col;

    for (row = 0; row < cm->mi_rows && tpl_frame->is_valid; ++row) {
6088 for (col = 0; col < cm->mi_cols; ++col) {
6089 TplDepStats *this_stats = &tpl_stats[row * tpl_stride + col];
6090 intra_cost_base += this_stats->intra_cost;
6091 mc_dep_cost_base += this_stats->mc_dep_cost;
6095 vpx_clear_system_state();
6097 if (tpl_frame->is_valid)
6098 cpi->rd.r0 = (double)intra_cost_base / mc_dep_cost_base;
6101 // Frame segmentation
6102 if (cpi->oxcf.aq_mode == PERCEPTUAL_AQ) build_kmeans_segmentation(cpi);
6105 struct vpx_usec_timer emr_timer;
6106 vpx_usec_timer_start(&emr_timer);
    if (!cpi->row_mt) {
      cpi->row_mt_sync_read_ptr = vp9_row_mt_sync_read_dummy;
6110 cpi->row_mt_sync_write_ptr = vp9_row_mt_sync_write_dummy;
      // If allowed, encode tiles in parallel, with one thread handling one
      // tile when row-based multi-threading is disabled.
6113 if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
6114 vp9_encode_tiles_mt(cpi);
      else
        encode_tiles(cpi);
    } else {
      cpi->row_mt_sync_read_ptr = vp9_row_mt_sync_read;
6119 cpi->row_mt_sync_write_ptr = vp9_row_mt_sync_write;
6120 vp9_encode_tiles_row_mt(cpi);
6123 vpx_usec_timer_mark(&emr_timer);
6124 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
6127 sf->skip_encode_frame =
6128 sf->skip_encode_sb ? get_skip_encode_frame(cm, td) : 0;
6131 // Keep record of the total distortion this time around for future use
6132 cpi->last_frame_distortion = cpi->frame_distortion;
6136 static INTERP_FILTER get_interp_filter(
6137 const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
6138 if (!is_alt_ref && threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
6139 threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
6140 threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
6141 return EIGHTTAP_SMOOTH;
6142 } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
6143 threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
6144 return EIGHTTAP_SHARP;
  } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP;
  } else {
    return SWITCHABLE;
  }
}
6152 static int compute_frame_aq_offset(struct VP9_COMP *cpi) {
6153 VP9_COMMON *const cm = &cpi->common;
6154 MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
6155 struct segmentation *const seg = &cm->seg;
  int mi_row, mi_col;
  int sum_delta = 0;
  int qdelta_index;
  int segment_id;

  for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
6164 MODE_INFO **mi_8x8 = mi_8x8_ptr;
6165 for (mi_col = 0; mi_col < cm->mi_cols; mi_col++, mi_8x8++) {
6166 segment_id = mi_8x8[0]->segment_id;
6167 qdelta_index = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
6168 sum_delta += qdelta_index;
6171 mi_8x8_ptr += cm->mi_stride;
  return sum_delta / (cm->mi_rows * cm->mi_cols);
}
6177 #if CONFIG_CONSISTENT_RECODE || CONFIG_RATE_CTRL
6178 static void restore_encode_params(VP9_COMP *cpi) {
6179 VP9_COMMON *const cm = &cpi->common;
6180 const int tile_cols = 1 << cm->log2_tile_cols;
6181 const int tile_rows = 1 << cm->log2_tile_rows;
6182 int tile_col, tile_row;
  int i, j;
  RD_OPT *rd_opt = &cpi->rd;
6185 for (i = 0; i < MAX_REF_FRAMES; i++) {
6186 for (j = 0; j < REFERENCE_MODES; j++)
6187 rd_opt->prediction_type_threshes[i][j] =
6188 rd_opt->prediction_type_threshes_prev[i][j];
6190 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; j++)
6191 rd_opt->filter_threshes[i][j] = rd_opt->filter_threshes_prev[i][j];
6194 if (cpi->tile_data != NULL) {
6195 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
6196 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
6197 TileDataEnc *tile_data =
6198 &cpi->tile_data[tile_row * tile_cols + tile_col];
6199 for (i = 0; i < BLOCK_SIZES; ++i) {
6200 for (j = 0; j < MAX_MODES; ++j) {
6201 tile_data->thresh_freq_fact[i][j] =
6202 tile_data->thresh_freq_fact_prev[i][j];
6208 cm->interp_filter = cpi->sf.default_interp_filter;
6210 #endif // CONFIG_CONSISTENT_RECODE || CONFIG_RATE_CTRL
6212 void vp9_encode_frame(VP9_COMP *cpi) {
6213 VP9_COMMON *const cm = &cpi->common;
6215 #if CONFIG_RATE_CTRL
6216 if (cpi->oxcf.use_simple_encode_api) {
6217 restore_encode_params(cpi);
6219 #endif // CONFIG_RATE_CTRL
6220 #if CONFIG_CONSISTENT_RECODE
  restore_encode_params(cpi);
#endif  // CONFIG_CONSISTENT_RECODE
6224 #if CONFIG_MISMATCH_DEBUG
6225 mismatch_reset_frame(MAX_MB_PLANE);
#endif  // CONFIG_MISMATCH_DEBUG

  // In the longer term the encoder should be generalized to match the
6229 // decoder such that we allow compound where one of the 3 buffers has a
6230 // different sign bias and that buffer is then the fixed ref. However, this
6231 // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
6234 if (!frame_is_intra_only(cm)) {
6235 if (vp9_compound_reference_allowed(cm)) {
6236 cpi->allow_comp_inter_inter = 1;
6237 vp9_setup_compound_reference_mode(cm);
    } else {
      cpi->allow_comp_inter_inter = 0;
    }
  }
6243 if (cpi->sf.frame_parameter_update) {
    int i;
    RD_OPT *const rd_opt = &cpi->rd;
6246 FRAME_COUNTS *counts = cpi->td.counts;
6247 RD_COUNTS *const rdc = &cpi->td.rd_counts;
6249 // This code does a single RD pass over the whole frame assuming
6250 // either compound, single or hybrid prediction as per whatever has
6251 // worked best for that type of frame in the past.
6252 // It also predicts whether another coding mode would have worked
6253 // better than this coding mode. If that is the case, it remembers
6254 // that for subsequent frames.
6255 // It also does the same analysis for transform size selection.
6256 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
6257 int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
6258 int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
6259 const int is_alt_ref = frame_type == ALTREF_FRAME;
6261 /* prediction (compound, single or hybrid) mode selection */
6262 if (is_alt_ref || !cpi->allow_comp_inter_inter)
6263 cm->reference_mode = SINGLE_REFERENCE;
6264 else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
6265 mode_thrs[COMPOUND_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT] &&
6266 check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
6267 cm->reference_mode = COMPOUND_REFERENCE;
6268 else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
6269 cm->reference_mode = SINGLE_REFERENCE;
6271 cm->reference_mode = REFERENCE_MODE_SELECT;
6273 if (cm->interp_filter == SWITCHABLE)
6274 cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);
6276 encode_frame_internal(cpi);
6278 for (i = 0; i < REFERENCE_MODES; ++i)
6279 mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;
6281 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
6282 filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;
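    /* Editor's note: both loops are one-tap running averages,
     * new_thr = (old_thr + frame_diff / MBs) / 2, so the latest frame
     * always carries half the weight and older frames decay geometrically.
     * Hypothetical numbers: an old threshold of 400 and a per-MB diff of
     * 100 yield (400 + 100) / 2 == 250 for the next frame. */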
6284 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
6285 int single_count_zero = 0;
6286 int comp_count_zero = 0;
6288 for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
6289 single_count_zero += counts->comp_inter[i][0];
6290 comp_count_zero += counts->comp_inter[i][1];
6293 if (comp_count_zero == 0) {
6294 cm->reference_mode = SINGLE_REFERENCE;
6295 vp9_zero(counts->comp_inter);
6296 } else if (single_count_zero == 0) {
6297 cm->reference_mode = COMPOUND_REFERENCE;
6298 vp9_zero(counts->comp_inter);
6302 if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
6305 int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
6309 count4x4 += counts->tx.p32x32[i][TX_4X4];
6310 count4x4 += counts->tx.p16x16[i][TX_4X4];
6311 count4x4 += counts->tx.p8x8[i][TX_4X4];
6313 count8x8_lp += counts->tx.p32x32[i][TX_8X8];
6314 count8x8_lp += counts->tx.p16x16[i][TX_8X8];
6315 count8x8_8x8p += counts->tx.p8x8[i][TX_8X8];
6317 count16x16_16x16p += counts->tx.p16x16[i][TX_16X16];
6318 count16x16_lp += counts->tx.p32x32[i][TX_16X16];
6319 count32x32 += counts->tx.p32x32[i][TX_32X32];
6321 if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
6324 reset_skip_tx_size(cm, TX_8X8);
6325 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
6326 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
6327 cm->tx_mode = ONLY_4X4;
6328 reset_skip_tx_size(cm, TX_4X4);
6329 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
6330 cm->tx_mode = ALLOW_32X32;
6331 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
6332 cm->tx_mode = ALLOW_16X16;
6333 reset_skip_tx_size(cm, TX_16X16);
      }
    }
  } else {
    FRAME_COUNTS *counts = cpi->td.counts;
6338 cm->reference_mode = SINGLE_REFERENCE;
6339 if (cpi->allow_comp_inter_inter && cpi->sf.use_compound_nonrd_pickmode &&
6340 cpi->rc.alt_ref_gf_group && !cpi->rc.is_src_frame_alt_ref &&
6341 cm->frame_type != KEY_FRAME)
6342 cm->reference_mode = REFERENCE_MODE_SELECT;
6344 encode_frame_internal(cpi);
6346 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
6347 int single_count_zero = 0;
6348 int comp_count_zero = 0;
      int i;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
6351 single_count_zero += counts->comp_inter[i][0];
6352 comp_count_zero += counts->comp_inter[i][1];
6354 if (comp_count_zero == 0) {
6355 cm->reference_mode = SINGLE_REFERENCE;
6356 vp9_zero(counts->comp_inter);
6357 } else if (single_count_zero == 0) {
6358 cm->reference_mode = COMPOUND_REFERENCE;
6359 vp9_zero(counts->comp_inter);
  // If segmented AQ is enabled, compute the average AQ weighting.
6365 if (cm->seg.enabled && (cpi->oxcf.aq_mode != NO_AQ) &&
6366 (cm->seg.update_map || cm->seg.update_data)) {
6367 cm->seg.aq_av_offset = compute_frame_aq_offset(cpi);
6371 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
6372 const PREDICTION_MODE y_mode = mi->mode;
6373 const PREDICTION_MODE uv_mode = mi->uv_mode;
6374 const BLOCK_SIZE bsize = mi->sb_type;
6376 if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
6379 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
6380 for (idy = 0; idy < 2; idy += num_4x4_h)
6381 for (idx = 0; idx < 2; idx += num_4x4_w)
6382 ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }
6387 ++counts->uv_mode[y_mode][uv_mode];
6390 static void update_zeromv_cnt(VP9_COMP *const cpi, const MODE_INFO *const mi,
6391 int mi_row, int mi_col, BLOCK_SIZE bsize) {
6392 const VP9_COMMON *const cm = &cpi->common;
6393 MV mv = mi->mv[0].as_mv;
6394 const int bw = num_8x8_blocks_wide_lookup[bsize];
6395 const int bh = num_8x8_blocks_high_lookup[bsize];
6396 const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
6397 const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
6398 const int block_index = mi_row * cm->mi_cols + mi_col;
  int x, y;

  for (y = 0; y < ymis; y++)
6401 for (x = 0; x < xmis; x++) {
6402 int map_offset = block_index + y * cm->mi_cols + x;
6403 if (mi->ref_frame[0] == LAST_FRAME && is_inter_block(mi) &&
6404 mi->segment_id <= CR_SEGMENT_ID_BOOST2) {
6405 if (abs(mv.row) < 8 && abs(mv.col) < 8) {
6406 if (cpi->consec_zero_mv[map_offset] < 255)
6407 cpi->consec_zero_mv[map_offset]++;
        } else {
          cpi->consec_zero_mv[map_offset] = 0;
        }
      }
    }
}
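/* Editor's note on update_zeromv_cnt() above: motion vectors are in
 * 1/8-pel units, so abs(mv.row) < 8 && abs(mv.col) < 8 treats anything
 * under one full pixel of motion as "zero motion"; the < 255 guard keeps
 * the per-8x8-block counter saturating rather than wrapping. */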
6415 static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
6416 int output_enabled, int mi_row, int mi_col,
6417 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
6418 VP9_COMMON *const cm = &cpi->common;
6419 MACROBLOCK *const x = &td->mb;
6420 MACROBLOCKD *const xd = &x->e_mbd;
6421 MODE_INFO *mi = xd->mi[0];
6422 const int seg_skip =
6423 segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP);
6424 x->skip_recode = !x->select_tx_size && mi->sb_type >= BLOCK_8X8 &&
6425 cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
6426 cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
6427 cpi->sf.allow_skip_recode;
6429 if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
6430 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
6432 x->skip_optimize = ctx->is_coded;
6434 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
6435 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
6436 x->q_index < QIDX_SKIP_THRESH);
6438 if (x->skip_encode) return;
6440 if (!is_inter_block(mi)) {
    int plane;
#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
6443 if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
6444 (xd->above_mi == NULL || xd->left_mi == NULL) &&
6445 need_top_left[mi->uv_mode])
      assert(0);
#endif  // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
6449 for (plane = 0; plane < MAX_MB_PLANE; ++plane)
6450 vp9_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane, 1);
6451 if (output_enabled) sum_intra_stats(td->counts, mi);
6452 vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip,
6453 VPXMAX(bsize, BLOCK_8X8));
  } else {
    int ref;
    const int is_compound = has_second_ref(mi);
6457 set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
6458 for (ref = 0; ref < 1 + is_compound; ++ref) {
6459 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mi->ref_frame[ref]);
6460 assert(cfg != NULL);
6461 vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
6462 &xd->block_refs[ref]->sf);
6464 if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
6465 vp9_build_inter_predictors_sby(xd, mi_row, mi_col,
6466 VPXMAX(bsize, BLOCK_8X8));
6468 vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col,
6469 VPXMAX(bsize, BLOCK_8X8));
6471 #if CONFIG_MISMATCH_DEBUG
6472 if (output_enabled) {
      int plane;
      for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
6475 const struct macroblockd_plane *pd = &xd->plane[plane];
6476 int pixel_c, pixel_r;
6477 const BLOCK_SIZE plane_bsize =
6478 get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), &xd->plane[plane]);
6479 const int bw = get_block_width(plane_bsize);
6480 const int bh = get_block_height(plane_bsize);
6481 mi_to_pixel_loc(&pixel_c, &pixel_r, mi_col, mi_row, 0, 0,
6482 pd->subsampling_x, pd->subsampling_y);
6484 mismatch_record_block_pre(pd->dst.buf, pd->dst.stride, plane, pixel_c,
                                  pixel_r, bw, bh,
                                  xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH);
      }
    }
#endif  // CONFIG_MISMATCH_DEBUG

    vp9_encode_sb(x, VPXMAX(bsize, BLOCK_8X8), mi_row, mi_col, output_enabled);
6492 vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip,
6493 VPXMAX(bsize, BLOCK_8X8));
6500 if (output_enabled) {
6501 if (cm->tx_mode == TX_MODE_SELECT && mi->sb_type >= BLOCK_8X8 &&
6502 !(is_inter_block(mi) && mi->skip)) {
6503 ++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd),
6504 &td->counts->tx)[mi->tx_size];
    } else {
      // The new intra coding scheme requires no change of transform size
6507 if (is_inter_block(mi)) {
6508 mi->tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
6509 max_txsize_lookup[bsize]);
      } else {
        mi->tx_size = (bsize >= BLOCK_8X8) ? mi->tx_size : TX_4X4;
      }
    }
6515 ++td->counts->tx.tx_totals[mi->tx_size];
6516 ++td->counts->tx.tx_totals[get_uv_tx_size(mi, &xd->plane[1])];
6517 if (cm->seg.enabled && cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
6518 cpi->cyclic_refresh->content_mode)
6519 vp9_cyclic_refresh_update_sb_postencode(cpi, mi, mi_row, mi_col, bsize);
6520 if (cpi->oxcf.pass == 0 && cpi->svc.temporal_layer_id == 0 &&
        (!cpi->use_svc ||
         (cpi->use_svc &&
          !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
6524 cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1)))
      update_zeromv_cnt(cpi, mi, mi_row, mi_col, bsize);
  }
}