int mi_col,
int64_t threshold,
BLOCK_SIZE bsize_min,
- int segment_id) {
+ int force_split) {
VP9_COMMON * const cm = &cpi->common;
variance_node vt;
const int block_width = num_8x8_blocks_wide_lookup[bsize];
assert(block_height == block_width);
tree_to_node(data, bsize, &vt);
- // No 64x64 blocks on segments other than base (un-boosted) segment.
- if (cyclic_refresh_segment_id_boosted(segment_id) && bsize == BLOCK_64X64)
+ if (force_split)
return 0;
// For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
}
return 0;
} else if (bsize > bsize_min) {
- get_variance(&vt.part_variances->none);
+ // Variance is already computed for 32x32 blocks to set the force_split.
+ if (bsize != BLOCK_32X32)
+ get_variance(&vt.part_variances->none);
// For key frame or low_res: for bsize above 32X32 or very high variance,
// take split.
if (cm->frame_type == KEY_FRAME &&
int i, j, k, m;
v64x64 vt;
v16x16 vt2[16];
+ int force_split[5];
uint8_t *s;
const uint8_t *d;
int sp;
#endif // CONFIG_VP9_HIGHBITDEPTH
}
+ // Index for force_split: 0 for the 64x64 block, 1-4 for the four 32x32 sub-blocks.
+ force_split[0] = 0;
// Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
// for splits.
for (i = 0; i < 4; i++) {
const int x32_idx = ((i & 1) << 5);
const int y32_idx = ((i >> 1) << 5);
const int i2 = i << 2;
+ force_split[i + 1] = 0;
for (j = 0; j < 4; j++) {
const int x16_idx = x32_idx + ((j & 1) << 4);
const int y16_idx = y32_idx + ((j >> 1) << 4);
}
}
+ // No 64x64 blocks on segments other than base (un-boosted) segment,
+ // so force split.
+ if (cyclic_refresh_segment_id_boosted(segment_id))
+ force_split[0] = 1;
+
// Fill the rest of the variance tree by summing split partition values.
for (i = 0; i < 4; i++) {
const int i2 = i << 2;
if (variance4x4downsample[i2 + j] == 1) {
v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
&vt.split[i].split[j];
- for (m = 0; m < 4; m++) {
+ for (m = 0; m < 4; m++)
fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
- }
fill_variance_tree(vtemp, BLOCK_16X16);
}
}
fill_variance_tree(&vt.split[i], BLOCK_32X32);
+ // If variance of this 32x32 block is above the threshold, force the block
+ // to split. This also forces a split on the upper (64x64) level.
+ get_variance(&vt.split[i].part_variances.none);
+ if (vt.split[i].part_variances.none.variance > cpi->vbp_threshold) {
+ force_split[i + 1] = 1;
+ force_split[0] = 1;
+ }
}
- fill_variance_tree(&vt, BLOCK_64X64);
+ if (!force_split[0])
+ fill_variance_tree(&vt, BLOCK_64X64);
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold.
if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
!set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
cpi->vbp_threshold_bsize_max, BLOCK_16X16,
- segment_id)) {
+ force_split[0])) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx),
cpi->vbp_threshold,
- BLOCK_16X16, segment_id)) {
+ BLOCK_16X16, force_split[i + 1])) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
mi_row + y32_idx + y16_idx,
mi_col + x32_idx + x16_idx,
cpi->vbp_threshold_16x16,
- cpi->vbp_bsize_min, segment_id)) {
+ cpi->vbp_bsize_min, 0)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
mi_row + y32_idx + y16_idx + y8_idx,
mi_col + x32_idx + x16_idx + x8_idx,
cpi->vbp_threshold_bsize_min,
- BLOCK_8X8, segment_id)) {
+ BLOCK_8X8, 0)) {
set_block_size(cpi, xd,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx),