/* Per-plane coefficient storage; the decoder holds one mb_plane per image
 * plane (plane[0] = Y, plane[1] = U, plane[2] = V elsewhere in this patch).
 * NOTE: this is a patch hunk — the '+' marks a line added by the change. */
struct mb_plane {
/* Quantized coefficients, sized for the largest (64x64) block. */
DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]);
/* Dequantized coefficients, same layout as qcoeff. */
DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]);
/* Added: per-plane end-of-block counts, one entry per 4x4 unit
 * (256 = number of 4x4 blocks in a 64x64 plane). Replaces the flat
 * MACROBLOCKD::eobs[256+64*2] array removed below. */
+ DECLARE_ALIGNED(16, uint16_t, eobs[256]);
};
/* Pointer to the start of sub-block i inside coefficient buffer x, where
 * each sub-block occupies n elements (callers pass n = 16 for 4x4 blocks,
 * 64 for 8x8, 256 for 16x16). Arguments are parenthesized against
 * expression operands; x is evaluated once. */
#define BLOCK_OFFSET(x, i, n) ((x) + (i) * (n))
typedef struct macroblockd {
DECLARE_ALIGNED(16, int16_t, diff[64*64+32*32*2]); /* from idct diff */
DECLARE_ALIGNED(16, uint8_t, predictor[384]); // unused for superblocks
- DECLARE_ALIGNED(16, uint16_t, eobs[256+64*2]);
#if CONFIG_CODE_NONZEROCOUNT
DECLARE_ALIGNED(16, uint16_t, nzcs[256+64*2]);
#endif
// TODO(jkoleszar): returning a struct so it can be used in a const context,
// expect to refactor this further later.
-static INLINE struct plane_block_idx plane_block_idx(MACROBLOCKD *xd,
- int b_idx) {
- const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
- const int u_offset = 16 << (sb_type * 2);
- const int v_offset = 20 << (sb_type * 2);
+static INLINE struct plane_block_idx plane_block_idx(int y_blocks,
+ int b_idx) {
+ const int v_offset = y_blocks * 5 / 4;
struct plane_block_idx res;
- if (b_idx < u_offset) {
+ if (b_idx < y_blocks) {
res.plane = 0;
res.block = b_idx;
} else if (b_idx < v_offset) {
res.plane = 1;
- res.block = b_idx - u_offset;
+ res.block = b_idx - y_blocks;
} else {
- assert(b_idx < (24 << (sb_type * 2)));
+ assert(b_idx < y_blocks * 3 / 2);
res.plane = 2;
res.block = b_idx - v_offset;
}
vp9_short_iht4x4(BLOCK_OFFSET(xd->plane[0].dqcoeff, i, 16),
xd->block[i].diff, 16, tx_type);
} else {
- vp9_inverse_transform_b_4x4(xd, xd->eobs[i],
+ vp9_inverse_transform_b_4x4(xd,
+ xd->plane[0].eobs[i],
BLOCK_OFFSET(xd->plane[0].dqcoeff, i, 16),
xd->block[i].diff, 32);
}
int i;
for (i = 16; i < 20; i++) {
- vp9_inverse_transform_b_4x4(xd, xd->eobs[i],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[1].eobs[i - 16],
BLOCK_OFFSET(xd->plane[1].dqcoeff, i - 16, 16),
xd->block[i].diff, 16);
}
for (i = 20; i < 24; i++) {
- vp9_inverse_transform_b_4x4(xd, xd->eobs[i],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[2].eobs[i - 20],
BLOCK_OFFSET(xd->plane[2].dqcoeff, i - 20, 16),
xd->block[i].diff, 16);
}
const TX_TYPE tx_type = get_tx_type_4x4(xd, y_idx * 8 + x_idx);
if (tx_type == DCT_DCT) {
- vp9_inverse_transform_b_4x4(xd, xd->eobs[n],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[0].eobs[n],
BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 16),
xd->diff + x_idx * 4 + y_idx * 4 * 32, 64);
} else {
for (n = 0; n < 16; n++) {
const int x_idx = n & 3, y_idx = n >> 2;
- vp9_inverse_transform_b_4x4(xd, xd->eobs[64 + n],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[1].eobs[n],
BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 16),
xd->diff + 1024 + x_idx * 4 + y_idx * 16 * 4,
32);
- vp9_inverse_transform_b_4x4(xd, xd->eobs[64 + 16 + n],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[2].eobs[n],
BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 16),
xd->diff + 1280 + x_idx * 4 + y_idx * 16 * 4,
32);
const TX_TYPE tx_type = get_tx_type_4x4(xd, y_idx * 16 + x_idx);
if (tx_type == DCT_DCT) {
- vp9_inverse_transform_b_4x4(xd, xd->eobs[n],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[0].eobs[n],
BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 16),
xd->diff + x_idx * 4 + y_idx * 4 * 64, 128);
} else {
for (n = 0; n < 64; n++) {
const int x_idx = n & 7, y_idx = n >> 3, off = x_idx * 4 + y_idx * 32 * 4;
- vp9_inverse_transform_b_4x4(xd, xd->eobs[256 + n],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[1].eobs[n],
BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 16),
xd->diff + 4096 + off, 64);
- vp9_inverse_transform_b_4x4(xd, xd->eobs[256 + 64 + n],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[2].eobs[n],
BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 16),
xd->diff + 4096 + 1024 + off, 64);
}
vp9_ht_dequant_idct_add_16x16_c(tx_type, xd->plane[0].qcoeff,
xd->block[0].dequant, xd->predictor,
xd->dst.y_buffer, 16, xd->dst.y_stride,
- xd->eobs[0]);
+ xd->plane[0].eobs[0]);
} else {
vp9_dequant_idct_add_16x16(xd->plane[0].qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
- 16, xd->dst.y_stride, xd->eobs[0]);
+ 16, xd->dst.y_stride, xd->plane[0].eobs[0]);
}
vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer, 8,
- xd->dst.uv_stride, xd->eobs[16]);
+ xd->dst.uv_stride, xd->plane[1].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer, 8,
- xd->dst.uv_stride, xd->eobs[20]);
+ xd->dst.uv_stride, xd->plane[2].eobs[0]);
}
static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
tx_type = get_tx_type_8x8(xd, ib);
if (tx_type != DCT_DCT) {
vp9_ht_dequant_idct_add_8x8_c(tx_type, q, dq, pre, dst, 16, stride,
- xd->eobs[idx]);
+ xd->plane[0].eobs[idx]);
} else {
vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride,
- xd->eobs[idx]);
+ xd->plane[0].eobs[idx]);
}
}
} else {
vp9_intra_uv4x4_predict(xd, b, i8x8mode, b->predictor);
xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, i, 16),
b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 8, b->dst_stride, xd->eobs[16 + i]);
+ *(b->base_dst) + b->dst, 8, b->dst_stride,
+ xd->plane[1].eobs[i]);
b = &xd->block[20 + i];
vp9_intra_uv4x4_predict(xd, b, i8x8mode, b->predictor);
xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, i, 16),
b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 8, b->dst_stride, xd->eobs[20 + i]);
+ *(b->base_dst) + b->dst, 8, b->dst_stride,
+ xd->plane[2].eobs[i]);
}
} else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer,
- xd->dst.uv_stride, xd->eobs + 16);
+ xd->dst.uv_stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer,
- xd->dst.uv_stride, xd->eobs + 20);
+ xd->dst.uv_stride, xd->plane[2].eobs);
} else {
vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer, 8,
- xd->dst.uv_stride, xd->eobs[16]);
+ xd->dst.uv_stride, xd->plane[1].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer, 8,
- xd->dst.uv_stride, xd->eobs[20]);
+ xd->dst.uv_stride, xd->plane[2].eobs[0]);
}
#if 0 // def DEC_DEBUG
if (dec_debug) {
BLOCK_OFFSET(xd->plane[0].qcoeff, ib + iblock[j], 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16,
- b->dst_stride, xd->eobs[ib + iblock[j]]);
+ b->dst_stride,
+ xd->plane[0].eobs[ib + iblock[j]]);
} else {
xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, ib + iblock[j], 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride,
- xd->eobs[ib + iblock[j]]);
+ xd->plane[0].eobs[ib + iblock[j]]);
}
}
b = &xd->block[16 + i];
vp9_intra_uv4x4_predict(xd, b, i8x8mode, b->predictor);
xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, i, 16),
b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 8, b->dst_stride, xd->eobs[16 + i]);
+ *(b->base_dst) + b->dst, 8, b->dst_stride,
+ xd->plane[1].eobs[i]);
b = &xd->block[20 + i];
vp9_intra_uv4x4_predict(xd, b, i8x8mode, b->predictor);
xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, i, 16),
b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 8, b->dst_stride, xd->eobs[20 + i]);
+ *(b->base_dst) + b->dst, 8, b->dst_stride,
+ xd->plane[2].eobs[i]);
}
} else if (mode == B_PRED) {
for (i = 0; i < 16; i++) {
BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride,
- xd->eobs[i]);
+ xd->plane[0].eobs[i]);
} else {
xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 16, b->dst_stride, xd->eobs[i]);
+ *(b->base_dst) + b->dst, 16, b->dst_stride,
+ xd->plane[0].eobs[i]);
}
}
#if CONFIG_NEWBINTRAMODES
vp9_build_intra_predictors_mbuv(xd);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer,
- xd->dst.uv_stride, xd->eobs + 16);
+ xd->dst.uv_stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer,
- xd->dst.uv_stride, xd->eobs + 20);
+ xd->dst.uv_stride, xd->plane[2].eobs);
} else if (mode == SPLITMV || get_tx_type_4x4(xd, 0) == DCT_DCT) {
xd->itxm_add_y_block(xd->plane[0].qcoeff,
xd->block[0].dequant,
xd);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer,
- xd->dst.uv_stride, xd->eobs + 16);
+ xd->dst.uv_stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer,
- xd->dst.uv_stride, xd->eobs + 20);
+ xd->dst.uv_stride, xd->plane[2].eobs);
} else {
#if 0 // def DEC_DEBUG
if (dec_debug) {
BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16,
- b->dst_stride, xd->eobs[i]);
+ b->dst_stride, xd->plane[0].eobs[i]);
} else {
xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 16, b->dst_stride, xd->eobs[i]);
+ *(b->base_dst) + b->dst, 16, b->dst_stride,
+ xd->plane[0].eobs[i]);
}
}
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer,
- xd->dst.uv_stride, xd->eobs + 16);
+ xd->dst.uv_stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer,
- xd->dst.uv_stride, xd->eobs + 20);
+ xd->dst.uv_stride, xd->plane[2].eobs);
}
}
const int y_count = y_size * y_size;
const int uv_size = y_size / 2;
const int uv_count = uv_size * uv_size;
-
- const int u_eob_offset = 16 * y_count;
- const int v_eob_offset = u_eob_offset + 16 * uv_count;
int n;
for (n = 0; n < y_count; n++) {
mb->dst.y_buffer + y_offset,
mb->dst.y_buffer + y_offset,
mb->dst.y_stride, mb->dst.y_stride,
- mb->eobs[n * 16]);
+ mb->plane[0].eobs[n * 16]);
} else {
vp9_ht_dequant_idct_add_16x16_c(tx_type,
BLOCK_OFFSET(mb->plane[0].qcoeff, n, 256),
mb->dst.y_buffer + y_offset,
mb->dst.y_buffer + y_offset,
mb->dst.y_stride, mb->dst.y_stride,
- mb->eobs[n * 16]);
+ mb->plane[0].eobs[n * 16]);
}
}
mb->dst.u_buffer + uv_offset,
mb->dst.u_buffer + uv_offset,
mb->dst.uv_stride, mb->dst.uv_stride,
- mb->eobs[u_eob_offset + n * 16]);
+ mb->plane[1].eobs[n * 16]);
vp9_dequant_idct_add_16x16(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 256),
mb->block[20].dequant,
mb->dst.v_buffer + uv_offset,
mb->dst.v_buffer + uv_offset,
mb->dst.uv_stride, mb->dst.uv_stride,
- mb->eobs[v_eob_offset + n * 16]);
+ mb->plane[2].eobs[n * 16]);
}
}
const int y_count = y_size * y_size;
const int uv_size = y_size / 2;
const int uv_count = uv_size * uv_size;
-
- const int u_eob_offset = 4 * y_count;
- const int v_eob_offset = u_eob_offset + 4 * uv_count;
int n;
// luma
xd->dst.y_buffer + y_offset,
xd->dst.y_buffer + y_offset,
xd->dst.y_stride, xd->dst.y_stride,
- xd->eobs[n * 4]);
+ xd->plane[0].eobs[n * 4]);
} else {
vp9_ht_dequant_idct_add_8x8_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, n, 64),
xd->dst.y_buffer + y_offset,
xd->dst.y_buffer + y_offset,
xd->dst.y_stride, xd->dst.y_stride,
- xd->eobs[n * 4]);
+ xd->plane[0].eobs[n * 4]);
}
}
xd->dst.u_buffer + uv_offset,
xd->dst.u_buffer + uv_offset,
xd->dst.uv_stride, xd->dst.uv_stride,
- xd->eobs[u_eob_offset + n * 4]);
+ xd->plane[1].eobs[n * 4]);
vp9_dequant_idct_add_8x8_c(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 64),
xd->block[20].dequant,
xd->dst.v_buffer + uv_offset,
xd->dst.v_buffer + uv_offset,
xd->dst.uv_stride, xd->dst.uv_stride,
- xd->eobs[v_eob_offset + n * 4]);
+ xd->plane[2].eobs[n * 4]);
}
}
const int y_count = y_size * y_size;
const int uv_size = y_size / 2;
const int uv_count = uv_size * uv_size;
-
- const int u_eob_offset = y_count;
- const int v_eob_offset = u_eob_offset + uv_count;
int n;
for (n = 0; n < y_count; n++) {
xd->dst.y_buffer + y_offset,
xd->dst.y_buffer + y_offset,
xd->dst.y_stride, xd->dst.y_stride,
- xd->eobs[n]);
+ xd->plane[0].eobs[n]);
} else {
vp9_ht_dequant_idct_add_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
xd->dst.y_buffer + y_offset,
xd->dst.y_stride,
xd->dst.y_stride,
- xd->eobs[n]);
+ xd->plane[0].eobs[n]);
}
}
xd->block[16].dequant,
xd->dst.u_buffer + uv_offset,
xd->dst.u_buffer + uv_offset,
- xd->dst.uv_stride, xd->dst.uv_stride, xd->eobs[u_eob_offset + n]);
+ xd->dst.uv_stride, xd->dst.uv_stride, xd->plane[1].eobs[n]);
xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 16),
xd->block[20].dequant,
xd->dst.v_buffer + uv_offset,
xd->dst.v_buffer + uv_offset,
- xd->dst.uv_stride, xd->dst.uv_stride, xd->eobs[v_eob_offset + n]);
+ xd->dst.uv_stride, xd->dst.uv_stride, xd->plane[2].eobs[n]);
}
}
xd->block[0].dequant,
xd->dst.y_buffer + y_offset,
xd->dst.y_buffer + y_offset,
- xd->dst.y_stride, xd->dst.y_stride, xd->eobs[n * 64]);
+ xd->dst.y_stride, xd->dst.y_stride, xd->plane[0].eobs[n * 64]);
}
vp9_dequant_idct_add_32x32(xd->plane[1].qcoeff,
xd->block[16].dequant, xd->dst.u_buffer, xd->dst.u_buffer,
- xd->dst.uv_stride, xd->dst.uv_stride, xd->eobs[256]);
+ xd->dst.uv_stride, xd->dst.uv_stride, xd->plane[1].eobs[0]);
vp9_dequant_idct_add_32x32(xd->plane[2].qcoeff,
xd->block[20].dequant, xd->dst.v_buffer, xd->dst.v_buffer,
- xd->dst.uv_stride, xd->dst.uv_stride, xd->eobs[320]);
+ xd->dst.uv_stride, xd->dst.uv_stride, xd->plane[2].eobs[0]);
break;
case TX_16X16:
decode_sb_16x16(xd, 4);
vp9_dequant_idct_add_32x32(xd->plane[0].qcoeff, xd->block[0].dequant,
xd->dst.y_buffer, xd->dst.y_buffer,
xd->dst.y_stride, xd->dst.y_stride,
- xd->eobs[0]);
+ xd->plane[0].eobs[0]);
vp9_dequant_idct_add_16x16(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->dst.u_buffer, xd->dst.u_buffer,
xd->dst.uv_stride, xd->dst.uv_stride,
- xd->eobs[64]);
+ xd->plane[1].eobs[0]);
vp9_dequant_idct_add_16x16(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->dst.v_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->dst.uv_stride,
- xd->eobs[80]);
+ xd->plane[2].eobs[0]);
break;
case TX_16X16:
decode_sb_16x16(xd, 2);
const int c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_Y_WITH_DC, seg_eob,
BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
tx_size);
- xd->eobs[i] = c;
+ xd->plane[0].eobs[i] = c;
eobtotal += c;
}
const int c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_UV, seg_eob,
BLOCK_OFFSET(xd->plane[1].qcoeff, b, 16),
tx_size);
- xd->eobs[i] = c;
+ xd->plane[1].eobs[b] = c;
eobtotal += c;
}
for (i = offset * 5 / 4; i < count; i += inc) {
const int c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_UV, seg_eob,
BLOCK_OFFSET(xd->plane[2].qcoeff, b, 16),
tx_size);
- xd->eobs[i] = c;
+ xd->plane[2].eobs[b] = c;
eobtotal += c;
}
int c = decode_coefs(pbi, xd, bc, 0, PLANE_TYPE_Y_WITH_DC,
get_eob(xd, segment_id, 1024),
xd->plane[0].qcoeff, TX_32X32);
- xd->eobs[0] = c;
+ xd->plane[0].eobs[0] = c;
eobtotal += c;
// 16x16 chroma blocks
c = decode_coefs(pbi, xd, bc, 64, PLANE_TYPE_UV, seg_eob,
xd->plane[1].qcoeff, TX_16X16);
- xd->eobs[64] = c;
+ xd->plane[1].eobs[0] = c;
eobtotal += c;
c = decode_coefs(pbi, xd, bc, 80, PLANE_TYPE_UV, seg_eob,
xd->plane[2].qcoeff, TX_16X16);
- xd->eobs[80] = c;
+ xd->plane[2].eobs[0] = c;
eobtotal += c;
return eobtotal;
}
int c = decode_coefs(pbi, xd, bc, 0, PLANE_TYPE_Y_WITH_DC,
get_eob(xd, segment_id, 256),
xd->plane[0].qcoeff, TX_16X16);
- xd->eobs[0] = c;
+ xd->plane[0].eobs[0] = c;
eobtotal += c;
// 8x8 chroma blocks
c = decode_coefs(pbi, xd, bc, 16, PLANE_TYPE_UV,
seg_eob, xd->plane[1].qcoeff, TX_8X8);
- xd->eobs[16] = c;
+ xd->plane[1].eobs[0] = c;
eobtotal += c;
c = decode_coefs(pbi, xd, bc, 20, PLANE_TYPE_UV,
seg_eob, xd->plane[2].qcoeff, TX_8X8);
- xd->eobs[20] = c;
+ xd->plane[2].eobs[0] = c;
eobtotal += c;
return eobtotal;
}
const int c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_Y_WITH_DC, seg_eob,
BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
TX_8X8);
- xd->eobs[i] = c;
+ xd->plane[0].eobs[i] = c;
eobtotal += c;
}
const int c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_UV, seg_eob,
BLOCK_OFFSET(xd->plane[1].qcoeff, i - 16, 16),
TX_4X4);
- xd->eobs[i] = c;
+ xd->plane[1].eobs[i - 16] = c;
eobtotal += c;
}
for (i = 20; i < 24; i++) {
const int c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_UV, seg_eob,
BLOCK_OFFSET(xd->plane[2].qcoeff, i - 20, 16),
TX_4X4);
- xd->eobs[i] = c;
+ xd->plane[2].eobs[i - 20] = c;
eobtotal += c;
}
} else {
c = decode_coefs(pbi, xd, bc, 16, PLANE_TYPE_UV, seg_eob,
xd->plane[1].qcoeff, TX_8X8);
- xd->eobs[16] = c;
+ xd->plane[1].eobs[0] = c;
eobtotal += c;
c = decode_coefs(pbi, xd, bc, 20, PLANE_TYPE_UV, seg_eob,
xd->plane[2].qcoeff, TX_8X8);
- xd->eobs[20] = c;
+ xd->plane[2].eobs[0] = c;
eobtotal += c;
}
/* Decode the 4x4 transform coefficients of macroblock sub-block i into the
 * owning plane's qcoeff buffer and record the end-of-block count.
 * Returns the eob count so the caller can accumulate eobtotal.
 * Patch hunk: the '-' lines are the old flat-array form; the '+' lines map
 * the linear index i to a (plane, block) pair via plane_block_idx() — the
 * literal 16 is y_blocks for a 16x16 macroblock (16 luma 4x4 blocks). */
static int decode_coefs_4x4(VP9D_COMP *dx, MACROBLOCKD *xd,
BOOL_DECODER* const bc,
PLANE_TYPE type, int i, int seg_eob) {
+ const struct plane_block_idx pb_idx = plane_block_idx(16, i);
const int c = decode_coefs(dx, xd, bc, i, type, seg_eob,
- MB_SUBBLOCK_FIELD(xd, qcoeff, i), TX_4X4);
- xd->eobs[i] = c;
+ BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff, pb_idx.block, 16), TX_4X4);
+ xd->plane[pb_idx.plane].eobs[pb_idx.block] = c;
return c;
}
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
- vp9_dequant_idct_add(q, dq, pre, dst, 16, stride, xd->eobs[i * 4 + j]);
+ vp9_dequant_idct_add(q, dq, pre, dst, 16, stride,
+ xd->plane[0].eobs[i * 4 + j]);
q += 16;
pre += 4;
dst += 4;
uint8_t *origdest = dst;
uint8_t *origpred = pre;
- vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride, xd->eobs[0]);
+ vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride,
+ xd->plane[0].eobs[0]);
vp9_dequant_idct_add_8x8_c(&q[64], dq, origpred + 8,
- origdest + 8, 16, stride, xd->eobs[4]);
+ origdest + 8, 16, stride,
+ xd->plane[0].eobs[4]);
vp9_dequant_idct_add_8x8_c(&q[128], dq, origpred + 8 * 16,
origdest + 8 * stride, 16, stride,
- xd->eobs[8]);
+ xd->plane[0].eobs[8]);
vp9_dequant_idct_add_8x8_c(&q[192], dq, origpred + 8 * 16 + 8,
origdest + 8 * stride + 8, 16, stride,
- xd->eobs[12]);
+ xd->plane[0].eobs[12]);
}
void vp9_dequant_idct_add_y_block_lossless_c(int16_t *q, const int16_t *dq,
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, 16, stride,
- xd->eobs[i * 4 + j]);
+ xd->plane[0].eobs[i * 4 + j]);
q += 16;
pre += 4;
dst += 4;
void (*fwd_txm8x4)(int16_t *input, int16_t *output, int pitch);
void (*fwd_txm8x8)(int16_t *input, int16_t *output, int pitch);
void (*fwd_txm16x16)(int16_t *input, int16_t *output, int pitch);
- void (*quantize_b_4x4)(MACROBLOCK *x, int b_idx);
- void (*quantize_b_4x4_pair)(MACROBLOCK *x, int b_idx1, int b_idx2);
- void (*quantize_b_16x16)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type);
- void (*quantize_b_8x8)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type);
+ void (*quantize_b_4x4)(MACROBLOCK *x, int b_idx, int y_blocks);
+ void (*quantize_b_4x4_pair)(MACROBLOCK *x, int b_idx1, int b_idx2,
+ int y_blocks);
+ void (*quantize_b_16x16)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+ void (*quantize_b_8x8)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
};
#endif // VP9_ENCODER_VP9_BLOCK_H_
b->diff, 16, tx_type);
} else {
x->fwd_txm4x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4(x, ib);
- vp9_inverse_transform_b_4x4(&x->e_mbd, x->e_mbd.eobs[ib],
+ x->quantize_b_4x4(x, ib, 16);
+ vp9_inverse_transform_b_4x4(&x->e_mbd, xd->plane[0].eobs[ib],
BLOCK_OFFSET(xd->plane[0].dqcoeff, ib, 16),
b->diff, 32);
}
tx_type = get_tx_type_8x8(xd, ib);
if (tx_type != DCT_DCT) {
vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 16, tx_type);
- x->quantize_b_8x8(x, idx, tx_type);
+ x->quantize_b_8x8(x, idx, tx_type, 16);
vp9_short_iht8x8(dqcoeff, xd->block[ib].diff,
16, tx_type);
} else {
x->fwd_txm8x8(be->src_diff, (x->block + idx)->coeff, 32);
- x->quantize_b_8x8(x, idx, DCT_DCT);
+ x->quantize_b_8x8(x, idx, DCT_DCT, 16);
vp9_short_idct8x8(dqcoeff, xd->block[ib].diff, 32);
}
} else {
} else if (!(i & 1) &&
get_tx_type_4x4(xd, ib + iblock[i] + 1) == DCT_DCT) {
x->fwd_txm8x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4_pair(x, ib + iblock[i], ib + iblock[i] + 1);
- vp9_inverse_transform_b_4x4(xd, xd->eobs[ib + iblock[i]],
+ x->quantize_b_4x4_pair(x, ib + iblock[i], ib + iblock[i] + 1, 16);
+ vp9_inverse_transform_b_4x4(xd, xd->plane[0].eobs[ib + iblock[i]],
dqcoeff, b->diff, 32);
- vp9_inverse_transform_b_4x4(xd, xd->eobs[ib + iblock[i] + 1],
+ vp9_inverse_transform_b_4x4(xd, xd->plane[0].eobs[ib + iblock[i] + 1],
dqcoeff + 16, (b + 1)->diff, 32);
i++;
} else {
x->fwd_txm4x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4(x, ib + iblock[i]);
- vp9_inverse_transform_b_4x4(xd, xd->eobs[ib + iblock[i]],
+ x->quantize_b_4x4(x, ib + iblock[i], 16);
+ vp9_inverse_transform_b_4x4(xd, xd->plane[0].eobs[ib + iblock[i]],
dqcoeff, b->diff, 32);
}
}
BLOCKD *b = &x->e_mbd.block[ib];
BLOCK *be = &x->block[ib];
int16_t * const dqcoeff = MB_SUBBLOCK_FIELD(xd, dqcoeff, ib);
+ const int plane = ib < 20 ? 1 : 2;
+ const int block = ib < 20 ? ib - 16 : ib - 20;
assert(ib >= 16 && ib < 24);
vp9_intra_uv4x4_predict(&x->e_mbd, b, mode, b->predictor);
vp9_subtract_b(be, b, 8);
x->fwd_txm4x4(be->src_diff, be->coeff, 16);
- x->quantize_b_4x4(x, ib);
- vp9_inverse_transform_b_4x4(&x->e_mbd, x->e_mbd.eobs[ib],
+ x->quantize_b_4x4(x, ib, 16);
+ vp9_inverse_transform_b_4x4(&x->e_mbd, xd->plane[plane].eobs[block],
dqcoeff, b->diff, 16);
vp9_recon_uv_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst,
MACROBLOCK *mb, int ib, PLANE_TYPE type,
const int16_t *dequant_ptr,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- int tx_size) {
+ int tx_size, int y_blocks) {
const int ref = mb->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME;
MACROBLOCKD *const xd = &mb->e_mbd;
vp9_token_state tokens[1025][2];
unsigned best_index[1025][2];
- const struct plane_block_idx pb_idx = plane_block_idx(xd, ib);
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, ib);
const int16_t *coeff_ptr = mb->coeff + ib * 16;
int16_t *qcoeff_ptr;
int16_t *dqcoeff_ptr;
- int eob = xd->eobs[ib], final_eob, sz = 0;
+ int eob = xd->plane[pb_idx.plane].eobs[pb_idx.block], final_eob, sz = 0;
const int i0 = 0;
int rc, x, next, i;
int64_t rdmult, rddiv, rd_cost0, rd_cost1;
nzc0 = nzc1 = nzc;
#endif
+ assert((!type && !pb_idx.plane) || (type && pb_idx.plane));
dqcoeff_ptr = BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff, pb_idx.block, 16);
qcoeff_ptr = BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff, pb_idx.block, 16);
switch (tx_size) {
#endif
break;
}
+ assert(eob <= default_eob);
/* Now set up a Viterbi trellis to evaluate alternative roundings. */
rdmult = mb->rdmult * err_mult;
}
final_eob++;
- xd->eobs[ib] = final_eob;
+ xd->plane[pb_idx.plane].eobs[pb_idx.block] = final_eob;
*a = *l = (final_eob > 0);
#if CONFIG_CODE_NONZEROCOUNT
assert(final_nzc == final_nzc_exp);
for (b = 0; b < 16; b++) {
optimize_b(cm, x, b, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[b].dequant,
ta + vp9_block2above[TX_4X4][b],
- tl + vp9_block2left[TX_4X4][b], TX_4X4);
+ tl + vp9_block2left[TX_4X4][b], TX_4X4, 16);
}
}
for (b = 16; b < 24; b++) {
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[b].dequant,
ta + vp9_block2above[TX_4X4][b],
- tl + vp9_block2left[TX_4X4][b], TX_4X4);
+ tl + vp9_block2left[TX_4X4][b], TX_4X4, 16);
}
}
ENTROPY_CONTEXT above_ec = (a[0] + a[1]) != 0;
ENTROPY_CONTEXT left_ec = (l[0] + l[1]) != 0;
optimize_b(cm, x, b, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[b].dequant,
- &above_ec, &left_ec, TX_8X8);
+ &above_ec, &left_ec, TX_8X8, 16);
a[1] = a[0] = above_ec;
l[1] = l[0] = left_ec;
}
ENTROPY_CONTEXT above_ec = (a[0] + a[1]) != 0;
ENTROPY_CONTEXT left_ec = (l[0] + l[1]) != 0;
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[b].dequant,
- &above_ec, &left_ec, TX_8X8);
+ &above_ec, &left_ec, TX_8X8, 16);
}
}
ta = (t_above->y1[0] + t_above->y1[1] + t_above->y1[2] + t_above->y1[3]) != 0;
tl = (t_left->y1[0] + t_left->y1[1] + t_left->y1[2] + t_left->y1[3]) != 0;
optimize_b(cm, x, 0, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- &ta, &tl, TX_16X16);
+ &ta, &tl, TX_16X16, 16);
}
static void optimize_mb_16x16(VP9_COMMON *const cm, MACROBLOCK *x) {
ta = (a[0] + a[1] + a[2] + a[3] + a1[0] + a1[1] + a1[2] + a1[3]) != 0;
tl = (l[0] + l[1] + l[2] + l[3] + l1[0] + l1[1] + l1[2] + l1[3]) != 0;
optimize_b(cm, x, 0, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- &ta, &tl, TX_32X32);
+ &ta, &tl, TX_32X32, 64);
}
void vp9_optimize_sby_16x16(VP9_COMMON *const cm, MACROBLOCK *x) {
const int x_idx = n & 1, y_idx = n >> 1;
optimize_b(cm, x, n * 16, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- ta + x_idx, tl + y_idx, TX_16X16);
+ ta + x_idx, tl + y_idx, TX_16X16, 64);
}
}
const int x_idx = n & 3, y_idx = n >> 2;
optimize_b(cm, x, n * 4, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- ta + x_idx, tl + y_idx, TX_8X8);
+ ta + x_idx, tl + y_idx, TX_8X8, 64);
}
}
const int x_idx = n & 7, y_idx = n >> 3;
optimize_b(cm, x, n, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- ta + x_idx, tl + y_idx, TX_4X4);
+ ta + x_idx, tl + y_idx, TX_4X4, 64);
}
}
above_ec = (a[0] + a[1] + a1[0] + a1[1]) != 0;
left_ec = (l[0] + l[1] + l1[0] + l1[1]) != 0;
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[cidx].dequant,
- &above_ec, &left_ec, TX_16X16);
+ &above_ec, &left_ec, TX_16X16, 64);
}
}
above_ec = (a[0] + a[1]) != 0;
left_ec = (l[0] + l[1]) != 0;
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[cidx].dequant,
- &above_ec, &left_ec, TX_8X8);
+ &above_ec, &left_ec, TX_8X8, 64);
a[0] = a[1] = above_ec;
l[0] = l[1] = left_ec;
}
a = ta + vp9_block2above_sb[TX_4X4][b];
l = tl + vp9_block2left_sb[TX_4X4][b];
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[cidx].dequant,
- a, l, TX_4X4);
+ a, l, TX_4X4, 64);
}
}
const int x_idx = n & 1, y_idx = n >> 1;
optimize_b(cm, x, n * 64, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- ta + x_idx, tl + y_idx, TX_32X32);
+ ta + x_idx, tl + y_idx, TX_32X32, 256);
}
}
const int x_idx = n & 3, y_idx = n >> 2;
optimize_b(cm, x, n * 16, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- ta + x_idx, tl + y_idx, TX_16X16);
+ ta + x_idx, tl + y_idx, TX_16X16, 256);
}
}
const int x_idx = n & 7, y_idx = n >> 3;
optimize_b(cm, x, n * 4, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- ta + x_idx, tl + y_idx, TX_8X8);
+ ta + x_idx, tl + y_idx, TX_8X8, 256);
}
}
const int x_idx = n & 15, y_idx = n >> 4;
optimize_b(cm, x, n, PLANE_TYPE_Y_WITH_DC, x->e_mbd.block[0].dequant,
- ta + x_idx, tl + y_idx, TX_4X4);
+ ta + x_idx, tl + y_idx, TX_4X4, 256);
}
}
a_ec = (a[0] + a[1] + a1[0] + a1[1] + a2[0] + a2[1] + a3[0] + a3[1]) != 0;
l_ec = (l[0] + l[1] + l1[0] + l1[1] + l2[0] + l2[1] + l3[0] + l3[1]) != 0;
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[cidx].dequant,
- &a_ec, &l_ec, TX_32X32);
+ &a_ec, &l_ec, TX_32X32, 256);
}
}
above_ec = (a[0] + a[1] + a1[0] + a1[1]) != 0;
left_ec = (l[0] + l[1] + l1[0] + l1[1]) != 0;
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[cidx].dequant,
- &above_ec, &left_ec, TX_16X16);
+ &above_ec, &left_ec, TX_16X16, 256);
a[0] = a[1] = a1[0] = a1[1] = above_ec;
l[0] = l[1] = l1[0] = l1[1] = left_ec;
}
above_ec = (a[0] + a[1]) != 0;
left_ec = (l[0] + l[1]) != 0;
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[cidx].dequant,
- &above_ec, &left_ec, TX_8X8);
+ &above_ec, &left_ec, TX_8X8, 256);
a[0] = a[1] = above_ec;
l[0] = l[1] = left_ec;
}
a = ta + vp9_block2above_sb64[TX_4X4][b];
l = tl + vp9_block2left_sb64[TX_4X4][b];
optimize_b(cm, x, b, PLANE_TYPE_UV, x->e_mbd.block[cidx].dequant,
- a, l, TX_4X4);
+ a, l, TX_4X4, 256);
}
}
extern int enc_debug;
#endif
/* Map a plane number (0 = Y, 1 = U, 2 = V) to the first BLOCKD/BLOCK array
 * index belonging to that plane in a 16x16 macroblock layout (Y occupies
 * blocks 0-15, U blocks 16-19, V blocks 20-23); callers use the result as
 * c_idx into mb->block[] / xd->block[].
 * Patch hunk: the old version derived the plane from a raw block index and
 * the superblock type; the new version takes the plane number directly
 * (callers now obtain it from plane_block_idx()). */
-static INLINE int plane_idx(MACROBLOCKD *xd, int b_idx) {
- const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
- if (b_idx < (16 << (sb_type * 2)))
- return 0; // Y
- else if (b_idx < (20 << (sb_type * 2)))
- return 16; // U
- assert(b_idx < (24 << (sb_type * 2)));
- return 20; // V
+static INLINE int plane_idx(int plane) {
+ return plane == 0 ? 0 :
+ plane == 1 ? 16 : 20;
}
void vp9_ht_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type) {
int nzc = 0;
#endif
- assert(plane_idx(xd, b_idx) == 0);
switch (tx_type) {
case ADST_DCT:
pt_scan = vp9_row_scan_4x4;
}
}
- xd->eobs[b_idx] = eob + 1;
+ xd->plane[0].eobs[b_idx] = eob + 1;
#if CONFIG_CODE_NONZEROCOUNT
xd->nzcs[b_idx] = nzc;
#endif
}
-void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx) {
+void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, int y_blocks) {
MACROBLOCKD *const xd = &mb->e_mbd;
- const int c_idx = plane_idx(xd, b_idx);
- const struct plane_block_idx pb_idx = plane_block_idx(xd, b_idx);
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);
+ const int c_idx = plane_idx(pb_idx.plane);
BLOCK *const b = &mb->block[c_idx];
BLOCKD *const d = &xd->block[c_idx];
int i, rc, eob;
int nzc = 0;
#endif
+ if (c_idx == 0) assert(pb_idx.plane == 0);
+ if (c_idx == 16) assert(pb_idx.plane == 1);
+ if (c_idx == 20) assert(pb_idx.plane == 2);
vpx_memset(qcoeff_ptr, 0, 32);
vpx_memset(dqcoeff_ptr, 0, 32);
}
}
- xd->eobs[b_idx] = eob + 1;
+ xd->plane[pb_idx.plane].eobs[pb_idx.block] = eob + 1;
#if CONFIG_CODE_NONZEROCOUNT
xd->nzcs[b_idx] = nzc;
#endif
if (tx_type != DCT_DCT) {
vp9_ht_quantize_b_4x4(x, i, tx_type);
} else {
- x->quantize_b_4x4(x, i);
+ x->quantize_b_4x4(x, i, 16);
}
}
}
xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_MB16X16;
for (i = 16; i < 24; i++)
- x->quantize_b_4x4(x, i);
+ x->quantize_b_4x4(x, i, 16);
xd->mode_info_context->mbmi.sb_type = real_sb_type;
}
vp9_quantize_mbuv_4x4(x);
}
-void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type) {
+void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks) {
MACROBLOCKD *const xd = &mb->e_mbd;
- const struct plane_block_idx pb_idx = plane_block_idx(xd, b_idx);
- const int c_idx = plane_idx(xd, b_idx);
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);
+ const int c_idx = plane_idx(pb_idx.plane);
int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff,
pb_idx.block, 16);
int16_t *dqcoeff_ptr = BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff,
break;
}
+ if (c_idx == 0) assert(pb_idx.plane == 0);
+ if (c_idx == 16) assert(pb_idx.plane == 1);
+ if (c_idx == 20) assert(pb_idx.plane == 2);
vpx_memset(qcoeff_ptr, 0, 64 * sizeof(int16_t));
vpx_memset(dqcoeff_ptr, 0, 64 * sizeof(int16_t));
}
}
}
- xd->eobs[b_idx] = eob + 1;
+ xd->plane[pb_idx.plane].eobs[pb_idx.block] = eob + 1;
#if CONFIG_CODE_NONZEROCOUNT
xd->nzcs[b_idx] = nzc;
#endif
} else {
- xd->eobs[b_idx] = 0;
+ xd->plane[pb_idx.plane].eobs[pb_idx.block] = 0;
#if CONFIG_CODE_NONZEROCOUNT
xd->nzcs[b_idx] = 0;
#endif
#endif
for (i = 0; i < 16; i += 4) {
TX_TYPE tx_type = get_tx_type_8x8(&x->e_mbd, (i & 8) + ((i & 4) >> 1));
- x->quantize_b_8x8(x, i, tx_type);
+ x->quantize_b_8x8(x, i, tx_type, 16);
}
}
}
#endif
for (i = 16; i < 24; i += 4)
- x->quantize_b_8x8(x, i, DCT_DCT);
+ x->quantize_b_8x8(x, i, DCT_DCT, 16);
xd->mode_info_context->mbmi.sb_type = real_sb_type;
}
x->e_mbd.nzcs[i] = 0;
}
#endif
- x->quantize_b_16x16(x, 0, tx_type);
+ x->quantize_b_16x16(x, 0, tx_type, 16);
}
void vp9_quantize_mb_16x16(MACROBLOCK *x) {
#endif
}
-void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type) {
+void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks) {
MACROBLOCKD *const xd = &mb->e_mbd;
- const int c_idx = plane_idx(xd, b_idx);
- const struct plane_block_idx pb_idx = plane_block_idx(xd, b_idx);
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);
+ const int c_idx = plane_idx(pb_idx.plane);
BLOCK *const b = &mb->block[c_idx];
BLOCKD *const d = &xd->block[c_idx];
const int *pt_scan;
break;
}
+ if (c_idx == 0) assert(pb_idx.plane == 0);
+ if (c_idx == 16) assert(pb_idx.plane == 1);
+ if (c_idx == 20) assert(pb_idx.plane == 2);
quantize(b->zrun_zbin_boost,
mb->coeff + 16 * b_idx,
256, b->skip_block,
BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff, pb_idx.block, 16),
d->dequant,
b->zbin_extra,
- &xd->eobs[b_idx],
+ &xd->plane[pb_idx.plane].eobs[pb_idx.block],
#if CONFIG_CODE_NONZEROCOUNT
&xd->nzcs[b_idx],
#endif
pt_scan, 1);
}
-void vp9_regular_quantize_b_32x32(MACROBLOCK *mb, int b_idx) {
+void vp9_regular_quantize_b_32x32(MACROBLOCK *mb, int b_idx, int y_blocks) {
MACROBLOCKD *const xd = &mb->e_mbd;
- const int c_idx = plane_idx(xd, b_idx);
- const struct plane_block_idx pb_idx = plane_block_idx(xd, b_idx);
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);
+ const int c_idx = plane_idx(pb_idx.plane);
BLOCK *const b = &mb->block[c_idx];
BLOCKD *const d = &xd->block[c_idx];
+ if (c_idx == 0) assert(pb_idx.plane == 0);
+ if (c_idx == 16) assert(pb_idx.plane == 1);
+ if (c_idx == 20) assert(pb_idx.plane == 2);
quantize(b->zrun_zbin_boost,
mb->coeff + b_idx * 16,
1024, b->skip_block,
BLOCK_OFFSET(xd->plane[pb_idx.plane].dqcoeff, pb_idx.block, 16),
d->dequant,
b->zbin_extra,
- &xd->eobs[b_idx],
+ &xd->plane[pb_idx.plane].eobs[pb_idx.block],
#if CONFIG_CODE_NONZEROCOUNT
&xd->nzcs[b_idx],
#endif
}
void vp9_quantize_sby_32x32(MACROBLOCK *x) {
- vp9_regular_quantize_b_32x32(x, 0);
+ vp9_regular_quantize_b_32x32(x, 0, 64);
}
void vp9_quantize_sby_16x16(MACROBLOCK *x) {
for (n = 0; n < 4; n++) {
TX_TYPE tx_type = get_tx_type_16x16(&x->e_mbd,
(16 * (n & 2)) + ((n & 1) * 4));
- x->quantize_b_16x16(x, n * 16, tx_type);
+ x->quantize_b_16x16(x, n * 16, tx_type, 64);
}
}
for (n = 0; n < 16; n++) {
TX_TYPE tx_type = get_tx_type_8x8(&x->e_mbd,
(4 * (n & 12)) + ((n & 3) * 2));
- x->quantize_b_8x8(x, n * 4, tx_type);
+ x->quantize_b_8x8(x, n * 4, tx_type, 64);
}
}
if (tx_type != DCT_DCT) {
vp9_ht_quantize_b_4x4(x, n, tx_type);
} else {
- x->quantize_b_4x4(x, n);
+ x->quantize_b_4x4(x, n, 64);
}
}
}
void vp9_quantize_sbuv_16x16(MACROBLOCK *x) {
- x->quantize_b_16x16(x, 64, DCT_DCT);
- x->quantize_b_16x16(x, 80, DCT_DCT);
+ x->quantize_b_16x16(x, 64, DCT_DCT, 64);
+ x->quantize_b_16x16(x, 80, DCT_DCT, 64);
}
void vp9_quantize_sbuv_8x8(MACROBLOCK *x) {
int i;
for (i = 64; i < 96; i += 4)
- x->quantize_b_8x8(x, i, DCT_DCT);
+ x->quantize_b_8x8(x, i, DCT_DCT, 64);
}
void vp9_quantize_sbuv_4x4(MACROBLOCK *x) {
int i;
for (i = 64; i < 96; i++)
- x->quantize_b_4x4(x, i);
+ x->quantize_b_4x4(x, i, 64);
}
void vp9_quantize_sb64y_32x32(MACROBLOCK *x) {
int n;
for (n = 0; n < 4; n++)
- vp9_regular_quantize_b_32x32(x, n * 64);
+ vp9_regular_quantize_b_32x32(x, n * 64, 256);
}
void vp9_quantize_sb64y_16x16(MACROBLOCK *x) {
for (n = 0; n < 16; n++) {
TX_TYPE tx_type = get_tx_type_16x16(&x->e_mbd,
(16 * (n & 12)) + ((n & 3) * 4));
- x->quantize_b_16x16(x, n * 16, tx_type);
+ x->quantize_b_16x16(x, n * 16, tx_type, 256);
}
}
for (n = 0; n < 64; n++) {
TX_TYPE tx_type = get_tx_type_8x8(&x->e_mbd,
(4 * (n & 56)) + ((n & 7) * 2));
- x->quantize_b_8x8(x, n * 4, tx_type);
+ x->quantize_b_8x8(x, n * 4, tx_type, 256);
}
}
if (tx_type != DCT_DCT) {
vp9_ht_quantize_b_4x4(x, n, tx_type);
} else {
- x->quantize_b_4x4(x, n);
+ x->quantize_b_4x4(x, n, 256);
}
}
}
void vp9_quantize_sb64uv_32x32(MACROBLOCK *x) {
- vp9_regular_quantize_b_32x32(x, 256);
- vp9_regular_quantize_b_32x32(x, 320);
+ vp9_regular_quantize_b_32x32(x, 256, 256);
+ vp9_regular_quantize_b_32x32(x, 320, 256);
}
void vp9_quantize_sb64uv_16x16(MACROBLOCK *x) {
int i;
for (i = 256; i < 384; i += 16)
- x->quantize_b_16x16(x, i, DCT_DCT);
+ x->quantize_b_16x16(x, i, DCT_DCT, 256);
}
void vp9_quantize_sb64uv_8x8(MACROBLOCK *x) {
int i;
for (i = 256; i < 384; i += 4)
- x->quantize_b_8x8(x, i, DCT_DCT);
+ x->quantize_b_8x8(x, i, DCT_DCT, 256);
}
void vp9_quantize_sb64uv_4x4(MACROBLOCK *x) {
int i;
for (i = 256; i < 384; i++)
- x->quantize_b_4x4(x, i);
+ x->quantize_b_4x4(x, i, 256);
}
/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
* these two C functions if corresponding optimized routine is not available.
* NEON optimized version implements currently the fast quantization for pair
* of blocks. */
-void vp9_regular_quantize_b_4x4_pair(MACROBLOCK *x, int b_idx1, int b_idx2) {
- vp9_regular_quantize_b_4x4(x, b_idx1);
- vp9_regular_quantize_b_4x4(x, b_idx2);
+void vp9_regular_quantize_b_4x4_pair(MACROBLOCK *x, int b_idx1, int b_idx2,
+ int y_blocks) {
+ vp9_regular_quantize_b_4x4(x, b_idx1, y_blocks);
+ vp9_regular_quantize_b_4x4(x, b_idx2, y_blocks);
}
static void invert_quant(int16_t *quant, uint8_t *shift, int d) {
#endif
void vp9_ht_quantize_b_4x4(MACROBLOCK *mb, int b_ix, TX_TYPE type);
-void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx);
-void vp9_regular_quantize_b_4x4_pair(MACROBLOCK *mb, int b_idx1, int b_idx2);
-void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type);
-void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type);
-void vp9_regular_quantize_b_32x32(MACROBLOCK *mb, int b_idx);
+void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, int y_blocks);
+void vp9_regular_quantize_b_4x4_pair(MACROBLOCK *mb, int b_idx1, int b_idx2,
+ int y_blocks);
+void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
+ int y_blocks);
+void vp9_regular_quantize_b_32x32(MACROBLOCK *mb, int b_idx,
+ int y_blocks);
void vp9_quantize_mb_4x4(MACROBLOCK *x);
void vp9_quantize_mb_8x8(MACROBLOCK *x);
int ib, PLANE_TYPE type,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
- TX_SIZE tx_size) {
+ TX_SIZE tx_size,
+ int y_blocks) {
MACROBLOCKD *const xd = &mb->e_mbd;
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int pt;
- const int eob = xd->eobs[ib];
int c = 0;
int cost = 0, pad;
const int *scan, *nb;
- const struct plane_block_idx pb_idx = plane_block_idx(xd, ib);
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, ib);
+ const int eob = xd->plane[pb_idx.plane].eobs[pb_idx.block];
const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff,
pb_idx.block, 16);
const int ref = mbmi->ref_frame != INTRA_FRAME;
uint8_t token_cache[1024];
// Check for consistency of tx_size with mode info
+ assert((!type && !pb_idx.plane) || (type && pb_idx.plane));
if (type == PLANE_TYPE_Y_WITH_DC) {
assert(xd->mode_info_context->mbmi.txfm_size == tx_size);
} else {
abort();
break;
}
+ assert(eob <= seg_eob);
VP9_COMBINEENTROPYCONTEXTS(pt, a_ec, l_ec);
nb = vp9_get_coef_neighbors_handle(scan, &pad);
cost += cost_coeffs(cm, mb, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above[TX_4X4][b],
tl + vp9_block2left[TX_4X4][b],
- TX_4X4);
+ TX_4X4, 16);
return cost;
}
cost += cost_coeffs(cm, mb, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above[TX_8X8][b],
tl + vp9_block2left[TX_8X8][b],
- TX_8X8);
+ TX_8X8, 16);
return cost;
}
vpx_memcpy(&t_above, xd->above_context, sizeof(t_above));
vpx_memcpy(&t_left, xd->left_context, sizeof(t_left));
- return cost_coeffs(cm, mb, 0, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_16X16);
+ return cost_coeffs(cm, mb, 0, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_16X16, 16);
}
static void macro_block_yrd_16x16(VP9_COMMON *const cm, MACROBLOCK *mb,
for (b = 0; b < 64; b++)
cost += cost_coeffs(cm, x, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above_sb[TX_4X4][b],
- tl + vp9_block2left_sb[TX_4X4][b], TX_4X4);
+ tl + vp9_block2left_sb[TX_4X4][b], TX_4X4, 64);
return cost;
}
for (b = 0; b < 64; b += 4)
cost += cost_coeffs(cm, x, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above_sb[TX_8X8][b],
- tl + vp9_block2left_sb[TX_8X8][b], TX_8X8);
+ tl + vp9_block2left_sb[TX_8X8][b], TX_8X8, 64);
return cost;
}
for (b = 0; b < 64; b += 16)
cost += cost_coeffs(cm, x, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above_sb[TX_16X16][b],
- tl + vp9_block2left_sb[TX_16X16][b], TX_16X16);
+ tl + vp9_block2left_sb[TX_16X16][b], TX_16X16, 64);
return cost;
}
vpx_memcpy(&t_above, xd->above_context, sizeof(t_above));
vpx_memcpy(&t_left, xd->left_context, sizeof(t_left));
- return cost_coeffs(cm, x, 0, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_32X32);
+ return cost_coeffs(cm, x, 0, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_32X32, 64);
}
static void super_block_yrd_32x32(VP9_COMMON *const cm, MACROBLOCK *x,
for (b = 0; b < 256; b++)
cost += cost_coeffs(cm, x, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above_sb64[TX_4X4][b],
- tl + vp9_block2left_sb64[TX_4X4][b], TX_4X4);
+ tl + vp9_block2left_sb64[TX_4X4][b], TX_4X4, 256);
return cost;
}
for (b = 0; b < 256; b += 4)
cost += cost_coeffs(cm, x, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above_sb64[TX_8X8][b],
- tl + vp9_block2left_sb64[TX_8X8][b], TX_8X8);
+ tl + vp9_block2left_sb64[TX_8X8][b], TX_8X8, 256);
return cost;
}
for (b = 0; b < 256; b += 16)
cost += cost_coeffs(cm, x, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above_sb64[TX_16X16][b],
- tl + vp9_block2left_sb64[TX_16X16][b], TX_16X16);
+ tl + vp9_block2left_sb64[TX_16X16][b], TX_16X16, 256);
return cost;
}
for (b = 0; b < 256; b += 64)
cost += cost_coeffs(cm, x, b, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above_sb64[TX_32X32][b],
- tl + vp9_block2left_sb64[TX_32X32][b], TX_32X32);
+ tl + vp9_block2left_sb64[TX_32X32][b], TX_32X32, 256);
return cost;
}
vp9_ht_quantize_b_4x4(x, be - x->block, tx_type);
} else {
x->fwd_txm4x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4(x, be - x->block);
+ x->quantize_b_4x4(x, be - x->block, 16);
}
tempa = ta;
templ = tl;
ratey = cost_coeffs(cm, x, b - xd->block,
- PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4);
+ PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4, 16);
rate += ratey;
distortion = vp9_block_error(be->coeff,
BLOCK_OFFSET(xd->plane[0].dqcoeff, ib, 16),
vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 16, tx_type);
else
x->fwd_txm8x8(be->src_diff, (x->block + idx)->coeff, 32);
- x->quantize_b_8x8(x, idx, tx_type);
+ x->quantize_b_8x8(x, idx, tx_type, 16);
// compute quantization mse of 8x8 block
distortion = vp9_block_error_c((x->block + idx)->coeff,
tl1 = tl0 + 1;
rate_t = cost_coeffs(cm, x, idx, PLANE_TYPE_Y_WITH_DC,
- ta0, tl0, TX_8X8);
+ ta0, tl0, TX_8X8, 16);
rate += rate_t;
} else {
} else if (!(i & 1) &&
get_tx_type_4x4(xd, ib + iblock[i] + 1) == DCT_DCT) {
x->fwd_txm8x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4_pair(x, ib + iblock[i], ib + iblock[i] + 1);
+ x->quantize_b_4x4_pair(x, ib + iblock[i], ib + iblock[i] + 1, 16);
do_two = 1;
} else {
x->fwd_txm4x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4(x, ib + iblock[i]);
+ x->quantize_b_4x4(x, ib + iblock[i], 16);
}
distortion += vp9_block_error_c(be->coeff,
BLOCK_OFFSET(xd->plane[0].dqcoeff, ib + iblock[i], 16),
16 << do_two);
rate_t += cost_coeffs(cm, x, ib + iblock[i], PLANE_TYPE_Y_WITH_DC,
i&1 ? ta1 : ta0, i&2 ? tl1 : tl0,
- TX_4X4);
+ TX_4X4, 16);
if (do_two) {
i++;
rate_t += cost_coeffs(cm, x, ib + iblock[i], PLANE_TYPE_Y_WITH_DC,
i&1 ? ta1 : ta0, i&2 ? tl1 : tl0,
- TX_4X4);
+ TX_4X4, 16);
}
}
b = &xd->block[ib];
MACROBLOCKD *xd = &mb->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl;
- const BLOCK_SIZE_TYPE real_sb_type = xd->mode_info_context->mbmi.sb_type;
- xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_MB16X16;
if (backup) {
vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
cost += cost_coeffs(cm, mb, b, PLANE_TYPE_UV,
ta + vp9_block2above[TX_4X4][b],
tl + vp9_block2left[TX_4X4][b],
- TX_4X4);
+ TX_4X4, 16);
- xd->mode_info_context->mbmi.sb_type = real_sb_type;
return cost;
}
MACROBLOCKD *xd = &mb->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl;
- const BLOCK_SIZE_TYPE real_sb_type = xd->mode_info_context->mbmi.sb_type;
- xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_MB16X16;
if (backup) {
vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
for (b = 16; b < 24; b += 4)
cost += cost_coeffs(cm, mb, b, PLANE_TYPE_UV,
ta + vp9_block2above[TX_8X8][b],
- tl + vp9_block2left[TX_8X8][b], TX_8X8);
+ tl + vp9_block2left[TX_8X8][b], TX_8X8, 16);
- xd->mode_info_context->mbmi.sb_type = real_sb_type;
return cost;
}
for (b = 16; b < 24; b += 4)
cost += cost_coeffs(cm, x, b * 4, PLANE_TYPE_UV,
ta + vp9_block2above[TX_8X8][b],
- tl + vp9_block2left[TX_8X8][b], TX_16X16);
+ tl + vp9_block2left[TX_8X8][b], TX_16X16, 64);
return cost;
}
for (b = 16; b < 24; b += 4)
cost += cost_coeffs(cm, x, b * 16, PLANE_TYPE_UV,
ta + vp9_block2above[TX_8X8][b],
- tl + vp9_block2left[TX_8X8][b], TX_32X32);
+ tl + vp9_block2left[TX_8X8][b], TX_32X32, 256);
return cost;
}
vp9_subtract_b(be, bd, 16);
x->fwd_txm4x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4(x, i);
+ x->quantize_b_4x4(x, i, 16);
thisdistortion = vp9_block_error(be->coeff,
BLOCK_OFFSET(xd->plane[0].dqcoeff, i, 16), 16);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(cm, x, i, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above[TX_4X4][i],
- tl + vp9_block2left[TX_4X4][i], TX_4X4);
+ tl + vp9_block2left[TX_4X4][i], TX_4X4, 16);
}
}
*distortion >>= 2;
if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
if (otherrd) {
x->fwd_txm8x8(be->src_diff, be2->coeff, 32);
- x->quantize_b_8x8(x, idx, DCT_DCT);
+ x->quantize_b_8x8(x, idx, DCT_DCT, 16);
thisdistortion = vp9_block_error_c(be2->coeff,
BLOCK_OFFSET(xd->plane[0].dqcoeff, idx, 16), 64);
otherdist += thisdistortion;
othercost += cost_coeffs(cm, x, idx, PLANE_TYPE_Y_WITH_DC,
tacp + vp9_block2above[TX_8X8][idx],
tlcp + vp9_block2left[TX_8X8][idx],
- TX_8X8);
+ TX_8X8, 16);
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
}
for (j = 0; j < 4; j += 2) {
bd = &xd->block[ib + iblock[j]];
be = &x->block[ib + iblock[j]];
x->fwd_txm8x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4_pair(x, ib + iblock[j], ib + iblock[j] + 1);
+ x->quantize_b_4x4_pair(x, ib + iblock[j], ib + iblock[j] + 1, 16);
thisdistortion = vp9_block_error_c(be->coeff,
BLOCK_OFFSET(xd->plane[0].dqcoeff, ib + iblock[j], 16), 32);
*distortion += thisdistortion;
cost_coeffs(cm, x, ib + iblock[j], PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above[TX_4X4][ib + iblock[j]],
tl + vp9_block2left[TX_4X4][ib + iblock[j]],
- TX_4X4);
+ TX_4X4, 16);
*labelyrate +=
cost_coeffs(cm, x, ib + iblock[j] + 1,
PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above[TX_4X4][ib + iblock[j] + 1],
tl + vp9_block2left[TX_4X4][ib + iblock[j]],
- TX_4X4);
+ TX_4X4, 16);
}
} else /* 8x8 */ {
if (otherrd) {
for (j = 0; j < 4; j += 2) {
BLOCK *be = &x->block[ib + iblock[j]];
x->fwd_txm8x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4_pair(x, ib + iblock[j], ib + iblock[j] + 1);
+ x->quantize_b_4x4_pair(x, ib + iblock[j], ib + iblock[j] + 1, 16);
thisdistortion = vp9_block_error_c(be->coeff,
BLOCK_OFFSET(xd->plane[0].dqcoeff, ib + iblock[j], 16), 32);
otherdist += thisdistortion;
cost_coeffs(cm, x, ib + iblock[j], PLANE_TYPE_Y_WITH_DC,
tacp + vp9_block2above[TX_4X4][ib + iblock[j]],
tlcp + vp9_block2left[TX_4X4][ib + iblock[j]],
- TX_4X4);
+ TX_4X4, 16);
othercost +=
cost_coeffs(cm, x, ib + iblock[j] + 1,
PLANE_TYPE_Y_WITH_DC,
tacp + vp9_block2above[TX_4X4][ib + iblock[j] + 1],
tlcp + vp9_block2left[TX_4X4][ib + iblock[j]],
- TX_4X4);
+ TX_4X4, 16);
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
}
}
x->fwd_txm8x8(be->src_diff, be2->coeff, 32);
- x->quantize_b_8x8(x, idx, DCT_DCT);
+ x->quantize_b_8x8(x, idx, DCT_DCT, 16);
thisdistortion = vp9_block_error_c(be2->coeff,
BLOCK_OFFSET(xd->plane[0].dqcoeff, idx, 16), 64);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(cm, x, idx, PLANE_TYPE_Y_WITH_DC,
ta + vp9_block2above[TX_8X8][idx],
- tl + vp9_block2left[TX_8X8][idx], TX_8X8);
+ tl + vp9_block2left[TX_8X8][idx], TX_8X8,
+ 16);
}
}
}
if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_4X4) {
for (j = 0; j < 16; j++)
if (labels[j] == i)
- best_eobs[j] = x->e_mbd.eobs[j];
+ best_eobs[j] = x->e_mbd.plane[0].eobs[j];
} else {
for (j = 0; j < 4; j++) {
int ib = vp9_i8x8_block[j], idx = j * 4;
if (labels[ib] == i)
- best_eobs[idx] = x->e_mbd.eobs[idx];
+ best_eobs[idx] = x->e_mbd.plane[0].eobs[idx];
}
}
if (other_rd < best_other_rd)
bd->bmi.as_mv[0].as_int = bsi.mvs[i].as_int;
if (mbmi->second_ref_frame > 0)
bd->bmi.as_mv[1].as_int = bsi.second_mvs[i].as_int;
- x->e_mbd.eobs[i] = bsi.eobs[i];
+ x->e_mbd.plane[0].eobs[i] = bsi.eobs[i];
}
*returntotrate = bsi.r;
TOKENEXTRA **tp,
PLANE_TYPE type,
TX_SIZE tx_size,
+ int y_blocks,
int dry_run) {
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int pt; /* near block/prev token context index */
int c = 0;
- const int eob = xd->eobs[ib]; /* one beyond last nonzero coeff */
TOKENEXTRA *t = *tp; /* store tokens starting here */
- const struct plane_block_idx pb_idx = plane_block_idx(xd, ib);
+ const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, ib);
+ const int eob = xd->plane[pb_idx.plane].eobs[pb_idx.block];
const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[pb_idx.plane].qcoeff,
pb_idx.block, 16);
int seg_eob, default_eob, pad;
assert(xd->nzcs[ib] == 0);
#endif
+ assert((!type && !pb_idx.plane) || (type && pb_idx.plane));
if (sb_type == BLOCK_SIZE_SB64X64) {
a = (ENTROPY_CONTEXT *)xd->above_context +
vp9_block2above_sb64[tx_size][ib];
int i = 0;
for (i = 0; i < 16; i++)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int skip = 1;
int i;
- for (i = 16; i < 24; i++)
- skip &= (!xd->eobs[i]);
+ for (i = 0; i < 4; i++)
+ skip &= (!xd->plane[1].eobs[i]);
+ for (i = 0; i < 4; i++)
+ skip &= (!xd->plane[2].eobs[i]);
return skip;
}
int i = 0;
for (i = 0; i < 16; i += 4)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int vp9_mbuv_is_skippable_8x8(MACROBLOCKD *xd) {
- return (!xd->eobs[16]) & (!xd->eobs[20]);
+ return (!xd->plane[1].eobs[0]) & (!xd->plane[2].eobs[0]);
}
static int mb_is_skippable_8x8(MACROBLOCKD *xd) {
}
int vp9_mby_is_skippable_16x16(MACROBLOCKD *xd) {
- return (!xd->eobs[0]);
+ return (!xd->plane[0].eobs[0]);
}
static int mb_is_skippable_16x16(MACROBLOCKD *xd) {
}
int vp9_sby_is_skippable_32x32(MACROBLOCKD *xd) {
- return (!xd->eobs[0]);
+ return (!xd->plane[0].eobs[0]);
}
int vp9_sbuv_is_skippable_16x16(MACROBLOCKD *xd) {
- return (!xd->eobs[64]) & (!xd->eobs[80]);
+ return (!xd->plane[1].eobs[0]) & (!xd->plane[2].eobs[0]);
}
static int sb_is_skippable_32x32(MACROBLOCKD *xd) {
int i = 0;
for (i = 0; i < 64; i += 16)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int i = 0;
for (i = 0; i < 64; i += 4)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int skip = 1;
int i = 0;
- for (i = 64; i < 96; i += 4)
- skip &= (!xd->eobs[i]);
+ for (i = 0; i < 16; i += 4)
+ skip &= (!xd->plane[1].eobs[i]);
+ for (i = 0; i < 16; i += 4)
+ skip &= (!xd->plane[2].eobs[i]);
return skip;
}
int i = 0;
for (i = 0; i < 64; i++)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int skip = 1;
int i = 0;
- for (i = 64; i < 96; i++)
- skip &= (!xd->eobs[i]);
+ for (i = 0; i < 16; i++)
+ skip &= (!xd->plane[1].eobs[i]);
+ for (i = 0; i < 16; i++)
+ skip &= (!xd->plane[2].eobs[i]);
return skip;
}
switch (mbmi->txfm_size) {
case TX_32X32:
tokenize_b(cpi, xd, 0, t, PLANE_TYPE_Y_WITH_DC,
- TX_32X32, dry_run);
+ TX_32X32, 64, dry_run);
for (b = 64; b < 96; b += 16)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_16X16, dry_run);
+ TX_16X16, 64, dry_run);
break;
case TX_16X16:
for (b = 0; b < 64; b += 16)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC,
- TX_16X16, dry_run);
+ TX_16X16, 64, dry_run);
for (b = 64; b < 96; b += 16)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_16X16, dry_run);
+ TX_16X16, 64, dry_run);
break;
case TX_8X8:
for (b = 0; b < 64; b += 4)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC,
- TX_8X8, dry_run);
+ TX_8X8, 64, dry_run);
for (b = 64; b < 96; b += 4)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_8X8, dry_run);
+ TX_8X8, 64, dry_run);
break;
case TX_4X4:
for (b = 0; b < 64; b++)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC,
- TX_4X4, dry_run);
+ TX_4X4, 64, dry_run);
for (b = 64; b < 96; b++)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_4X4, dry_run);
+ TX_4X4, 64, dry_run);
break;
default: assert(0);
}
int i = 0;
for (i = 0; i < 256; i += 64)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int vp9_sb64uv_is_skippable_32x32(MACROBLOCKD *xd) {
- return (!xd->eobs[256]) & (!xd->eobs[320]);
+ return (!xd->plane[1].eobs[0]) & (!xd->plane[2].eobs[0]);
}
static int sb64_is_skippable_32x32(MACROBLOCKD *xd) {
int i = 0;
for (i = 0; i < 256; i += 16)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int skip = 1;
int i = 0;
- for (i = 256; i < 384; i += 16)
- skip &= (!xd->eobs[i]);
+ for (i = 0; i < 64; i += 16)
+ skip &= (!xd->plane[1].eobs[i]);
+ for (i = 0; i < 64; i += 16)
+ skip &= (!xd->plane[2].eobs[i]);
return skip;
}
int i = 0;
for (i = 0; i < 256; i += 4)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int skip = 1;
int i = 0;
- for (i = 256; i < 384; i += 4)
- skip &= (!xd->eobs[i]);
+ for (i = 0; i < 64; i += 4)
+ skip &= (!xd->plane[1].eobs[i]);
+ for (i = 0; i < 64; i += 4)
+ skip &= (!xd->plane[2].eobs[i]);
return skip;
}
int i = 0;
for (i = 0; i < 256; i++)
- skip &= (!xd->eobs[i]);
+ skip &= (!xd->plane[0].eobs[i]);
return skip;
}
int skip = 1;
int i = 0;
- for (i = 256; i < 384; i++)
- skip &= (!xd->eobs[i]);
+ for (i = 0; i < 64; i++)
+ skip &= (!xd->plane[1].eobs[i]);
+ for (i = 0; i < 64; i++)
+ skip &= (!xd->plane[2].eobs[i]);
return skip;
}
case TX_32X32:
for (b = 0; b < 256; b += 64)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC,
- TX_32X32, dry_run);
+ TX_32X32, 256, dry_run);
for (b = 256; b < 384; b += 64)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_32X32, dry_run);
+ TX_32X32, 256, dry_run);
break;
case TX_16X16:
for (b = 0; b < 256; b += 16)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC,
- TX_16X16, dry_run);
+ TX_16X16, 256, dry_run);
for (b = 256; b < 384; b += 16)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_16X16, dry_run);
+ TX_16X16, 256, dry_run);
break;
case TX_8X8:
for (b = 0; b < 256; b += 4)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC,
- TX_8X8, dry_run);
+ TX_8X8, 256, dry_run);
for (b = 256; b < 384; b += 4)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_8X8, dry_run);
+ TX_8X8, 256, dry_run);
break;
case TX_4X4:
for (b = 0; b < 256; b++)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC,
- TX_4X4, dry_run);
+ TX_4X4, 256, dry_run);
for (b = 256; b < 384; b++)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_4X4, dry_run);
+ TX_4X4, 256, dry_run);
break;
default: assert(0);
}
cpi->skip_false_count[mb_skip_context] += skip_inc;
if (tx_size == TX_16X16) {
- tokenize_b(cpi, xd, 0, t, PLANE_TYPE_Y_WITH_DC, TX_16X16, dry_run);
+ tokenize_b(cpi, xd, 0, t, PLANE_TYPE_Y_WITH_DC, TX_16X16, 16, dry_run);
for (b = 16; b < 24; b += 4) {
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_8X8, dry_run);
+ tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_8X8, 16, dry_run);
}
} else if (tx_size == TX_8X8) {
for (b = 0; b < 16; b += 4) {
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC, TX_8X8, dry_run);
+ tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC, TX_8X8, 16, dry_run);
}
if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV) {
for (b = 16; b < 24; b++) {
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_4X4, dry_run);
+ tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_4X4, 16, dry_run);
}
} else {
for (b = 16; b < 24; b += 4) {
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_8X8, dry_run);
+ tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_8X8, 16, dry_run);
}
}
} else {
for (b = 0; b < 16; b++)
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC, TX_4X4, dry_run);
+ tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC, TX_4X4, 16, dry_run);
for (b = 16; b < 24; b++)
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_4X4, dry_run);
+ tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_4X4, 16, dry_run);
}
if (dry_run)
*t = t_backup;