superblocks
pred_filter
lossless
- hybridtransform
- hybridtransform8x8
switchable_interp
newbestrefmv
new_mvref
- hybridtransform16x16
newmventropy
tx_select
"
TX_SIZE_MAX // Number of transform sizes available
} TX_SIZE;
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
typedef enum {
DCT_DCT = 0, // DCT in both horizontal and vertical
ADST_DCT = 1, // ADST in horizontal, DCT in vertical
DCT_ADST = 2, // DCT in horizontal, ADST in vertical
ADST_ADST = 3 // ADST in both directions
} TX_TYPE;
-#endif
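// Illustrative sketch, not part of the change itself: TX_TYPE only names which
// 1-D transform runs in each direction of the separable 2-D transform.  The
// helper names below are hypothetical and simply restate the enum comments.
static int tx_uses_adst_horizontally(TX_TYPE t) {
  return t == ADST_DCT || t == ADST_ADST;
}
static int tx_uses_adst_vertically(TX_TYPE t) {
  return t == DCT_ADST || t == ADST_ADST;
}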
#define VP8_YMODES (B_PRED + 1)
#define VP8_UV_MODES (TM_PRED + 1)
union b_mode_info {
struct {
B_PREDICTION_MODE first;
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
B_PREDICTION_MODE second;
} MACROBLOCKD;
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
#define ACTIVE_HT 110 // quantization stepsize threshold
-#endif
-#if CONFIG_HYBRIDTRANSFORM8X8
#define ACTIVE_HT8 300
-#endif
-#if CONFIG_HYBRIDTRANSFORM16X16
#define ACTIVE_HT16 300
-#endif
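// Sketch only (the call sites are elided in this excerpt): the ACTIVE_HT*
// values are quantizer thresholds below which the hybrid transforms are worth
// using; at coarse quantizers the codec stays with the plain DCT.  The
// comparison direction is an assumption here, not taken from this hunk.
static int hybrid_transform_active(int q_index, int threshold /* ACTIVE_HT* */) {
  return q_index < threshold;
}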
// convert MB_PREDICTION_MODE to B_PREDICTION_MODE
static B_PREDICTION_MODE pred_mode_conv(MB_PREDICTION_MODE mode) {
return b_mode;
}
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
// map an intra prediction (block) mode to a transform type
static TX_TYPE txfm_map(B_PREDICTION_MODE bmode) {
// map transform type
}
return tx_type;
}
-#endif
-#if CONFIG_HYBRIDTRANSFORM
static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, const BLOCKD *b) {
TX_TYPE tx_type = DCT_DCT;
if (xd->mode_info_context->mbmi.mode == B_PRED &&
}
return tx_type;
}
-#endif
-#if CONFIG_HYBRIDTRANSFORM8X8
static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, const BLOCKD *b) {
TX_TYPE tx_type = DCT_DCT;
if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
}
return tx_type;
}
-#endif
-#if CONFIG_HYBRIDTRANSFORM16X16
static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, const BLOCKD *b) {
TX_TYPE tx_type = DCT_DCT;
if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
}
return tx_type;
}
-#endif
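// The bodies of the three get_tx_type_*() helpers above are elided in this
// excerpt.  They share one pattern, sketched below with hypothetical names --
// this is a reading of the surrounding declarations (pred_mode_conv, txfm_map,
// ACTIVE_HT*), not a copy of the elided code:
//
//   TX_TYPE tx_type = DCT_DCT;
//   if (block_is_intra_of_the_checked_mode_class(xd) &&
//       quantizer_below_active_ht_threshold(xd))
//     tx_type = txfm_map(pred_mode_conv(prediction_mode_of(xd, b)));
//   return tx_type;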
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || \
- CONFIG_HYBRIDTRANSFORM16X16
static TX_TYPE get_tx_type(const MACROBLOCKD *xd, const BLOCKD *b) {
TX_TYPE tx_type = DCT_DCT;
int ib = (b - xd->block);
if (ib >= 16)
return tx_type;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (xd->mode_info_context->mbmi.txfm_size == TX_16X16) {
tx_type = get_tx_type_16x16(xd, b);
}
-#endif
-#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
ib = (ib & 8) + ((ib & 4) >> 1);
tx_type = get_tx_type_8x8(xd, &xd->block[ib]);
}
-#endif
-#if CONFIG_HYBRIDTRANSFORM
if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
tx_type = get_tx_type_4x4(xd, b);
}
-#endif
return tx_type;
}
-#endif
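// Standalone check (illustrative, not from the patch) of the index remap in
// the TX_8X8 branch of get_tx_type() above.  The four 8x8 coefficient blocks
// of a macroblock start at 4x4-block indices 0, 4, 8 and 12, while the I8X8
// prediction modes are stored in blocks 0, 2, 8 and 10; that reading of the
// remap is an assumption, but the arithmetic below is exactly the expression
// used in the code.
#include <stdio.h>
int main(void) {
  int ib;
  for (ib = 0; ib < 16; ib += 4)
    printf("coeff block %2d -> mode block %2d\n", ib, (ib & 8) + ((ib & 4) >> 1));
  return 0;  // prints 0 -> 0, 4 -> 2, 8 -> 8, 12 -> 10
}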
extern void vp8_build_block_doffsets(MACROBLOCKD *xd);
extern void vp8_setup_block_dptrs(MACROBLOCKD *xd);
static const vp8_prob default_coef_probs [BLOCK_TYPES]
-[COEF_BANDS]
-[PREV_COEF_CONTEXTS]
-[ENTROPY_NODES] = {
+ [COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES] = {
{
/* Block Type ( 0 ) */
{
}
};
-#if CONFIG_HYBRIDTRANSFORM
static const vp8_prob default_hybrid_coef_probs [BLOCK_TYPES]
-[COEF_BANDS]
-[PREV_COEF_CONTEXTS]
-[ENTROPY_NODES] = {
+ [COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES] = {
{
/* Block Type ( 0 ) */
{
}
}
};
-#endif
static const vp8_prob
default_coef_probs_8x8[BLOCK_TYPES_8X8]
}
};
-#if CONFIG_HYBRIDTRANSFORM8X8
static const vp8_prob
default_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
- [COEF_BANDS]
- [PREV_COEF_CONTEXTS]
- [ENTROPY_NODES] = {
+ [COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES] = {
{
/* block Type 0 */
{
}
}
};
-#endif
static const vp8_prob
default_coef_probs_16x16[BLOCK_TYPES_16X16]
}
};
-#if CONFIG_HYBRIDTRANSFORM16X16
static const vp8_prob
default_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS]
}
}
};
-#endif
7, 11, 14, 15,
};
-
-#if CONFIG_HYBRIDTRANSFORM
DECLARE_ALIGNED(16, const int, vp8_col_scan[16]) = {
0, 4, 8, 12,
1, 5, 9, 13,
2, 6, 10, 14,
3, 7, 11, 15
};
-#endif
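// Illustration (not part of the change): vp8_col_scan walks the 4x4 block
// column by column, and the vp8_row_scan declared with it (contents elided
// here) walks it row by row.  The directional scans exist because a hybrid
// transform concentrates nonzero coefficients differently along the ADST
// direction than the default zig-zag assumes.  This generator reproduces the
// column-scan order listed above.
#include <stdio.h>
int main(void) {
  int i;
  for (i = 0; i < 16; i++)
    printf("%2d%s", (i % 4) * 4 + (i / 4), (i % 4 == 3) ? "\n" : ", ");
  return 0;  // 0, 4, 8, 12 / 1, 5, 9, 13 / 2, 6, 10, 14 / 3, 7, 11, 15
}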
DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5,
void vp8_default_coef_probs(VP8_COMMON *pc) {
vpx_memcpy(pc->fc.coef_probs, default_coef_probs,
sizeof(pc->fc.coef_probs));
-#if CONFIG_HYBRIDTRANSFORM
vpx_memcpy(pc->fc.hybrid_coef_probs, default_hybrid_coef_probs,
sizeof(pc->fc.hybrid_coef_probs));
-#endif
vpx_memcpy(pc->fc.coef_probs_8x8, default_coef_probs_8x8,
sizeof(pc->fc.coef_probs_8x8));
-#if CONFIG_HYBRIDTRANSFORM8X8
vpx_memcpy(pc->fc.hybrid_coef_probs_8x8, default_hybrid_coef_probs_8x8,
sizeof(pc->fc.hybrid_coef_probs_8x8));
-#endif
vpx_memcpy(pc->fc.coef_probs_16x16, default_coef_probs_16x16,
sizeof(pc->fc.coef_probs_16x16));
-#if CONFIG_HYBRIDTRANSFORM16X16
vpx_memcpy(pc->fc.hybrid_coef_probs_16x16,
default_hybrid_coef_probs_16x16,
sizeof(pc->fc.hybrid_coef_probs_16x16));
-#endif
}
void vp8_coef_tree_initialize() {
}
}
-#if CONFIG_HYBRIDTRANSFORM
for (i = 0; i < BLOCK_TYPES; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
else cm->fc.hybrid_coef_probs[i][j][k][t] = prob;
}
}
-#endif
for (i = 0; i < BLOCK_TYPES_8X8; ++i)
for (j = 0; j < COEF_BANDS; ++j)
}
}
-#if CONFIG_HYBRIDTRANSFORM8X8
for (i = 0; i < BLOCK_TYPES_8X8; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
else cm->fc.hybrid_coef_probs_8x8[i][j][k][t] = prob;
}
}
-#endif
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
for (j = 0; j < COEF_BANDS; ++j)
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
else cm->fc.hybrid_coef_probs_16x16[i][j][k][t] = prob;
}
}
-#endif
}
void vp8_default_coef_probs(struct VP8Common *);
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]);
-#if CONFIG_HYBRIDTRANSFORM
extern DECLARE_ALIGNED(16, const int, vp8_col_scan[16]);
extern DECLARE_ALIGNED(16, const int, vp8_row_scan[16]);
-#endif
extern short vp8_default_zig_zag_mask[16];
extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]);
extern prototype_second_order(vp8_short_inv_walsh4x4_1_lossless_c);
#endif
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
#include "vp8/common/blockd.h"
void vp8_ihtllm_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim);
-#endif
-
typedef prototype_idct((*vp8_idct_fn_t));
typedef prototype_idct_scalar_add((*vp8_idct_scalar_add_fn_t));
#include "vp8/common/idct.h"
#include "vp8/common/systemdependent.h"
-#if CONFIG_HYBRIDTRANSFORM
#include "vp8/common/blockd.h"
-#endif
#include <math.h>
// TODO: these transforms can be further converted into integer forms
// for complexity optimization
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
float idct_4[16] = {
0.500000000000000, 0.653281482438188, 0.500000000000000, 0.270598050073099,
0.500000000000000, 0.270598050073099, -0.500000000000000, -0.653281482438188,
0.483002021635509, -0.466553967085785, 0.434217976756762, -0.387095214016348,
0.326790388032145, -0.255357107325375, 0.175227946595736, -0.089131608307532
};
-#endif
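// Standalone check (illustrative only): the distinctive constants above --
// 0.500000000000000, 0.653281482438188, 0.270598050073099 -- match an
// orthonormal 4-point DCT-II basis, which is what this float table appears to
// hold (stored transposed, i.e. as the inverse).  Assumed formula:
// T[k][n] = c_k * sqrt(2/4) * cos((2n+1)k*pi/8), c_0 = 1/sqrt(2), c_k = 1.
#include <math.h>
#include <stdio.h>
int main(void) {
  const double pi = 3.14159265358979323846;
  int k, n;
  for (k = 0; k < 4; k++) {
    for (n = 0; n < 4; n++) {
      double ck = (k == 0) ? 1.0 / sqrt(2.0) : 1.0;
      printf("%.15f ", ck * sqrt(2.0 / 4.0) * cos((2 * n + 1) * k * pi / 8.0));
    }
    printf("\n");
  }
  return 0;
}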
-#if CONFIG_HYBRIDTRANSFORM16X16 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8
float idct_16[256] = {
0.250000, 0.351851, 0.346760, 0.338330, 0.326641, 0.311806, 0.293969, 0.273300,
0.250000, 0.224292, 0.196424, 0.166664, 0.135299, 0.102631, 0.068975, 0.034654,
0.347761, -0.344612, 0.338341, -0.329007, 0.316693, -0.301511, 0.283599, -0.263118,
0.240255, -0.215215, 0.188227, -0.159534, 0.129396, -0.098087, 0.065889, -0.033094
};
-#endif
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
void vp8_ihtllm_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim) {
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
-#endif
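// Sketch of the structure behind vp8_ihtllm_c (names, argument layout and the
// pass order are assumptions; the real routine runs at 4x4, 8x8 or 16x16 as
// selected by tx_dim and uses the float tables above): a hybrid inverse
// transform is two 1-D inverse passes, each using either the DCT or the ADST
// basis as dictated by tx_type.  Assuming the forward transform was
// Y = C * X * R^T with orthonormal bases, the inverse is X = C^T * Y * R.
static void iht_2d_sketch(const float *col_basis /* C */,
                          const float *row_basis /* R */,
                          const float *in, float *out, int n) {
  float tmp[16 * 16];  // large enough for n up to 16
  int i, j, k;
  for (i = 0; i < n; i++)        // tmp = C^T * in
    for (j = 0; j < n; j++) {
      float s = 0;
      for (k = 0; k < n; k++) s += col_basis[k * n + i] * in[k * n + j];
      tmp[i * n + j] = s;
    }
  for (i = 0; i < n; i++)        // out = tmp * R
    for (j = 0; j < n; j++) {
      float s = 0;
      for (k = 0; k < n; k++) s += tmp[i * n + k] * row_basis[k * n + j];
      out[i * n + j] = s;
    }
}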
void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) {
int i;
vp8_prob sub_mv_ref_prob [SUBMVREF_COUNT][VP8_SUBMVREFS - 1];
vp8_prob mbsplit_prob [VP8_NUMMBSPLITS - 1];
vp8_prob coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM
vp8_prob hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#endif
vp8_prob coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#endif
vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#endif
#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
vp8_prob pre_coef_probs [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM
vp8_prob pre_hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#endif
vp8_prob pre_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob pre_hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#endif
vp8_prob pre_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob pre_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM
unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM8X8
unsigned int hybrid_coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
MB_PREDICTION_MODE mode;
int i;
int tx_size;
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || \
- CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type;
-#endif
#if CONFIG_SUPERBLOCKS
VP8_COMMON *pc = &pbi->common;
int orig_skip_flag = xd->mode_info_context->mbmi.mb_skip_coeff;
vp8_intra8x8_predict(b, i8x8mode, b->predictor);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
-#if CONFIG_HYBRIDTRANSFORM8X8
tx_type = get_tx_type(xd, &xd->block[idx]);
if (tx_type != DCT_DCT) {
vp8_ht_dequant_idct_add_8x8_c(tx_type,
} else {
vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
}
-#else
- vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
-#endif
q += 64;
} else {
for (j = 0; j < 4; j++) {
}
#endif
-#if CONFIG_HYBRIDTRANSFORM
tx_type = get_tx_type(xd, b);
if (tx_type != DCT_DCT) {
vp8_ht_dequant_idct_add_c(tx_type, b->qcoeff,
vp8_dequant_idct_add_c(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
}
-#else
- if (xd->eobs[i] > 1) {
- DEQUANT_INVOKE(&pbi->dequant, idct_add)
- (b->qcoeff, b->dequant, b->predictor,
- *(b->base_dst) + b->dst, 16, b->dst_stride);
- } else {
- IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
- (b->qcoeff[0] * b->dequant[0], b->predictor,
- *(b->base_dst) + b->dst, 16, b->dst_stride);
- ((int *)b->qcoeff)[0] = 0;
- }
-#endif
}
} else if (mode == SPLITMV) {
DEQUANT_INVOKE(&pbi->dequant, idct_add_y_block)
BLOCKD *b = &xd->block[24];
if (tx_size == TX_16X16) {
-#if CONFIG_HYBRIDTRANSFORM16X16
BLOCKD *bd = &xd->block[0];
tx_type = get_tx_type(xd, bd);
if (tx_type != DCT_DCT) {
xd->predictor, xd->dst.y_buffer,
16, xd->dst.y_stride);
}
-#else
- vp8_dequant_idct_add_16x16_c(xd->qcoeff, xd->block[0].dequant,
- xd->predictor, xd->dst.y_buffer,
- 16, xd->dst.y_stride);
-#endif
} else if (tx_size == TX_8X8) {
#if CONFIG_SUPERBLOCKS
void *orig = xd->mode_info_context;
}
}
}
-#if CONFIG_HYBRIDTRANSFORM
{
if (vp8_read_bit(bc)) {
/* read coef probability tree */
}
}
}
-#endif
if (pbi->common.txfm_mode != ONLY_4X4 && vp8_read_bit(bc)) {
// read coef probability tree
}
}
}
-#if CONFIG_HYBRIDTRANSFORM8X8
if (pbi->common.txfm_mode != ONLY_4X4 && vp8_read_bit(bc)) {
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_8X8; i++)
}
}
}
-#endif
// 16x16
if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) {
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
}
}
}
-#endif
}
int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_copy(pbi->common.fc.pre_coef_probs,
pbi->common.fc.coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs,
pbi->common.fc.hybrid_coef_probs);
-#endif
vp8_copy(pbi->common.fc.pre_coef_probs_8x8,
pbi->common.fc.coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_8x8,
pbi->common.fc.hybrid_coef_probs_8x8);
-#endif
vp8_copy(pbi->common.fc.pre_coef_probs_16x16,
pbi->common.fc.coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16,
pbi->common.fc.hybrid_coef_probs_16x16);
-#endif
vp8_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob);
vp8_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob);
vp8_copy(pbi->common.fc.pre_bmode_prob, pbi->common.fc.bmode_prob);
vp8_copy(pbi->common.fc.pre_mvc_hp, pbi->common.fc.mvc_hp);
#endif
vp8_zero(pbi->common.fc.coef_counts);
-#if CONFIG_HYBRIDTRANSFORM
vp8_zero(pbi->common.fc.hybrid_coef_counts);
-#endif
vp8_zero(pbi->common.fc.coef_counts_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(pbi->common.fc.hybrid_coef_counts_8x8);
-#endif
vp8_zero(pbi->common.fc.coef_counts_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(pbi->common.fc.hybrid_coef_counts_16x16);
-#endif
vp8_zero(pbi->common.fc.ymode_counts);
vp8_zero(pbi->common.fc.uv_mode_counts);
vp8_zero(pbi->common.fc.bmode_counts);
}
-#if CONFIG_HYBRIDTRANSFORM
void vp8_ht_dequant_idct_add_c(TX_TYPE tx_type, short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride) {
pred += pitch;
}
}
-#endif
-#if CONFIG_HYBRIDTRANSFORM8X8
void vp8_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride) {
pred = origpred + (b + 1) / 2 * 4 * pitch + ((b + 1) % 2) * 4;
}
}
-#endif
void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *pred,
unsigned char *dest, int pitch, int stride) {
#endif
}
-#if CONFIG_HYBRIDTRANSFORM16X16
void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride) {
pred += pitch;
}
}
-#endif
void vp8_dequant_idct_add_16x16_c(short *input, short *dq, unsigned char *pred,
unsigned char *dest, int pitch, int stride) {
#endif
extern prototype_dequant_idct_add(vp8_dequant_idct_add);
-#if CONFIG_HYBRIDTRANSFORM
// dequantization and inverse transform declarations for the hybrid transform decoder
#ifndef vp8_ht_dequant_idct_add
#define vp8_ht_dequant_idct_add vp8_ht_dequant_idct_add_c
unsigned char *pred, unsigned char *dest,
int pitch, int stride);
-#endif
#ifndef vp8_dequant_dc_idct_add
#define vp8_dequant_dc_idct_add vp8_dequant_dc_idct_add_c
#define DEQUANT_INVOKE(ctx,fn) vp8_dequant_##fn
#endif
-#if CONFIG_HYBRIDTRANSFORM8X8
void vp8_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride);
-#endif
-#if CONFIG_HYBRIDTRANSFORM16X16
void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride);
-#endif
#if CONFIG_SUPERBLOCKS
void vp8_dequant_dc_idct_add_y_block_8x8_inplace_c(short *q, short *dq,
else return DCT_VAL_CATEGORY6;
}
-#if CONFIG_HYBRIDTRANSFORM
void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr,
int block, PLANE_TYPE type,
TX_TYPE tx_type,
fc->coef_counts[type][band][pt][DCT_EOB_TOKEN]++;
}
}
-#endif
void static count_tokens(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
}
void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type,
-#endif
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int eob, int seg_eob, FRAME_CONTEXT *fc) {
int c, pt, token, band;
int v = qcoeff_ptr[rc];
band = (type == 1 ? vp8_coef_bands[c] : vp8_coef_bands_8x8[c]);
token = get_token(v);
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts_8x8[type][band][pt][token]++;
else
-#endif
fc->coef_counts_8x8[type][band][pt][token]++;
pt = vp8_prev_token_class[token];
}
if (eob < seg_eob) {
band = (type == 1 ? vp8_coef_bands[c] : vp8_coef_bands_8x8[c]);
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN]++;
else
-#endif
fc->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN]++;
}
}
void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type,
-#endif
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int eob, int seg_eob, FRAME_CONTEXT *fc) {
int c, pt, token;
int v = qcoeff_ptr[rc];
int band = vp8_coef_bands_16x16[c];
token = get_token(v);
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts_16x16[type][band][pt][token]++;
else
-#endif
fc->coef_counts_16x16[type][band][pt][token]++;
pt = vp8_prev_token_class[token];
}
if (eob < seg_eob) {
int band = vp8_coef_bands_16x16[c];
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN]++;
else
-#endif
fc->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN]++;
}
}
BOOL_DECODER* const br,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
PLANE_TYPE type,
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type,
-#endif
int seg_eob, INT16 *qcoeff_ptr, int i,
const int *const scan, int block_type,
const int *coef_bands) {
default:
case TX_4X4:
coef_probs =
-#if CONFIG_HYBRIDTRANSFORM
tx_type != DCT_DCT ? fc->hybrid_coef_probs[type][0][0] :
-#endif
fc->coef_probs[type][0][0];
break;
case TX_8X8:
coef_probs =
-#if CONFIG_HYBRIDTRANSFORM8X8
tx_type != DCT_DCT ? fc->hybrid_coef_probs_8x8[type][0][0] :
-#endif
fc->coef_probs_8x8[type][0][0];
break;
case TX_16X16:
coef_probs =
-#if CONFIG_HYBRIDTRANSFORM16X16
tx_type != DCT_DCT ? fc->hybrid_coef_probs_16x16[type][0][0] :
-#endif
fc->coef_probs_16x16[type][0][0];
break;
}
}
if (block_type == TX_4X4) {
-#if CONFIG_HYBRIDTRANSFORM
count_tokens_adaptive_scan(xd, qcoeff_ptr, i, type,
tx_type,
a, l, c, seg_eob, fc);
-#else
- count_tokens(qcoeff_ptr, i, type,
- a, l, c, seg_eob, fc);
-#endif
}
else if (block_type == TX_8X8)
count_tokens_8x8(qcoeff_ptr, i, type,
-#if CONFIG_HYBRIDTRANSFORM8X8
tx_type,
-#endif
a, l, c, seg_eob, fc);
else
count_tokens_16x16(qcoeff_ptr, i, type,
-#if CONFIG_HYBRIDTRANSFORM16X16
tx_type,
-#endif
a, l, c, seg_eob, fc);
return c;
}
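// Restatement of the probability-table selection above as a single helper
// (sketch only; assumes the FRAME_CONTEXT type name used for `fc` elsewhere in
// this change).  The point of the merged code paths is that tx_type only
// changes *which* table is read: hybrid blocks get their own adapted tables
// per transform size, everything else keeps the regular ones.
static const vp8_prob *select_coef_probs(const FRAME_CONTEXT *fc, int type,
                                         TX_SIZE tx_size, TX_TYPE tx_type) {
  switch (tx_size) {
    case TX_16X16:
      return tx_type != DCT_DCT ? fc->hybrid_coef_probs_16x16[type][0][0]
                                : fc->coef_probs_16x16[type][0][0];
    case TX_8X8:
      return tx_type != DCT_DCT ? fc->hybrid_coef_probs_8x8[type][0][0]
                                : fc->coef_probs_8x8[type][0][0];
    default:
      return tx_type != DCT_DCT ? fc->hybrid_coef_probs[type][0][0]
                                : fc->coef_probs[type][0][0];
  }
}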
const int segment_id = xd->mode_info_context->mbmi.segment_id;
const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB);
INT16 *qcoeff_ptr = &xd->qcoeff[0];
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
- TX_TYPE tx_type = DCT_DCT;
-#endif
-#if CONFIG_HYBRIDTRANSFORM16X16
- tx_type = get_tx_type(xd, &xd->block[0]);
-#endif
+ TX_TYPE tx_type = get_tx_type(xd, &xd->block[0]);
type = PLANE_TYPE_Y_WITH_DC;
{
const int* const scan = vp8_default_zig_zag1d_16x16;
c = decode_coefs(pbi, xd, bc, A, L, type,
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
-#endif
seg_eob, qcoeff_ptr,
0, scan, TX_16X16, coef_bands_x_16x16);
eobs[0] = c;
// 8x8 chroma blocks
qcoeff_ptr += 256;
type = PLANE_TYPE_UV;
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type = DCT_DCT;
-#endif
if (seg_active)
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
else
const int* const scan = vp8_default_zig_zag1d_8x8;
c = decode_coefs(pbi, xd, bc, a, l, type,
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
-#endif
seg_eob, qcoeff_ptr,
i, scan, TX_8X8, coef_bands_x_8x8);
a[0] = l[0] = ((eobs[i] = c) != !type);
const int segment_id = xd->mode_info_context->mbmi.segment_id;
const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB);
INT16 *qcoeff_ptr = &xd->qcoeff[0];
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = DCT_DCT;
-#endif
int bufthred = (xd->mode_info_context->mbmi.mode == I8X8_PRED) ? 16 : 24;
if (xd->mode_info_context->mbmi.mode != B_PRED &&
else
seg_eob = 4;
c = decode_coefs(pbi, xd, bc, a, l, type,
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
-#endif
seg_eob, qcoeff_ptr + 24 * 16,
24, scan, TX_8X8, coef_bands_x);
a[0] = l[0] = ((eobs[24] = c) != !type);
ENTROPY_CONTEXT *const a = A + vp8_block2above_8x8[i];
ENTROPY_CONTEXT *const l = L + vp8_block2left_8x8[i];
const int *const scan = vp8_default_zig_zag1d_8x8;
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type = DCT_DCT;
-#endif
if (i == 16)
type = PLANE_TYPE_UV;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type(xd, xd->block + i);
}
-#endif
c = decode_coefs(pbi, xd, bc, a, l, type,
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
-#endif
seg_eob, qcoeff_ptr,
i, scan, TX_8X8, coef_bands_x_8x8);
a[0] = l[0] = ((eobs[i] = c) != !type);
if (bufthred == 16) {
type = PLANE_TYPE_UV;
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type = DCT_DCT;
-#endif
seg_eob = 16;
// use 4x4 transform for U, V components in I8X8 prediction mode
const int *scan = vp8_default_zig_zag1d;
c = decode_coefs(pbi, xd, bc, a, l, type,
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
-#endif
seg_eob, qcoeff_ptr,
i, scan, TX_4X4, coef_bands_x);
a[0] = l[0] = ((eobs[i] = c) != !type);
type = PLANE_TYPE_Y2;
c = decode_coefs(dx, xd, bc, a, l, type,
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
DCT_DCT,
-#endif
seg_eob, qcoeff_ptr + 24 * 16, 24,
scan, TX_4X4, coef_bands_x);
a[0] = l[0] = ((eobs[24] = c) != !type);
for (i = 0; i < 24; ++i) {
ENTROPY_CONTEXT *const a = A + vp8_block2above[i];
ENTROPY_CONTEXT *const l = L + vp8_block2left[i];
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = DCT_DCT;
-#endif
if (i == 16)
type = PLANE_TYPE_UV;
-#if CONFIG_HYBRIDTRANSFORM
tx_type = get_tx_type(xd, &xd->block[i]);
switch(tx_type) {
case ADST_DCT :
scan = vp8_default_zig_zag1d;
break;
}
-#endif
- c = decode_coefs(dx, xd, bc, a, l, type,
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
- tx_type,
-#endif
+ c = decode_coefs(dx, xd, bc, a, l, type, tx_type,
seg_eob, qcoeff_ptr,
i, scan, TX_4X4, coef_bands_x);
a[0] = l[0] = ((eobs[i] = c) != !type);
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
unsigned int hybrid_tree_update_hist [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES][2];
-#endif
unsigned int tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM8X8
unsigned int hybrid_tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#endif
unsigned int tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#endif
extern unsigned int active_section;
#endif
}
}
}
-#if CONFIG_HYBRIDTRANSFORM
for (i = 0; i < BLOCK_TYPES; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
}
}
}
-#endif
-
if (cpi->common.txfm_mode != ONLY_4X4) {
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM8X8
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
}
}
}
-#endif
}
if (cpi->common.txfm_mode > ALLOW_8X8) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
}
}
}
-#endif
}
#if 0
}
}
-#if CONFIG_HYBRIDTRANSFORM
savings = 0;
update[0] = update[1] = 0;
for (i = 0; i < BLOCK_TYPES; ++i) {
}
}
}
-#endif
/* do not do this if not even allowed */
if (cpi->common.txfm_mode != ONLY_4X4) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM8X8
update[0] = update[1] = 0;
savings = 0;
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
}
}
}
-#endif
}
if (cpi->common.txfm_mode > ALLOW_8X8) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
update[0] = update[1] = 0;
savings = 0;
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
}
}
}
-#endif
}
}
vp8_clear_system_state(); // __asm emms;
vp8_copy(cpi->common.fc.pre_coef_probs, cpi->common.fc.coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs, cpi->common.fc.hybrid_coef_probs);
-#endif
vp8_copy(cpi->common.fc.pre_coef_probs_8x8, cpi->common.fc.coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_8x8, cpi->common.fc.hybrid_coef_probs_8x8);
-#endif
vp8_copy(cpi->common.fc.pre_coef_probs_16x16, cpi->common.fc.coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16, cpi->common.fc.hybrid_coef_probs_16x16);
-#endif
vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
vp8_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
vp8_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
unsigned int token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-#endif
int optimize;
#include "vp8/common/idct.h"
#include "vp8/common/systemdependent.h"
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
-
#include "vp8/common/blockd.h"
// TODO: these transforms can be converted into integer forms to reduce complexity
0.175227946595735, -0.326790388032145, 0.434217976756762, -0.483002021635509,
0.466553967085785, -0.387095214016348, 0.255357107325376, -0.089131608307532
};
-#endif
-#if CONFIG_HYBRIDTRANSFORM16X16 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8
float dct_16[256] = {
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
0.065889, -0.129396, 0.188227, -0.240255, 0.283599, -0.316693, 0.338341, -0.347761,
0.344612, -0.329007, 0.301511, -0.263118, 0.215215, -0.159534, 0.098087, -0.033094
};
-#endif
static const int xC1S7 = 16069;
static const int xC2S6 = 15137;
}
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
void vp8_fht_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim) {
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
-#endif
void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
int i;
#endif
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
void vp8_fht_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim);
-#endif
#ifndef vp8_fdct_short16x16
#define vp8_fdct_short16x16 vp8_short_fdct16x16_c
vp8_zero(cpi->MVcount_hp);
#endif
vp8_zero(cpi->coef_counts);
-#if CONFIG_HYBRIDTRANSFORM
vp8_zero(cpi->hybrid_coef_counts);
-#endif
vp8_zero(cpi->coef_counts_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(cpi->hybrid_coef_counts_8x8);
-#endif
vp8_zero(cpi->coef_counts_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(cpi->hybrid_coef_counts_16x16);
-#endif
vp8cx_frame_init_quantizer(cpi);
MACROBLOCK *x, int ib) {
BLOCKD *b = &x->e_mbd.block[ib];
BLOCK *be = &x->block[ib];
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
-#if CONFIG_HYBRIDTRANSFORM
tx_type = get_tx_type(&x->e_mbd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4);
vp8_ht_quantize_b_4x4(be, b, tx_type);
vp8_ihtllm_c(b->dqcoeff, b->diff, 32, tx_type, 4);
- } else
-#endif
- {
+ } else {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
x->quantize_b_4x4(be, b) ;
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
xd->predictor, b->src_stride);
if (tx_size == TX_16X16) {
-#if CONFIG_HYBRIDTRANSFORM16X16
BLOCKD *bd = &xd->block[0];
tx_type = get_tx_type(xd, bd);
if (tx_type != DCT_DCT) {
if (x->optimize)
vp8_optimize_mby_16x16(x, rtcd);
vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, tx_type, 16);
- } else
-#endif
- {
+ } else {
vp8_transform_mby_16x16(x);
vp8_quantize_mby_16x16(x);
if (x->optimize)
BLOCK *be = &x->block[ib];
const int iblock[4] = {0, 1, 4, 5};
int i;
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
// generate residual blocks
vp8_subtract_4b_c(be, b, 16);
-#if CONFIG_HYBRIDTRANSFORM8X8
tx_type = get_tx_type(xd, xd->block + idx);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32,
vp8_ihtllm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32,
tx_type, 8);
} else {
-#endif
x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
vp8_idct_idct8(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
-#if CONFIG_HYBRIDTRANSFORM8X8
}
-#endif
} else {
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
scan = vp8_default_zig_zag1d;
bands = vp8_coef_bands;
default_eob = 16;
-#if CONFIG_HYBRIDTRANSFORM
// TODO: this isn't called (for intra4x4 modes), but will be left in
// since it could be used later
{
scan = vp8_default_zig_zag1d;
}
}
-#endif
break;
case TX_8X8:
scan = vp8_default_zig_zag1d_8x8;
update_reference_frames(cm);
vp8_copy(cpi->common.fc.coef_counts, cpi->coef_counts);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cpi->common.fc.hybrid_coef_counts, cpi->hybrid_coef_counts);
-#endif
vp8_copy(cpi->common.fc.coef_counts_8x8, cpi->coef_counts_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.hybrid_coef_counts_8x8, cpi->hybrid_coef_counts_8x8);
-#endif
vp8_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.hybrid_coef_counts_16x16,
cpi->hybrid_coef_counts_16x16);
-#endif
vp8_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {
vp8_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
vp8_prob coef_probs[BLOCK_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM
vp8_prob hybrid_coef_probs[BLOCK_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob coef_probs_8x8[BLOCK_TYPES_8X8]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
vp8_prob uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM8X8
unsigned int hybrid_coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
int gfu_boost;
int last_boost;
extern int enc_debug;
#endif
-#if CONFIG_HYBRIDTRANSFORM
void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
int i, rc, eob;
int zbin;
d->eob = eob + 1;
}
-#endif
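// Illustration only: vp8_ht_quantize_b_4x4 above differs from
// vp8_regular_quantize_b_4x4 mainly in which scan order it walks while
// quantizing and tracking the end of block (hence the `d->eob = eob + 1`
// above).  A toy version of just that bookkeeping:
static int eob_along_scan(const int scan[16], const short qcoeff[16]) {
  int i, eob = -1;
  for (i = 0; i < 16; i++)
    if (qcoeff[scan[i]]) eob = i;  /* last nonzero position in scan order */
  return eob + 1;                  /* 0 means the block is entirely zero */
}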
void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
#include "arm/quantize_arm.h"
#endif
-#if CONFIG_HYBRIDTRANSFORM
#define prototype_quantize_block_type(sym) \
void (sym)(BLOCK *b, BLOCKD *d, TX_TYPE type)
extern prototype_quantize_block_type(vp8_ht_quantize_b_4x4);
-#endif
#ifndef vp8_quantize_quantb_4x4
#define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4
vp8_copy(cc->last_mode_lf_deltas, xd->last_mode_lf_deltas);
vp8_copy(cc->coef_probs, cm->fc.coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cc->hybrid_coef_probs, cm->fc.hybrid_coef_probs);
-#endif
vp8_copy(cc->coef_probs_8x8, cm->fc.coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
-#endif
vp8_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
-#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
#endif
vp8_copy(xd->last_mode_lf_deltas, cc->last_mode_lf_deltas);
vp8_copy(cm->fc.coef_probs, cc->coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cm->fc.hybrid_coef_probs, cc->hybrid_coef_probs);
-#endif
vp8_copy(cm->fc.coef_probs_8x8, cc->coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
-#endif
vp8_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
-#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
#endif
cpi->mb.token_costs[TX_4X4],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs,
BLOCK_TYPES);
-#if CONFIG_HYBRIDTRANSFORM
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_4X4],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs,
BLOCK_TYPES);
-#endif
fill_token_costs(
cpi->mb.token_costs[TX_8X8],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_8x8,
BLOCK_TYPES_8X8);
-#if CONFIG_HYBRIDTRANSFORM8X8
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_8X8],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs_8x8,
BLOCK_TYPES_8X8);
-#endif
fill_token_costs(
cpi->mb.token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_16x16,
BLOCK_TYPES_16X16);
-#if CONFIG_HYBRIDTRANSFORM16X16
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs_16x16,
BLOCK_TYPES_16X16);
-#endif
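// Context for the fill_token_costs() calls above (sketch; the exact fixed
// point of the real bit-cost tables is an assumption here): token costs are
// precomputed -log2 probabilities so the RD loop can price coefficients with a
// table lookup, and separate hybrid_token_costs are kept because the hybrid
// transforms carry their own probability models.
#include <math.h>
static int approx_bit_cost_256(int prob /* 1..255, probability of a 0 */,
                               int bit) {
  double p = bit ? (256 - prob) / 256.0 : prob / 256.0;
  return (int)(-256.0 * log2(p) + 0.5);  // cost in 1/256-bit units (assumed)
}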
/*rough estimate for costing*/
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
short *qcoeff_ptr = b->qcoeff;
MACROBLOCKD *xd = &mb->e_mbd;
MB_MODE_INFO *mbmi = &mb->e_mbd.mode_info_context->mbmi;
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = DCT_DCT;
-#endif
int segment_id = mbmi->segment_id;
switch (tx_size) {
scan = vp8_default_zig_zag1d;
band = vp8_coef_bands;
default_eob = 16;
-#if CONFIG_HYBRIDTRANSFORM
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type_4x4(xd, b);
if (tx_type != DCT_DCT) {
}
}
}
-#endif
break;
case TX_8X8:
scan = vp8_default_zig_zag1d_8x8;
band = vp8_coef_bands_8x8;
default_eob = 64;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (type == PLANE_TYPE_Y_WITH_DC) {
BLOCKD *bb;
int ib = (b - xd->block);
tx_type = get_tx_type_8x8(xd, bb);
}
}
-#endif
break;
case TX_16X16:
scan = vp8_default_zig_zag1d_16x16;
band = vp8_coef_bands_16x16;
default_eob = 256;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type_16x16(xd, b);
}
-#endif
break;
default:
break;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT) {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
if (c < seg_eob)
cost += mb->hybrid_token_costs[tx_size][type][band[c]]
[pt][DCT_EOB_TOKEN];
- } else
-#endif
- {
+ } else {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
int t = vp8_dct_value_tokens_ptr[v].Token;
MACROBLOCKD *xd = &mb->e_mbd;
BLOCKD *b = &mb->e_mbd.block[0];
BLOCK *be = &mb->block[0];
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type;
-#endif
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
mb->src_diff,
mb->e_mbd.predictor,
mb->block[0].src_stride);
-#if CONFIG_HYBRIDTRANSFORM16X16
tx_type = get_tx_type_16x16(xd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 16);
} else
vp8_transform_mby_16x16(mb);
-#else
- vp8_transform_mby_16x16(mb);
-#endif
vp8_quantize_mby_16x16(mb);
-#if CONFIG_HYBRIDTRANSFORM16X16
// TODO(jingning) is it possible to quickly determine whether to force
// trailing coefficients to be zero, instead of running trellis
// optimization in the rate-distortion optimization loop?
if (mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED)
vp8_optimize_mby_16x16(mb, rtcd);
-#endif
d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
ENTROPY_CONTEXT ta = *a, tempa = *a;
ENTROPY_CONTEXT tl = *l, templ = *l;
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = DCT_DCT;
TX_TYPE best_tx_type = DCT_DCT;
-#endif
/*
* The predictor buffer is a 2d buffer with a stride of 16. Create
* a temp buffer that meets the stride requirements, but we are only
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
b->bmi.as_mode.first = mode;
-#if CONFIG_HYBRIDTRANSFORM
tx_type = get_tx_type_4x4(xd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b);
}
-#else
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4(be, b);
-#endif
tempa = ta;
templ = tl;
*bestdistortion = distortion;
best_rd = this_rd;
*best_mode = mode;
-#if CONFIG_HYBRIDTRANSFORM
best_tx_type = tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
*best_second_mode = mode2;
b->bmi.as_mode.second = (B_PREDICTION_MODE)(*best_second_mode);
#endif
-#if CONFIG_HYBRIDTRANSFORM
// inverse transform
if (best_tx_type != DCT_DCT)
vp8_ihtllm_c(best_dqcoeff, b->diff, 32, best_tx_type, 4);
else
IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(
best_dqcoeff, b->diff, 32);
-#else
- IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(
- best_dqcoeff, b->diff, 32);
-#endif
vp8_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp8_subtract_4b_c(be, b, 16);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = get_tx_type_8x8(xd, b);
if (tx_type != DCT_DCT)
vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32, tx_type, 8);
else
x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
-#else
- x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
-#endif
x->quantize_b_8x8(x->block + idx, xd->block + idx);
// compute quantization mse of 8x8 block
#ifdef ENTROPY_STATS
INT64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM
INT64 hybrid_context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM8X8
INT64 hybrid_context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM16X16
INT64 hybrid_context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
extern unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
extern unsigned int hybrid_tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-#endif
extern unsigned int tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM8X8
extern unsigned int hybrid_tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#endif
extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM16X16
extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#endif
#endif /* ENTROPY_STATS */
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
const int eob = b->eob; /* one beyond last nonzero coeff */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
int seg_eob = 256;
int segment_id = xd->mode_info_context->mbmi.segment_id;
}
t->Token = x;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
(band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_16x16[type][band][pt][x];
else
-#endif
++cpi->coef_counts_16x16[type][band][pt][x];
}
pt = vp8_prev_token_class[x];
int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0; /* start at DC unless type 0 */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int eob = b->eob;
int seg_eob = 64;
int segment_id = xd->mode_info_context->mbmi.segment_id;
x = DCT_EOB_TOKEN;
t->Token = x;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_8x8[type][band][pt][x];
else
-#endif
++cpi->coef_counts_8x8[type][band][pt][x];
}
pt = vp8_prev_token_class[x];
const int16_t *qcoeff_ptr = b->qcoeff;
int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = get_tx_type(xd, &xd->block[block]);
switch (tx_type) {
case ADST_DCT:
pt_scan = vp8_default_zig_zag1d;
break;
}
-#endif
a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
token = DCT_EOB_TOKEN;
t->Token = token;
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
(band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts[type][band][pt][token];
else
-#endif
++cpi->coef_counts[type][band][pt][token];
}
pt = vp8_prev_token_class[token];
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int band = vp8_coef_bands_8x8[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) b;
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
// t->section = 8;
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int band = vp8_coef_bands_16x16[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) b;
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int band = vp8_coef_bands[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */