return mode >= NEARESTMV && mode <= NEWMV;
}
-#define VP9_INTRA_MODES (TM_PRED + 1)
+#define INTRA_MODES (TM_PRED + 1)
-#define VP9_INTER_MODES (1 + NEWMV - NEARESTMV)
+#define INTER_MODES (1 + NEWMV - NEARESTMV)
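// i.e. the four inter modes NEARESTMV, NEARMV, ZEROMV and NEWMV, the same
// range checked by the mode test above.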
static INLINE int inter_mode_offset(MB_PREDICTION_MODE mode) {
return (mode - NEARESTMV);
for (k = 0; k < taps; ++k)
sum += src[src_x + k] * filter_x[k];
- dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, VP9_FILTER_BITS));
+ dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
/* Move to the next source pixel */
x_q4 += x_step_q4;
sum += src[src_x + k] * filter_x[k];
dst[x] = ROUND_POWER_OF_TWO(dst[x] +
- clip_pixel(ROUND_POWER_OF_TWO(sum, VP9_FILTER_BITS)), 1);
+ clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
/* Move to the next source pixel */
x_q4 += x_step_q4;
sum += src[(src_y + k) * src_stride] * filter_y[k];
dst[y * dst_stride] =
- clip_pixel(ROUND_POWER_OF_TWO(sum, VP9_FILTER_BITS));
+ clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
/* Move to the next source pixel */
y_q4 += y_step_q4;
sum += src[(src_y + k) * src_stride] * filter_y[k];
dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] +
- clip_pixel(ROUND_POWER_OF_TWO(sum, VP9_FILTER_BITS)), 1);
+ clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
/* Move to the next source pixel */
y_q4 += y_step_q4;
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
-#define VP9_FILTER_BITS 7
+#define FILTER_BITS 7
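// The subpel kernels are stored in Q7, i.e. a full kernel sums to
// 1 << FILTER_BITS (128), so ROUND_POWER_OF_TWO(sum, FILTER_BITS) in the
// convolve loops rescales the accumulator back to pixel range with rounding;
// e.g. the identity kernel {0, 0, 0, 128, 0, 0, 0, 0} applied to a pixel of
// value 200 gives sum = 25600 and (25600 + 64) >> 7 = 200.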
typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_seg_common.h"
-const vp9_prob vp9_kf_uv_mode_prob[VP9_INTRA_MODES]
- [VP9_INTRA_MODES - 1] = {
+const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES]
+ [INTRA_MODES - 1] = {
{ 144, 11, 54, 157, 195, 130, 46, 58, 108 } /* y = dc */,
{ 118, 15, 123, 148, 131, 101, 44, 93, 131 } /* y = v */,
{ 113, 12, 23, 188, 226, 142, 26, 32, 125 } /* y = h */,
};
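// Each row above is selected by the co-located luma (y) mode, and its
// INTRA_MODES - 1 entries are the internal-node probabilities fed to
// vp9_intra_mode_tree when coding the chroma mode on key frames.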
static const vp9_prob default_if_y_probs[BLOCK_SIZE_GROUPS]
- [VP9_INTRA_MODES - 1] = {
+ [INTRA_MODES - 1] = {
{ 65, 32, 18, 144, 162, 194, 41, 51, 98 } /* block_size < 8x8 */,
{ 132, 68, 18, 165, 217, 196, 45, 40, 78 } /* block_size < 16x16 */,
{ 173, 80, 19, 176, 240, 193, 64, 35, 46 } /* block_size < 32x32 */,
{ 221, 135, 38, 194, 248, 121, 96, 85, 29 } /* block_size >= 32x32 */
};
-static const vp9_prob default_if_uv_probs[VP9_INTRA_MODES]
- [VP9_INTRA_MODES - 1] = {
+static const vp9_prob default_if_uv_probs[INTRA_MODES]
+ [INTRA_MODES - 1] = {
{ 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */,
{ 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */,
{ 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */,
}
};
-const vp9_prob vp9_kf_y_mode_prob[VP9_INTRA_MODES]
- [VP9_INTRA_MODES]
- [VP9_INTRA_MODES - 1] = {
+const vp9_prob vp9_kf_y_mode_prob[INTRA_MODES]
+ [INTRA_MODES]
+ [INTRA_MODES - 1] = {
{ /* above = dc */
{ 137, 30, 42, 148, 151, 207, 70, 52, 91 } /* left = dc */,
{ 92, 45, 102, 136, 116, 180, 74, 90, 100 } /* left = v */,
};
static const vp9_prob default_inter_mode_probs[INTER_MODE_CONTEXTS]
- [VP9_INTER_MODES - 1] = {
+ [INTER_MODES - 1] = {
{2, 173, 34}, // 0 = both zero mv
{7, 145, 85}, // 1 = one zero mv + one a predicted mv
{7, 166, 63}, // 2 = two predicted mvs
};
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
-const vp9_tree_index vp9_intra_mode_tree[VP9_INTRA_MODES * 2 - 2] = {
+const vp9_tree_index vp9_intra_mode_tree[INTRA_MODES * 2 - 2] = {
-DC_PRED, 2, /* 0 = DC_NODE */
-TM_PRED, 4, /* 1 = TM_NODE */
-V_PRED, 6, /* 2 = V_NODE */
-PARTITION_VERT, -PARTITION_SPLIT
};
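// For reference, a minimal sketch of how these vp9_tree_index tables are
// walked when decoding a symbol, mirroring the treed_read pattern used by the
// decoder (the helper name read_tree_symbol here is illustrative, not part of
// this change): entries come in pairs, a non-negative entry is the index of
// the next node pair, a negative entry is a leaf holding the negated symbol,
// and probs[] supplies one probability per internal node.
static INLINE int read_tree_symbol(vp9_reader *r, const vp9_tree_index *tree,
                                   const vp9_prob *probs) {
  vp9_tree_index i = 0;
  while ((i = tree[i + vp9_read(r, probs[i >> 1])]) > 0)
    continue;
  return -i;  // leaves store the negated symbol value
}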
-struct vp9_token vp9_intra_mode_encodings[VP9_INTRA_MODES];
-struct vp9_token vp9_inter_mode_encodings[VP9_INTER_MODES];
+struct vp9_token vp9_intra_mode_encodings[INTRA_MODES];
+struct vp9_token vp9_inter_mode_encodings[INTER_MODES];
struct vp9_token vp9_partition_encodings[PARTITION_TYPES];
192, 128, 64
};
-static const vp9_prob default_switchable_interp_prob[VP9_SWITCHABLE_FILTERS+1]
- [VP9_SWITCHABLE_FILTERS-1] = {
+static const vp9_prob default_switchable_interp_prob[SWITCHABLE_FILTERS+1]
+ [SWITCHABLE_FILTERS-1] = {
{ 235, 162, },
{ 36, 255, },
{ 34, 3, },
vp9_copy(cm->fc.mbskip_probs, default_mbskip_probs);
}
-const vp9_tree_index vp9_switchable_interp_tree[VP9_SWITCHABLE_FILTERS*2-2] = {
+const vp9_tree_index vp9_switchable_interp_tree[SWITCHABLE_FILTERS*2-2] = {
-EIGHTTAP, 2,
-EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP
};
-struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
+struct vp9_token vp9_switchable_interp_encodings[SWITCHABLE_FILTERS];
void vp9_entropy_mode_init() {
vp9_tokens_from_tree(vp9_intra_mode_encodings, vp9_intra_mode_tree);
counts->single_ref[i][j]);
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
- update_mode_probs(VP9_INTER_MODES, vp9_inter_mode_tree,
+ update_mode_probs(INTER_MODES, vp9_inter_mode_tree,
counts->inter_mode[i], pre_fc->inter_mode_probs[i],
fc->inter_mode_probs[i], NEARESTMV);
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
- update_mode_probs(VP9_INTRA_MODES, vp9_intra_mode_tree,
+ update_mode_probs(INTRA_MODES, vp9_intra_mode_tree,
counts->y_mode[i], pre_fc->y_mode_prob[i],
fc->y_mode_prob[i], 0);
- for (i = 0; i < VP9_INTRA_MODES; ++i)
- update_mode_probs(VP9_INTRA_MODES, vp9_intra_mode_tree,
+ for (i = 0; i < INTRA_MODES; ++i)
+ update_mode_probs(INTRA_MODES, vp9_intra_mode_tree,
counts->uv_mode[i], pre_fc->uv_mode_prob[i],
fc->uv_mode_prob[i], 0);
fc->partition_prob[INTER_FRAME][i], 0);
if (cm->mcomp_filter_type == SWITCHABLE) {
- for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++)
- update_mode_probs(VP9_SWITCHABLE_FILTERS, vp9_switchable_interp_tree,
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++)
+ update_mode_probs(SWITCHABLE_FILTERS, vp9_switchable_interp_tree,
counts->switchable_interp[i],
pre_fc->switchable_interp_prob[i],
fc->switchable_interp_prob[i], 0);
#define SUBMVREF_COUNT 5
#define TX_SIZE_CONTEXTS 2
-#define VP9_MODE_UPDATE_PROB 252
-#define VP9_SWITCHABLE_FILTERS 3 // number of switchable filters
+#define MODE_UPDATE_PROB 252
+#define SWITCHABLE_FILTERS 3 // number of switchable filters
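// Arrays below dimensioned [SWITCHABLE_FILTERS + 1] use the extra slot as the
// prediction context for "no usable neighbouring filter" (see the left/above
// interp-filter derivation in this change), while [SWITCHABLE_FILTERS - 1]
// entries correspond to the internal nodes of the three-leaf filter tree.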
// #define MODE_STATS
unsigned int p8x8[TX_SIZE_CONTEXTS][TX_SIZES - 2];
};
-extern const vp9_prob vp9_kf_uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
-extern const vp9_prob vp9_kf_y_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES]
- [VP9_INTRA_MODES - 1];
+extern const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+extern const vp9_prob vp9_kf_y_mode_prob[INTRA_MODES][INTRA_MODES]
+ [INTRA_MODES - 1];
extern const vp9_tree_index vp9_intra_mode_tree[];
extern const vp9_tree_index vp9_inter_mode_tree[];
-extern struct vp9_token vp9_intra_mode_encodings[VP9_INTRA_MODES];
-extern struct vp9_token vp9_inter_mode_encodings[VP9_INTER_MODES];
+extern struct vp9_token vp9_intra_mode_encodings[INTRA_MODES];
+extern struct vp9_token vp9_inter_mode_encodings[INTER_MODES];
// probability models for partition information
extern const vp9_tree_index vp9_partition_tree[];
extern struct vp9_token vp9_partition_encodings[PARTITION_TYPES];
extern const vp9_tree_index vp9_switchable_interp_tree
- [2 * (VP9_SWITCHABLE_FILTERS - 1)];
+ [2 * (SWITCHABLE_FILTERS - 1)];
-extern struct vp9_token vp9_switchable_interp_encodings[VP9_SWITCHABLE_FILTERS];
+extern struct vp9_token vp9_switchable_interp_encodings[SWITCHABLE_FILTERS];
void vp9_entropy_mode_init();
void vp9_adapt_mv_probs(struct VP9Common *cm, int usehp);
int vp9_use_mv_hp(const MV *ref);
-#define VP9_NMV_UPDATE_PROB 252
+#define NMV_UPDATE_PROB 252
/* Symbols for coding which components are zero jointly */
#define MV_JOINTS 4
-// The VP9_BILINEAR_FILTERS_2TAP macro returns a pointer to the bilinear
+// The BILINEAR_FILTERS_2TAP macro returns a pointer to the bilinear
// filter kernel as a 2 tap filter.
-#define VP9_BILINEAR_FILTERS_2TAP(x) \
+#define BILINEAR_FILTERS_2TAP(x) \
(vp9_bilinear_filters[(x)] + SUBPEL_TAPS/2 - 1)
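// Each vp9_bilinear_filters[x] row is a full SUBPEL_TAPS-wide kernel whose two
// non-zero taps sit in the middle, so the + SUBPEL_TAPS/2 - 1 offset points
// the caller directly at that centre pair; the variance code then reads only
// taps [0] and [1] of the returned pointer, e.g.
//   hfilter = BILINEAR_FILTERS_2TAP(xoffset);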
#endif // VP9_COMMON_VP9_FILTER_H_
#define NUM_FRAME_CONTEXTS (1 << NUM_FRAME_CONTEXTS_LOG2)
typedef struct frame_contexts {
- vp9_prob y_mode_prob[BLOCK_SIZE_GROUPS][VP9_INTRA_MODES - 1];
- vp9_prob uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
+ vp9_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
+ vp9_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
vp9_prob partition_prob[NUM_FRAME_TYPES][NUM_PARTITION_CONTEXTS]
[PARTITION_TYPES - 1];
vp9_coeff_probs_model coef_probs[TX_SIZES][BLOCK_TYPES];
- vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
- [VP9_SWITCHABLE_FILTERS - 1];
- vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1];
+ vp9_prob switchable_interp_prob[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS - 1];
+ vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
vp9_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
vp9_prob comp_inter_prob[COMP_INTER_CONTEXTS];
vp9_prob single_ref_prob[REF_CONTEXTS][2];
} FRAME_CONTEXT;
typedef struct {
- unsigned int y_mode[BLOCK_SIZE_GROUPS][VP9_INTRA_MODES];
- unsigned int uv_mode[VP9_INTRA_MODES][VP9_INTRA_MODES];
+ unsigned int y_mode[BLOCK_SIZE_GROUPS][INTRA_MODES];
+ unsigned int uv_mode[INTRA_MODES][INTRA_MODES];
unsigned int partition[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
vp9_coeff_count_model coef[TX_SIZES][BLOCK_TYPES];
unsigned int eob_branch[TX_SIZES][BLOCK_TYPES][REF_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS];
- unsigned int switchable_interp[VP9_SWITCHABLE_FILTERS + 1]
- [VP9_SWITCHABLE_FILTERS];
- unsigned int inter_mode[INTER_MODE_CONTEXTS][VP9_INTER_MODES];
+ unsigned int switchable_interp[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS];
+ unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES];
unsigned int intra_inter[INTRA_INTER_CONTEXTS][2];
unsigned int comp_inter[COMP_INTER_CONTEXTS][2];
unsigned int single_ref[REF_CONTEXTS][2][2];
{ RGB_TO_YUV(0xCC33FF) }, /* Magenta */
};
-static const unsigned char B_PREDICTION_MODE_colors[VP9_INTRA_MODES][3] = {
+static const unsigned char B_PREDICTION_MODE_colors[INTRA_MODES][3] = {
{ RGB_TO_YUV(0x6633ff) }, /* Purple */
{ RGB_TO_YUV(0xcc33ff) }, /* Magenta */
{ RGB_TO_YUV(0xff33cc) }, /* Pink */
const int left_mv_pred = is_inter_mode(left_mbmi->mode);
const int left_interp = left_in_image && left_mv_pred
? left_mbmi->interp_filter
- : VP9_SWITCHABLE_FILTERS;
+ : SWITCHABLE_FILTERS;
// above
const int above_mv_pred = is_inter_mode(above_mbmi->mode);
const int above_interp = above_in_image && above_mv_pred
? above_mbmi->interp_filter
- : VP9_SWITCHABLE_FILTERS;
+ : SWITCHABLE_FILTERS;
if (left_interp == above_interp)
return left_interp;
- else if (left_interp == VP9_SWITCHABLE_FILTERS &&
- above_interp != VP9_SWITCHABLE_FILTERS)
+ else if (left_interp == SWITCHABLE_FILTERS &&
+ above_interp != SWITCHABLE_FILTERS)
return above_interp;
- else if (left_interp != VP9_SWITCHABLE_FILTERS &&
- above_interp == VP9_SWITCHABLE_FILTERS)
+ else if (left_interp != SWITCHABLE_FILTERS &&
+ above_interp == SWITCHABLE_FILTERS)
return left_interp;
else
- return VP9_SWITCHABLE_FILTERS;
+ return SWITCHABLE_FILTERS;
}
// Returns a context number for the given MB prediction signal
unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd) {
typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left);
-static intra_pred_fn pred[VP9_INTRA_MODES][4];
+static intra_pred_fn pred[INTRA_MODES][4];
static intra_pred_fn dc_pred[2][2][4];
static void init_intra_pred_fn_ptrs(void) {
#include "vp9/common/vp9_scale.h"
static INLINE int scaled_x(int val, const struct scale_factors *scale) {
- return val * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT;
+ return val * scale->x_scale_fp >> REF_SCALE_SHIFT;
}
static INLINE int scaled_y(int val, const struct scale_factors *scale) {
- return val * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT;
+ return val * scale->y_scale_fp >> REF_SCALE_SHIFT;
}
static int unscaled_value(int val, const struct scale_factors *scale) {
// and use fixed point scaling factors in decoding and encoding routines.
// Hardware implementations can calculate scale factor in device driver
// and use multiplication and shifting on hardware instead of division.
- return (other_size << VP9_REF_SCALE_SHIFT) / this_size;
+ return (other_size << REF_SCALE_SHIFT) / this_size;
}
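// Illustrative arithmetic: with other_size = 1920 and this_size = 960 this
// returns (1920 << REF_SCALE_SHIFT) / 960 = 2 << REF_SCALE_SHIFT, and
// scaled_x(100) above then evaluates to 100 * (2 << REF_SCALE_SHIFT)
// >> REF_SCALE_SHIFT = 200; values in this_size units are mapped into
// other_size units with a single multiply and shift at run time.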
static int check_scale_factors(int other_w, int other_h,
int other_w, int other_h,
int this_w, int this_h) {
if (!check_scale_factors(other_w, other_h, this_w, this_h)) {
- scale->x_scale_fp = VP9_REF_INVALID_SCALE;
- scale->y_scale_fp = VP9_REF_INVALID_SCALE;
+ scale->x_scale_fp = REF_INVALID_SCALE;
+ scale->y_scale_fp = REF_INVALID_SCALE;
return;
}
#include "vp9/common/vp9_mv.h"
#include "vp9/common/vp9_convolve.h"
-#define VP9_REF_SCALE_SHIFT 14
-#define VP9_REF_NO_SCALE (1 << VP9_REF_SCALE_SHIFT)
-#define VP9_REF_INVALID_SCALE -1
+#define REF_SCALE_SHIFT 14
+#define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
+#define REF_INVALID_SCALE -1
struct scale_factors {
int x_scale_fp; // horizontal fixed point scale factor
int this_w, int this_h);
static int vp9_is_valid_scale(const struct scale_factors *sf) {
- return sf->x_scale_fp != VP9_REF_INVALID_SCALE &&
- sf->y_scale_fp != VP9_REF_INVALID_SCALE;
+ return sf->x_scale_fp != REF_INVALID_SCALE &&
+ sf->y_scale_fp != REF_INVALID_SCALE;
}
static int vp9_is_scaled(const struct scale_factors *sf) {
- return sf->x_scale_fp != VP9_REF_NO_SCALE ||
- sf->y_scale_fp != VP9_REF_NO_SCALE;
+ return sf->x_scale_fp != REF_NO_SCALE ||
+ sf->y_scale_fp != REF_NO_SCALE;
}
#endif // VP9_COMMON_VP9_SCALE_H_
for (j = 0; j < output_width; j++) {
output_ptr[j] = ROUND_POWER_OF_TWO((int)src_ptr[0] * vp9_filter[0] +
(int)src_ptr[pixel_step] * vp9_filter[1],
- VP9_FILTER_BITS);
+ FILTER_BITS);
src_ptr++;
}
for (j = 0; j < output_width; j++) {
output_ptr[j] = ROUND_POWER_OF_TWO((int)src_ptr[0] * vp9_filter[0] +
(int)src_ptr[pixel_step] * vp9_filter[1],
- VP9_FILTER_BITS);
+ FILTER_BITS);
src_ptr++;
}
// This is meant to be a large, positive constant that can still be efficiently
// loaded as an immediate (on platforms like ARM, for example).
// Even relatively modest values like 100 would work fine.
-#define VP9_LOTS_OF_BITS 0x40000000
+#define LOTS_OF_BITS 0x40000000
int vp9_reader_init(vp9_reader *r, const uint8_t *buffer, size_t size) {
const uint8_t *buffer = r->buffer;
VP9_BD_VALUE value = r->value;
int count = r->count;
- int shift = VP9_BD_VALUE_SIZE - 8 - (count + 8);
+ int shift = BD_VALUE_SIZE - 8 - (count + 8);
int loop_end = 0;
const int bits_left = (int)((buffer_end - buffer)*CHAR_BIT);
const int x = shift + CHAR_BIT - bits_left;
if (x >= 0) {
- count += VP9_LOTS_OF_BITS;
+ count += LOTS_OF_BITS;
loop_end = x;
}
const uint8_t *vp9_reader_find_end(vp9_reader *r) {
// Find the end of the coded buffer
- while (r->count > CHAR_BIT && r->count < VP9_BD_VALUE_SIZE) {
+ while (r->count > CHAR_BIT && r->count < BD_VALUE_SIZE) {
r->count -= CHAR_BIT;
r->buffer--;
}
//
// When reading a byte from the user's buffer, count is filled with 8 and
// one byte is filled into the value buffer. When we reach the end of the
- // data, count is additionally filled with VP9_LOTS_OF_BITS. So when
- // count == VP9_LOTS_OF_BITS - 1, the user's data has been exhausted.
+ // data, count is additionally filled with LOTS_OF_BITS. So when
+ // count == LOTS_OF_BITS - 1, the user's data has been exhausted.
//
// 1 if we have tried to decode bits after the end of stream was encountered.
// 0 No error.
- return r->count > VP9_BD_VALUE_SIZE && r->count < VP9_LOTS_OF_BITS;
+ return r->count > BD_VALUE_SIZE && r->count < LOTS_OF_BITS;
}
typedef size_t VP9_BD_VALUE;
-#define VP9_BD_VALUE_SIZE ((int)sizeof(VP9_BD_VALUE)*CHAR_BIT)
+#define BD_VALUE_SIZE ((int)sizeof(VP9_BD_VALUE)*CHAR_BIT)
typedef struct {
const uint8_t *buffer_end;
value = br->value;
count = br->count;
- bigsplit = (VP9_BD_VALUE)split << (VP9_BD_VALUE_SIZE - 8);
+ bigsplit = (VP9_BD_VALUE)split << (BD_VALUE_SIZE - 8);
range = split;
}
static void update_mv(vp9_reader *r, vp9_prob *p) {
- if (vp9_read(r, VP9_NMV_UPDATE_PROB))
+ if (vp9_read(r, NMV_UPDATE_PROB))
*p = (vp9_read_literal(r, 7) << 1) | 1;
}
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
int i, j;
- for (j = 0; j < VP9_SWITCHABLE_FILTERS + 1; ++j)
- for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ for (j = 0; j < SWITCHABLE_FILTERS + 1; ++j)
+ for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}
static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
int i, j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
- for (j = 0; j < VP9_INTER_MODES - 1; ++j)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ for (j = 0; j < INTER_MODES - 1; ++j)
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}
if (cm->comp_pred_mode == HYBRID_PREDICTION)
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]);
if (cm->comp_pred_mode != COMP_PREDICTION_ONLY)
for (i = 0; i < REF_CONTEXTS; i++) {
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]);
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]);
}
if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
for (i = 0; i < REF_CONTEXTS; i++)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]);
}
// TODO(jkoleszar): does this clear more than MBSKIP_CONTEXTS? Maybe remove.
// vpx_memset(cm->fc.mbskip_probs, 0, sizeof(cm->fc.mbskip_probs));
for (k = 0; k < MBSKIP_CONTEXTS; ++k)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.mbskip_probs[k]);
if (cm->frame_type != KEY_FRAME && !cm->intra_only) {
read_switchable_interp_probs(&cm->fc, r);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.intra_inter_prob[i]);
read_comp_pred(cm, r);
for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
- for (i = 0; i < VP9_INTRA_MODES - 1; ++i)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ for (i = 0; i < INTRA_MODES - 1; ++i)
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.y_mode_prob[j][i]);
for (j = 0; j < NUM_PARTITION_CONTEXTS; ++j)
for (i = 0; i < PARTITION_TYPES - 1; ++i)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.partition_prob[INTER_FRAME][j][i]);
read_mv_probs(r, nmvc, xd->allow_high_precision_mv);
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZES - 3; ++j)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZES - 2; ++j)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);
for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
for (j = 0; j < TX_SIZES - 1; ++j)
- if (vp9_read(r, VP9_MODE_UPDATE_PROB))
+ if (vp9_read(r, MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}
#endif
#ifdef ENTROPY_STATS
-int intra_mode_stats[VP9_INTRA_MODES]
- [VP9_INTRA_MODES]
- [VP9_INTRA_MODES];
+int intra_mode_stats[INTRA_MODES]
+ [INTRA_MODES]
+ [INTRA_MODES];
vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES];
extern unsigned int active_section;
int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES];
int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1];
int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2];
-int64_t switchable_interp_stats[VP9_SWITCHABLE_FILTERS+1]
- [VP9_SWITCHABLE_FILTERS];
+int64_t switchable_interp_stats[SWITCHABLE_FILTERS+1]
+ [SWITCHABLE_FILTERS];
void init_tx_count_stats() {
vp9_zero(tx_count_32x32p_stats);
static void update_switchable_interp_stats(VP9_COMMON *cm) {
int i, j;
- for (i = 0; i < VP9_SWITCHABLE_FILTERS+1; ++i)
- for (j = 0; j < VP9_SWITCHABLE_FILTERS; ++j) {
+ for (i = 0; i < SWITCHABLE_FILTERS+1; ++i)
+ for (j = 0; j < SWITCHABLE_FILTERS; ++j) {
switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j];
}
}
fclose(fp);
printf(
- "vp9_default_switchable_filter_count[VP9_SWITCHABLE_FILTERS+1]"
- "[VP9_SWITCHABLE_FILTERS] = {\n");
- for (i = 0; i < VP9_SWITCHABLE_FILTERS+1; i++) {
+ "vp9_default_switchable_filter_count[SWITCHABLE_FILTERS+1]"
+ "[SWITCHABLE_FILTERS] = {\n");
+ for (i = 0; i < SWITCHABLE_FILTERS+1; i++) {
printf(" { ");
- for (j = 0; j < VP9_SWITCHABLE_FILTERS; j++) {
+ for (j = 0; j < SWITCHABLE_FILTERS; j++) {
printf("%"PRId64", ", switchable_interp_stats[i][j]);
}
printf("},\n");
n--;
for (i = 0; i < n; ++i) {
- vp9_cond_prob_diff_update(w, &Pcur[i], VP9_MODE_UPDATE_PROB, bct[i]);
+ vp9_cond_prob_diff_update(w, &Pcur[i], MODE_UPDATE_PROB, bct[i]);
}
}
vp9_writer* const bc) {
VP9_COMMON *const cm = &cpi->common;
int j;
- vp9_prob pnew[VP9_INTRA_MODES - 1];
- unsigned int bct[VP9_INTRA_MODES - 1][2];
+ vp9_prob pnew[INTRA_MODES - 1];
+ unsigned int bct[INTRA_MODES - 1][2];
for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
- update_mode(bc, VP9_INTRA_MODES, vp9_intra_mode_tree, pnew,
+ update_mode(bc, INTRA_MODES, vp9_intra_mode_tree, pnew,
cm->fc.y_mode_prob[j], bct,
(unsigned int *)cpi->y_mode_count[j]);
}
for (k = 0; k < MBSKIP_CONTEXTS; ++k)
vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k],
- VP9_MODE_UPDATE_PROB, cm->counts.mbskip[k]);
+ MODE_UPDATE_PROB, cm->counts.mbskip[k]);
}
static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
static void update_switchable_interp_probs(VP9_COMP *const cpi,
vp9_writer* const bc) {
VP9_COMMON *const pc = &cpi->common;
- unsigned int branch_ct[VP9_SWITCHABLE_FILTERS + 1]
- [VP9_SWITCHABLE_FILTERS - 1][2];
- vp9_prob new_prob[VP9_SWITCHABLE_FILTERS + 1][VP9_SWITCHABLE_FILTERS - 1];
+ unsigned int branch_ct[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS - 1][2];
+ vp9_prob new_prob[SWITCHABLE_FILTERS + 1][SWITCHABLE_FILTERS - 1];
int i, j;
- for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) {
+ for (j = 0; j <= SWITCHABLE_FILTERS; ++j) {
vp9_tree_probs_from_distribution(
vp9_switchable_interp_tree,
new_prob[j], branch_ct[j],
pc->counts.switchable_interp[j], 0);
}
- for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) {
- for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) {
+ for (j = 0; j <= SWITCHABLE_FILTERS; ++j) {
+ for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) {
vp9_cond_prob_diff_update(bc, &pc->fc.switchable_interp_prob[j][i],
- VP9_MODE_UPDATE_PROB, branch_ct[j][i]);
+ MODE_UPDATE_PROB, branch_ct[j][i]);
}
}
#ifdef MODE_STATS
int i, j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
- unsigned int branch_ct[VP9_INTER_MODES - 1][2];
- vp9_prob new_prob[VP9_INTER_MODES - 1];
+ unsigned int branch_ct[INTER_MODES - 1][2];
+ vp9_prob new_prob[INTER_MODES - 1];
vp9_tree_probs_from_distribution(vp9_inter_mode_tree,
new_prob, branch_ct,
pc->counts.inter_mode[i], NEARESTMV);
- for (j = 0; j < VP9_INTER_MODES - 1; ++j)
+ for (j = 0; j < INTER_MODES - 1; ++j)
vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j],
- VP9_MODE_UPDATE_PROB, branch_ct[j]);
+ MODE_UPDATE_PROB, branch_ct[j]);
}
}
ct_8x8p);
for (j = 0; j < TX_SIZES - 3; j++)
vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j],
- VP9_MODE_UPDATE_PROB, ct_8x8p[j]);
+ MODE_UPDATE_PROB, ct_8x8p[j]);
}
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
ct_16x16p);
for (j = 0; j < TX_SIZES - 2; j++)
vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
- VP9_MODE_UPDATE_PROB, ct_16x16p[j]);
+ MODE_UPDATE_PROB, ct_16x16p[j]);
}
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
for (j = 0; j < TX_SIZES - 1; j++)
vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
- VP9_MODE_UPDATE_PROB, ct_32x32p[j]);
+ MODE_UPDATE_PROB, ct_32x32p[j]);
}
#ifdef MODE_STATS
if (!cpi->dummy_packing)
if (cm->mcomp_filter_type == SWITCHABLE) {
// Check to see if only one of the filters is actually used
- int count[VP9_SWITCHABLE_FILTERS];
+ int count[SWITCHABLE_FILTERS];
int i, j, c = 0;
- for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
+ for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
count[i] = 0;
- for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j)
+ for (j = 0; j <= SWITCHABLE_FILTERS; ++j)
count[i] += cm->counts.switchable_interp[j][i];
c += (count[i] > 0);
}
if (c == 1) {
// Only one filter is used. So set the filter at frame level
- for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
+ for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
if (count[i]) {
cm->mcomp_filter_type = i;
break;
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
- VP9_MODE_UPDATE_PROB,
+ MODE_UPDATE_PROB,
cpi->intra_inter_count[i]);
if (cm->allow_comp_inter_inter) {
if (use_hybrid_pred)
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
- VP9_MODE_UPDATE_PROB,
+ MODE_UPDATE_PROB,
cpi->comp_inter_count[i]);
}
}
if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
for (i = 0; i < REF_CONTEXTS; i++) {
vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
- VP9_MODE_UPDATE_PROB,
+ MODE_UPDATE_PROB,
cpi->single_ref_count[i][0]);
vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
- VP9_MODE_UPDATE_PROB,
+ MODE_UPDATE_PROB,
cpi->single_ref_count[i][1]);
}
}
if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
for (i = 0; i < REF_CONTEXTS; i++)
vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
- VP9_MODE_UPDATE_PROB,
+ MODE_UPDATE_PROB,
cpi->comp_ref_count[i]);
update_mbintra_mode_probs(cpi, &header_bc);
int comp_pred_diff;
int single_pred_diff;
int64_t tx_rd_diff[TX_MODES];
- int64_t best_filter_diff[VP9_SWITCHABLE_FILTERS + 1];
+ int64_t best_filter_diff[SWITCHABLE_FILTERS + 1];
// Bit flag for each mode whether it has high error in comparison to others.
unsigned int modes_with_high_error;
int mbmode_cost[MB_MODE_COUNT];
unsigned inter_mode_cost[INTER_MODE_CONTEXTS][MB_MODE_COUNT - NEARESTMV];
int intra_uv_mode_cost[2][MB_MODE_COUNT];
- int y_mode_costs[VP9_INTRA_MODES][VP9_INTRA_MODES][VP9_INTRA_MODES];
- int switchable_interp_costs[VP9_SWITCHABLE_FILTERS + 1]
- [VP9_SWITCHABLE_FILTERS];
+ int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES];
+ int switchable_interp_costs[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS];
// These define limits to motion vector components to prevent them
// from extending outside the UMV borders
* This also avoids the need for divide by zero checks in
* vp9_activity_masking().
*/
-#define VP9_ACTIVITY_AVG_MIN (64)
+#define ACTIVITY_AVG_MIN (64)
/* Motion vector component magnitude threshold for defining fast motion. */
#define FAST_MOTION_MV_THRESH (24)
mb_activity = tt_activity_measure(x);
}
- if (mb_activity < VP9_ACTIVITY_AVG_MIN)
- mb_activity = VP9_ACTIVITY_AVG_MIN;
+ if (mb_activity < ACTIVITY_AVG_MIN)
+ mb_activity = ACTIVITY_AVG_MIN;
return mb_activity;
}
cpi->activity_avg = (unsigned int) (activity_sum / cpi->common.MBs);
#endif // ACT_MEDIAN
- if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
- cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;
+ if (cpi->activity_avg < ACTIVITY_AVG_MIN)
+ cpi->activity_avg = ACTIVITY_AVG_MIN;
// Experimental code: return fixed value normalized for several clips
if (ALT_ACT_MEASURE)
cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
- for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++)
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++)
cpi->rd_filter_diff[i] += ctx->best_filter_diff[i];
}
}
cpi->rd_filter_threshes[frame_type][1] >
cpi->rd_filter_threshes[frame_type][2] &&
cpi->rd_filter_threshes[frame_type][1] >
- cpi->rd_filter_threshes[frame_type][VP9_SWITCHABLE_FILTERS]) {
+ cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
filter_type = EIGHTTAP_SMOOTH;
} else if (cpi->rd_filter_threshes[frame_type][2] >
cpi->rd_filter_threshes[frame_type][0] &&
cpi->rd_filter_threshes[frame_type][2] >
- cpi->rd_filter_threshes[frame_type][VP9_SWITCHABLE_FILTERS]) {
+ cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
filter_type = EIGHTTAP_SHARP;
} else if (cpi->rd_filter_threshes[frame_type][0] >
- cpi->rd_filter_threshes[frame_type][VP9_SWITCHABLE_FILTERS]) {
+ cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
filter_type = EIGHTTAP;
} else {
filter_type = SWITCHABLE;
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
- for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++) {
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++) {
const int64_t diff = cpi->rd_filter_diff[i] / cpi->common.MBs;
cpi->rd_filter_threshes[frame_type][i] =
(cpi->rd_filter_threshes[frame_type][i] + diff) / 2;
for (j = 0; j < MV_JOINTS - 1; ++j)
update_mv(bc, branch_ct_joint[j], &mvc->joints[j], prob.joints[j],
- VP9_NMV_UPDATE_PROB);
+ NMV_UPDATE_PROB);
for (i = 0; i < 2; ++i) {
update_mv(bc, branch_ct_sign[i], &mvc->comps[i].sign,
- prob.comps[i].sign, VP9_NMV_UPDATE_PROB);
+ prob.comps[i].sign, NMV_UPDATE_PROB);
for (j = 0; j < MV_CLASSES - 1; ++j)
update_mv(bc, branch_ct_classes[i][j], &mvc->comps[i].classes[j],
- prob.comps[i].classes[j], VP9_NMV_UPDATE_PROB);
+ prob.comps[i].classes[j], NMV_UPDATE_PROB);
for (j = 0; j < CLASS0_SIZE - 1; ++j)
update_mv(bc, branch_ct_class0[i][j], &mvc->comps[i].class0[j],
- prob.comps[i].class0[j], VP9_NMV_UPDATE_PROB);
+ prob.comps[i].class0[j], NMV_UPDATE_PROB);
for (j = 0; j < MV_OFFSET_BITS; ++j)
update_mv(bc, branch_ct_bits[i][j], &mvc->comps[i].bits[j],
- prob.comps[i].bits[j], VP9_NMV_UPDATE_PROB);
+ prob.comps[i].bits[j], NMV_UPDATE_PROB);
}
for (i = 0; i < 2; ++i) {
for (k = 0; k < 3; ++k)
update_mv(bc, branch_ct_class0_fp[i][j][k],
&mvc->comps[i].class0_fp[j][k],
- prob.comps[i].class0_fp[j][k], VP9_NMV_UPDATE_PROB);
+ prob.comps[i].class0_fp[j][k], NMV_UPDATE_PROB);
}
for (j = 0; j < 3; ++j)
update_mv(bc, branch_ct_fp[i][j], &mvc->comps[i].fp[j],
- prob.comps[i].fp[j], VP9_NMV_UPDATE_PROB);
+ prob.comps[i].fp[j], NMV_UPDATE_PROB);
}
if (usehp) {
for (i = 0; i < 2; ++i) {
update_mv(bc, branch_ct_class0_hp[i], &mvc->comps[i].class0_hp,
- prob.comps[i].class0_hp, VP9_NMV_UPDATE_PROB);
+ prob.comps[i].class0_hp, NMV_UPDATE_PROB);
update_mv(bc, branch_ct_hp[i], &mvc->comps[i].hp,
- prob.comps[i].hp, VP9_NMV_UPDATE_PROB);
+ prob.comps[i].hp, NMV_UPDATE_PROB);
}
}
}
const vp9_tree_p KT = vp9_intra_mode_tree;
int i, j;
- for (i = 0; i < VP9_INTRA_MODES; i++) {
- for (j = 0; j < VP9_INTRA_MODES; j++) {
+ for (i = 0; i < INTRA_MODES; i++) {
+ for (j = 0; j < INTRA_MODES; j++) {
vp9_cost_tokens((int *)c->mb.y_mode_costs[i][j], vp9_kf_y_mode_prob[i][j],
KT);
}
vp9_cost_tokens(c->mb.mbmode_cost, x->fc.y_mode_prob[1],
vp9_intra_mode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[1],
- x->fc.uv_mode_prob[VP9_INTRA_MODES - 1], vp9_intra_mode_tree);
+ x->fc.uv_mode_prob[INTRA_MODES - 1], vp9_intra_mode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[0],
- vp9_kf_uv_mode_prob[VP9_INTRA_MODES - 1],
+ vp9_kf_uv_mode_prob[INTRA_MODES - 1],
vp9_intra_mode_tree);
- for (i = 0; i <= VP9_SWITCHABLE_FILTERS; ++i)
+ for (i = 0; i <= SWITCHABLE_FILTERS; ++i)
vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i],
x->fc.switchable_interp_prob[i],
vp9_switchable_interp_tree);
#ifdef ENTROPY_STATS
-extern int intra_mode_stats[VP9_INTRA_MODES]
- [VP9_INTRA_MODES]
- [VP9_INTRA_MODES];
+extern int intra_mode_stats[INTRA_MODES]
+ [INTRA_MODES]
+ [INTRA_MODES];
#endif
#ifdef MODE_STATS
void vp9_update_mode_context_stats(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
int i, j;
- unsigned int (*inter_mode_counts)[VP9_INTER_MODES - 1][2] =
+ unsigned int (*inter_mode_counts)[INTER_MODES - 1][2] =
cm->fc.inter_mode_counts;
- int64_t (*mv_ref_stats)[VP9_INTER_MODES - 1][2] = cpi->mv_ref_stats;
+ int64_t (*mv_ref_stats)[INTER_MODES - 1][2] = cpi->mv_ref_stats;
FILE *f;
// Read the past stats counters
// Add in the values for this frame
for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
- for (j = 0; j < VP9_INTER_MODES - 1; j++) {
+ for (j = 0; j < INTER_MODES - 1; j++) {
mv_ref_stats[i][j][0] += (int64_t)inter_mode_counts[i][j][0];
mv_ref_stats[i][j][1] += (int64_t)inter_mode_counts[i][j][1];
}
fprintf(f, "#include \"vp9_entropy.h\"\n");
fprintf(
f,
- "const int inter_mode_probs[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1] =");
+ "const int inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1] =");
fprintf(f, "{\n");
for (j = 0; j < INTER_MODE_CONTEXTS; j++) {
fprintf(f, " {/* %d */ ", j);
fprintf(f, " ");
- for (i = 0; i < VP9_INTER_MODES - 1; i++) {
+ for (i = 0; i < INTER_MODES - 1; i++) {
int this_prob;
int64_t count = cpi->mv_ref_stats[j][i][0] + cpi->mv_ref_stats[j][i][1];
if (count)
fprintf(fmode, "\n#include \"vp9_entropymode.h\"\n\n");
fprintf(fmode, "const unsigned int vp9_kf_default_bmode_counts ");
- fprintf(fmode, "[VP9_INTRA_MODES][VP9_INTRA_MODES]"
- "[VP9_INTRA_MODES] =\n{\n");
+ fprintf(fmode, "[INTRA_MODES][INTRA_MODES]"
+ "[INTRA_MODES] =\n{\n");
- for (i = 0; i < VP9_INTRA_MODES; i++) {
+ for (i = 0; i < INTRA_MODES; i++) {
fprintf(fmode, " { // Above Mode : %d\n", i);
- for (j = 0; j < VP9_INTRA_MODES; j++) {
+ for (j = 0; j < INTRA_MODES; j++) {
fprintf(fmode, " {");
- for (k = 0; k < VP9_INTRA_MODES; k++) {
+ for (k = 0; k < INTRA_MODES; k++) {
if (!intra_mode_stats[i][j][k])
fprintf(fmode, " %5d, ", 1);
else
vp9_coeff_probs_model coef_probs[TX_SIZES][BLOCK_TYPES];
- vp9_prob y_mode_prob[4][VP9_INTRA_MODES - 1];
- vp9_prob uv_mode_prob[VP9_INTRA_MODES][VP9_INTRA_MODES - 1];
+ vp9_prob y_mode_prob[4][INTRA_MODES - 1];
+ vp9_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
vp9_prob partition_prob[2][NUM_PARTITION_CONTEXTS][PARTITION_TYPES - 1];
- vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
- [VP9_SWITCHABLE_FILTERS - 1];
+ vp9_prob switchable_interp_prob[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS - 1];
- int inter_mode_counts[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1][2];
- vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1];
+ int inter_mode_counts[INTER_MODE_CONTEXTS][INTER_MODES - 1][2];
+ vp9_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
struct tx_probs tx_probs;
vp9_prob mbskip_probs[MBSKIP_CONTEXTS];
// FIXME(rbultje) can this overflow?
int rd_tx_select_threshes[4][TX_MODES];
- int64_t rd_filter_diff[VP9_SWITCHABLE_FILTERS + 1];
- int64_t rd_filter_threshes[4][VP9_SWITCHABLE_FILTERS + 1];
- int64_t rd_filter_cache[VP9_SWITCHABLE_FILTERS + 1];
+ int64_t rd_filter_diff[SWITCHABLE_FILTERS + 1];
+ int64_t rd_filter_threshes[4][SWITCHABLE_FILTERS + 1];
+ int64_t rd_filter_cache[SWITCHABLE_FILTERS + 1];
int RDMULT;
int RDDIV;
int cq_target_quality;
- int y_mode_count[4][VP9_INTRA_MODES];
- int y_uv_mode_count[VP9_INTRA_MODES][VP9_INTRA_MODES];
+ int y_mode_count[4][INTRA_MODES];
+ int y_uv_mode_count[INTRA_MODES][INTRA_MODES];
unsigned int partition_count[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
nmv_context_counts NMVcount;
int dummy_packing; /* flag to indicate if packing is dummy */
- unsigned int switchable_interp_count[VP9_SWITCHABLE_FILTERS + 1]
- [VP9_SWITCHABLE_FILTERS];
+ unsigned int switchable_interp_count[SWITCHABLE_FILTERS + 1]
+ [SWITCHABLE_FILTERS];
unsigned int txfm_stepdown_count[TX_SIZES];
#endif
#ifdef ENTROPY_STATS
- int64_t mv_ref_stats[INTER_MODE_CONTEXTS][VP9_INTER_MODES - 1][2];
+ int64_t mv_ref_stats[INTER_MODE_CONTEXTS][INTER_MODES - 1][2];
#endif
} VP9_COMP;
int64_t sse;
int segment_yrate;
MB_PREDICTION_MODE modes[4];
- SEG_RDSTAT rdstat[4][VP9_INTER_MODES];
+ SEG_RDSTAT rdstat[4][INTER_MODES];
int mvthresh;
} BEST_SEG_INFO;
if (best_rd == INT64_MAX) {
int iy, midx;
for (iy = i + 1; iy < 4; ++iy)
- for (midx = 0; midx < VP9_INTER_MODES; ++midx)
+ for (midx = 0; midx < INTER_MODES; ++midx)
bsi->rdstat[iy][midx].brdcost = INT64_MAX;
bsi->segment_rd = INT64_MAX;
return;
if (this_segment_rd > bsi->segment_rd) {
int iy, midx;
for (iy = i + 1; iy < 4; ++iy)
- for (midx = 0; midx < VP9_INTER_MODES; ++midx)
+ for (midx = 0; midx < INTER_MODES; ++midx)
bsi->rdstat[iy][midx].brdcost = INT64_MAX;
bsi->segment_rd = INT64_MAX;
return;
int_mv *second_ref_mv,
int64_t comp_pred_diff[NB_PREDICTION_TYPES],
int64_t tx_size_diff[TX_MODES],
- int64_t best_filter_diff[VP9_SWITCHABLE_FILTERS + 1]) {
+ int64_t best_filter_diff[SWITCHABLE_FILTERS + 1]) {
MACROBLOCKD *const xd = &x->e_mbd;
// Take a snapshot of the coding context so it can be
// doesn't actually work this way
memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff));
memcpy(ctx->best_filter_diff, best_filter_diff,
- sizeof(*best_filter_diff) * (VP9_SWITCHABLE_FILTERS + 1));
+ sizeof(*best_filter_diff) * (SWITCHABLE_FILTERS + 1));
}
static void setup_pred_block(const MACROBLOCKD *xd,
scale[frame_type].x_offset_q4 =
ROUND_POWER_OF_TWO(mi_col * MI_SIZE * scale[frame_type].x_scale_fp,
- VP9_REF_SCALE_SHIFT) & 0xf;
+ REF_SCALE_SHIFT) & 0xf;
scale[frame_type].y_offset_q4 =
ROUND_POWER_OF_TWO(mi_row * MI_SIZE * scale[frame_type].y_scale_fp,
- VP9_REF_SCALE_SHIFT) & 0xf;
+ REF_SCALE_SHIFT) & 0xf;
// TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
// use the UV scaling factors.
int tmp_rate_sum = 0;
int64_t tmp_dist_sum = 0;
- cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS] = INT64_MAX;
- for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX;
+ for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
int j;
int64_t rs_rd;
mbmi->interp_filter = i;
if (i > 0 && intpel_mv) {
cpi->rd_filter_cache[i] = RDCOST(x->rdmult, x->rddiv,
tmp_rate_sum, tmp_dist_sum);
- cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS] =
- MIN(cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS],
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] =
+ MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS],
cpi->rd_filter_cache[i] + rs_rd);
rd = cpi->rd_filter_cache[i];
if (cm->mcomp_filter_type == SWITCHABLE)
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum);
cpi->rd_filter_cache[i] = RDCOST(x->rdmult, x->rddiv,
rate_sum, dist_sum);
- cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS] =
- MIN(cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS],
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] =
+ MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS],
cpi->rd_filter_cache[i] + rs_rd);
rd = cpi->rd_filter_cache[i];
if (cm->mcomp_filter_type == SWITCHABLE)
int64_t best_tx_diff[TX_MODES];
int64_t best_pred_diff[NB_PREDICTION_TYPES];
int64_t best_pred_rd[NB_PREDICTION_TYPES];
- int64_t best_filter_rd[VP9_SWITCHABLE_FILTERS + 1];
- int64_t best_filter_diff[VP9_SWITCHABLE_FILTERS + 1];
+ int64_t best_filter_rd[SWITCHABLE_FILTERS + 1];
+ int64_t best_filter_diff[SWITCHABLE_FILTERS + 1];
MB_MODE_INFO best_mbmode = { 0 };
int j;
int mode_index, best_mode_index = 0;
best_pred_rd[i] = INT64_MAX;
for (i = 0; i < TX_MODES; i++)
best_tx_rd[i] = INT64_MAX;
- for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++)
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++)
best_filter_rd[i] = INT64_MAX;
for (i = 0; i < TX_SIZES; i++)
rate_uv_intra[i] = INT_MAX;
union b_mode_info tmp_best_bmodes[16];
MB_MODE_INFO tmp_best_mbmode;
PARTITION_INFO tmp_best_partition;
- BEST_SEG_INFO bsi[VP9_SWITCHABLE_FILTERS];
+ BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
int pred_exists = 0;
int uv_skippable;
if (is_comp_pred) {
cpi->rd_threshes[bsize][THR_NEWG] : this_rd_thresh;
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
- cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS] = INT64_MAX;
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX;
if (cm->mcomp_filter_type != BILINEAR) {
tmp_best_filter = EIGHTTAP;
if (x->source_variance <
vp9_zero(cpi->rd_filter_cache);
} else {
for (switchable_filter_index = 0;
- switchable_filter_index < VP9_SWITCHABLE_FILTERS;
+ switchable_filter_index < SWITCHABLE_FILTERS;
++switchable_filter_index) {
int newbest, rs;
int64_t rs_rd;
cpi->rd_filter_cache[switchable_filter_index] = tmp_rd;
rs = get_switchable_rate(x);
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
- cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS] =
- MIN(cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS],
+ cpi->rd_filter_cache[SWITCHABLE_FILTERS] =
+ MIN(cpi->rd_filter_cache[SWITCHABLE_FILTERS],
tmp_rd + rs_rd);
if (cm->mcomp_filter_type == SWITCHABLE)
tmp_rd += rs_rd;
if (!disable_skip && ref_frame == INTRA_FRAME) {
for (i = 0; i < NB_PREDICTION_TYPES; ++i)
best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
- for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++)
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++)
best_filter_rd[i] = MIN(best_filter_rd[i], this_rd);
}
if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
cm->mcomp_filter_type != BILINEAR) {
int64_t ref = cpi->rd_filter_cache[cm->mcomp_filter_type == SWITCHABLE ?
- VP9_SWITCHABLE_FILTERS : cm->mcomp_filter_type];
- for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++) {
+ SWITCHABLE_FILTERS : cm->mcomp_filter_type];
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++) {
int64_t adj_rd;
// In cases of poor prediction, filter_cache[] can contain really big
// values, which actually are bigger than this_rd itself. This can
}
if (!x->skip) {
- for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++) {
+ for (i = 0; i <= SWITCHABLE_FILTERS; i++) {
if (best_filter_rd[i] == INT64_MAX)
best_filter_diff[i] = 0;
else
best_filter_diff[i] = best_rd - best_filter_rd[i];
}
if (cm->mcomp_filter_type == SWITCHABLE)
- assert(best_filter_diff[VP9_SWITCHABLE_FILTERS] == 0);
+ assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
} else {
vpx_memset(best_filter_diff, 0, sizeof(best_filter_diff));
}
uint8_t temp2[68 * 64];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 64, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 64 * 64); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 64, hfilter);
uint8_t temp2[68 * 64];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 65, 32, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 64); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 65, 32, hfilter);
uint8_t temp2[36 * 32];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 17, 32, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 16); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 17, 32, hfilter);
uint8_t temp2[36 * 32];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 16, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 32); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 16, hfilter);
const int16_t *hfilter, *vfilter;
uint16_t fdata3[5 * 4]; // Temp data buffer used in filtering
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
// First filter 1d Horizontal
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 4 * 4); // compound pred buffer
uint16_t fdata3[5 * 4]; // Temp data buffer used in filtering
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
// First filter 1d Horizontal
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
uint8_t temp2[20 * 16];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 8, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 8); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 8, hfilter);
uint8_t temp2[20 * 16];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 17, 16, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 16); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 17, 16, hfilter);
uint8_t temp2[68 * 64];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 65, 64, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 64 * 64); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 65, 64, hfilter);
uint8_t temp2[36 * 32];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 32, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 32 * 32); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 33, 32, hfilter);
uint8_t temp2[20 * 16];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 16, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 16 * 8); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 16, hfilter);
uint8_t temp2[20 * 16];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 17, 8, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 16); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 17, 8, hfilter);
uint8_t temp2[20 * 16];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 5, 8, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 8 * 4); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 5, 8, hfilter);
uint8_t temp2[20 * 16];
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 4, hfilter);
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, 4 * 8); // compound pred buffer
const int16_t *hfilter, *vfilter;
- hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
- vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
+ hfilter = BILINEAR_FILTERS_2TAP(xoffset);
+ vfilter = BILINEAR_FILTERS_2TAP(yoffset);
var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
1, 9, 4, hfilter);