union b_mode_info bmi[16];
} MODE_INFO;
-typedef struct {
+typedef struct blockd {
short *qcoeff;
short *dqcoeff;
unsigned char *predictor;
union b_mode_info bmi;
} BLOCKD;
-typedef struct MacroBlockD {
+typedef struct macroblockd {
DECLARE_ALIGNED(16, short, diff[400]); /* from idct diff */
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
DECLARE_ALIGNED(16, short, qcoeff[400]);
/* assorted loopfilter functions which get used elsewhere */
struct VP8Common;
-struct MacroBlockD;
+struct macroblockd;
void vp8_loop_filter_init(struct VP8Common *cm);
void vp8_loop_filter_frame_init(struct VP8Common *cm,
- struct MacroBlockD *mbd,
+ struct macroblockd *mbd,
int default_filt_lvl);
-void vp8_loop_filter_frame(struct VP8Common *cm, struct MacroBlockD *mbd);
+void vp8_loop_filter_frame(struct VP8Common *cm, struct macroblockd *mbd);
void vp8_loop_filter_partial_frame(struct VP8Common *cm,
- struct MacroBlockD *mbd,
+ struct macroblockd *mbd,
int default_filt_lvl);
void vp8_loop_filter_frame_yonly(struct VP8Common *cm,
- struct MacroBlockD *mbd,
+ struct macroblockd *mbd,
int default_filt_lvl);
void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
common_forward_decls() {
cat <<EOF
-#include "vp8/common/blockd.h"
struct loop_filter_info;
+struct blockd;
+struct macroblockd;
+
/* Encoder forward decls */
+struct block;
+struct macroblock;
struct variance_vtable;
prototype void vp8_recon4b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
specialize vp8_recon4b sse2
-prototype void vp8_recon_mb "MACROBLOCKD *x"
+prototype void vp8_recon_mb "struct macroblockd *x"
specialize vp8_recon_mb
-prototype void vp8_recon_mby "MACROBLOCKD *x"
+prototype void vp8_recon_mby "struct macroblockd *x"
specialize vp8_recon_mby
-prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby_s
-prototype void vp8_build_intra_predictors_sby_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_sby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_sby_s;
-prototype void vp8_build_intra_predictors_sbuv_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_sbuv_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_sbuv_s;
-prototype void vp8_build_intra_predictors_mby "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mby "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby;
-prototype void vp8_build_comp_intra_predictors_mby "MACROBLOCKD *x"
+prototype void vp8_build_comp_intra_predictors_mby "struct macroblockd *x"
specialize vp8_build_comp_intra_predictors_mby;
-prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mby_s;
-prototype void vp8_build_intra_predictors_mbuv "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mbuv "struct macroblockd *x"
specialize vp8_build_intra_predictors_mbuv;
-prototype void vp8_build_intra_predictors_mbuv_s "MACROBLOCKD *x"
+prototype void vp8_build_intra_predictors_mbuv_s "struct macroblockd *x"
specialize vp8_build_intra_predictors_mbuv_s;
-prototype void vp8_build_comp_intra_predictors_mbuv "MACROBLOCKD *x"
+prototype void vp8_build_comp_intra_predictors_mbuv "struct macroblockd *x"
specialize vp8_build_comp_intra_predictors_mbuv;
-prototype void vp8_intra4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+prototype void vp8_intra4x4_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra4x4_predict;
-prototype void vp8_comp_intra4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+prototype void vp8_comp_intra4x4_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra4x4_predict;
-prototype void vp8_intra8x8_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+prototype void vp8_intra8x8_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra8x8_predict;
-prototype void vp8_comp_intra8x8_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+prototype void vp8_comp_intra8x8_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra8x8_predict;
-prototype void vp8_intra_uv4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
+prototype void vp8_intra_uv4x4_predict "struct blockd *x, int b_mode, unsigned char *predictor"
specialize vp8_intra_uv4x4_predict;
-prototype void vp8_comp_intra_uv4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
+prototype void vp8_comp_intra_uv4x4_predict "struct blockd *x, int b_mode, int second_mode, unsigned char *predictor"
specialize vp8_comp_intra_uv4x4_predict;
#
prototype unsigned int vp8_get_mb_ss "const short *"
specialize vp8_get_mb_ss mmx sse2
+# ENCODEMB INVOKE
+prototype int vp8_mbblock_error "struct macroblock *mb, int dc"
+specialize vp8_mbblock_error mmx sse2
+vp8_mbblock_error_sse2=vp8_mbblock_error_xmm
+
+prototype int vp8_block_error "short *coeff, short *dqcoeff, int block_size"
+specialize vp8_block_error mmx sse2
+vp8_block_error_sse2=vp8_block_error_xmm
+
+prototype void vp8_subtract_b "struct block *be, struct blockd *bd, int pitch"
+specialize vp8_subtract_b mmx sse2
+
+prototype int vp8_mbuverror "struct macroblock *mb"
+specialize vp8_mbuverror mmx sse2
+vp8_mbuverror_sse2=vp8_mbuverror_xmm
+
+prototype void vp8_subtract_mby "short *diff, unsigned char *src, unsigned char *pred, int stride"
+specialize vp8_subtract_mby mmx sse2
+
+prototype void vp8_subtract_mbuv "short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride"
+specialize vp8_subtract_mbuv mmx sse2
#
# Structured Similarity (SSIM)
int offset;
} search_site;
-typedef struct {
+typedef struct block {
// 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries
short *src_diff;
short *coeff;
int64_t txfm_rd_diff[NB_TXFM_MODES];
} PICK_MODE_CONTEXT;
-typedef struct {
+typedef struct macroblock {
DECLARE_ALIGNED(16, short, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
DECLARE_ALIGNED(16, short, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
DECLARE_ALIGNED(16, unsigned char, thismb[256]); // 16x16 Y
*/
#include "vpx_ports/config.h"
+#include "vpx_rtcd.h"
#include "vp8/common/idct.h"
#include "quantize.h"
#include "vp8/common/reconintra.h"
#include "dct.h"
#include "vp8/common/g_common.h"
#include "encodeintra.h"
-#include "vpx_rtcd.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
}
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
+ vp8_subtract_b(be, b, 16);
tx_type = get_tx_type(&x->e_mbd, b);
if (tx_type != DCT_DCT) {
vp8_build_comp_intra_predictors_mby(xd);
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
- xd->predictor, b->src_stride);
+ vp8_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
if (tx_size == TX_16X16) {
BLOCKD *bd = &xd->block[0];
}
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer,
- xd->predictor, x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ xd->predictor, x->src.uv_stride);
+
if (tx_size == TX_4X4) {
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
be = &x->block[ib + iblock[i]];
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
+ vp8_subtract_b(be, b, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b);
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
}
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 8);
+ vp8_subtract_b(be, b, 8);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
x->quantize_b_4x4(be, b);
#include "vpx_mem/vpx_mem.h"
#include "rdopt.h"
#include "vp8/common/systemdependent.h"
+#include "vpx_rtcd.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
BLOCK *b = &x->block[0];
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ vp8_subtract_mby(x->src_diff, *(b->base_src), x->e_mbd.predictor,
+ b->src_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
}
static void build_dcblock_4x4(MACROBLOCK *x) {
vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
- xd->predictor, b->src_stride);
+ vp8_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
vp8_transform_mby_4x4(x);
vp8_quantize_mby_4x4(x);
#include "vpx_ports/config.h"
#include "block.h"
-#define prototype_mberr(sym) \
- int (sym)(MACROBLOCK *mb, int dc)
-
-#define prototype_berr(sym) \
- int (sym)(short *coeff, short *dqcoeff, int block_size)
-
-#define prototype_mbuverr(sym) \
- int (sym)(MACROBLOCK *mb)
-
-#define prototype_subb(sym) \
- void (sym)(BLOCK *be,BLOCKD *bd, int pitch)
-
-#define prototype_submby(sym) \
- void (sym)(short *diff, unsigned char *src, unsigned char *pred, int stride)
-
-#define prototype_submbuv(sym) \
- void (sym)(short *diff, unsigned char *usrc, unsigned char *vsrc,\
- unsigned char *pred, int stride)
-
-#if ARCH_X86 || ARCH_X86_64
-#include "x86/encodemb_x86.h"
-#endif
-
-#if ARCH_ARM
-#include "arm/encodemb_arm.h"
-#endif
-
-#ifndef vp8_encodemb_berr
-#define vp8_encodemb_berr vp8_block_error_c
-#endif
-extern prototype_berr(vp8_encodemb_berr);
-
-#ifndef vp8_encodemb_mberr
-#define vp8_encodemb_mberr vp8_mbblock_error_c
-#endif
-extern prototype_mberr(vp8_encodemb_mberr);
-
-#ifndef vp8_encodemb_mbuverr
-#define vp8_encodemb_mbuverr vp8_mbuverror_c
-#endif
-extern prototype_mbuverr(vp8_encodemb_mbuverr);
-
-#ifndef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_c
-#endif
-extern prototype_subb(vp8_encodemb_subb);
-
-#ifndef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_c
-#endif
-extern prototype_submby(vp8_encodemb_submby);
-
-#ifndef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_c
-#endif
-extern prototype_submbuv(vp8_encodemb_submbuv);
-
-
-typedef struct {
- prototype_berr(*berr);
- prototype_mberr(*mberr);
- prototype_mbuverr(*mbuverr);
- prototype_subb(*subb);
- prototype_submby(*submby);
- prototype_submbuv(*submbuv);
-} vp8_encodemb_rtcd_vtable_t;
-
typedef struct {
MB_PREDICTION_MODE mode;
MV_REFERENCE_FRAME ref_frame;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
-
- cpi->rtcd.encodemb.berr = vp8_block_error_c;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_c;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_c;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_c;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_c;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_c;
-
cpi->rtcd.search.full_search = vp8_full_search_sad;
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
typedef struct VP8_ENCODER_RTCD {
VP8_COMMON_RTCD *common;
vp8_fdct_rtcd_vtable_t fdct;
- vp8_encodemb_rtcd_vtable_t encodemb;
vp8_search_rtcd_vtable_t search;
vp8_temporal_rtcd_vtable_t temporal;
} VP8_ENCODER_RTCD;
#include "vp8/common/seg_common.h"
#include "vp8/common/pred_common.h"
#include "vp8/common/entropy.h"
-
+#include "vpx_rtcd.h"
#if CONFIG_NEWBESTREFMV
#include "vp8/common/mvref_common.h"
#endif
BLOCK *beptr;
int d;
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
- mb->src_diff,
- *(mb->block[0].base_src),
- xd->predictor,
- mb->block[0].src_stride);
+ vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), xd->predictor,
+ mb->block[0].src_stride);
// Fdct and building the 2nd order block
for (beptr = mb->block; beptr < mb->block + 16; beptr += 2) {
mb->quantize_b_4x4(mb_y2, x_y2);
// Distortion
- d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 1);
+  d = vp8_mbblock_error(mb, 1);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff, 16);
+ d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff, 16);
*Distortion = (d >> 2);
// rate
BLOCKD *const x_y2 = xd->block + 24;
int d;
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
- mb->src_diff,
- *(mb->block[0].base_src),
- xd->predictor,
- mb->block[0].src_stride);
+ vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), xd->predictor,
+ mb->block[0].src_stride);
vp8_transform_mby_8x8(mb);
vp8_quantize_mby_8x8(mb);
xd->dqcoeff[128] = 0;
xd->dqcoeff[192] = 0;
- d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff, 16);
+ d = vp8_mbblock_error(mb, 0);
+ d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff, 16);
*Distortion = (d >> 2);
// rate
BLOCK *be = &mb->block[0];
TX_TYPE tx_type;
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
- mb->src_diff,
- *(mb->block[0].base_src),
- mb->e_mbd.predictor,
- mb->block[0].src_stride);
+ vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), mb->e_mbd.predictor,
+ mb->block[0].src_stride);
tx_type = get_tx_type_16x16(xd, b);
if (tx_type != DCT_DCT) {
if (mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED)
vp8_optimize_mby_16x16(mb, rtcd);
- d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
+ d = vp8_mbblock_error(mb, 0);
*Distortion = (d >> 2);
// rate
xd->dqcoeff[128] = 0;
xd->dqcoeff[192] = 0;
- d += ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(x, 0);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(by2->coeff, bdy2->dqcoeff, 16);
+ d += vp8_mbblock_error(x, 0);
+ d += vp8_block_error(by2->coeff, bdy2->dqcoeff, 16);
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += vp8_rdcost_mby_8x8(x, 0);
rate += bmode_costs[mode2];
}
#endif
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
+ vp8_subtract_b(be, b, 16);
b->bmi.as_mode.first = mode;
tx_type = get_tx_type_4x4(xd, b);
ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4);
rate += ratey;
- distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(
- be->coeff, b->dqcoeff, 16) >> 2;
+ distortion = vp8_block_error(be->coeff, b->dqcoeff, 16) >> 2;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
static int64_t rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel, int *skip) {
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer,
- x->src.v_buffer,
- x->e_mbd.predictor,
- x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *distortion = vp8_mbuverror(x) / 4;
*skip = mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += rd_cost_mbuv_8x8(x, 0);
- d += ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ d += vp8_mbuverror(x) / 4;
skippable = skippable && mbuv_is_skippable_8x8(xd);
}
static int64_t rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel, int *skip) {
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer,
- x->src.v_buffer,
- x->e_mbd.predictor,
- x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
*rate = rd_cost_mbuv_8x8(x, 1);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *distortion = vp8_mbuverror(x) / 4;
*skip = mbuv_is_skippable_8x8(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int *skippable, int fullpixel) {
vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
- *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *distortion = vp8_mbuverror(x) / 4;
*skippable = mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
#endif
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
- x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_4x4(x);
vp8_quantize_mbuv_4x4(x);
rate = rate_to
+ x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
- distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ distortion = vp8_mbuverror(x) / 4;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
mbmi->uv_mode = mode;
vp8_build_intra_predictors_mbuv(&x->e_mbd);
- ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
- x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
- x->src.uv_stride);
+ vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
+ x->e_mbd.predictor, x->src.uv_stride);
vp8_transform_mbuv_8x8(x);
vp8_quantize_mbuv_8x8(x);
rate_to = rd_cost_mbuv_8x8(x, 1);
rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
- distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ distortion = vp8_mbuverror(x) / 4;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
if (this_rd < best_rd) {
vp8_quantize_mbuv_8x8(x);
s &= mbuv_is_skippable_8x8(xd);
- d += ENCODEMB_INVOKE(&rtcd->encodemb, mbuverr)(x) >> 2;
+ d += vp8_mbuverror(x) >> 2;
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += rd_cost_mbuv_8x8(x, 0);
vp8_build_inter_predictors_b(bd, 16, xd->subpixel_predict);
if (xd->mode_info_context->mbmi.second_ref_frame)
vp8_build_2nd_inter_predictors_b(bd, 16, xd->subpixel_predict_avg);
- ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16);
+ vp8_subtract_b(be, bd, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, bd);
- thisdistortion = vp8_block_error_c(be->coeff, bd->dqcoeff, 16);
+ thisdistortion = vp8_block_error(be->coeff, bd->dqcoeff, 16);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
ta + vp8_block2above[i],
+++ /dev/null
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef ENCODEMB_X86_H
-#define ENCODEMB_X86_H
-
-
-/* Note:
- *
- * This platform is commonly built for runtime CPU detection. If you modify
- * any of the function mappings present in this file, be sure to also update
- * them in the function pointer initialization code
- */
-#if HAVE_MMX
-extern prototype_berr(vp8_block_error_mmx);
-extern prototype_mberr(vp8_mbblock_error_mmx);
-extern prototype_mbuverr(vp8_mbuverror_mmx);
-extern prototype_subb(vp8_subtract_b_mmx);
-extern prototype_submby(vp8_subtract_mby_mmx);
-extern prototype_submbuv(vp8_subtract_mbuv_mmx);
-
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_encodemb_berr
-#define vp8_encodemb_berr vp8_block_error_mmx
-
-#undef vp8_encodemb_mberr
-#define vp8_encodemb_mberr vp8_mbblock_error_mmx
-
-#undef vp8_encodemb_mbuverr
-#define vp8_encodemb_mbuverr vp8_mbuverror_mmx
-
-#undef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_mmx
-
-#undef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_mmx
-
-#undef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_mmx
-
-#endif
-#endif
-
-
-#if HAVE_SSE2
-extern prototype_berr(vp8_block_error_xmm);
-extern prototype_mberr(vp8_mbblock_error_xmm);
-extern prototype_mbuverr(vp8_mbuverror_xmm);
-extern prototype_subb(vp8_subtract_b_sse2);
-extern prototype_submby(vp8_subtract_mby_sse2);
-extern prototype_submbuv(vp8_subtract_mbuv_sse2);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_encodemb_berr
-#define vp8_encodemb_berr vp8_block_error_xmm
-
-#undef vp8_encodemb_mberr
-#define vp8_encodemb_mberr vp8_mbblock_error_xmm
-
-#undef vp8_encodemb_mbuverr
-#define vp8_encodemb_mbuverr vp8_mbuverror_xmm
-
-#undef vp8_encodemb_subb
-#define vp8_encodemb_subb vp8_subtract_b_sse2
-
-#undef vp8_encodemb_submby
-#define vp8_encodemb_submby vp8_subtract_mby_sse2
-
-#undef vp8_encodemb_submbuv
-#define vp8_encodemb_submbuv vp8_subtract_mbuv_sse2
-
-#endif
-#endif
-
-
-#endif
*/
/* Override default functions with fastest ones for this CPU. */
-#if HAVE_MMX
- if (flags & HAS_MMX) {
- cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_mmx;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_mmx;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_mmx;
- }
-#endif
-
#if HAVE_SSE2
if (flags & HAS_SSE2) {
- cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
- cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
- cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;
- cpi->rtcd.encodemb.subb = vp8_subtract_b_sse2;
- cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
- cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
}
VP8_CX_SRCS-yes += encoder/mbgraph.h
-VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/encodemb_x86.h
VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/dct_x86.h
VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/mcomp_x86.h
VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/quantize_x86.h