From: Paul Wilkins Date: Tue, 14 Aug 2012 10:32:29 +0000 (+0100) Subject: Code clean up. X-Git-Tag: v1.3.0~1217^2~308 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=39892cceadc48aac8ffca69f9700c854b0bd2441;p=platform%2Fupstream%2Flibvpx.git Code clean up. References to MACROBLOCKD that use "x" changed to "xd" to comply with convention elsewhere that x = MACROBLOCK and xd = MACROBLOCKD. Simplify some repeat references using local variables. Change-Id: I0ba2e79536add08140a6c8b19698fcf5077246bc --- diff --git a/vp8/common/invtrans.c b/vp8/common/invtrans.c index 1357839..a99f298 100644 --- a/vp8/common/invtrans.c +++ b/vp8/common/invtrans.c @@ -68,25 +68,27 @@ void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD } -void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) { +void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, + MACROBLOCKD *xd) { int i; + BLOCKD *blockd = xd->block; - if (x->mode_info_context->mbmi.mode != B_PRED && - x->mode_info_context->mbmi.mode != I8X8_PRED && - x->mode_info_context->mbmi.mode != SPLITMV) { + if (xd->mode_info_context->mbmi.mode != B_PRED && + xd->mode_info_context->mbmi.mode != I8X8_PRED && + xd->mode_info_context->mbmi.mode != SPLITMV) { /* do 2nd order transform on the dc block */ - IDCT_INVOKE(rtcd, iwalsh16)(&x->block[24].dqcoeff[0], x->block[24].diff); - recon_dcblock(x); + IDCT_INVOKE(rtcd, iwalsh16)(&blockd[24].dqcoeff[0], blockd[24].diff); + recon_dcblock(xd); } for (i = 0; i < 16; i++) { - vp8_inverse_transform_b(rtcd, &x->block[i], 32); + vp8_inverse_transform_b(rtcd, &blockd[i], 32); } for (i = 16; i < 24; i++) { - vp8_inverse_transform_b(rtcd, &x->block[i], 16); + vp8_inverse_transform_b(rtcd, &blockd[i], 16); } } @@ -102,53 +104,65 @@ void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *inpu } -void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) { +void 
vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, + MACROBLOCKD *xd) { int i; + BLOCKD *blockd = xd->block; // do 2nd order transform on the dc block - IDCT_INVOKE(rtcd, ihaar2)(x->block[24].dqcoeff, x->block[24].diff, 8); + IDCT_INVOKE(rtcd, ihaar2)(blockd[24].dqcoeff, blockd[24].diff, 8); - recon_dcblock_8x8(x); // need to change for 8x8 + recon_dcblock_8x8(xd); // need to change for 8x8 for (i = 0; i < 9; i += 8) { - vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 32); + vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0], + &blockd[i].diff[0], 32); } for (i = 2; i < 11; i += 8) { - vp8_inverse_transform_b_8x8(rtcd, &x->block[i + 2].dqcoeff[0], &x->block[i].diff[0], 32); + vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0], + &blockd[i].diff[0], 32); } } -void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) { +void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, + MACROBLOCKD *xd) { int i; + BLOCKD *blockd = xd->block; for (i = 16; i < 24; i += 4) { - vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 16); + vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0], + &blockd[i].diff[0], 16); } } -void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) { +void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, + MACROBLOCKD *xd) { int i; + BLOCKD *blockd = xd->block; - if (x->mode_info_context->mbmi.mode != B_PRED && - x->mode_info_context->mbmi.mode != SPLITMV) { + if (xd->mode_info_context->mbmi.mode != B_PRED && + xd->mode_info_context->mbmi.mode != SPLITMV) { // do 2nd order transform on the dc block - IDCT_INVOKE(rtcd, ihaar2)(&x->block[24].dqcoeff[0], x->block[24].diff, 8);// dqcoeff[0] - recon_dcblock_8x8(x); // need to change for 8x8 + IDCT_INVOKE(rtcd, ihaar2)(&blockd[24].dqcoeff[0], + blockd[24].diff, 8);// dqcoeff[0] + recon_dcblock_8x8(xd); // need to change for 8x8 
} for (i = 0; i < 9; i += 8) { - vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 32); + vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0], + &blockd[i].diff[0], 32); } for (i = 2; i < 11; i += 8) { - vp8_inverse_transform_b_8x8(rtcd, &x->block[i + 2].dqcoeff[0], &x->block[i].diff[0], 32); + vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0], + &blockd[i].diff[0], 32); } - for (i = 16; i < 24; i += 4) { - vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 16); + vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0], + &blockd[i].diff[0], 16); } } @@ -160,26 +174,36 @@ void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd, IDCT_INVOKE(rtcd, idct16x16)(input_dqcoeff, output_coeff, pitch); } -void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) { - vp8_inverse_transform_b_16x16(rtcd, &x->block[0].dqcoeff[0], &x->block[0].diff[0], 32); +void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, + MACROBLOCKD *xd) { + vp8_inverse_transform_b_16x16(rtcd, &xd->block[0].dqcoeff[0], + &xd->block[0].diff[0], 32); } // U,V blocks are 8x8 per macroblock, so just run 8x8 -void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) { +void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd, + MACROBLOCKD *xd) { int i; + BLOCKD *blockd = xd->block; + for (i = 16; i < 24; i += 4) - vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 16); + vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0], + &blockd[i].diff[0], 16); } -void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) { +void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, + MACROBLOCKD *xd) { int i; + BLOCKD *blockd = xd->block; // Luma - vp8_inverse_transform_b_16x16(rtcd, &x->block[0].dqcoeff[0], &x->block[0].diff[0], 32); + 
vp8_inverse_transform_b_16x16(rtcd, &blockd[0].dqcoeff[0], + &blockd[0].diff[0], 32); // U, V // Chroma blocks are downscaled, so run an 8x8 on them. for (i = 16; i < 24; i+= 4) - vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 16); + vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0], + &blockd[i].diff[0], 16); } #endif diff --git a/vp8/common/mbpitch.c b/vp8/common/mbpitch.c index 1a84317..4c5c56c 100644 --- a/vp8/common/mbpitch.c +++ b/vp8/common/mbpitch.c @@ -41,77 +41,84 @@ static void setup_block } -static void setup_macroblock(MACROBLOCKD *x, BLOCKSET bs) { +static void setup_macroblock(MACROBLOCKD *xd, BLOCKSET bs) { int block; unsigned char **y, **u, **v; unsigned char **y2, **u2, **v2; + BLOCKD *blockd = xd->block; + int stride; if (bs == DEST) { - y = &x->dst.y_buffer; - u = &x->dst.u_buffer; - v = &x->dst.v_buffer; + y = &xd->dst.y_buffer; + u = &xd->dst.u_buffer; + v = &xd->dst.v_buffer; } else { - y = &x->pre.y_buffer; - u = &x->pre.u_buffer; - v = &x->pre.v_buffer; + y = &xd->pre.y_buffer; + u = &xd->pre.u_buffer; + v = &xd->pre.v_buffer; - y2 = &x->second_pre.y_buffer; - u2 = &x->second_pre.u_buffer; - v2 = &x->second_pre.v_buffer; + y2 = &xd->second_pre.y_buffer; + u2 = &xd->second_pre.u_buffer; + v2 = &xd->second_pre.v_buffer; } + stride = xd->dst.y_stride; for (block = 0; block < 16; block++) { /* y blocks */ - setup_block(&x->block[block], x->dst.y_stride, y, y2, x->dst.y_stride, - (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4, bs); + setup_block(&blockd[block], stride, y, y2, stride, + (block >> 2) * 4 * stride + (block & 3) * 4, bs); } + stride = xd->dst.uv_stride; for (block = 16; block < 20; block++) { /* U and V blocks */ - setup_block(&x->block[block], x->dst.uv_stride, u, u2, x->dst.uv_stride, - ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs); + setup_block(&blockd[block], stride, u, u2, stride, + ((block - 16) >> 1) * 4 * stride + (block & 1) * 4, bs); - 
setup_block(&x->block[block + 4], x->dst.uv_stride, v, v2, x->dst.uv_stride, - ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs); + setup_block(&blockd[block + 4], stride, v, v2, stride, + ((block - 16) >> 1) * 4 * stride + (block & 1) * 4, bs); } } -void vp8_setup_block_dptrs(MACROBLOCKD *x) { +void vp8_setup_block_dptrs(MACROBLOCKD *xd) { int r, c; + BLOCKD *blockd = xd->block; for (r = 0; r < 4; r++) { for (c = 0; c < 4; c++) { - x->block[r * 4 + c].diff = &x->diff[r * 4 * 16 + c * 4]; - x->block[r * 4 + c].predictor = x->predictor + r * 4 * 16 + c * 4; + blockd[r * 4 + c].diff = &xd->diff[r * 4 * 16 + c * 4]; + blockd[r * 4 + c].predictor = xd->predictor + r * 4 * 16 + c * 4; } } for (r = 0; r < 2; r++) { for (c = 0; c < 2; c++) { - x->block[16 + r * 2 + c].diff = &x->diff[256 + r * 4 * 8 + c * 4]; - x->block[16 + r * 2 + c].predictor = x->predictor + 256 + r * 4 * 8 + c * 4; + blockd[16 + r * 2 + c].diff = &xd->diff[256 + r * 4 * 8 + c * 4]; + blockd[16 + r * 2 + c].predictor = + xd->predictor + 256 + r * 4 * 8 + c * 4; } } for (r = 0; r < 2; r++) { for (c = 0; c < 2; c++) { - x->block[20 + r * 2 + c].diff = &x->diff[320 + r * 4 * 8 + c * 4]; - x->block[20 + r * 2 + c].predictor = x->predictor + 320 + r * 4 * 8 + c * 4; + blockd[20 + r * 2 + c].diff = &xd->diff[320 + r * 4 * 8 + c * 4]; + blockd[20 + r * 2 + c].predictor = + xd->predictor + 320 + r * 4 * 8 + c * 4; } } - x->block[24].diff = &x->diff[384]; + blockd[24].diff = &xd->diff[384]; for (r = 0; r < 25; r++) { - x->block[r].qcoeff = x->qcoeff + r * 16; - x->block[r].dqcoeff = x->dqcoeff + r * 16; + blockd[r].qcoeff = xd->qcoeff + r * 16; + blockd[r].dqcoeff = xd->dqcoeff + r * 16; } } -void vp8_build_block_doffsets(MACROBLOCKD *x) { +void vp8_build_block_doffsets(MACROBLOCKD *xd) { /* handle the destination pitch features */ - setup_macroblock(x, DEST); - setup_macroblock(x, PRED); + setup_macroblock(xd, DEST); + setup_macroblock(xd, PRED); } diff --git a/vp8/common/reconinter.c 
b/vp8/common/reconinter.c index b174d6a..1e6dfa3 100644 --- a/vp8/common/reconinter.c +++ b/vp8/common/reconinter.c @@ -479,8 +479,9 @@ void filter_mb(unsigned char *src, int src_stride, #endif // CONFIG_PRED_FILTER /*encoder only*/ -void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) { +void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) { int i, j; + BLOCKD *blockd = xd->block; /* build uv mvs */ for (i = 0; i < 2; i++) { @@ -490,36 +491,38 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) { int voffset = 20 + i * 2 + j; int temp; - temp = x->block[yoffset ].bmi.as_mv.first.as_mv.row - + x->block[yoffset + 1].bmi.as_mv.first.as_mv.row - + x->block[yoffset + 4].bmi.as_mv.first.as_mv.row - + x->block[yoffset + 5].bmi.as_mv.first.as_mv.row; + temp = blockd[yoffset ].bmi.as_mv.first.as_mv.row + + blockd[yoffset + 1].bmi.as_mv.first.as_mv.row + + blockd[yoffset + 4].bmi.as_mv.first.as_mv.row + + blockd[yoffset + 5].bmi.as_mv.first.as_mv.row; if (temp < 0) temp -= 4; else temp += 4; - x->block[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) & x->fullpixel_mask; + xd->block[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) & + xd->fullpixel_mask; - temp = x->block[yoffset ].bmi.as_mv.first.as_mv.col - + x->block[yoffset + 1].bmi.as_mv.first.as_mv.col - + x->block[yoffset + 4].bmi.as_mv.first.as_mv.col - + x->block[yoffset + 5].bmi.as_mv.first.as_mv.col; + temp = blockd[yoffset ].bmi.as_mv.first.as_mv.col + + blockd[yoffset + 1].bmi.as_mv.first.as_mv.col + + blockd[yoffset + 4].bmi.as_mv.first.as_mv.col + + blockd[yoffset + 5].bmi.as_mv.first.as_mv.col; if (temp < 0) temp -= 4; else temp += 4; - x->block[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) & x->fullpixel_mask; + blockd[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) & + xd->fullpixel_mask; - x->block[voffset].bmi.as_mv.first.as_mv.row = - x->block[uoffset].bmi.as_mv.first.as_mv.row; - x->block[voffset].bmi.as_mv.first.as_mv.col = - x->block[uoffset].bmi.as_mv.first.as_mv.col; + 
blockd[voffset].bmi.as_mv.first.as_mv.row = + blockd[uoffset].bmi.as_mv.first.as_mv.row; + blockd[voffset].bmi.as_mv.first.as_mv.col = + blockd[uoffset].bmi.as_mv.first.as_mv.col; - if (x->mode_info_context->mbmi.second_ref_frame) { - temp = x->block[yoffset ].bmi.as_mv.second.as_mv.row - + x->block[yoffset + 1].bmi.as_mv.second.as_mv.row - + x->block[yoffset + 4].bmi.as_mv.second.as_mv.row - + x->block[yoffset + 5].bmi.as_mv.second.as_mv.row; + if (xd->mode_info_context->mbmi.second_ref_frame) { + temp = blockd[yoffset ].bmi.as_mv.second.as_mv.row + + blockd[yoffset + 1].bmi.as_mv.second.as_mv.row + + blockd[yoffset + 4].bmi.as_mv.second.as_mv.row + + blockd[yoffset + 5].bmi.as_mv.second.as_mv.row; if (temp < 0) { temp -= 4; @@ -527,12 +530,13 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) { temp += 4; } - x->block[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) & x->fullpixel_mask; + blockd[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) & + xd->fullpixel_mask; - temp = x->block[yoffset ].bmi.as_mv.second.as_mv.col - + x->block[yoffset + 1].bmi.as_mv.second.as_mv.col - + x->block[yoffset + 4].bmi.as_mv.second.as_mv.col - + x->block[yoffset + 5].bmi.as_mv.second.as_mv.col; + temp = blockd[yoffset ].bmi.as_mv.second.as_mv.col + + blockd[yoffset + 1].bmi.as_mv.second.as_mv.col + + blockd[yoffset + 4].bmi.as_mv.second.as_mv.col + + blockd[yoffset + 5].bmi.as_mv.second.as_mv.col; if (temp < 0) { temp -= 4; @@ -540,30 +544,31 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) { temp += 4; } - x->block[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) & x->fullpixel_mask; + blockd[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) & + xd->fullpixel_mask; - x->block[voffset].bmi.as_mv.second.as_mv.row = - x->block[uoffset].bmi.as_mv.second.as_mv.row; - x->block[voffset].bmi.as_mv.second.as_mv.col = - x->block[uoffset].bmi.as_mv.second.as_mv.col; + blockd[voffset].bmi.as_mv.second.as_mv.row = + blockd[uoffset].bmi.as_mv.second.as_mv.row; + 
blockd[voffset].bmi.as_mv.second.as_mv.col = + blockd[uoffset].bmi.as_mv.second.as_mv.col; } } } for (i = 16; i < 24; i += 2) { - BLOCKD *d0 = &x->block[i]; - BLOCKD *d1 = &x->block[i + 1]; + BLOCKD *d0 = &blockd[i]; + BLOCKD *d1 = &blockd[i + 1]; if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int) - build_inter_predictors2b(x, d0, 8); + build_inter_predictors2b(xd, d0, 8); else { - vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict); - vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict); + vp8_build_inter_predictors_b(d0, 8, xd->subpixel_predict); + vp8_build_inter_predictors_b(d1, 8, xd->subpixel_predict); } - if (x->mode_info_context->mbmi.second_ref_frame) { - vp8_build_2nd_inter_predictors_b(d0, 8, x->subpixel_predict_avg); - vp8_build_2nd_inter_predictors_b(d1, 8, x->subpixel_predict_avg); + if (xd->mode_info_context->mbmi.second_ref_frame) { + vp8_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg); + vp8_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg); } } } @@ -652,20 +657,20 @@ void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd, } } -void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *x, +void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd, unsigned char *dst_u, unsigned char *dst_v, int dst_uvstride) { int offset; unsigned char *uptr, *vptr; - int pre_stride = x->block[0].pre_stride; + int pre_stride = xd->block[0].pre_stride; int_mv _o16x16mv; int_mv _16x16mv; - _16x16mv.as_int = x->mode_info_context->mbmi.mv[0].as_int; + _16x16mv.as_int = xd->mode_info_context->mbmi.mv[0].as_int; - if (x->mode_info_context->mbmi.need_to_clamp_mvs) - clamp_mv_to_umv_border(&_16x16mv.as_mv, x); + if (xd->mode_info_context->mbmi.need_to_clamp_mvs) + clamp_mv_to_umv_border(&_16x16mv.as_mv, xd); _o16x16mv = _16x16mv; /* calc uv motion vectors */ @@ -682,16 +687,16 @@ void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *x, _16x16mv.as_mv.row /= 2; _16x16mv.as_mv.col /= 2; - _16x16mv.as_mv.row &= 
x->fullpixel_mask; - _16x16mv.as_mv.col &= x->fullpixel_mask; + _16x16mv.as_mv.row &= xd->fullpixel_mask; + _16x16mv.as_mv.col &= xd->fullpixel_mask; pre_stride >>= 1; offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3); - uptr = x->pre.u_buffer + offset; - vptr = x->pre.v_buffer + offset; + uptr = xd->pre.u_buffer + offset; + vptr = xd->pre.v_buffer + offset; #if CONFIG_PRED_FILTER - if (x->mode_info_context->mbmi.pred_filter_enabled) { + if (xd->mode_info_context->mbmi.pred_filter_enabled) { int i; unsigned char *pSrc = uptr; unsigned char *pDst = dst_u; @@ -722,23 +727,27 @@ void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *x, } else #endif if (_o16x16mv.as_int & 0x000f000f) { - x->subpixel_predict8x8(uptr, pre_stride, _o16x16mv.as_mv.col & 15, _o16x16mv.as_mv.row & 15, dst_u, dst_uvstride); - x->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15, _o16x16mv.as_mv.row & 15, dst_v, dst_uvstride); + xd->subpixel_predict8x8(uptr, pre_stride, _o16x16mv.as_mv.col & 15, + _o16x16mv.as_mv.row & 15, dst_u, dst_uvstride); + xd->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15, + _o16x16mv.as_mv.row & 15, dst_v, dst_uvstride); } else { - RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride); - RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride); + RECON_INVOKE(&xd->rtcd->recon, copy8x8) + (uptr, pre_stride, dst_u, dst_uvstride); + RECON_INVOKE(&xd->rtcd->recon, copy8x8) + (vptr, pre_stride, dst_v, dst_uvstride); } } -void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *x, +void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd, unsigned char *dst_y, unsigned char *dst_u, unsigned char *dst_v, int dst_ystride, int dst_uvstride) { - vp8_build_1st_inter16x16_predictors_mby(x, dst_y, dst_ystride); - vp8_build_1st_inter16x16_predictors_mbuv(x, dst_u, dst_v, dst_uvstride); + vp8_build_1st_inter16x16_predictors_mby(xd, dst_y, dst_ystride); + 
vp8_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride); } /* @@ -755,7 +764,7 @@ void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *x, * which sometimes leads to better prediction than from a * single reference framer. */ -void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *x, +void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd, unsigned char *dst_y, int dst_ystride) { unsigned char *ptr; @@ -764,13 +773,13 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *x, int mv_row; int mv_col; - unsigned char *ptr_base = x->second_pre.y_buffer; - int pre_stride = x->block[0].pre_stride; + unsigned char *ptr_base = xd->second_pre.y_buffer; + int pre_stride = xd->block[0].pre_stride; - _16x16mv.as_int = x->mode_info_context->mbmi.mv[1].as_int; + _16x16mv.as_int = xd->mode_info_context->mbmi.mv[1].as_int; - if (x->mode_info_context->mbmi.need_to_clamp_secondmv) - clamp_mv_to_umv_border(&_16x16mv.as_mv, x); + if (xd->mode_info_context->mbmi.need_to_clamp_secondmv) + clamp_mv_to_umv_border(&_16x16mv.as_mv, xd); mv_row = _16x16mv.as_mv.row; mv_col = _16x16mv.as_mv.col; @@ -778,7 +787,7 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *x, ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); #if CONFIG_PRED_FILTER - if (x->mode_info_context->mbmi.pred_filter_enabled) { + if (xd->mode_info_context->mbmi.pred_filter_enabled) { if ((mv_row | mv_col) & 7) { // Sub-pel filter needs extended input int len = 15 + (INTERP_EXTEND << 1); @@ -790,28 +799,28 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *x, Temp, len, len, len); // Sub-pel filter - x->subpixel_predict_avg16x16(pTemp, len, (mv_col & 7) << 1, - (mv_row & 7) << 1, dst_y, dst_ystride); + xd->subpixel_predict_avg16x16(pTemp, len, (mv_col & 7) << 1, + (mv_row & 7) << 1, dst_y, dst_ystride); } else { // TODO Needs to AVERAGE with the dst_y // For now, do not apply the prediction filter in these cases! 
- RECON_INVOKE(&x->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y, - dst_ystride); + RECON_INVOKE(&xd->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y, + dst_ystride); } } else #endif // CONFIG_PRED_FILTER { if ((mv_row | mv_col) & 7) { - x->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1, - (mv_row & 7) << 1, dst_y, dst_ystride); + xd->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1, + (mv_row & 7) << 1, dst_y, dst_ystride); } else { - RECON_INVOKE(&x->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y, - dst_ystride); + RECON_INVOKE(&xd->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y, + dst_ystride); } } } -void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *x, +void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd, unsigned char *dst_u, unsigned char *dst_v, int dst_uvstride) { @@ -823,12 +832,12 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *x, int mv_col; int omv_row, omv_col; - int pre_stride = x->block[0].pre_stride; + int pre_stride = xd->block[0].pre_stride; - _16x16mv.as_int = x->mode_info_context->mbmi.mv[1].as_int; + _16x16mv.as_int = xd->mode_info_context->mbmi.mv[1].as_int; - if (x->mode_info_context->mbmi.need_to_clamp_secondmv) - clamp_mv_to_umv_border(&_16x16mv.as_mv, x); + if (xd->mode_info_context->mbmi.need_to_clamp_secondmv) + clamp_mv_to_umv_border(&_16x16mv.as_mv, xd); mv_row = _16x16mv.as_mv.row; mv_col = _16x16mv.as_mv.col; @@ -839,16 +848,16 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *x, mv_row = (mv_row + (mv_row > 0)) >> 1; mv_col = (mv_col + (mv_col > 0)) >> 1; - mv_row &= x->fullpixel_mask; - mv_col &= x->fullpixel_mask; + mv_row &= xd->fullpixel_mask; + mv_col &= xd->fullpixel_mask; pre_stride >>= 1; offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); - uptr = x->second_pre.u_buffer + offset; - vptr = x->second_pre.v_buffer + offset; + uptr = xd->second_pre.u_buffer + offset; + vptr = xd->second_pre.v_buffer + offset; #if CONFIG_PRED_FILTER - if 
(x->mode_info_context->mbmi.pred_filter_enabled) { + if (xd->mode_info_context->mbmi.pred_filter_enabled) { int i; int len = 7 + (INTERP_EXTEND << 1); unsigned char Temp[32 * 32]; // Data required by sub-pel filter @@ -864,13 +873,13 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *x, Temp, len, len, len); // Sub-pel filter - x->subpixel_predict_avg8x8(pTemp, len, omv_col & 15, - omv_row & 15, pDst, dst_uvstride); + xd->subpixel_predict_avg8x8(pTemp, len, omv_col & 15, + omv_row & 15, pDst, dst_uvstride); } else { // TODO Needs to AVERAGE with the dst_[u|v] // For now, do not apply the prediction filter here! - RECON_INVOKE(&x->rtcd->recon, avg8x8)(pSrc, pre_stride, pDst, - dst_uvstride); + RECON_INVOKE(&xd->rtcd->recon, avg8x8)(pSrc, pre_stride, pDst, + dst_uvstride); } // V @@ -880,110 +889,115 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *x, } else #endif // CONFIG_PRED_FILTER if ((omv_row | omv_col) & 15) { - x->subpixel_predict_avg8x8(uptr, pre_stride, omv_col & 15, omv_row & 15, dst_u, dst_uvstride); - x->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15, omv_row & 15, dst_v, dst_uvstride); + xd->subpixel_predict_avg8x8(uptr, pre_stride, omv_col & 15, + omv_row & 15, dst_u, dst_uvstride); + xd->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15, + omv_row & 15, dst_v, dst_uvstride); } else { - RECON_INVOKE(&x->rtcd->recon, avg8x8)(uptr, pre_stride, dst_u, dst_uvstride); - RECON_INVOKE(&x->rtcd->recon, avg8x8)(vptr, pre_stride, dst_v, dst_uvstride); + RECON_INVOKE(&xd->rtcd->recon, avg8x8)(uptr, pre_stride, dst_u, dst_uvstride); + RECON_INVOKE(&xd->rtcd->recon, avg8x8)(vptr, pre_stride, dst_v, dst_uvstride); } } -void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x, +void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd, unsigned char *dst_y, unsigned char *dst_u, unsigned char *dst_v, int dst_ystride, int dst_uvstride) { - vp8_build_2nd_inter16x16_predictors_mby(x, dst_y, dst_ystride); - 
vp8_build_2nd_inter16x16_predictors_mbuv(x, dst_u, dst_v, dst_uvstride); + vp8_build_2nd_inter16x16_predictors_mby(xd, dst_y, dst_ystride); + vp8_build_2nd_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride); } -static void build_inter4x4_predictors_mb(MACROBLOCKD *x) { +static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) { int i; - - if (x->mode_info_context->mbmi.partitioning < 3) { - x->block[ 0].bmi = x->mode_info_context->bmi[ 0]; - x->block[ 2].bmi = x->mode_info_context->bmi[ 2]; - x->block[ 8].bmi = x->mode_info_context->bmi[ 8]; - x->block[10].bmi = x->mode_info_context->bmi[10]; - - if (x->mode_info_context->mbmi.need_to_clamp_mvs) { - clamp_mv_to_umv_border(&x->block[ 0].bmi.as_mv.first.as_mv, x); - clamp_mv_to_umv_border(&x->block[ 2].bmi.as_mv.first.as_mv, x); - clamp_mv_to_umv_border(&x->block[ 8].bmi.as_mv.first.as_mv, x); - clamp_mv_to_umv_border(&x->block[10].bmi.as_mv.first.as_mv, x); - if (x->mode_info_context->mbmi.second_ref_frame) { - clamp_mv_to_umv_border(&x->block[ 0].bmi.as_mv.second.as_mv, x); - clamp_mv_to_umv_border(&x->block[ 2].bmi.as_mv.second.as_mv, x); - clamp_mv_to_umv_border(&x->block[ 8].bmi.as_mv.second.as_mv, x); - clamp_mv_to_umv_border(&x->block[10].bmi.as_mv.second.as_mv, x); + MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi; + BLOCKD *blockd = xd->block; + + if (xd->mode_info_context->mbmi.partitioning < 3) { + blockd[ 0].bmi = xd->mode_info_context->bmi[ 0]; + blockd[ 2].bmi = xd->mode_info_context->bmi[ 2]; + blockd[ 8].bmi = xd->mode_info_context->bmi[ 8]; + blockd[10].bmi = xd->mode_info_context->bmi[10]; + + if (mbmi->need_to_clamp_mvs) { + clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv.first.as_mv, xd); + clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv.first.as_mv, xd); + clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv.first.as_mv, xd); + clamp_mv_to_umv_border(&blockd[10].bmi.as_mv.first.as_mv, xd); + if (mbmi->second_ref_frame) { + clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv.second.as_mv, xd); + 
clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv.second.as_mv, xd); + clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv.second.as_mv, xd); + clamp_mv_to_umv_border(&blockd[10].bmi.as_mv.second.as_mv, xd); } } - build_inter_predictors4b(x, &x->block[ 0], 16); - build_inter_predictors4b(x, &x->block[ 2], 16); - build_inter_predictors4b(x, &x->block[ 8], 16); - build_inter_predictors4b(x, &x->block[10], 16); + build_inter_predictors4b(xd, &blockd[ 0], 16); + build_inter_predictors4b(xd, &blockd[ 2], 16); + build_inter_predictors4b(xd, &blockd[ 8], 16); + build_inter_predictors4b(xd, &blockd[10], 16); - if (x->mode_info_context->mbmi.second_ref_frame) { - build_2nd_inter_predictors4b(x, &x->block[ 0], 16); - build_2nd_inter_predictors4b(x, &x->block[ 2], 16); - build_2nd_inter_predictors4b(x, &x->block[ 8], 16); - build_2nd_inter_predictors4b(x, &x->block[10], 16); + if (mbmi->second_ref_frame) { + build_2nd_inter_predictors4b(xd, &blockd[ 0], 16); + build_2nd_inter_predictors4b(xd, &blockd[ 2], 16); + build_2nd_inter_predictors4b(xd, &blockd[ 8], 16); + build_2nd_inter_predictors4b(xd, &blockd[10], 16); } } else { for (i = 0; i < 16; i += 2) { - BLOCKD *d0 = &x->block[i]; - BLOCKD *d1 = &x->block[i + 1]; - - x->block[i + 0].bmi = x->mode_info_context->bmi[i + 0]; - x->block[i + 1].bmi = x->mode_info_context->bmi[i + 1]; - - if (x->mode_info_context->mbmi.need_to_clamp_mvs) { - clamp_mv_to_umv_border(&x->block[i + 0].bmi.as_mv.first.as_mv, x); - clamp_mv_to_umv_border(&x->block[i + 1].bmi.as_mv.first.as_mv, x); - if (x->mode_info_context->mbmi.second_ref_frame) { - clamp_mv_to_umv_border(&x->block[i + 0].bmi.as_mv.second.as_mv, x); - clamp_mv_to_umv_border(&x->block[i + 1].bmi.as_mv.second.as_mv, x); + BLOCKD *d0 = &blockd[i]; + BLOCKD *d1 = &blockd[i + 1]; + + blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0]; + blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1]; + + if (mbmi->need_to_clamp_mvs) { + clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv.first.as_mv, xd); + 
clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv.first.as_mv, xd); + if (mbmi->second_ref_frame) { + clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv.second.as_mv, xd); + clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv.second.as_mv, xd); } } if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int) - build_inter_predictors2b(x, d0, 16); + build_inter_predictors2b(xd, d0, 16); else { - vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict); - vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict); + vp8_build_inter_predictors_b(d0, 16, xd->subpixel_predict); + vp8_build_inter_predictors_b(d1, 16, xd->subpixel_predict); } - if (x->mode_info_context->mbmi.second_ref_frame) { - vp8_build_2nd_inter_predictors_b(d0, 16, x->subpixel_predict_avg); - vp8_build_2nd_inter_predictors_b(d1, 16, x->subpixel_predict_avg); + if (mbmi->second_ref_frame) { + vp8_build_2nd_inter_predictors_b(d0, 16, xd->subpixel_predict_avg); + vp8_build_2nd_inter_predictors_b(d1, 16, xd->subpixel_predict_avg); } } } for (i = 16; i < 24; i += 2) { - BLOCKD *d0 = &x->block[i]; - BLOCKD *d1 = &x->block[i + 1]; + BLOCKD *d0 = &blockd[i]; + BLOCKD *d1 = &blockd[i + 1]; if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int) - build_inter_predictors2b(x, d0, 8); + build_inter_predictors2b(xd, d0, 8); else { - vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict); - vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict); + vp8_build_inter_predictors_b(d0, 8, xd->subpixel_predict); + vp8_build_inter_predictors_b(d1, 8, xd->subpixel_predict); } - if (x->mode_info_context->mbmi.second_ref_frame) { - vp8_build_2nd_inter_predictors_b(d0, 8, x->subpixel_predict_avg); - vp8_build_2nd_inter_predictors_b(d1, 8, x->subpixel_predict_avg); + if (mbmi->second_ref_frame) { + vp8_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg); + vp8_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg); } } } static -void build_4x4uvmvs(MACROBLOCKD *x) { +void build_4x4uvmvs(MACROBLOCKD *xd) { 
int i, j; + BLOCKD *blockd = xd->block; for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { @@ -993,42 +1007,44 @@ void build_4x4uvmvs(MACROBLOCKD *x) { int temp; - temp = x->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.row - + x->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.row - + x->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.row - + x->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.row; + temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.row + + xd->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.row + + xd->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.row + + xd->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.row; if (temp < 0) temp -= 4; else temp += 4; - x->block[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) & x->fullpixel_mask; + blockd[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) & + xd->fullpixel_mask; - temp = x->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.col - + x->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.col - + x->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.col - + x->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.col; + temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.col + + xd->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.col + + xd->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.col + + xd->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.col; if (temp < 0) temp -= 4; else temp += 4; - x->block[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) & x->fullpixel_mask; + blockd[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) & + xd->fullpixel_mask; // if (x->mode_info_context->mbmi.need_to_clamp_mvs) - clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.as_mv.first.as_mv, x); + clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv.first.as_mv, xd); // if (x->mode_info_context->mbmi.need_to_clamp_mvs) - clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.as_mv.first.as_mv, x); + 
clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv.first.as_mv, xd); - x->block[voffset].bmi.as_mv.first.as_mv.row = - x->block[uoffset].bmi.as_mv.first.as_mv.row; - x->block[voffset].bmi.as_mv.first.as_mv.col = - x->block[uoffset].bmi.as_mv.first.as_mv.col; + blockd[voffset].bmi.as_mv.first.as_mv.row = + blockd[uoffset].bmi.as_mv.first.as_mv.row; + blockd[voffset].bmi.as_mv.first.as_mv.col = + blockd[uoffset].bmi.as_mv.first.as_mv.col; - if (x->mode_info_context->mbmi.second_ref_frame) { - temp = x->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.row - + x->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.row - + x->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.row - + x->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.row; + if (xd->mode_info_context->mbmi.second_ref_frame) { + temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.row + + xd->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.row + + xd->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.row + + xd->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.row; if (temp < 0) { temp -= 4; @@ -1036,12 +1052,13 @@ void build_4x4uvmvs(MACROBLOCKD *x) { temp += 4; } - x->block[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) & x->fullpixel_mask; + blockd[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) & + xd->fullpixel_mask; - temp = x->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.col - + x->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.col - + x->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.col - + x->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.col; + temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.col + + xd->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.col + + xd->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.col + + xd->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.col; if (temp < 0) { temp -= 4; @@ -1049,38 +1066,42 @@ void build_4x4uvmvs(MACROBLOCKD *x) { temp += 
4; } - x->block[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) & x->fullpixel_mask; + blockd[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) & + xd->fullpixel_mask; - // if (x->mode_info_context->mbmi.need_to_clamp_mvs) - clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.as_mv.second.as_mv, x); + // if (mbmi->need_to_clamp_mvs) + clamp_uvmv_to_umv_border( + &blockd[uoffset].bmi.as_mv.second.as_mv, xd); - // if (x->mode_info_context->mbmi.need_to_clamp_mvs) - clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.as_mv.second.as_mv, x); + // if (mbmi->need_to_clamp_mvs) + clamp_uvmv_to_umv_border( + &blockd[uoffset].bmi.as_mv.second.as_mv, xd); - x->block[voffset].bmi.as_mv.second.as_mv.row = - x->block[uoffset].bmi.as_mv.second.as_mv.row; - x->block[voffset].bmi.as_mv.second.as_mv.col = - x->block[uoffset].bmi.as_mv.second.as_mv.col; + blockd[voffset].bmi.as_mv.second.as_mv.row = + blockd[uoffset].bmi.as_mv.second.as_mv.row; + blockd[voffset].bmi.as_mv.second.as_mv.col = + blockd[uoffset].bmi.as_mv.second.as_mv.col; } } } } -void vp8_build_inter_predictors_mb(MACROBLOCKD *x) { - if (x->mode_info_context->mbmi.mode != SPLITMV) { - vp8_build_1st_inter16x16_predictors_mb(x, x->predictor, &x->predictor[256], - &x->predictor[320], 16, 8); +void vp8_build_inter_predictors_mb(MACROBLOCKD *xd) { + if (xd->mode_info_context->mbmi.mode != SPLITMV) { + vp8_build_1st_inter16x16_predictors_mb(xd, xd->predictor, + &xd->predictor[256], + &xd->predictor[320], 16, 8); - if (x->mode_info_context->mbmi.second_ref_frame) { + if (xd->mode_info_context->mbmi.second_ref_frame) { /* 256 = offset of U plane in Y+U+V buffer; * 320 = offset of V plane in Y+U+V buffer. * (256=16x16, 320=16x16+8x8). 
*/ - vp8_build_2nd_inter16x16_predictors_mb(x, x->predictor, - &x->predictor[256], - &x->predictor[320], 16, 8); + vp8_build_2nd_inter16x16_predictors_mb(xd, xd->predictor, + &xd->predictor[256], + &xd->predictor[320], 16, 8); } } else { - build_4x4uvmvs(x); - build_inter4x4_predictors_mb(x); + build_4x4uvmvs(xd); + build_inter4x4_predictors_mb(xd); } } diff --git a/vp8/common/reconintra.c b/vp8/common/reconintra.c index 6e9bb64..1795a71 100644 --- a/vp8/common/reconintra.c +++ b/vp8/common/reconintra.c @@ -196,24 +196,28 @@ void d153_predictor(unsigned char *ypred_ptr, int y_stride, int n, } } -void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) { +void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, + MACROBLOCKD *xd) { int i; for (i = 16; i < 24; i += 2) { - BLOCKD *b = &x->block[i]; - RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride); + BLOCKD *b = &xd->block[i]; + RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, + *(b->base_dst) + b->dst, b->dst_stride); } } -void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *x, unsigned char *ypred_ptr, int y_stride, int mode) { +void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd, + unsigned char *ypred_ptr, + int y_stride, int mode) { - unsigned char *yabove_row = x->dst.y_buffer - x->dst.y_stride; + unsigned char *yabove_row = xd->dst.y_buffer - xd->dst.y_stride; unsigned char yleft_col[16]; unsigned char ytop_left = yabove_row[-1]; int r, c, i; for (i = 0; i < 16; i++) { - yleft_col[i] = x->dst.y_buffer [i * x->dst.y_stride - 1]; + yleft_col[i] = xd->dst.y_buffer [i * xd->dst.y_stride - 1]; } /* for Y */ @@ -225,19 +229,19 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *x, unsigned char *ypre int average = 0; - if (x->up_available || x->left_available) { - if (x->up_available) { + if (xd->up_available || xd->left_available) { + if (xd->up_available) { for (i = 0; i < 16; i++) { average += yabove_row[i]; } } - if 
(x->left_available) { + if (xd->left_available) { for (i = 0; i < 16; i++) { average += yleft_col[i]; } } - shift = 3 + x->up_available + x->left_available; + shift = 3 + xd->up_available + xd->left_available; expected_dc = (average + (1 << (shift - 1))) >> shift; } else { expected_dc = 128; @@ -329,49 +333,51 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *x, unsigned char *ypre } } -void vp8_build_intra_predictors_mby(MACROBLOCKD *x) { - vp8_build_intra_predictors_mby_internal(x, x->predictor, 16, - x->mode_info_context->mbmi.mode); +void vp8_build_intra_predictors_mby(MACROBLOCKD *xd) { + vp8_build_intra_predictors_mby_internal(xd, xd->predictor, 16, + xd->mode_info_context->mbmi.mode); } -void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) { - vp8_build_intra_predictors_mby_internal(x, x->dst.y_buffer, x->dst.y_stride, - x->mode_info_context->mbmi.mode); +void vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd) { + vp8_build_intra_predictors_mby_internal(xd, xd->dst.y_buffer, + xd->dst.y_stride, + xd->mode_info_context->mbmi.mode); } #if CONFIG_COMP_INTRA_PRED -void vp8_build_comp_intra_predictors_mby(MACROBLOCKD *x) { +void vp8_build_comp_intra_predictors_mby(MACROBLOCKD *xd) { unsigned char predictor[2][256]; int i; - vp8_build_intra_predictors_mby_internal(x, predictor[0], 16, - x->mode_info_context->mbmi.mode); - vp8_build_intra_predictors_mby_internal(x, predictor[1], 16, - x->mode_info_context->mbmi.second_mode); + vp8_build_intra_predictors_mby_internal( + xd, predictor[0], 16, xd->mode_info_context->mbmi.mode); + vp8_build_intra_predictors_mby_internal( + xd, predictor[1], 16, xd->mode_info_context->mbmi.second_mode); for (i = 0; i < 256; i++) { - x->predictor[i] = (predictor[0][i] + predictor[1][i] + 1) >> 1; + xd->predictor[i] = (predictor[0][i] + predictor[1][i] + 1) >> 1; } } #endif -void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *x, +void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd, unsigned char *upred_ptr, 
unsigned char *vpred_ptr, int uv_stride, int mode) { - unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride; + YV12_BUFFER_CONFIG * dst = &xd->dst; + unsigned char *uabove_row = dst->u_buffer - dst->uv_stride; unsigned char uleft_col[16]; unsigned char utop_left = uabove_row[-1]; - unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride; + unsigned char *vabove_row = dst->v_buffer - dst->uv_stride; unsigned char vleft_col[20]; unsigned char vtop_left = vabove_row[-1]; int i, j; for (i = 0; i < 8; i++) { - uleft_col[i] = x->dst.u_buffer [i * x->dst.uv_stride - 1]; - vleft_col[i] = x->dst.v_buffer [i * x->dst.uv_stride - 1]; + uleft_col[i] = dst->u_buffer [i * dst->uv_stride - 1]; + vleft_col[i] = dst->v_buffer [i * dst->uv_stride - 1]; } switch (mode) { @@ -383,25 +389,25 @@ void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *x, int Uaverage = 0; int Vaverage = 0; - if (x->up_available) { + if (xd->up_available) { for (i = 0; i < 8; i++) { Uaverage += uabove_row[i]; Vaverage += vabove_row[i]; } } - if (x->left_available) { + if (xd->left_available) { for (i = 0; i < 8; i++) { Uaverage += uleft_col[i]; Vaverage += vleft_col[i]; } } - if (!x->up_available && !x->left_available) { + if (!xd->up_available && !xd->left_available) { expected_udc = 128; expected_vdc = 128; } else { - shift = 2 + x->up_available + x->left_available; + shift = 2 + xd->up_available + xd->left_available; expected_udc = (Uaverage + (1 << (shift - 1))) >> shift; expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift; } @@ -512,49 +518,47 @@ void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *x, } } -void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x) { - vp8_build_intra_predictors_mbuv_internal(x, - &x->predictor[256], - &x->predictor[320], - 8, - x->mode_info_context->mbmi.uv_mode); +void vp8_build_intra_predictors_mbuv(MACROBLOCKD *xd) { + vp8_build_intra_predictors_mbuv_internal( + xd, &xd->predictor[256], &xd->predictor[320], + 8, 
xd->mode_info_context->mbmi.uv_mode); } -void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x) { - vp8_build_intra_predictors_mbuv_internal(x, - x->dst.u_buffer, - x->dst.v_buffer, - x->dst.uv_stride, - x->mode_info_context->mbmi.uv_mode); +void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) { + vp8_build_intra_predictors_mbuv_internal( + xd, xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride, xd->mode_info_context->mbmi.uv_mode); } #if CONFIG_COMP_INTRA_PRED -void vp8_build_comp_intra_predictors_mbuv(MACROBLOCKD *x) { +void vp8_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) { unsigned char predictor[2][2][64]; int i; - vp8_build_intra_predictors_mbuv_internal(x, predictor[0][0], predictor[1][0], 8, - x->mode_info_context->mbmi.uv_mode); - vp8_build_intra_predictors_mbuv_internal(x, predictor[0][1], predictor[1][1], 8, - x->mode_info_context->mbmi.second_uv_mode); + vp8_build_intra_predictors_mbuv_internal( + xd, predictor[0][0], predictor[1][0], 8, + xd->mode_info_context->mbmi.uv_mode); + vp8_build_intra_predictors_mbuv_internal( + xd, predictor[0][1], predictor[1][1], 8, + xd->mode_info_context->mbmi.second_uv_mode); for (i = 0; i < 64; i++) { - x->predictor[256 + i] = (predictor[0][0][i] + predictor[0][1][i] + 1) >> 1; - x->predictor[256 + 64 + i] = (predictor[1][0][i] + predictor[1][1][i] + 1) >> 1; + xd->predictor[256 + i] = (predictor[0][0][i] + predictor[0][1][i] + 1) >> 1; + xd->predictor[256 + 64 + i] = (predictor[1][0][i] + predictor[1][1][i] + 1) >> 1; } } #endif -void vp8_intra8x8_predict(BLOCKD *x, +void vp8_intra8x8_predict(BLOCKD *xd, int mode, unsigned char *predictor) { - unsigned char *yabove_row = *(x->base_dst) + x->dst - x->dst_stride; + unsigned char *yabove_row = *(xd->base_dst) + xd->dst - xd->dst_stride; unsigned char yleft_col[8]; unsigned char ytop_left = yabove_row[-1]; int r, c, i; for (i = 0; i < 8; i++) { - yleft_col[i] = (*(x->base_dst))[x->dst - 1 + i * x->dst_stride]; + yleft_col[i] = (*(xd->base_dst))[xd->dst - 1 + i 
* xd->dst_stride]; } switch (mode) { case DC_PRED: { @@ -639,14 +643,14 @@ void vp8_intra8x8_predict(BLOCKD *x, } #if CONFIG_COMP_INTRA_PRED -void vp8_comp_intra8x8_predict(BLOCKD *x, +void vp8_comp_intra8x8_predict(BLOCKD *xd, int mode, int second_mode, unsigned char *out_predictor) { unsigned char predictor[2][8 * 16]; int i, j; - vp8_intra8x8_predict(x, mode, predictor[0]); - vp8_intra8x8_predict(x, second_mode, predictor[1]); + vp8_intra8x8_predict(xd, mode, predictor[0]); + vp8_intra8x8_predict(xd, second_mode, predictor[1]); for (i = 0; i < 8 * 16; i += 16) { for (j = i; j < i + 8; j++) { @@ -656,17 +660,17 @@ void vp8_comp_intra8x8_predict(BLOCKD *x, } #endif -void vp8_intra_uv4x4_predict(BLOCKD *x, +void vp8_intra_uv4x4_predict(BLOCKD *xd, int mode, unsigned char *predictor) { - unsigned char *above_row = *(x->base_dst) + x->dst - x->dst_stride; + unsigned char *above_row = *(xd->base_dst) + xd->dst - xd->dst_stride; unsigned char left_col[4]; unsigned char top_left = above_row[-1]; int r, c, i; for (i = 0; i < 4; i++) { - left_col[i] = (*(x->base_dst))[x->dst - 1 + i * x->dst_stride]; + left_col[i] = (*(xd->base_dst))[xd->dst - 1 + i * xd->dst_stride]; } switch (mode) { case DC_PRED: { @@ -752,14 +756,14 @@ void vp8_intra_uv4x4_predict(BLOCKD *x, } #if CONFIG_COMP_INTRA_PRED -void vp8_comp_intra_uv4x4_predict(BLOCKD *x, +void vp8_comp_intra_uv4x4_predict(BLOCKD *xd, int mode, int mode2, unsigned char *out_predictor) { unsigned char predictor[2][8 * 4]; int i, j; - vp8_intra_uv4x4_predict(x, mode, predictor[0]); - vp8_intra_uv4x4_predict(x, mode2, predictor[1]); + vp8_intra_uv4x4_predict(xd, mode, predictor[0]); + vp8_intra_uv4x4_predict(xd, mode2, predictor[1]); for (i = 0; i < 4 * 8; i += 8) { for (j = i; j < i + 4; j++) { diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c index 5f9768d..15764de 100644 --- a/vp8/decoder/detokenize.c +++ b/vp8/decoder/detokenize.c @@ -100,20 +100,20 @@ DECLARE_ALIGNED(16, const int, coef_bands_x_16x16[256]) = 
{ static const unsigned char cat6_prob[14] = { 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0 }; -void vp8_reset_mb_tokens_context(MACROBLOCKD *x) { +void vp8_reset_mb_tokens_context(MACROBLOCKD *xd) { /* Clear entropy contexts for Y2 blocks */ - if ((x->mode_info_context->mbmi.mode != B_PRED && - x->mode_info_context->mbmi.mode != I8X8_PRED && - x->mode_info_context->mbmi.mode != SPLITMV) + if ((xd->mode_info_context->mbmi.mode != B_PRED && + xd->mode_info_context->mbmi.mode != I8X8_PRED && + xd->mode_info_context->mbmi.mode != SPLITMV) #if CONFIG_TX16X16 - || x->mode_info_context->mbmi.txfm_size == TX_16X16 + || xd->mode_info_context->mbmi.txfm_size == TX_16X16 #endif ) { - vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); - vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); } else { - vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); - vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); + vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); + vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); } } diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c index 507659d..8778335 100644 --- a/vp8/encoder/encodeframe.c +++ b/vp8/encoder/encodeframe.c @@ -365,7 +365,7 @@ static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { int i; MACROBLOCKD *xd = &x->e_mbd; MODE_INFO *mi = &ctx->mic; - MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi; + MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi; int mb_mode = mi->mbmi.mode; int mb_mode_index = ctx->best_mode_index; diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c index ee6b247..d9eb705 100644 --- a/vp8/encoder/rdopt.c +++ b/vp8/encoder/rdopt.c @@ -1581,7 +1581,7 @@ static int labels2mode(MACROBLOCK *x, int const *labelings, int which_label, int_mv *second_best_ref_mv, int *mvcost[2])
{ MACROBLOCKD *const xd = & x->e_mbd; MODE_INFO *const mic = xd->mode_info_context; - MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi; + MB_MODE_INFO * mbmi = &mic->mbmi; const int mis = xd->mode_info_stride; int i, cost = 0, thismvcost = 0; @@ -1695,6 +1695,7 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { int i; unsigned int distortion = 0; + MACROBLOCKD *xd = &x->e_mbd; for (i = 0; i < 16; i++) { if (labels[i] == which_label) { @@ -1702,9 +1703,9 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, BLOCK *be = &x->block[i]; int thisdistortion; - vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict); - if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) - vp8_build_2nd_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict_avg); + vp8_build_inter_predictors_b(bd, 16, xd->subpixel_predict); + if (xd->mode_info_context->mbmi.second_ref_frame) + vp8_build_2nd_inter_predictors_b(bd, 16, xd->subpixel_predict_avg); ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16); x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32); @@ -3583,7 +3584,6 @@ int vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x) { int mode8x8[2][4]; mbmi->ref_frame = INTRA_FRAME; - rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv); rate = rateuv;