From: Jingning Han
Date: Thu, 11 Apr 2013 19:12:11 +0000 (-0700)
Subject: Make intra predictor support rectangular blocks
X-Git-Tag: v1.3.0~1106^2~268
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=815e95fbebdc8de7e5033cb60b6ae8d846d92152;p=platform%2Fupstream%2Flibvpx.git

Make intra predictor support rectangular blocks

The intra predictor now supports configurable block sizes, handling intra
prediction for block sizes down to 4x4 when enabled in BLOCK_SIZE_TYPE.

Change-Id: I7399ec2512393aa98aadda9813ca0c83e19af854
---

diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index ada2ded..03636f4 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -237,6 +237,15 @@ static INLINE int mb_height_log2(BLOCK_SIZE_TYPE sb_type) {
   }
 }
 
+// parse block dimension in the unit of 4x4 blocks
+static INLINE int b_width_log2(BLOCK_SIZE_TYPE sb_type) {
+  return mb_width_log2(sb_type) + 2;
+}
+
+static INLINE int b_height_log2(BLOCK_SIZE_TYPE sb_type) {
+  return mb_height_log2(sb_type) + 2;
+}
+
 typedef enum {
   BLOCK_4X4_LG2 = 0,
   BLOCK_8X8_LG2 = 2,
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index 640ce29..5458075 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -15,9 +15,6 @@
 #include "vp9/common/vp9_reconintra.h"
 #include "vpx_mem/vpx_mem.h"
 
-// For skip_recon_mb(), add vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd)
-// and vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd).
-
 // Using multiplication and shifting instead of division in diagonal prediction.
 // iscale table is calculated from ((1 << 16) + (i + 2) / 2) / (i+2) and used as
 // ((A + B) * iscale[i] + (1 << 15)) >> 16;
@@ -37,20 +34,21 @@ static INLINE int iscale_round(int value, int i) {
   return ROUND_POWER_OF_TWO(value * iscale[i], 16);
 }
 
-static void d27_predictor(uint8_t *ypred_ptr, int y_stride, int n,
+static void d27_predictor(uint8_t *ypred_ptr, int y_stride,
+                          int bw, int bh,
                           uint8_t *yabove_row, uint8_t *yleft_col) {
   int r, c;
 
   r = 0;
-  for (c = 0; c < n - 2; c++) {
+  for (c = 0; c < bw - 2; c++) {
     int a = c & 1 ? yleft_col[r + 1] :
             ROUND_POWER_OF_TWO(yleft_col[r] + yleft_col[r + 1], 1);
     int b = yabove_row[c + 2];
     ypred_ptr[c] = iscale_round(2 * a + (c + 1) * b, 1 + c);
   }
 
-  for (r = 1; r < n / 2 - 1; r++) {
-    for (c = 0; c < n - 2 - 2 * r; c++) {
+  for (r = 1; r < bh / 2 - 1; r++) {
+    for (c = 0; c < bw - 2 - 2 * r; c++) {
       int a = c & 1 ? yleft_col[r + 1] :
               ROUND_POWER_OF_TWO(yleft_col[r] + yleft_col[r + 1], 1);
       int b = ypred_ptr[(r - 1) * y_stride + c + 2];
@@ -58,8 +56,8 @@ static void d27_predictor(uint8_t *ypred_ptr, int y_stride, int n,
     }
   }
 
-  for (; r < n - 1; r++) {
-    for (c = 0; c < n; c++) {
+  for (; r < bh - 1; r++) {
+    for (c = 0; c < bw; c++) {
       int v = c & 1 ?
yleft_col[r + 1] : ROUND_POWER_OF_TWO(yleft_col[r] + yleft_col[r + 1], 1); int h = r - c / 2; @@ -68,19 +66,19 @@ static void d27_predictor(uint8_t *ypred_ptr, int y_stride, int n, } c = 0; - r = n - 1; + r = bh - 1; ypred_ptr[r * y_stride] = ROUND_POWER_OF_TWO(ypred_ptr[(r - 1) * y_stride] + yleft_col[r], 1); - for (r = n - 2; r >= n / 2; --r) { - int w = c + (n - 1 - r) * 2; + for (r = bh - 2; r >= bh / 2; --r) { + int w = c + (bh - 1 - r) * 2; ypred_ptr[r * y_stride + w] = ROUND_POWER_OF_TWO(ypred_ptr[(r - 1) * y_stride + w] + ypred_ptr[r * y_stride + w - 1], 1); } - for (c = 1; c < n; c++) { - for (r = n - 1; r >= n / 2 + c / 2; --r) { - int w = c + (n - 1 - r) * 2; + for (c = 1; c < bw; c++) { + for (r = bh - 1; r >= bh / 2 + c / 2; --r) { + int w = c + (bh - 1 - r) * 2; ypred_ptr[r * y_stride + w] = ROUND_POWER_OF_TWO(ypred_ptr[(r - 1) * y_stride + w] + ypred_ptr[r * y_stride + w - 1], 1); @@ -88,20 +86,21 @@ static void d27_predictor(uint8_t *ypred_ptr, int y_stride, int n, } } -static void d63_predictor(uint8_t *ypred_ptr, int y_stride, int n, +static void d63_predictor(uint8_t *ypred_ptr, int y_stride, + int bw, int bh, uint8_t *yabove_row, uint8_t *yleft_col) { int r, c; c = 0; - for (r = 0; r < n - 2; r++) { + for (r = 0; r < bh - 2; r++) { int a = r & 1 ? yabove_row[c + 1] : ROUND_POWER_OF_TWO(yabove_row[c] + yabove_row[c + 1], 1); int b = yleft_col[r + 2]; ypred_ptr[r * y_stride] = iscale_round(2 * a + (r + 1) * b, 1 + r); } - for (c = 1; c < n / 2 - 1; c++) { - for (r = 0; r < n - 2 - 2 * c; r++) { + for (c = 1; c < bw / 2 - 1; c++) { + for (r = 0; r < bh - 2 - 2 * c; r++) { int a = r & 1 ? yabove_row[c + 1] : ROUND_POWER_OF_TWO(yabove_row[c] + yabove_row[c + 1], 1); int b = ypred_ptr[(r + 2) * y_stride + c - 1]; @@ -109,8 +108,8 @@ static void d63_predictor(uint8_t *ypred_ptr, int y_stride, int n, } } - for (; c < n - 1; ++c) { - for (r = 0; r < n; r++) { + for (; c < bw - 1; ++c) { + for (r = 0; r < bh; r++) { int v = r & 1 ? 
yabove_row[c + 1] : ROUND_POWER_OF_TWO(yabove_row[c] + yabove_row[c + 1], 1); int w = c - r / 2; @@ -119,18 +118,18 @@ static void d63_predictor(uint8_t *ypred_ptr, int y_stride, int n, } r = 0; - c = n - 1; + c = bw - 1; ypred_ptr[c] = ROUND_POWER_OF_TWO(ypred_ptr[(c - 1)] + yabove_row[c], 1); - for (c = n - 2; c >= n / 2; --c) { - int h = r + (n - 1 - c) * 2; + for (c = bw - 2; c >= bw / 2; --c) { + int h = r + (bw - 1 - c) * 2; ypred_ptr[h * y_stride + c] = ROUND_POWER_OF_TWO(ypred_ptr[h * y_stride + c - 1] + ypred_ptr[(h - 1) * y_stride + c], 1); } - for (r = 1; r < n; r++) { - for (c = n - 1; c >= n / 2 + r / 2; --c) { - int h = r + (n - 1 - c) * 2; + for (r = 1; r < bh; r++) { + for (c = bw - 1; c >= bw / 2 + r / 2; --c) { + int h = r + (bw - 1 - c) * 2; ypred_ptr[h * y_stride + c] = ROUND_POWER_OF_TWO(ypred_ptr[h * y_stride + c - 1] + ypred_ptr[(h - 1) * y_stride + c], 1); @@ -138,11 +137,12 @@ static void d63_predictor(uint8_t *ypred_ptr, int y_stride, int n, } } -static void d45_predictor(uint8_t *ypred_ptr, int y_stride, int n, +static void d45_predictor(uint8_t *ypred_ptr, int y_stride, + int bw, int bh, uint8_t *yabove_row, uint8_t *yleft_col) { int r, c; - for (r = 0; r < n - 1; ++r) { + for (r = 0; r < bh - 1; ++r) { for (c = 0; c <= r; ++c) { ypred_ptr[(r - c) * y_stride + c] = iscale_round( yabove_row[r + 1] * (c + 1) + yleft_col[r + 1] * (r - c + 1), r); @@ -157,8 +157,8 @@ static void d45_predictor(uint8_t *ypred_ptr, int y_stride, int n, ypred_ptr[(r - c) * y_stride + c] = iscale_round(yabove_ext * (c + 1) + yleft_ext * (r - c + 1), r); } - for (r = 1; r < n; ++r) { - for (c = n - r; c < n; ++c) { + for (r = 1; r < bh; ++r) { + for (c = bw - r; c < bw; ++c) { const int yabove_ext = ypred_ptr[(r - 1) * y_stride + c]; const int yleft_ext = ypred_ptr[r * y_stride + c - 1]; ypred_ptr[r * y_stride + c] = @@ -167,59 +167,62 @@ static void d45_predictor(uint8_t *ypred_ptr, int y_stride, int n, } } -static void d117_predictor(uint8_t *ypred_ptr, int y_stride, int n, +static void d117_predictor(uint8_t *ypred_ptr, int y_stride, + int bw, int bh, uint8_t *yabove_row, uint8_t *yleft_col) { int r, c; - for (c = 0; c < n; c++) + for (c = 0; c < bw; c++) ypred_ptr[c] = ROUND_POWER_OF_TWO(yabove_row[c - 1] + yabove_row[c], 1); ypred_ptr += y_stride; - for (c = 0; c < n; c++) + for (c = 0; c < bw; c++) ypred_ptr[c] = yabove_row[c - 1]; ypred_ptr += y_stride; - for (r = 2; r < n; ++r) { + for (r = 2; r < bh; ++r) { ypred_ptr[0] = yleft_col[r - 2]; - for (c = 1; c < n; c++) + for (c = 1; c < bw; c++) ypred_ptr[c] = ypred_ptr[-2 * y_stride + c - 1]; ypred_ptr += y_stride; } } -static void d135_predictor(uint8_t *ypred_ptr, int y_stride, int n, +static void d135_predictor(uint8_t *ypred_ptr, int y_stride, + int bw, int bh, uint8_t *yabove_row, uint8_t *yleft_col) { int r, c; ypred_ptr[0] = yabove_row[-1]; - for (c = 1; c < n; c++) + for (c = 1; c < bw; c++) ypred_ptr[c] = yabove_row[c - 1]; - for (r = 1; r < n; ++r) + for (r = 1; r < bh; ++r) ypred_ptr[r * y_stride] = yleft_col[r - 1]; ypred_ptr += y_stride; - for (r = 1; r < n; ++r) { - for (c = 1; c < n; c++) { + for (r = 1; r < bh; ++r) { + for (c = 1; c < bw; c++) { ypred_ptr[c] = ypred_ptr[-y_stride + c - 1]; } ypred_ptr += y_stride; } } -static void d153_predictor(uint8_t *ypred_ptr, int y_stride, int n, +static void d153_predictor(uint8_t *ypred_ptr, int y_stride, + int bw, int bh, uint8_t *yabove_row, uint8_t *yleft_col) { int r, c; ypred_ptr[0] = ROUND_POWER_OF_TWO(yabove_row[-1] + yleft_col[0], 1); - for (r = 1; r < n; r++) + for 
(r = 1; r < bh; r++) ypred_ptr[r * y_stride] = ROUND_POWER_OF_TWO(yleft_col[r - 1] + yleft_col[r], 1); ypred_ptr++; ypred_ptr[0] = yabove_row[-1]; - for (r = 1; r < n; r++) + for (r = 1; r < bh; r++) ypred_ptr[r * y_stride] = yleft_col[r - 1]; ypred_ptr++; - for (c = 0; c < n - 2; c++) + for (c = 0; c < bw - 2; c++) ypred_ptr[c] = yabove_row[c]; ypred_ptr += y_stride; - for (r = 1; r < n; ++r) { - for (c = 0; c < n - 2; c++) + for (r = 1; r < bh; ++r) { + for (c = 0; c < bw - 2; c++) ypred_ptr[c] = ypred_ptr[-y_stride + c - 2]; ypred_ptr += y_stride; } @@ -288,12 +291,12 @@ static INLINE int log2_minus_1(int n) { } } - -void vp9_build_intra_predictors_internal(uint8_t *src, int src_stride, - uint8_t *ypred_ptr, - int y_stride, int mode, int bsize, - int up_available, int left_available, - int right_available) { +void vp9_build_intra_predictors(uint8_t *src, int src_stride, + uint8_t *ypred_ptr, + int y_stride, int mode, + int bw, int bh, + int up_available, int left_available, + int right_available) { int r, c, i; uint8_t yleft_col[64], yabove_data[65], ytop_left; uint8_t *yabove_row = yabove_data + 1; @@ -307,22 +310,22 @@ void vp9_build_intra_predictors_internal(uint8_t *src, int src_stride, */ if (left_available) { - for (i = 0; i < bsize; i++) + for (i = 0; i < bh; i++) yleft_col[i] = src[i * src_stride - 1]; } else { - vpx_memset(yleft_col, 129, bsize); + vpx_memset(yleft_col, 129, bh); } if (up_available) { uint8_t *yabove_ptr = src - src_stride; - vpx_memcpy(yabove_row, yabove_ptr, bsize); + vpx_memcpy(yabove_row, yabove_ptr, bw); if (left_available) { ytop_left = yabove_ptr[-1]; } else { ytop_left = 127; } } else { - vpx_memset(yabove_row, 127, bsize); + vpx_memset(yabove_row, 127, bw); ytop_left = 127; } yabove_row[-1] = ytop_left; @@ -332,49 +335,49 @@ void vp9_build_intra_predictors_internal(uint8_t *src, int src_stride, case DC_PRED: { int expected_dc; int i; - int shift; int average = 0; - int log2_bsize_minus_1 = log2_minus_1(bsize); + int count = 0; if (up_available || left_available) { if (up_available) { - for (i = 0; i < bsize; i++) { + for (i = 0; i < bw; i++) { average += yabove_row[i]; } + count += bw; } if (left_available) { - for (i = 0; i < bsize; i++) { + for (i = 0; i < bh; i++) { average += yleft_col[i]; } + count += bh; } - shift = log2_bsize_minus_1 + up_available + left_available; - expected_dc = ROUND_POWER_OF_TWO(average, shift); + expected_dc = (average + (count >> 1)) / count; } else { expected_dc = 128; } - for (r = 0; r < bsize; r++) { - vpx_memset(ypred_ptr, expected_dc, bsize); + for (r = 0; r < bh; r++) { + vpx_memset(ypred_ptr, expected_dc, bw); ypred_ptr += y_stride; } } break; case V_PRED: - for (r = 0; r < bsize; r++) { - memcpy(ypred_ptr, yabove_row, bsize); + for (r = 0; r < bh; r++) { + memcpy(ypred_ptr, yabove_row, bw); ypred_ptr += y_stride; } break; case H_PRED: - for (r = 0; r < bsize; r++) { - vpx_memset(ypred_ptr, yleft_col[r], bsize); + for (r = 0; r < bh; r++) { + vpx_memset(ypred_ptr, yleft_col[r], bw); ypred_ptr += y_stride; } break; case TM_PRED: - for (r = 0; r < bsize; r++) { - for (c = 0; c < bsize; c++) { + for (r = 0; r < bh; r++) { + for (c = 0; c < bw; c++) { ypred_ptr[c] = clip_pixel(yleft_col[r] + yabove_row[c] - ytop_left); } @@ -382,22 +385,22 @@ void vp9_build_intra_predictors_internal(uint8_t *src, int src_stride, } break; case D45_PRED: - d45_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col); + d45_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col); break; case D135_PRED: - 
d135_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col); + d135_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col); break; case D117_PRED: - d117_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col); + d117_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col); break; case D153_PRED: - d153_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col); + d153_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col); break; case D27_PRED: - d27_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col); + d27_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col); break; case D63_PRED: - d63_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col); + d63_predictor(ypred_ptr, y_stride, bw, bh, yabove_row, yleft_col); break; case I8X8_PRED: case B_PRED: @@ -551,10 +554,10 @@ void vp9_build_interintra_16x16_predictors_mby(MACROBLOCKD *xd, uint8_t *ypred, int ystride) { uint8_t intrapredictor[256]; - vp9_build_intra_predictors_internal( + vp9_build_intra_predictors( xd->dst.y_buffer, xd->dst.y_stride, intrapredictor, 16, - xd->mode_info_context->mbmi.interintra_mode, 16, + xd->mode_info_context->mbmi.interintra_mode, 16, 16, xd->up_available, xd->left_available, xd->right_available); combine_interintra(xd->mode_info_context->mbmi.interintra_mode, ypred, ystride, intrapredictor, 16, 16); @@ -566,15 +569,15 @@ void vp9_build_interintra_16x16_predictors_mbuv(MACROBLOCKD *xd, int uvstride) { uint8_t uintrapredictor[64]; uint8_t vintrapredictor[64]; - vp9_build_intra_predictors_internal( + vp9_build_intra_predictors( xd->dst.u_buffer, xd->dst.uv_stride, uintrapredictor, 8, - xd->mode_info_context->mbmi.interintra_uv_mode, 8, + xd->mode_info_context->mbmi.interintra_uv_mode, 8, 8, xd->up_available, xd->left_available, xd->right_available); - vp9_build_intra_predictors_internal( + vp9_build_intra_predictors( xd->dst.v_buffer, xd->dst.uv_stride, vintrapredictor, 8, - xd->mode_info_context->mbmi.interintra_uv_mode, 8, + xd->mode_info_context->mbmi.interintra_uv_mode, 8, 8, xd->up_available, xd->left_available, xd->right_available); combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, upred, uvstride, uintrapredictor, 8, 8); @@ -586,10 +589,10 @@ void vp9_build_interintra_32x32_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred, int ystride) { uint8_t intrapredictor[1024]; - vp9_build_intra_predictors_internal( + vp9_build_intra_predictors( xd->dst.y_buffer, xd->dst.y_stride, intrapredictor, 32, - xd->mode_info_context->mbmi.interintra_mode, 32, + xd->mode_info_context->mbmi.interintra_mode, 32, 32, xd->up_available, xd->left_available, xd->right_available); combine_interintra(xd->mode_info_context->mbmi.interintra_mode, ypred, ystride, intrapredictor, 32, 32); @@ -601,15 +604,15 @@ void vp9_build_interintra_32x32_predictors_sbuv(MACROBLOCKD *xd, int uvstride) { uint8_t uintrapredictor[256]; uint8_t vintrapredictor[256]; - vp9_build_intra_predictors_internal( + vp9_build_intra_predictors( xd->dst.u_buffer, xd->dst.uv_stride, uintrapredictor, 16, - xd->mode_info_context->mbmi.interintra_uv_mode, 16, + xd->mode_info_context->mbmi.interintra_uv_mode, 16, 16, xd->up_available, xd->left_available, xd->right_available); - vp9_build_intra_predictors_internal( + vp9_build_intra_predictors( xd->dst.v_buffer, xd->dst.uv_stride, vintrapredictor, 16, - xd->mode_info_context->mbmi.interintra_uv_mode, 16, + xd->mode_info_context->mbmi.interintra_uv_mode, 16, 16, xd->up_available, xd->left_available, xd->right_available); 
combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, upred, uvstride, uintrapredictor, 16, 16); @@ -632,10 +635,10 @@ void vp9_build_interintra_64x64_predictors_sby(MACROBLOCKD *xd, int ystride) { uint8_t intrapredictor[4096]; const int mode = xd->mode_info_context->mbmi.interintra_mode; - vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, - intrapredictor, 64, mode, 64, - xd->up_available, xd->left_available, - xd->right_available); + vp9_build_intra_predictors(xd->dst.y_buffer, xd->dst.y_stride, + intrapredictor, 64, mode, 64, 64, + xd->up_available, xd->left_available, + xd->right_available); combine_interintra(xd->mode_info_context->mbmi.interintra_mode, ypred, ystride, intrapredictor, 64, 64); } @@ -647,14 +650,14 @@ void vp9_build_interintra_64x64_predictors_sbuv(MACROBLOCKD *xd, uint8_t uintrapredictor[1024]; uint8_t vintrapredictor[1024]; const int mode = xd->mode_info_context->mbmi.interintra_uv_mode; - vp9_build_intra_predictors_internal(xd->dst.u_buffer, xd->dst.uv_stride, - uintrapredictor, 32, mode, 32, - xd->up_available, xd->left_available, - xd->right_available); - vp9_build_intra_predictors_internal(xd->dst.v_buffer, xd->dst.uv_stride, - vintrapredictor, 32, mode, 32, - xd->up_available, xd->left_available, - xd->right_available); + vp9_build_intra_predictors(xd->dst.u_buffer, xd->dst.uv_stride, + uintrapredictor, 32, mode, 32, 32, + xd->up_available, xd->left_available, + xd->right_available); + vp9_build_intra_predictors(xd->dst.v_buffer, xd->dst.uv_stride, + vintrapredictor, 32, mode, 32, 32, + xd->up_available, xd->left_available, + xd->right_available); combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, upred, uvstride, uintrapredictor, 32, 32); combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode, @@ -672,36 +675,44 @@ void vp9_build_interintra_64x64_predictors_sb(MACROBLOCKD *xd, } #endif // CONFIG_COMP_INTERINTRA_PRED +void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd, + BLOCK_SIZE_TYPE bsize) { + const int bwl = b_width_log2(bsize), bw = 4 << bwl; + const int bhl = b_height_log2(bsize), bh = 4 << bhl; + + vp9_build_intra_predictors(xd->dst.y_buffer, xd->dst.y_stride, + xd->dst.y_buffer, xd->dst.y_stride, + xd->mode_info_context->mbmi.mode, + bw, bh, + xd->up_available, xd->left_available, + xd->right_available); +} + +void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd, + BLOCK_SIZE_TYPE bsize) { + const int bwl = b_width_log2(bsize) - 1, bw = 4 << bwl; + const int bhl = b_height_log2(bsize) - 1, bh = 4 << bhl; + + vp9_build_intra_predictors(xd->dst.u_buffer, xd->dst.uv_stride, + xd->dst.u_buffer, xd->dst.uv_stride, + xd->mode_info_context->mbmi.uv_mode, + bw, bh, xd->up_available, + xd->left_available, xd->right_available); + vp9_build_intra_predictors(xd->dst.v_buffer, xd->dst.uv_stride, + xd->dst.v_buffer, xd->dst.uv_stride, + xd->mode_info_context->mbmi.uv_mode, + bw, bh, xd->up_available, + xd->left_available, xd->right_available); +} + +// TODO(jingning): merge mby and mbuv into the above sby and sbmu functions void vp9_build_intra_predictors_mby(MACROBLOCKD *xd) { - vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, - xd->predictor, 16, - xd->mode_info_context->mbmi.mode, 16, - xd->up_available, xd->left_available, - xd->right_available); -} - -void vp9_build_intra_predictors_mby_s(MACROBLOCKD *xd) { - vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, - xd->dst.y_buffer, xd->dst.y_stride, - xd->mode_info_context->mbmi.mode, 16, - 
xd->up_available, xd->left_available, - xd->right_available); -} - -void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd) { - vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, - xd->dst.y_buffer, xd->dst.y_stride, - xd->mode_info_context->mbmi.mode, 32, - xd->up_available, xd->left_available, - xd->right_available); -} - -void vp9_build_intra_predictors_sb64y_s(MACROBLOCKD *xd) { - vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride, - xd->dst.y_buffer, xd->dst.y_stride, - xd->mode_info_context->mbmi.mode, 64, - xd->up_available, xd->left_available, - xd->right_available); + vp9_build_intra_predictors(xd->dst.y_buffer, xd->dst.y_stride, + xd->predictor, 16, + xd->mode_info_context->mbmi.mode, + 16, 16, + xd->up_available, xd->left_available, + xd->right_available); } void vp9_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd, @@ -709,14 +720,16 @@ void vp9_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd, uint8_t *vpred_ptr, int uv_stride, int mode, int bsize) { - vp9_build_intra_predictors_internal(xd->dst.u_buffer, xd->dst.uv_stride, - upred_ptr, uv_stride, mode, bsize, - xd->up_available, xd->left_available, - xd->right_available); - vp9_build_intra_predictors_internal(xd->dst.v_buffer, xd->dst.uv_stride, - vpred_ptr, uv_stride, mode, bsize, - xd->up_available, xd->left_available, - xd->right_available); + vp9_build_intra_predictors(xd->dst.u_buffer, xd->dst.uv_stride, + upred_ptr, uv_stride, mode, + bsize, bsize, + xd->up_available, xd->left_available, + xd->right_available); + vp9_build_intra_predictors(xd->dst.v_buffer, xd->dst.uv_stride, + vpred_ptr, uv_stride, mode, + bsize, bsize, + xd->up_available, xd->left_available, + xd->right_available); } void vp9_build_intra_predictors_mbuv(MACROBLOCKD *xd) { @@ -726,28 +739,6 @@ void vp9_build_intra_predictors_mbuv(MACROBLOCKD *xd) { 8); } -void vp9_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) { - vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer, - xd->dst.v_buffer, - xd->dst.uv_stride, - xd->mode_info_context->mbmi.uv_mode, - 8); -} - -void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) { - vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer, - xd->dst.v_buffer, xd->dst.uv_stride, - xd->mode_info_context->mbmi.uv_mode, - 16); -} - -void vp9_build_intra_predictors_sb64uv_s(MACROBLOCKD *xd) { - vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer, - xd->dst.v_buffer, xd->dst.uv_stride, - xd->mode_info_context->mbmi.uv_mode, - 32); -} - void vp9_intra8x8_predict(MACROBLOCKD *xd, BLOCKD *b, int mode, @@ -758,10 +749,10 @@ void vp9_intra8x8_predict(MACROBLOCKD *xd, const int have_left = (block_idx & 1) || xd->left_available; const int have_right = !(block_idx & 1) || xd->right_available; - vp9_build_intra_predictors_internal(*(b->base_dst) + b->dst, - b->dst_stride, predictor, pre_stride, - mode, 8, have_top, have_left, - have_right); + vp9_build_intra_predictors(*(b->base_dst) + b->dst, + b->dst_stride, predictor, pre_stride, + mode, 8, 8, have_top, have_left, + have_right); } void vp9_intra_uv4x4_predict(MACROBLOCKD *xd, @@ -773,10 +764,10 @@ void vp9_intra_uv4x4_predict(MACROBLOCKD *xd, const int have_left = (block_idx & 1) || xd->left_available; const int have_right = !(block_idx & 1) || xd->right_available; - vp9_build_intra_predictors_internal(*(b->base_dst) + b->dst, - b->dst_stride, predictor, pre_stride, - mode, 4, have_top, have_left, - have_right); + vp9_build_intra_predictors(*(b->base_dst) + b->dst, + b->dst_stride, predictor, pre_stride, + 
mode, 4, 4, have_top, have_left, + have_right); } /* TODO: try different ways of use Y-UV mode correlation diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh index 87503e0..8ce9574 100644 --- a/vp9/common/vp9_rtcd_defs.sh +++ b/vp9/common/vp9_rtcd_defs.sh @@ -86,33 +86,18 @@ specialize vp9_recon_sby_s prototype void vp9_recon_sbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst, enum BLOCK_SIZE_TYPE bsize" specialize void vp9_recon_sbuv_s -prototype void vp9_build_intra_predictors_mby_s "struct macroblockd *x" -specialize vp9_build_intra_predictors_mby_s - -prototype void vp9_build_intra_predictors_sby_s "struct macroblockd *x" +prototype void vp9_build_intra_predictors_sby_s "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize" specialize vp9_build_intra_predictors_sby_s; -prototype void vp9_build_intra_predictors_sbuv_s "struct macroblockd *x" +prototype void vp9_build_intra_predictors_sbuv_s "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize" specialize vp9_build_intra_predictors_sbuv_s; prototype void vp9_build_intra_predictors_mby "struct macroblockd *x" specialize vp9_build_intra_predictors_mby; -prototype void vp9_build_intra_predictors_mby_s "struct macroblockd *x" -specialize vp9_build_intra_predictors_mby_s; - prototype void vp9_build_intra_predictors_mbuv "struct macroblockd *x" specialize vp9_build_intra_predictors_mbuv; -prototype void vp9_build_intra_predictors_mbuv_s "struct macroblockd *x" -specialize vp9_build_intra_predictors_mbuv_s; - -prototype void vp9_build_intra_predictors_sb64y_s "struct macroblockd *x" -specialize vp9_build_intra_predictors_sb64y_s; - -prototype void vp9_build_intra_predictors_sb64uv_s "struct macroblockd *x" -specialize vp9_build_intra_predictors_sb64uv_s; - prototype void vp9_intra4x4_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor, int pre_stride" specialize vp9_intra4x4_predict; diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c index 6013591..e952cc7 100644 --- a/vp9/decoder/vp9_decodframe.c +++ b/vp9/decoder/vp9_decodframe.c @@ -183,25 +183,15 @@ static void propagate_nzcs(VP9_COMMON *cm, MACROBLOCKD *xd) { } #endif -/* skip_recon_mb() is Modified: Instead of writing the result to predictor buffer and then copying it - * to dst buffer, we can write the result directly to dst buffer. This eliminates unnecessary copy. 
- */ -static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd, - int mb_row, int mb_col) { +static void skip_recon_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, + int mb_row, int mb_col, + BLOCK_SIZE_TYPE bsize) { MODE_INFO *m = xd->mode_info_context; BLOCK_SIZE_TYPE sb_type = m->mbmi.sb_type; if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { - if (sb_type == BLOCK_SIZE_SB64X64) { - vp9_build_intra_predictors_sb64uv_s(xd); - vp9_build_intra_predictors_sb64y_s(xd); - } else if (sb_type == BLOCK_SIZE_SB32X32) { - vp9_build_intra_predictors_sbuv_s(xd); - vp9_build_intra_predictors_sby_s(xd); - } else { - vp9_build_intra_predictors_mbuv_s(xd); - vp9_build_intra_predictors_mby_s(xd); - } + vp9_build_intra_predictors_sbuv_s(xd, bsize); + vp9_build_intra_predictors_sby_s(xd, bsize); } else { if (sb_type == BLOCK_SIZE_SB64X64) { vp9_build_inter64x64_predictors_sb(xd, mb_row, mb_col); @@ -409,7 +399,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, if (!xd->mode_info_context->mbmi.mb_skip_coeff) vp9_decode_mb_tokens_4x4_uv(pbi, xd, bc); #endif - vp9_build_intra_predictors_mbuv_s(xd); + vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16); xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant, xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs); @@ -709,27 +699,21 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col, // Special case: Force the loopfilter to skip when eobtotal and // mb_skip_coeff are zero. - skip_recon_mb(pbi, xd, mb_row, mb_col); + skip_recon_sb(pbi, xd, mb_row, mb_col, bsize); return; } - // TODO(jingning): need to combine intra/inter predictor functions and + // TODO(jingning): need to combine inter predictor functions and // make them block size independent. // generate prediction - if (bsize == BLOCK_SIZE_SB64X64) { - assert(bsize == BLOCK_SIZE_SB64X64); - if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { - vp9_build_intra_predictors_sb64y_s(xd); - vp9_build_intra_predictors_sb64uv_s(xd); - } else { - vp9_build_inter64x64_predictors_sb(xd, mb_row, mb_col); - } + if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { + vp9_build_intra_predictors_sby_s(xd, bsize); + vp9_build_intra_predictors_sbuv_s(xd, bsize); } else { - assert(bsize == BLOCK_SIZE_SB32X32); - if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { - vp9_build_intra_predictors_sby_s(xd); - vp9_build_intra_predictors_sbuv_s(xd); + if (bsize == BLOCK_SIZE_SB64X64) { + vp9_build_inter64x64_predictors_sb(xd, mb_row, mb_col); } else { + assert(bsize == BLOCK_SIZE_SB32X32); vp9_build_inter32x32_predictors_sb(xd, mb_row, mb_col); } } @@ -804,7 +788,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd, // Special case: Force the loopfilter to skip when eobtotal and // mb_skip_coeff are zero. 
xd->mode_info_context->mbmi.mb_skip_coeff = 1; - skip_recon_mb(pbi, xd, mb_row, mb_col); + skip_recon_sb(pbi, xd, mb_row, mb_col, BLOCK_SIZE_MB16X16); return; } #if 0 // def DEC_DEBUG @@ -819,9 +803,9 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd, // do prediction if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { if (mode != I8X8_PRED) { - vp9_build_intra_predictors_mbuv_s(xd); + vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16); if (mode != B_PRED) - vp9_build_intra_predictors_mby_s(xd); + vp9_build_intra_predictors_sby_s(xd, BLOCK_SIZE_MB16X16); } } else { #if 0 // def DEC_DEBUG diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c index 2e9a93c..e25cd1e 100644 --- a/vp9/encoder/vp9_encodeframe.c +++ b/vp9/encoder/vp9_encodeframe.c @@ -2184,13 +2184,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, } if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { - if (bsize == BLOCK_SIZE_SB32X32) { - vp9_build_intra_predictors_sby_s(&x->e_mbd); - vp9_build_intra_predictors_sbuv_s(&x->e_mbd); - } else { - vp9_build_intra_predictors_sb64y_s(&x->e_mbd); - vp9_build_intra_predictors_sb64uv_s(&x->e_mbd); - } + vp9_build_intra_predictors_sby_s(&x->e_mbd, bsize); + vp9_build_intra_predictors_sbuv_s(&x->e_mbd, bsize); if (output_enabled) sum_intra_stats(cpi, x); } else { diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c index a1eea1e..21adf3f 100644 --- a/vp9/encoder/vp9_rdopt.c +++ b/vp9/encoder/vp9_rdopt.c @@ -1113,11 +1113,8 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, x->e_mbd.mode_info_context->mbmi.mode = mode; if (bsize == BLOCK_SIZE_MB16X16) { vp9_build_intra_predictors_mby(&x->e_mbd); - } else if (bsize == BLOCK_SIZE_SB32X32) { - vp9_build_intra_predictors_sby_s(&x->e_mbd); } else { - assert(bsize == BLOCK_SIZE_SB64X64); - vp9_build_intra_predictors_sb64y_s(&x->e_mbd); + vp9_build_intra_predictors_sby_s(&x->e_mbd, bsize); } super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, @@ -1627,14 +1624,10 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, for (mode = DC_PRED; mode <= TM_PRED; mode++) { x->e_mbd.mode_info_context->mbmi.uv_mode = mode; - if (bsize == BLOCK_SIZE_MB16X16) { + if (bsize == BLOCK_SIZE_MB16X16) vp9_build_intra_predictors_mbuv(&x->e_mbd); - } else if (bsize == BLOCK_SIZE_SB32X32) { - vp9_build_intra_predictors_sbuv_s(&x->e_mbd); - } else { - assert(bsize == BLOCK_SIZE_SB64X64); - vp9_build_intra_predictors_sb64uv_s(&x->e_mbd); - } + else + vp9_build_intra_predictors_sbuv_s(&x->e_mbd, bsize); super_block_uvrd(&cpi->common, x, &this_rate_tokenonly, &this_distortion, &s, bsize); @@ -4616,13 +4609,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, if (ref_frame == INTRA_FRAME) { TX_SIZE uv_tx; - - if (bsize == BLOCK_SIZE_SB64X64) { - vp9_build_intra_predictors_sb64y_s(xd); - } else { - assert(bsize == BLOCK_SIZE_SB32X32); - vp9_build_intra_predictors_sby_s(xd); - } + vp9_build_intra_predictors_sby_s(xd, bsize); super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, bsize, txfm_cache);
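
Note on the DC_PRED change in vp9_reconintra.c above: with rectangular blocks the number of border samples feeding the DC average (bw, bh, or bw + bh) is no longer guaranteed to be a power of two, so the patch replaces the ROUND_POWER_OF_TWO() shift with a rounded integer division over an explicit sample count. The standalone C sketch below is not part of the patch; the function name dc_predictor_sketch and its signature are made up for illustration, but the averaging rule mirrors the new DC_PRED case.

/* Minimal sketch of the rectangular-block DC prediction rule. */
#include <stdint.h>
#include <string.h>

static void dc_predictor_sketch(uint8_t *pred, int stride, int bw, int bh,
                                const uint8_t *above, const uint8_t *left,
                                int up_available, int left_available) {
  int i, r, average = 0, count = 0, expected_dc = 128;  /* 128 if no edges */

  if (up_available) {
    for (i = 0; i < bw; i++)
      average += above[i];
    count += bw;
  }
  if (left_available) {
    for (i = 0; i < bh; i++)
      average += left[i];
    count += bh;
  }
  if (count > 0)
    expected_dc = (average + (count >> 1)) / count;  /* rounded division */

  /* Fill the bw x bh block with the DC value. */
  for (r = 0; r < bh; r++) {
    memset(pred, expected_dc, bw);
    pred += stride;
  }
}

For reference, the new wrappers derive the pixel dimensions from BLOCK_SIZE_TYPE as bw = 4 << b_width_log2(bsize) and bh = 4 << b_height_log2(bsize) for luma, and reduce both log2 values by one for U/V under 4:2:0 subsampling. The previous shift-based average (shift = log2(bsize) - 1 + up_available + left_available) only worked because square blocks made the neighbor count a power of two.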