From 6125a1ed811df7ab5d99db5760a47c572ace7d9d Mon Sep 17 00:00:00 2001
From: John Koleszar
Date: Thu, 7 Feb 2013 10:09:05 -0800
Subject: [PATCH] Pass macroblock index to pick inter functions

Pass the current mb row and column around rather than the recon_yoffset
and recon_uvoffset, since those offsets will change from predictor to
predictor, based on the reference frame selection.

Change-Id: If3f9df059e00f5048ca729d3d083ff428e1859c1
---
 vp9/common/vp9_blockd.h       |   4 +-
 vp9/encoder/vp9_encodeframe.c | 144 ++++++++++++++----------------------------
 vp9/encoder/vp9_rdopt.c       |  75 +++++++++------------
 vp9/encoder/vp9_rdopt.h       |  20 +++++-
 4 files changed, 94 insertions(+), 149 deletions(-)

diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 28fafbd..0d51f06 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -323,9 +323,7 @@ typedef struct macroblockd {
   int fullpixel_mask;
 
   YV12_BUFFER_CONFIG pre;  /* Filtered copy of previous frame reconstruction */
-  struct {
-    uint8_t *y_buffer, *u_buffer, *v_buffer;
-  } second_pre;
+  YV12_BUFFER_CONFIG second_pre;
   YV12_BUFFER_CONFIG dst;
 
   MODE_INFO *prev_mode_info_context;
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index bb2b35c..927a1b9 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -47,15 +47,12 @@ int enc_debug = 0;
 extern void select_interp_filter_type(VP9_COMP *cpi);
 
 static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
-                              int recon_yoffset, int recon_uvoffset,
                               int output_enabled, int mb_row, int mb_col);
 
 static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
-                                int recon_yoffset, int recon_uvoffset,
                                 int output_enabled, int mb_row, int mb_col);
 
 static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
-                                int recon_yoffset, int recon_uvoffset,
                                 int output_enabled, int mb_row, int mb_col);
 
 static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
@@ -623,24 +620,12 @@ static unsigned find_seg_id(uint8_t *buf, int block_size,
 }
 
 static void set_offsets(VP9_COMP *cpi,
-                        int mb_row, int mb_col, int block_size,
-                        int *ref_yoffset, int *ref_uvoffset) {
+                        int mb_row, int mb_col, int block_size) {
   MACROBLOCK *const x = &cpi->mb;
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi;
   const int dst_fb_idx = cm->new_fb_idx;
-  const int recon_y_stride = cm->yv12_fb[dst_fb_idx].y_stride;
-  const int recon_uv_stride = cm->yv12_fb[dst_fb_idx].uv_stride;
-  const int recon_yoffset = 16 * mb_row * recon_y_stride + 16 * mb_col;
-  const int recon_uvoffset = 8 * mb_row * recon_uv_stride + 8 * mb_col;
-  const int src_y_stride = x->src.y_stride;
-  const int src_uv_stride = x->src.uv_stride;
-  const int src_yoffset = 16 * mb_row * src_y_stride + 16 * mb_col;
-  const int src_uvoffset = 8 * mb_row * src_uv_stride + 8 * mb_col;
-  const int ref_fb_idx = cm->active_ref_idx[cpi->lst_fb_idx];
-  const int ref_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
-  const int ref_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
   const int idx_map = mb_row * cm->mb_cols + mb_col;
   const int idx_str = xd->mode_info_stride * mb_row + mb_col;
 
@@ -666,9 +651,9 @@ static void set_offsets(VP9_COMP *cpi,
   xd->prev_mode_info_context = cm->prev_mi + idx_str;
 
   // Set up destination pointers
-  xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
-  xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
-  xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
+  setup_pred_block(&xd->dst,
+                   &cm->yv12_fb[dst_fb_idx],
+                   mb_row, mb_col);
 
   /* Set up limit values for MV components to prevent them from
    * extending beyond the UMV borders assuming 16x16 block size */
@@ -692,14 +677,8 @@ static void set_offsets(VP9_COMP *cpi,
   xd->left_available = (mb_col > cm->cur_tile_mb_col_start);
   xd->right_available = (mb_col + block_size < cm->cur_tile_mb_col_end);
 
-  /* Reference buffer offsets */
-  *ref_yoffset = (mb_row * ref_y_stride * 16) + (mb_col * 16);
-  *ref_uvoffset = (mb_row * ref_uv_stride * 8) + (mb_col * 8);
-
   /* set up source buffers */
-  x->src.y_buffer = cpi->Source->y_buffer + src_yoffset;
-  x->src.u_buffer = cpi->Source->u_buffer + src_uvoffset;
-  x->src.v_buffer = cpi->Source->v_buffer + src_uvoffset;
+  setup_pred_block(&x->src, cpi->Source, mb_row, mb_col);
 
   /* R/D setup */
   x->rddiv = cpi->RDDIV;
@@ -742,8 +721,8 @@ static void set_offsets(VP9_COMP *cpi,
 }
 
 static int pick_mb_modes(VP9_COMP *cpi,
-                         int mb_row,
-                         int mb_col,
+                         int mb_row0,
+                         int mb_col0,
                          TOKENEXTRA **tp,
                          int *totalrate,
                          int *totaldist) {
@@ -751,16 +730,15 @@ static int pick_mb_modes(VP9_COMP *cpi,
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   int i;
-  int recon_yoffset, recon_uvoffset;
   int splitmodes_used = 0;
   ENTROPY_CONTEXT_PLANES left_context[2];
   ENTROPY_CONTEXT_PLANES above_context[2];
   ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
-                                                      + mb_col;
+                                                      + mb_col0;
   /* Function should not modify L & A contexts; save and restore on exit */
   vpx_memcpy(left_context,
-             cm->left_context + (mb_row & 2),
+             cm->left_context + (mb_row0 & 2),
              sizeof(left_context));
   vpx_memcpy(above_context,
              initial_above_context_ptr,
@@ -769,17 +747,18 @@ static int pick_mb_modes(VP9_COMP *cpi,
   /* Encode MBs in raster order within the SB */
   for (i = 0; i < 4; i++) {
     const int x_idx = i & 1, y_idx = i >> 1;
+    const int mb_row = mb_row0 + y_idx;
+    const int mb_col = mb_col0 + x_idx;
     MB_MODE_INFO *mbmi;
 
-    if ((mb_row + y_idx >= cm->mb_rows) || (mb_col + x_idx >= cm->mb_cols)) {
+    if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
       // MB lies outside frame, move on
       continue;
     }
 
     // Index of the MB in the SB 0..3
     xd->mb_index = i;
-    set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16,
-                &recon_yoffset, &recon_uvoffset);
+    set_offsets(cpi, mb_row, mb_col, 16);
 
     if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
       vp9_activity_masking(cpi, x);
@@ -800,8 +779,8 @@ static int pick_mb_modes(VP9_COMP *cpi,
         *totaldist += d;
 
         // Dummy encode, do not do the tokenization
-        encode_macroblock(cpi, tp, recon_yoffset, recon_uvoffset, 0,
-                          mb_row + y_idx, mb_col + x_idx);
+        encode_macroblock(cpi, tp, 0, mb_row, mb_col);
+
         // Note the encoder may have changed the segment_id
 
         // Save the coding context
@@ -814,16 +793,14 @@ static int pick_mb_modes(VP9_COMP *cpi,
         if (enc_debug)
           printf("inter pick_mb_modes %d %d\n", mb_row, mb_col);
 #endif
-        vp9_pick_mode_inter_macroblock(cpi, x, recon_yoffset,
-                                       recon_uvoffset, &r, &d);
+        vp9_pick_mode_inter_macroblock(cpi, x, mb_row, mb_col, &r, &d);
         *totalrate += r;
         *totaldist += d;
 
         splitmodes_used += (mbmi->mode == SPLITMV);
 
         // Dummy encode, do not do the tokenization
-        encode_macroblock(cpi, tp, recon_yoffset, recon_uvoffset, 0,
-                          mb_row + y_idx, mb_col + x_idx);
+        encode_macroblock(cpi, tp, 0, mb_row, mb_col);
 
         seg_id = mbmi->segment_id;
         if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0) {
@@ -846,7 +823,7 @@ static int pick_mb_modes(VP9_COMP *cpi,
   }
 
   /* Restore L & A coding context to those in place on entry */
-  vpx_memcpy(cm->left_context + (mb_row & 2),
+  vpx_memcpy(cm->left_context + (mb_row0 & 2),
              left_context,
              sizeof(left_context));
   vpx_memcpy(initial_above_context_ptr,
@@ -865,9 +842,8 @@ static void pick_sb_modes(VP9_COMP *cpi,
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
-  int recon_yoffset, recon_uvoffset;
 
-  set_offsets(cpi, mb_row, mb_col, 32, &recon_yoffset, &recon_uvoffset);
+  set_offsets(cpi, mb_row, mb_col, 32);
   xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB32X32;
   if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
     vp9_activity_masking(cpi, x);
@@ -883,11 +859,7 @@ static void pick_sb_modes(VP9_COMP *cpi,
     vpx_memcpy(&x->sb32_context[xd->sb_index].mic, xd->mode_info_context,
                sizeof(MODE_INFO));
   } else {
-    vp9_rd_pick_inter_mode_sb32(cpi, x,
-                                recon_yoffset,
-                                recon_uvoffset,
-                                totalrate,
-                                totaldist);
+    vp9_rd_pick_inter_mode_sb32(cpi, x, mb_row, mb_col, totalrate, totaldist);
   }
 }
 
@@ -900,9 +872,8 @@ static void pick_sb64_modes(VP9_COMP *cpi,
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
-  int recon_yoffset, recon_uvoffset;
 
-  set_offsets(cpi, mb_row, mb_col, 64, &recon_yoffset, &recon_uvoffset);
+  set_offsets(cpi, mb_row, mb_col, 64);
   xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
   if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
     vp9_activity_masking(cpi, x);
@@ -918,11 +889,7 @@ static void pick_sb64_modes(VP9_COMP *cpi,
     vpx_memcpy(&x->sb64_context.mic, xd->mode_info_context,
                sizeof(MODE_INFO));
   } else {
-    vp9_rd_pick_inter_mode_sb64(cpi, x,
-                                recon_yoffset,
-                                recon_uvoffset,
-                                totalrate,
-                                totaldist);
+    vp9_rd_pick_inter_mode_sb64(cpi, x, mb_row, mb_col, totalrate, totaldist);
   }
 }
 
@@ -990,14 +957,13 @@ static void encode_sb(VP9_COMP *cpi,
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
-  int recon_yoffset, recon_uvoffset;
 
   cpi->sb32_count[is_sb]++;
   if (is_sb) {
-    set_offsets(cpi, mb_row, mb_col, 32, &recon_yoffset, &recon_uvoffset);
+    set_offsets(cpi, mb_row, mb_col, 32);
     update_state(cpi, &x->sb32_context[xd->sb_index], 32, output_enabled);
-    encode_superblock32(cpi, tp, recon_yoffset, recon_uvoffset,
+    encode_superblock32(cpi, tp,
                         output_enabled, mb_row, mb_col);
 
     if (output_enabled)
       update_stats(cpi);
@@ -1019,15 +985,14 @@ static void encode_sb(VP9_COMP *cpi,
         continue;
       }
 
-      set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16,
-                  &recon_yoffset, &recon_uvoffset);
+      set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, 16);
       xd->mb_index = i;
       update_state(cpi, &x->mb_context[xd->sb_index][i], 16, output_enabled);
 
       if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
         vp9_activity_masking(cpi, x);
-      encode_macroblock(cpi, tp, recon_yoffset, recon_uvoffset,
+      encode_macroblock(cpi, tp,
                         output_enabled, mb_row + y_idx, mb_col + x_idx);
 
       if (output_enabled)
        update_stats(cpi);
@@ -1062,11 +1027,9 @@ static void encode_sb64(VP9_COMP *cpi,
 
   cpi->sb64_count[is_sb[0] == 2]++;
   if (is_sb[0] == 2) {
-    int recon_yoffset, recon_uvoffset;
-
-    set_offsets(cpi, mb_row, mb_col, 64, &recon_yoffset, &recon_uvoffset);
+    set_offsets(cpi, mb_row, mb_col, 64);
     update_state(cpi, &x->sb64_context, 64, 1);
-    encode_superblock64(cpi, tp, recon_yoffset, recon_uvoffset,
+    encode_superblock64(cpi, tp,
                         1, mb_row, mb_col);
     update_stats(cpi);
 
@@ -2025,7 +1988,6 @@ static void update_sb64_skip_coeff_state(VP9_COMP *cpi,
 }
 
 static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
-                              int recon_yoffset, int recon_uvoffset,
                               int output_enabled,
                               int mb_row, int mb_col) {
   VP9_COMMON *const cm = &cpi->common;
@@ -2122,9 +2084,9 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
     else
       ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
 
-    xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
-    xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
-    xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+    setup_pred_block(&xd->pre,
+                     &cpi->common.yv12_fb[ref_fb_idx],
+                     mb_row, mb_col);
 
     if (mbmi->second_ref_frame > 0) {
      int second_ref_fb_idx;
@@ -2136,12 +2098,9 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
      else
        second_ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
 
-      xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
-          recon_yoffset;
-      xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
-          recon_uvoffset;
-      xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
-          recon_uvoffset;
+      setup_pred_block(&xd->second_pre,
+                       &cpi->common.yv12_fb[second_ref_fb_idx],
+                       mb_row, mb_col);
     }
 
     if (!x->skip) {
@@ -2282,7 +2241,6 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
 }
 
 static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
-                                int recon_yoffset, int recon_uvoffset,
                                 int output_enabled, int mb_row, int mb_col) {
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
@@ -2361,9 +2319,9 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
     else
       ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
 
-    xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
-    xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
-    xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+    setup_pred_block(&xd->pre,
+                     &cpi->common.yv12_fb[ref_fb_idx],
+                     mb_row, mb_col);
 
     if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
       int second_ref_fb_idx;
@@ -2375,12 +2333,9 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
      else
        second_ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
 
-      xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
-          recon_yoffset;
-      xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
-          recon_uvoffset;
-      xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
-          recon_uvoffset;
+      setup_pred_block(&xd->second_pre,
+                       &cpi->common.yv12_fb[second_ref_fb_idx],
+                       mb_row, mb_col);
     }
 
     vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
@@ -2513,7 +2468,6 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
 }
 
 static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
-                                int recon_yoffset, int recon_uvoffset,
                                 int output_enabled, int mb_row, int mb_col) {
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
@@ -2591,12 +2545,9 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
   else
     ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
 
-  xd->pre.y_buffer =
-      cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
-  xd->pre.u_buffer =
-      cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
-  xd->pre.v_buffer =
-      cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+  setup_pred_block(&xd->pre,
+                   &cpi->common.yv12_fb[ref_fb_idx],
+                   mb_row, mb_col);
 
   if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
     int second_ref_fb_idx;
@@ -2608,12 +2559,9 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
    else
      second_ref_fb_idx = cpi->common.active_ref_idx[cpi->alt_fb_idx];
 
-    xd->second_pre.y_buffer =
-        cpi->common.yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset;
-    xd->second_pre.u_buffer =
-        cpi->common.yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset;
-    xd->second_pre.v_buffer =
-        cpi->common.yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset;
+    setup_pred_block(&xd->second_pre,
+                     &cpi->common.yv12_fb[second_ref_fb_idx],
+                     mb_row, mb_col);
   }
 
   vp9_build_inter64x64_predictors_sb(xd, xd->dst.y_buffer,
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 3703741..325d6ed 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -3167,20 +3167,16 @@ static void inter_mode_cost(VP9_COMP *cpi, MACROBLOCK *x,
 
 static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
                                int idx, MV_REFERENCE_FRAME frame_type,
                                int block_size,
-                               int recon_yoffset, int recon_uvoffset,
+                               int mb_row, int mb_col,
                                int_mv frame_nearest_mv[MAX_REF_FRAMES],
                                int_mv frame_near_mv[MAX_REF_FRAMES],
                                int frame_mdcounts[4][4],
-                               uint8_t *y_buffer[4],
-                               uint8_t *u_buffer[4],
-                               uint8_t *v_buffer[4]) {
+                               YV12_BUFFER_CONFIG yv12_mb[4]) {
   YV12_BUFFER_CONFIG *yv12 = &cpi->common.yv12_fb[idx];
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
 
-  y_buffer[frame_type] = yv12->y_buffer + recon_yoffset;
-  u_buffer[frame_type] = yv12->u_buffer + recon_uvoffset;
-  v_buffer[frame_type] = yv12->v_buffer + recon_uvoffset;
+  setup_pred_block(&yv12_mb[frame_type], yv12, mb_row, mb_col);
 
   // Gets an initial list of candidate vectors from neighbours and orders them
   vp9_find_mv_refs(&cpi->common, xd, xd->mode_info_context,
@@ -3194,7 +3190,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
   vp9_find_best_ref_mvs(xd,
                         cpi->common.error_resilient_mode ||
                         cpi->common.frame_parallel_decoding_mode ?
-                        0 : y_buffer[frame_type],
+                        0 : yv12_mb[frame_type].y_buffer,
                         yv12->y_stride,
                         mbmi->ref_mvs[frame_type],
                         &frame_nearest_mv[frame_type],
@@ -3202,7 +3198,7 @@
 
   // Further refinement that is encode side only to test the top few candidates
   // in full and choose the best as the centre point for subsequent searches.
-  mv_pred(cpi, x, y_buffer[frame_type], yv12->y_stride,
+  mv_pred(cpi, x, yv12_mb[frame_type].y_buffer, yv12->y_stride,
           frame_type, block_size);
 }
 
@@ -3219,7 +3215,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                  int *rate_y, int *distortion_y,
                                  int *rate_uv, int *distortion_uv,
                                  int *mode_excluded, int *disable_skip,
-                                 int recon_yoffset, int mode_index,
+                                 int mode_index,
                                  int_mv frame_mv[MB_MODE_COUNT]
                                                 [MAX_REF_FRAMES]) {
   VP9_COMMON *cm = &cpi->common;
@@ -3514,7 +3510,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
 }
 
 static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
-                               int recon_yoffset, int recon_uvoffset,
+                               int mb_row, int mb_col,
                                int *returnrate, int *returndistortion,
                                int64_t *returnintra) {
   static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
@@ -3565,7 +3561,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
 
   int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
   int frame_mdcounts[4][4];
-  uint8_t *y_buffer[4], *u_buffer[4], *v_buffer[4];
+  YV12_BUFFER_CONFIG yv12_mb[4];
   unsigned int ref_costs[MAX_REF_FRAMES];
   int_mv seg_mvs[NB_PARTITIONINGS][16 /* n_blocks */][MAX_REF_FRAMES - 1];
 
@@ -3597,23 +3593,23 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
 
   if (cpi->ref_frame_flags & VP9_LAST_FLAG) {
     setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->lst_fb_idx],
-                       LAST_FRAME, BLOCK_16X16, recon_yoffset, recon_uvoffset,
+                       LAST_FRAME, BLOCK_16X16, mb_row, mb_col,
                        frame_mv[NEARESTMV], frame_mv[NEARMV],
-                       frame_mdcounts, y_buffer, u_buffer, v_buffer);
+                       frame_mdcounts, yv12_mb);
   }
 
   if (cpi->ref_frame_flags & VP9_GOLD_FLAG) {
     setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->gld_fb_idx],
-                       GOLDEN_FRAME, BLOCK_16X16, recon_yoffset, recon_uvoffset,
+                       GOLDEN_FRAME, BLOCK_16X16, mb_row, mb_col,
                        frame_mv[NEARESTMV], frame_mv[NEARMV],
-                       frame_mdcounts, y_buffer, u_buffer, v_buffer);
+                       frame_mdcounts, yv12_mb);
   }
 
   if (cpi->ref_frame_flags & VP9_ALT_FLAG) {
     setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->alt_fb_idx],
-                       ALTREF_FRAME, BLOCK_16X16, recon_yoffset, recon_uvoffset,
+                       ALTREF_FRAME, BLOCK_16X16, mb_row, mb_col,
                        frame_mv[NEARESTMV], frame_mv[NEARMV],
-                       frame_mdcounts, y_buffer, u_buffer, v_buffer);
+                       frame_mdcounts, yv12_mb);
   }
 
   *returnintra = INT64_MAX;
@@ -3731,9 +3727,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
 
     if (mbmi->ref_frame) {
       int ref = mbmi->ref_frame;
-      xd->pre.y_buffer = y_buffer[ref];
-      xd->pre.u_buffer = u_buffer[ref];
-      xd->pre.v_buffer = v_buffer[ref];
+      xd->pre = yv12_mb[ref];
       best_ref_mv = mbmi->ref_mvs[ref][0];
       vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
     }
@@ -3741,9 +3735,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
 
     if (mbmi->second_ref_frame > 0) {
       int ref = mbmi->second_ref_frame;
-      xd->second_pre.y_buffer = y_buffer[ref];
-      xd->second_pre.u_buffer = u_buffer[ref];
-      xd->second_pre.v_buffer = v_buffer[ref];
+      xd->second_pre = yv12_mb[ref];
       second_best_ref_mv = mbmi->ref_mvs[ref][0];
     }
 
@@ -3980,7 +3972,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
 #endif
                                   &rate_y, &distortion,
                                   &rate_uv, &distortion_uv,
-                                  &mode_excluded, &disable_skip, recon_yoffset,
+                                  &mode_excluded, &disable_skip,
                                   mode_index, frame_mv);
       if (this_rd == INT64_MAX)
         continue;
@@ -4464,7 +4456,7 @@ void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
 }
 
 static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
-                                         int recon_yoffset, int recon_uvoffset,
+                                         int mb_row, int mb_col,
                                          int *returnrate,
                                          int *returndistortion,
                                          int block_size) {
@@ -4478,9 +4470,7 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
   int comp_pred, i;
   int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
   int frame_mdcounts[4][4];
-  uint8_t *y_buffer[4];
-  uint8_t *u_buffer[4];
-  uint8_t *v_buffer[4];
+  YV12_BUFFER_CONFIG yv12_mb[4];
   static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                     VP9_ALT_FLAG };
   int idx_list[4] = {0,
@@ -4526,9 +4516,9 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
     if (cpi->ref_frame_flags & flag_list[ref_frame]) {
       setup_buffer_inter(cpi, x, idx_list[ref_frame], ref_frame, block_size,
-                         recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
+                         mb_row, mb_col, frame_mv[NEARESTMV],
                          frame_mv[NEARMV], frame_mdcounts,
-                         y_buffer, u_buffer, v_buffer);
+                         yv12_mb);
     }
     frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
     frame_mv[ZEROMV][ref_frame].as_int = 0;
@@ -4650,9 +4640,7 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
        continue;
 
      mbmi->second_ref_frame = second_ref;
-      xd->second_pre.y_buffer = y_buffer[second_ref];
-      xd->second_pre.u_buffer = u_buffer[second_ref];
-      xd->second_pre.v_buffer = v_buffer[second_ref];
+      xd->second_pre = yv12_mb[second_ref];
 
      mode_excluded = mode_excluded ?
          mode_excluded : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
@@ -4670,9 +4658,7 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
      }
    }
 
-    xd->pre.y_buffer = y_buffer[ref_frame];
-    xd->pre.u_buffer = u_buffer[ref_frame];
-    xd->pre.v_buffer = v_buffer[ref_frame];
+    xd->pre = yv12_mb[ref_frame];
 
    vpx_memcpy(mdcounts, frame_mdcounts[ref_frame], sizeof(mdcounts));
    // If the segment reference frame feature is enabled....
@@ -4750,7 +4736,7 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 #endif
                                   &rate_y, &distortion_y,
                                   &rate_uv, &distortion_uv,
-                                  &mode_excluded, &disable_skip, recon_yoffset,
+                                  &mode_excluded, &disable_skip,
                                   mode_index, frame_mv);
       if (this_rd == INT64_MAX)
         continue;
@@ -5019,24 +5005,23 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 }
 
 int64_t vp9_rd_pick_inter_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
-                                    int recon_yoffset, int recon_uvoffset,
+                                    int mb_row, int mb_col,
                                     int *returnrate,
                                     int *returndistortion) {
-  return vp9_rd_pick_inter_mode_sb(cpi, x, recon_yoffset, recon_uvoffset,
+  return vp9_rd_pick_inter_mode_sb(cpi, x, mb_row, mb_col,
                                    returnrate, returndistortion, BLOCK_32X32);
 }
 
 int64_t vp9_rd_pick_inter_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
-                                    int recon_yoffset, int recon_uvoffset,
+                                    int mb_row, int mb_col,
                                     int *returnrate,
                                     int *returndistortion) {
-  return vp9_rd_pick_inter_mode_sb(cpi, x, recon_yoffset, recon_uvoffset,
+  return vp9_rd_pick_inter_mode_sb(cpi, x, mb_row, mb_col,
                                    returnrate, returndistortion, BLOCK_64X64);
 }
 
 void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
-                                    int recon_yoffset,
-                                    int recon_uvoffset,
+                                    int mb_row, int mb_col,
                                     int *totalrate, int *totaldist) {
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
@@ -5054,7 +5039,7 @@ void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
   {
     int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
 
-    rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
+    rd_pick_inter_mode(cpi, x, mb_row, mb_col, &rate,
                        &distortion, &intra_error);
 
     /* restore cpi->zbin_mode_boost_enabled */
diff --git a/vp9/encoder/vp9_rdopt.h b/vp9/encoder/vp9_rdopt.h
index 8ee2c0b..710ae58 100644
--- a/vp9/encoder/vp9_rdopt.h
+++ b/vp9/encoder/vp9_rdopt.h
@@ -29,15 +29,15 @@ extern void vp9_rd_pick_intra_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
                                         int *r, int *d);
 
 extern void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
-                                           int ref_yoffset, int ref_uvoffset,
+                                           int mb_row, int mb_col,
                                            int *r, int *d);
 
 extern int64_t vp9_rd_pick_inter_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
-                                           int ref_yoffset, int ref_uvoffset,
+                                           int mb_row, int mb_col,
                                            int *r, int *d);
 
 extern int64_t vp9_rd_pick_inter_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
-                                           int ref_yoffset, int ref_uvoffset,
+                                           int mb_row, int mb_col,
                                            int *r, int *d);
 
 extern void vp9_init_me_luts();
@@ -45,4 +45,18 @@ extern void vp9_init_me_luts();
 extern void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb,
                                    int_mv *mv);
 
+static void setup_pred_block(YV12_BUFFER_CONFIG *dst,
+                             const YV12_BUFFER_CONFIG *src,
+                             int mb_row, int mb_col) {
+  const int recon_y_stride = src->y_stride;
+  const int recon_uv_stride = src->uv_stride;
+  const int recon_yoffset = 16 * mb_row * recon_y_stride + 16 * mb_col;
+  const int recon_uvoffset = 8 * mb_row * recon_uv_stride + 8 * mb_col;
+
+  *dst = *src;
+  dst->y_buffer += recon_yoffset;
+  dst->u_buffer += recon_uvoffset;
+  dst->v_buffer += recon_uvoffset;
+}
+
 #endif  // VP9_ENCODER_VP9_RDOPT_H_
-- 
2.7.4
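
Illustrative note (not part of the patch): setup_pred_block() recomputes the
buffer offsets from (mb_row, mb_col) using the strides of whichever
YV12_BUFFER_CONFIG it is handed, which is why the pick/encode functions now
receive the block position rather than a single precomputed
recon_yoffset/recon_uvoffset. A minimal standalone sketch of that arithmetic
follows; the struct, function names and stride values below are made up for
the example, only the offset formula mirrors setup_pred_block():

    #include <stdio.h>

    /* Simplified stand-in for YV12_BUFFER_CONFIG: only the field needed to
     * show the luma offset math used by setup_pred_block(). */
    typedef struct { int y_stride; } buf_cfg;

    /* Same arithmetic as setup_pred_block(): the luma byte offset of
     * macroblock (mb_row, mb_col) depends on the buffer's own stride. */
    static int y_offset(const buf_cfg *b, int mb_row, int mb_col) {
      return 16 * mb_row * b->y_stride + 16 * mb_col;
    }

    int main(void) {
      /* Hypothetical strides for two different reference buffers. */
      const buf_cfg last   = { 672 };
      const buf_cfg altref = { 704 };

      /* The same block position maps to different offsets in each buffer,
       * so the offset has to be recomputed per selected reference frame. */
      printf("last:   %d\n", y_offset(&last, 3, 5));    /* prints 32336 */
      printf("altref: %d\n", y_offset(&altref, 3, 5));  /* prints 33872 */
      return 0;
    }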