From: Fritz Koenig
Date: Thu, 22 Jul 2010 12:07:32 +0000 (-0400)
Subject: Swap alt/gold/new/last frame buffer ptrs instead of copying.
X-Git-Tag: 1.0_branch~970
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0ce39012823b522c611db87f0810c540124e6e9d;p=profile%2Fivi%2Flibvpx.git

Swap alt/gold/new/last frame buffer ptrs instead of copying.

At the end of the decode, frame buffers were being copied.  The frames
are not updated after the copy, they are just for reference on later
frames.

This change allows multiple references to the same frame buffer
instead of copying it.  Changes needed to be made to the encoder to
handle this.  The encoder is still doing frame buffer copies in
similar places where pointer reference could be done.

Change-Id: I7c38be4d23979cc49b5f17241ca3a78703803e66
---

diff --git a/vp8/common/alloccommon.c b/vp8/common/alloccommon.c
index c3368b5..d0a138d 100644
--- a/vp8/common/alloccommon.c
+++ b/vp8/common/alloccommon.c
@@ -31,13 +31,15 @@ void vp8_update_mode_info_border(MODE_INFO *mi, int rows, int cols)
             vpx_memset(&mi[i*cols-1], 0, sizeof(MODE_INFO));
     }
 }
+
 void vp8_de_alloc_frame_buffers(VP8_COMMON *oci)
 {
+    int i;
+
+    for (i = 0; i < NUM_YV12_BUFFERS; i++)
+        vp8_yv12_de_alloc_frame_buffer(&oci->yv12_fb[i]);
+
     vp8_yv12_de_alloc_frame_buffer(&oci->temp_scale_frame);
-    vp8_yv12_de_alloc_frame_buffer(&oci->new_frame);
-    vp8_yv12_de_alloc_frame_buffer(&oci->last_frame);
-    vp8_yv12_de_alloc_frame_buffer(&oci->golden_frame);
-    vp8_yv12_de_alloc_frame_buffer(&oci->alt_ref_frame);
     vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer);

     vpx_free(oci->above_context[Y1CONTEXT]);
@@ -61,6 +63,8 @@ void vp8_de_alloc_frame_buffers(VP8_COMMON *oci)

 int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height)
 {
+    int i;
+
     vp8_de_alloc_frame_buffers(oci);

     // our internal buffers are always multiples of 16
@@ -71,32 +75,28 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height)
         height += 16 - (height & 0xf);

-    if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16, VP8BORDERINPIXELS) < 0)
+    for (i = 0; i < NUM_YV12_BUFFERS; i++)
     {
-        vp8_de_alloc_frame_buffers(oci);
-        return ALLOC_FAILURE;
-    }
-
+        oci->fb_idx_ref_cnt[0] = 0;

-    if (vp8_yv12_alloc_frame_buffer(&oci->new_frame, width, height, VP8BORDERINPIXELS) < 0)
-    {
-        vp8_de_alloc_frame_buffers(oci);
-        return ALLOC_FAILURE;
+        if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height, VP8BORDERINPIXELS) < 0)
+        {
+            vp8_de_alloc_frame_buffers(oci);
+            return ALLOC_FAILURE;
+        }
     }

-    if (vp8_yv12_alloc_frame_buffer(&oci->last_frame, width, height, VP8BORDERINPIXELS) < 0)
-    {
-        vp8_de_alloc_frame_buffers(oci);
-        return ALLOC_FAILURE;
-    }
+    oci->new_fb_idx = 0;
+    oci->lst_fb_idx = 1;
+    oci->gld_fb_idx = 2;
+    oci->alt_fb_idx = 3;

-    if (vp8_yv12_alloc_frame_buffer(&oci->golden_frame, width, height, VP8BORDERINPIXELS) < 0)
-    {
-        vp8_de_alloc_frame_buffers(oci);
-        return ALLOC_FAILURE;
-    }
+    oci->fb_idx_ref_cnt[0] = 1;
+    oci->fb_idx_ref_cnt[1] = 1;
+    oci->fb_idx_ref_cnt[2] = 1;
+    oci->fb_idx_ref_cnt[3] = 1;

-    if (vp8_yv12_alloc_frame_buffer(&oci->alt_ref_frame, width, height, VP8BORDERINPIXELS) < 0)
+    if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16, VP8BORDERINPIXELS) < 0)
     {
         vp8_de_alloc_frame_buffers(oci);
         return ALLOC_FAILURE;
diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h
index d2fbc86..503ad5d 100644
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -33,6 +33,7 @@ void vp8_initialize_common(void);

 #define MAXQ 127
 #define QINDEX_RANGE (MAXQ + 1)
+#define NUM_YV12_BUFFERS 4

 typedef struct frame_contexts
 {
@@ -94,11 +95,12 @@ typedef struct VP8Common
     YUV_TYPE clr_type;
     CLAMP_TYPE clamp_type;

-    YV12_BUFFER_CONFIG last_frame;
-    YV12_BUFFER_CONFIG golden_frame;
-    YV12_BUFFER_CONFIG alt_ref_frame;
-    YV12_BUFFER_CONFIG new_frame;
     YV12_BUFFER_CONFIG *frame_to_show;
+
+    YV12_BUFFER_CONFIG yv12_fb[NUM_YV12_BUFFERS];
+    int fb_idx_ref_cnt[NUM_YV12_BUFFERS];
+    int new_fb_idx, lst_fb_idx, gld_fb_idx, alt_fb_idx;
+
     YV12_BUFFER_CONFIG post_proc_buffer;
     YV12_BUFFER_CONFIG temp_scale_frame;

diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index 8d9db10..a5850db 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -381,8 +381,10 @@ void vp8_decode_mb_row(VP8D_COMP *pbi,
     int i;
     int recon_yoffset, recon_uvoffset;
     int mb_col;
-    int recon_y_stride = pc->last_frame.y_stride;
-    int recon_uv_stride = pc->last_frame.uv_stride;
+    int ref_fb_idx = pc->lst_fb_idx;
+    int dst_fb_idx = pc->new_fb_idx;
+    int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride;
+    int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;

     vpx_memset(pc->left_context, 0, sizeof(pc->left_context));
     recon_yoffset = mb_row * recon_y_stride * 16;
@@ -419,33 +421,23 @@ void vp8_decode_mb_row(VP8D_COMP *pbi,
         xd->mb_to_left_edge = -((mb_col * 16) << 3);
         xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

-        xd->dst.y_buffer = pc->new_frame.y_buffer + recon_yoffset;
-        xd->dst.u_buffer = pc->new_frame.u_buffer + recon_uvoffset;
-        xd->dst.v_buffer = pc->new_frame.v_buffer + recon_uvoffset;
+        xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+        xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+        xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;

         xd->left_available = (mb_col != 0);

         // Select the appropriate reference frame for this MB
         if (xd->mbmi.ref_frame == LAST_FRAME)
-        {
-            xd->pre.y_buffer = pc->last_frame.y_buffer + recon_yoffset;
-            xd->pre.u_buffer = pc->last_frame.u_buffer + recon_uvoffset;
-            xd->pre.v_buffer = pc->last_frame.v_buffer + recon_uvoffset;
-        }
+            ref_fb_idx = pc->lst_fb_idx;
         else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
-        {
-            // Golden frame reconstruction buffer
-            xd->pre.y_buffer = pc->golden_frame.y_buffer + recon_yoffset;
-            xd->pre.u_buffer = pc->golden_frame.u_buffer + recon_uvoffset;
-            xd->pre.v_buffer = pc->golden_frame.v_buffer + recon_uvoffset;
-        }
+            ref_fb_idx = pc->gld_fb_idx;
         else
-        {
-            // Alternate reference frame reconstruction buffer
-            xd->pre.y_buffer = pc->alt_ref_frame.y_buffer + recon_yoffset;
-            xd->pre.u_buffer = pc->alt_ref_frame.u_buffer + recon_uvoffset;
-            xd->pre.v_buffer = pc->alt_ref_frame.v_buffer + recon_uvoffset;
-        }
+            ref_fb_idx = pc->alt_fb_idx;
+
+        xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
+        xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
+        xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

         vp8_build_uvmvs(xd, pc->full_pixel);

@@ -475,7 +467,7 @@ void vp8_decode_mb_row(VP8D_COMP *pbi,

     // adjust to the next row of mbs
     vp8_extend_mb_row(
-        &pc->new_frame,
+        &pc->yv12_fb[dst_fb_idx],
         xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8
     );
@@ -890,11 +882,11 @@ int vp8_decode_frame(VP8D_COMP *pbi)
         }
     }

-    vpx_memcpy(&xd->pre, &pc->last_frame, sizeof(YV12_BUFFER_CONFIG));
-    vpx_memcpy(&xd->dst, &pc->new_frame, sizeof(YV12_BUFFER_CONFIG));
+    vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->lst_fb_idx], sizeof(YV12_BUFFER_CONFIG));
+    vpx_memcpy(&xd->dst, &pc->yv12_fb[pc->new_fb_idx], sizeof(YV12_BUFFER_CONFIG));

     // set up frame new frame for intra coded blocks
-    vp8_setup_intra_recon(&pc->new_frame);
+    vp8_setup_intra_recon(&pc->yv12_fb[pc->new_fb_idx]);

     vp8_setup_block_dptrs(xd);

diff --git a/vp8/decoder/onyxd_if.c b/vp8/decoder/onyxd_if.c
index 60ca74a..28f9908 100644
--- a/vp8/decoder/onyxd_if.c
+++ b/vp8/decoder/onyxd_if.c
@@ -180,38 +180,38 @@ int vp8dx_get_reference(VP8D_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_C
 {
     VP8D_COMP *pbi = (VP8D_COMP *) ptr;
     VP8_COMMON *cm = &pbi->common;
+    int ref_fb_idx;

     if (ref_frame_flag == VP8_LAST_FLAG)
-        vp8_yv12_copy_frame_ptr(&cm->last_frame, sd);
-
+        ref_fb_idx = cm->lst_fb_idx;
     else if (ref_frame_flag == VP8_GOLD_FLAG)
-        vp8_yv12_copy_frame_ptr(&cm->golden_frame, sd);
-
+        ref_fb_idx = cm->gld_fb_idx;
     else if (ref_frame_flag == VP8_ALT_FLAG)
-        vp8_yv12_copy_frame_ptr(&cm->alt_ref_frame, sd);
-
+        ref_fb_idx = cm->alt_fb_idx;
     else
         return -1;

+    vp8_yv12_copy_frame_ptr(&cm->yv12_fb[ref_fb_idx], sd);
+
     return 0;
 }

 int vp8dx_set_reference(VP8D_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd)
 {
     VP8D_COMP *pbi = (VP8D_COMP *) ptr;
     VP8_COMMON *cm = &pbi->common;
+    int ref_fb_idx;

     if (ref_frame_flag == VP8_LAST_FLAG)
-        vp8_yv12_copy_frame_ptr(sd, &cm->last_frame);
-
+        ref_fb_idx = cm->lst_fb_idx;
     else if (ref_frame_flag == VP8_GOLD_FLAG)
-        vp8_yv12_copy_frame_ptr(sd, &cm->golden_frame);
-
+        ref_fb_idx = cm->gld_fb_idx;
     else if (ref_frame_flag == VP8_ALT_FLAG)
-        vp8_yv12_copy_frame_ptr(sd, &cm->alt_ref_frame);
-
+        ref_fb_idx = cm->alt_fb_idx;
     else
         return -1;

+    vp8_yv12_copy_frame_ptr(sd, &cm->yv12_fb[ref_fb_idx]);
+
     return 0;
 }

@@ -221,12 +221,95 @@ extern void vp8_push_neon(INT64 *store);
 extern void vp8_pop_neon(INT64 *store);
 static INT64 dx_store_reg[8];
 #endif
+
+static int get_free_fb (VP8_COMMON *cm)
+{
+    int i;
+    for (i = 0; i < NUM_YV12_BUFFERS; i++)
+        if (cm->fb_idx_ref_cnt[i] == 0)
+            break;
+
+    cm->fb_idx_ref_cnt[i] = 1;
+    return i;
+}
+
+static void ref_cnt_fb (int *buf, int *idx, int new_idx)
+{
+    if (buf[*idx] > 0)
+        buf[*idx]--;
+
+    *idx = new_idx;
+
+    buf[new_idx]++;
+}
+
+// If any buffer copy / swapping is signalled it should be done here.
+static int swap_frame_buffers (VP8_COMMON *cm)
+{
+    int fb_to_update_with, err = 0;
+
+    if (cm->refresh_last_frame)
+        fb_to_update_with = cm->lst_fb_idx;
+    else
+        fb_to_update_with = cm->new_fb_idx;
+
+    // The alternate reference frame or golden frame can be updated
+    // using the new, last, or golden/alt ref frame.  If it
+    // is updated using the newly decoded frame it is a refresh.
+    // An update using the last or golden/alt ref frame is a copy.
+    if (cm->copy_buffer_to_arf)
+    {
+        int new_fb = 0;
+
+        if (cm->copy_buffer_to_arf == 1)
+            new_fb = fb_to_update_with;
+        else if (cm->copy_buffer_to_arf == 2)
+            new_fb = cm->gld_fb_idx;
+        else
+            err = -1;
+
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->alt_fb_idx, new_fb);
+    }
+
+    if (cm->copy_buffer_to_gf)
+    {
+        int new_fb = 0;
+
+        if (cm->copy_buffer_to_gf == 1)
+            new_fb = fb_to_update_with;
+        else if (cm->copy_buffer_to_gf == 2)
+            new_fb = cm->alt_fb_idx;
+        else
+            err = -1;
+
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->gld_fb_idx, new_fb);
+    }
+
+    if (cm->refresh_golden_frame)
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->gld_fb_idx, cm->new_fb_idx);
+
+    if (cm->refresh_alt_ref_frame)
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->alt_fb_idx, cm->new_fb_idx);
+
+    if (cm->refresh_last_frame)
+    {
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->lst_fb_idx, cm->new_fb_idx);
+
+        cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx];
+    }
+    else
+        cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+
+    cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+
+    return err;
+}
+
 int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsigned char *source, INT64 time_stamp)
 {
     VP8D_COMP *pbi = (VP8D_COMP *) ptr;
     VP8_COMMON *cm = &pbi->common;
     int retcode = 0;
-
     struct vpx_usec_timer timer;

     //  if(pbi->ready_for_new_data == 0)
@@ -257,6 +340,8 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
     pbi->Source = source;
     pbi->source_sz = size;

+    cm->new_fb_idx = get_free_fb (cm);
+
     retcode = vp8_decode_frame(pbi);

     if (retcode < 0)
@@ -275,15 +360,11 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
     if (pbi->b_multithreaded_lf && pbi->common.filter_level != 0)
         vp8_stop_lfthread(pbi);

-    if (cm->refresh_last_frame)
-    {
-        vp8_swap_yv12_buffer(&cm->last_frame, &cm->new_frame);
-
-        cm->frame_to_show = &cm->last_frame;
-    }
-    else
+    if (swap_frame_buffers (cm))
     {
-        cm->frame_to_show = &cm->new_frame;
+        pbi->common.error.error_code = VPX_CODEC_ERROR;
+        pbi->common.error.setjmp = 0;
+        return -1;
     }

     if (!pbi->b_multithreaded_lf)
@@ -313,49 +394,6 @@ int vp8dx_receive_compressed_data(VP8D_PTR ptr, unsigned long size, const unsign
         write_dx_frame_to_file(cm->frame_to_show, cm->current_video_frame);
 #endif

-    // If any buffer copy / swaping is signalled it should be done here.
-    if (cm->copy_buffer_to_arf)
-    {
-        if (cm->copy_buffer_to_arf == 1)
-        {
-            if (cm->refresh_last_frame)
-                vp8_yv12_copy_frame_ptr(&cm->new_frame, &cm->alt_ref_frame);
-            else
-                vp8_yv12_copy_frame_ptr(&cm->last_frame, &cm->alt_ref_frame);
-        }
-        else if (cm->copy_buffer_to_arf == 2)
-            vp8_yv12_copy_frame_ptr(&cm->golden_frame, &cm->alt_ref_frame);
-    }
-    if (cm->copy_buffer_to_gf)
-    {
-        if (cm->copy_buffer_to_gf == 1)
-        {
-            if (cm->refresh_last_frame)
-                vp8_yv12_copy_frame_ptr(&cm->new_frame, &cm->golden_frame);
-            else
-                vp8_yv12_copy_frame_ptr(&cm->last_frame, &cm->golden_frame);
-        }
-        else if (cm->copy_buffer_to_gf == 2)
-            vp8_yv12_copy_frame_ptr(&cm->alt_ref_frame, &cm->golden_frame);
-    }
-
-    // Should the golden or alternate reference frame be refreshed?
-    if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
-    {
-        if (cm->refresh_golden_frame)
-            vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->golden_frame);
-        if (cm->refresh_alt_ref_frame)
-            vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->alt_ref_frame);
-        //vpx_log("Decoder: recovery frame received \n");
-
-        // Update data structures that monitors GF useage
-        vpx_memset(cm->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
-        cm->gf_active_count = cm->mb_rows * cm->mb_cols;
-    }
-
     vp8_clear_system_state();

     vpx_usec_timer_mark(&timer);

diff --git a/vp8/decoder/threading.c b/vp8/decoder/threading.c
index 18c8da0..752081e 100644
--- a/vp8/decoder/threading.c
+++ b/vp8/decoder/threading.c
@@ -59,11 +59,8 @@ void vp8_setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC
     mbd->frames_since_golden = pc->frames_since_golden;
     mbd->frames_till_alt_ref_frame = pc->frames_till_alt_ref_frame;

-    mbd->pre = pc->last_frame;
-    mbd->dst = pc->new_frame;
-
-
-
+    mbd->pre = pc->yv12_fb[pc->lst_fb_idx];
+    mbd->dst = pc->yv12_fb[pc->new_fb_idx];

     vp8_setup_block_dptrs(mbd);
     vp8_build_block_doffsets(mbd);
@@ -119,8 +116,10 @@ THREAD_FUNCTION vp8_thread_decoding_proc(void *p_data)
     int i;
     int recon_yoffset, recon_uvoffset;
     int mb_col;
-    int recon_y_stride = pc->last_frame.y_stride;
-    int recon_uv_stride = pc->last_frame.uv_stride;
+    int ref_fb_idx = pc->lst_fb_idx;
+    int dst_fb_idx = pc->new_fb_idx;
+    int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride;
+    int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;

     volatile int *last_row_current_mb_col;

@@ -172,33 +171,23 @@ THREAD_FUNCTION vp8_thread_decoding_proc(void *p_data)
         xd->mb_to_left_edge = -((mb_col * 16) << 3);
         xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

-        xd->dst.y_buffer = pc->new_frame.y_buffer + recon_yoffset;
-        xd->dst.u_buffer = pc->new_frame.u_buffer + recon_uvoffset;
-        xd->dst.v_buffer = pc->new_frame.v_buffer + recon_uvoffset;
+        xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+        xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+        xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;

         xd->left_available = (mb_col != 0);

         // Select the appropriate reference frame for this MB
         if (xd->mbmi.ref_frame == LAST_FRAME)
-        {
-            xd->pre.y_buffer = pc->last_frame.y_buffer + recon_yoffset;
-            xd->pre.u_buffer = pc->last_frame.u_buffer + recon_uvoffset;
-            xd->pre.v_buffer = pc->last_frame.v_buffer + recon_uvoffset;
-        }
+            ref_fb_idx = pc->lst_fb_idx;
         else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
-        {
-            // Golden frame reconstruction buffer
-            xd->pre.y_buffer = pc->golden_frame.y_buffer + recon_yoffset;
-            xd->pre.u_buffer = pc->golden_frame.u_buffer + recon_uvoffset;
-            xd->pre.v_buffer = pc->golden_frame.v_buffer + recon_uvoffset;
-        }
+            ref_fb_idx = pc->gld_fb_idx;
         else
-        {
-            // Alternate reference frame reconstruction buffer
-            xd->pre.y_buffer = pc->alt_ref_frame.y_buffer + recon_yoffset;
-            xd->pre.u_buffer = pc->alt_ref_frame.u_buffer + recon_uvoffset;
-            xd->pre.v_buffer = pc->alt_ref_frame.v_buffer + recon_uvoffset;
-        }
+            ref_fb_idx = pc->alt_fb_idx;
+
+        xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
+        xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
+        xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

         vp8_build_uvmvs(xd, pc->full_pixel);

@@ -222,7 +211,7 @@ THREAD_FUNCTION vp8_thread_decoding_proc(void *p_data)

     // adjust to the next row of mbs
     vp8_extend_mb_row(
-        &pc->new_frame,
+        &pc->yv12_fb[dst_fb_idx],
         xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8
     );

@@ -279,7 +268,7 @@ THREAD_FUNCTION vp8_thread_loop_filter(void *p_data)
     MACROBLOCKD *mbd = &pbi->lpfmb;
     int default_filt_lvl = pbi->common.filter_level;

-    YV12_BUFFER_CONFIG *post = &cm->new_frame;
+    YV12_BUFFER_CONFIG *post = &cm->yv12_fb[cm->new_fb_idx];
     loop_filter_info *lfi = cm->lf_info;
     int frame_type = cm->frame_type;

diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index a05b332..cb9a8dd 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -277,8 +277,10 @@ void encode_mb_row(VP8_COMP *cpi,
     int i;
     int recon_yoffset, recon_uvoffset;
     int mb_col;
-    int recon_y_stride = cm->last_frame.y_stride;
-    int recon_uv_stride = cm->last_frame.uv_stride;
+    int ref_fb_idx = cm->lst_fb_idx;
+    int dst_fb_idx = cm->new_fb_idx;
+    int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
+    int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
     int seg_map_index = (mb_row * cpi->common.mb_cols);

@@ -311,9 +313,9 @@ void encode_mb_row(VP8_COMP *cpi,
         x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
         x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);

-        xd->dst.y_buffer = cm->new_frame.y_buffer + recon_yoffset;
-        xd->dst.u_buffer = cm->new_frame.u_buffer + recon_uvoffset;
-        xd->dst.v_buffer = cm->new_frame.v_buffer + recon_uvoffset;
+        xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+        xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+        xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
         xd->left_available = (mb_col != 0);

         // Is segmentation enabled
@@ -419,7 +421,7 @@ void encode_mb_row(VP8_COMP *cpi,

     //extend the recon for intra prediction
     vp8_extend_mb_row(
-        &cm->new_frame,
+        &cm->yv12_fb[dst_fb_idx],
         xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

@@ -531,12 +533,12 @@ void vp8_encode_frame(VP8_COMP *cpi)

     // Copy data over into macro block data sturctures.
     x->src = * cpi->Source;

-    xd->pre = cm->last_frame;
-    xd->dst = cm->new_frame;
+    xd->pre = cm->yv12_fb[cm->lst_fb_idx];
+    xd->dst = cm->yv12_fb[cm->new_fb_idx];

     // set up frame new frame for intra coded blocks
-    vp8_setup_intra_recon(&cm->new_frame);
+    vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);

     vp8_build_block_offsets(x);

@@ -1157,34 +1159,23 @@ int vp8cx_encode_inter_macroblock
     MV best_ref_mv;
     MV nearest, nearby;
     int mdcounts[4];
+    int ref_fb_idx;

     vp8_find_near_mvs(xd, xd->mode_info_context, &nearest, &nearby, &best_ref_mv, mdcounts, xd->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);

     vp8_build_uvmvs(xd, cpi->common.full_pixel);

-    // store motion vectors in our motion vector list
     if (xd->mbmi.ref_frame == LAST_FRAME)
-    {
-        // Set up pointers for this macro block into the previous frame recon buffer
-        xd->pre.y_buffer = cpi->common.last_frame.y_buffer + recon_yoffset;
-        xd->pre.u_buffer = cpi->common.last_frame.u_buffer + recon_uvoffset;
-        xd->pre.v_buffer = cpi->common.last_frame.v_buffer + recon_uvoffset;
-    }
+        ref_fb_idx = cpi->common.lst_fb_idx;
     else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
-    {
-        // Set up pointers for this macro block into the golden frame recon buffer
-        xd->pre.y_buffer = cpi->common.golden_frame.y_buffer + recon_yoffset;
-        xd->pre.u_buffer = cpi->common.golden_frame.u_buffer + recon_uvoffset;
-        xd->pre.v_buffer = cpi->common.golden_frame.v_buffer + recon_uvoffset;
-    }
+        ref_fb_idx = cpi->common.gld_fb_idx;
     else
-    {
-        // Set up pointers for this macro block into the alternate reference frame recon buffer
-        xd->pre.y_buffer = cpi->common.alt_ref_frame.y_buffer + recon_yoffset;
-        xd->pre.u_buffer = cpi->common.alt_ref_frame.u_buffer + recon_uvoffset;
-        xd->pre.v_buffer = cpi->common.alt_ref_frame.v_buffer + recon_uvoffset;
-    }
+        ref_fb_idx = cpi->common.alt_fb_idx;
+
+    xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
+    xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
+    xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

     if (xd->mbmi.mode == SPLITMV)
     {
diff --git a/vp8/encoder/ethreading.c b/vp8/encoder/ethreading.c
index 54646f4..b8bd414 100644
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -56,8 +56,10 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
     int i;
     int recon_yoffset, recon_uvoffset;
     int mb_col;
-    int recon_y_stride = cm->last_frame.y_stride;
-    int recon_uv_stride = cm->last_frame.uv_stride;
+    int ref_fb_idx = cm->lst_fb_idx;
+    int dst_fb_idx = cm->new_fb_idx;
+    int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
+    int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
     volatile int *last_row_current_mb_col;

     if (ithread > 0)
@@ -107,9 +109,9 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
         x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
         x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);

-        xd->dst.y_buffer = cm->new_frame.y_buffer + recon_yoffset;
-        xd->dst.u_buffer = cm->new_frame.u_buffer + recon_uvoffset;
-        xd->dst.v_buffer = cm->new_frame.v_buffer + recon_uvoffset;
+        xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+        xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+        xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
         xd->left_available = (mb_col != 0);

         // Is segmentation enabled
@@ -195,7 +197,7 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)

         //extend the recon for intra prediction
         vp8_extend_mb_row(
-            &cm->new_frame,
+            &cm->yv12_fb[dst_fb_idx],
             xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

@@ -386,8 +388,8 @@ void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
         mbd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;

         mb->src = * cpi->Source;
-        mbd->pre = cm->last_frame;
-        mbd->dst = cm->new_frame;
+        mbd->pre = cm->yv12_fb[cm->lst_fb_idx];
+        mbd->dst = cm->yv12_fb[cm->new_fb_idx];

         mb->src.y_buffer += 16 * x->src.y_stride * (i + 1);
         mb->src.u_buffer += 8 * x->src.uv_stride * (i + 1);
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 24886cb..b838378 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -536,8 +536,11 @@ void vp8_first_pass(VP8_COMP *cpi)
     int col_blocks = 4 * cm->mb_cols;
     int recon_yoffset, recon_uvoffset;
-    int recon_y_stride = cm->last_frame.y_stride;
-    int recon_uv_stride = cm->last_frame.uv_stride;
+    YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx];
+    YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+    YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx];
+    int recon_y_stride = lst_yv12->y_stride;
+    int recon_uv_stride = lst_yv12->uv_stride;
     int intra_error = 0;
     int coded_error = 0;

@@ -559,8 +562,8 @@ void vp8_first_pass(VP8_COMP *cpi)
     vp8_clear_system_state();  //__asm emms;

     x->src = * cpi->Source;
-    xd->pre = cm->last_frame;
-    xd->dst = cm->new_frame;
+    xd->pre = *lst_yv12;
+    xd->dst = *new_yv12;

     vp8_build_block_offsets(x);

@@ -569,7 +572,7 @@ void vp8_first_pass(VP8_COMP *cpi)
     vp8_setup_block_ptrs(x);

     // set up frame new frame for intra coded blocks
-    vp8_setup_intra_recon(&cm->new_frame);
+    vp8_setup_intra_recon(new_yv12);
     vp8cx_frame_init_quantizer(cpi);

     // Initialise the MV cost table to the defaults
@@ -599,9 +602,9 @@ void vp8_first_pass(VP8_COMP *cpi)
             int gf_motion_error = INT_MAX;
             int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);

-            xd->dst.y_buffer = cm->new_frame.y_buffer + recon_yoffset;
-            xd->dst.u_buffer = cm->new_frame.u_buffer + recon_uvoffset;
-            xd->dst.v_buffer = cm->new_frame.v_buffer + recon_uvoffset;
+            xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
+            xd->dst.u_buffer = new_yv12->u_buffer + recon_uvoffset;
+            xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
             xd->left_available = (mb_col != 0);

             // do intra 16x16 prediction
@@ -635,14 +638,14 @@ void vp8_first_pass(VP8_COMP *cpi)
                 int motion_error = INT_MAX;

                 // Simple 0,0 motion with no mv overhead
-                vp8_zz_motion_search( cpi, x, &cm->last_frame, &motion_error, recon_yoffset );
+                vp8_zz_motion_search( cpi, x, lst_yv12, &motion_error, recon_yoffset );
                 d->bmi.mv.as_mv.row = 0;
                 d->bmi.mv.as_mv.col = 0;

                 // Test last reference frame using the previous best mv as the
                 // starting point (best reference) for the search
                 vp8_first_pass_motion_search(cpi, x, &best_ref_mv,
-                                             &d->bmi.mv.as_mv, &cm->last_frame,
+                                             &d->bmi.mv.as_mv, lst_yv12,
                                              &motion_error, recon_yoffset);

                 // If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
@@ -650,7 +653,7 @@ void vp8_first_pass(VP8_COMP *cpi)
                 {
                     tmp_err = INT_MAX;
                     vp8_first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv,
-                                                 &cm->last_frame, &tmp_err, recon_yoffset);
+                                                 lst_yv12, &tmp_err, recon_yoffset);

                     if ( tmp_err < motion_error )
                     {
@@ -664,7 +667,7 @@ void vp8_first_pass(VP8_COMP *cpi)
                 // Experimental search in a second reference frame ((0,0) based only)
                 if (cm->current_video_frame > 1)
                 {
-                    vp8_first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, &cm->golden_frame, &gf_motion_error, recon_yoffset);
+                    vp8_first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, gld_yv12, &gf_motion_error, recon_yoffset);

                     if ((gf_motion_error < motion_error) && (gf_motion_error < this_error))
                     {
@@ -682,9 +685,9 @@ void vp8_first_pass(VP8_COMP *cpi)

                     // Reset to last frame as reference buffer
-                    xd->pre.y_buffer = cm->last_frame.y_buffer + recon_yoffset;
-                    xd->pre.u_buffer = cm->last_frame.u_buffer + recon_uvoffset;
-                    xd->pre.v_buffer = cm->last_frame.v_buffer + recon_uvoffset;
+                    xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
+                    xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
+                    xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
                 }

             if (motion_error <= this_error)
@@ -776,7 +779,7 @@ void vp8_first_pass(VP8_COMP *cpi)
         x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;

         //extend the recon for intra prediction
-        vp8_extend_mb_row(&cm->new_frame, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+        vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

         vp8_clear_system_state();  //__asm emms;
     }
@@ -842,17 +845,17 @@ void vp8_first_pass(VP8_COMP *cpi)
         (cpi->this_frame_stats.pcnt_inter > 0.20) &&
         ((cpi->this_frame_stats.intra_error / cpi->this_frame_stats.coded_error) > 2.0))
     {
-        vp8_yv12_copy_frame_ptr(&cm->last_frame, &cm->golden_frame);
+        vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
     }

     // swap frame pointers so last frame refers to the frame we just compressed
-    vp8_swap_yv12_buffer(&cm->last_frame, &cm->new_frame);
-    vp8_yv12_extend_frame_borders(&cm->last_frame);
+    vp8_swap_yv12_buffer(lst_yv12, new_yv12);
+    vp8_yv12_extend_frame_borders(lst_yv12);

     // Special case for the first frame. Copy into the GF buffer as a second reference.
     if (cm->current_video_frame == 0)
     {
-        vp8_yv12_copy_frame_ptr(&cm->last_frame, &cm->golden_frame);
+        vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
     }

@@ -868,7 +871,7 @@ void vp8_first_pass(VP8_COMP *cpi)
         else
             recon_file = fopen(filename, "ab");

-        fwrite(cm->last_frame.buffer_alloc, cm->last_frame.frame_size, 1, recon_file);
+        fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file);
         fclose(recon_file);
     }

@@ -1196,7 +1199,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
     FIRSTPASS_STATS next_frame;
     FIRSTPASS_STATS *start_pos;
     int i;
-    int image_size = cpi->common.last_frame.y_width * cpi->common.last_frame.y_height;
+    int y_width = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_width;
+    int y_height = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_height;
+    int image_size = y_width * y_height;
     double boost_score = 0.0;
     double old_boost_score = 0.0;
     double gf_group_err = 0.0;
@@ -2322,7 +2327,7 @@ void vp8_find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
     int allocation_chunks;
     int Counter = cpi->frames_to_key;
     int alt_kf_bits;
-
+    YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];

     // Min boost based on kf interval
 #if 0
@@ -2342,10 +2347,10 @@ void vp8_find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame)
     }

     // bigger frame sizes need larger kf boosts, smaller frames smaller boosts...
-    if ((cpi->common.last_frame.y_width * cpi->common.last_frame.y_height) > (320 * 240))
-        kf_boost += 2 * (cpi->common.last_frame.y_width * cpi->common.last_frame.y_height) / (320 * 240);
-    else if ((cpi->common.last_frame.y_width * cpi->common.last_frame.y_height) < (320 * 240))
-        kf_boost -= 4 * (320 * 240) / (cpi->common.last_frame.y_width * cpi->common.last_frame.y_height);
+    if ((lst_yv12->y_width * lst_yv12->y_height) > (320 * 240))
+        kf_boost += 2 * (lst_yv12->y_width * lst_yv12->y_height) / (320 * 240);
+    else if ((lst_yv12->y_width * lst_yv12->y_height) < (320 * 240))
+        kf_boost -= 4 * (320 * 240) / (lst_yv12->y_width * lst_yv12->y_height);

     kf_boost = (int)((double)kf_boost * 100.0) >> 4; // Scale 16 to 100

diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 581cb68..29d9f7e 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -1123,11 +1123,11 @@ void vp8_set_speed_features(VP8_COMP *cpi)

     if (cpi->sf.search_method == NSTEP)
     {
-        vp8_init3smotion_compensation(&cpi->mb, cm->last_frame.y_stride);
+        vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
     }
     else if (cpi->sf.search_method == DIAMOND)
     {
-        vp8_init_dsmotion_compensation(&cpi->mb, cm->last_frame.y_stride);
+        vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
     }

     if (cpi->sf.improved_dct)
@@ -1564,9 +1564,9 @@ void vp8_init_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
         cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
     }

-    if (((cm->Width + 15) & 0xfffffff0) != cm->last_frame.y_width ||
-        ((cm->Height + 15) & 0xfffffff0) != cm->last_frame.y_height ||
-        cm->last_frame.y_width == 0)
+    if (((cm->Width + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
+        ((cm->Height + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
+        cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
     {
         alloc_raw_frame_buffers(cpi);
         vp8_alloc_compressor_data(cpi);
@@ -1843,9 +1843,9 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
         cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
     }

-    if (((cm->Width + 15) & 0xfffffff0) != cm->last_frame.y_width ||
-        ((cm->Height + 15) & 0xfffffff0) != cm->last_frame.y_height ||
-        cm->last_frame.y_width == 0)
+    if (((cm->Width + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
+        ((cm->Height + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
+        cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
     {
         alloc_raw_frame_buffers(cpi);
         vp8_alloc_compressor_data(cpi);
@@ -2241,7 +2241,8 @@ void vp8_remove_compressor(VP8_PTR *ptr)

             if (cpi->b_calculate_psnr)
             {
-                double samples = 3.0 / 2 * cpi->count * cpi->common.last_frame.y_width * cpi->common.last_frame.y_height;
+                YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
+                double samples = 3.0 / 2 * cpi->count * lst_yv12->y_width * lst_yv12->y_height;
                 double total_psnr = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error);
                 double total_psnr2 = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error2);
                 double total_ssim = 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
@@ -2580,19 +2581,19 @@ int vp8_get_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONF
 {
     VP8_COMP *cpi = (VP8_COMP *)(ptr);
     VP8_COMMON *cm = &cpi->common;
+    int ref_fb_idx;

     if (ref_frame_flag == VP8_LAST_FLAG)
-        vp8_yv12_copy_frame_ptr(&cm->last_frame, sd);
-
+        ref_fb_idx = cm->lst_fb_idx;
     else if (ref_frame_flag == VP8_GOLD_FLAG)
-        vp8_yv12_copy_frame_ptr(&cm->golden_frame, sd);
-
+        ref_fb_idx = cm->gld_fb_idx;
     else if (ref_frame_flag == VP8_ALT_FLAG)
-        vp8_yv12_copy_frame_ptr(&cm->alt_ref_frame, sd);
-
+        ref_fb_idx = cm->alt_fb_idx;
     else
         return -1;

+    vp8_yv12_copy_frame_ptr(&cm->yv12_fb[ref_fb_idx], sd);
+
     return 0;
 }
 int vp8_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd)
@@ -2600,18 +2601,19 @@ int vp8_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONF
     VP8_COMP *cpi = (VP8_COMP *)(ptr);
     VP8_COMMON *cm = &cpi->common;

-    if (ref_frame_flag == VP8_LAST_FLAG)
-        vp8_yv12_copy_frame_ptr(sd, &cm->last_frame);
+    int ref_fb_idx;
+    if (ref_frame_flag == VP8_LAST_FLAG)
+        ref_fb_idx = cm->lst_fb_idx;
     else if (ref_frame_flag == VP8_GOLD_FLAG)
-        vp8_yv12_copy_frame_ptr(sd, &cm->golden_frame);
-
+        ref_fb_idx = cm->gld_fb_idx;
     else if (ref_frame_flag == VP8_ALT_FLAG)
-        vp8_yv12_copy_frame_ptr(sd, &cm->alt_ref_frame);
-
+        ref_fb_idx = cm->alt_fb_idx;
     else
         return -1;

+    vp8_yv12_copy_frame_ptr(sd, &cm->yv12_fb[ref_fb_idx]);
+
     return 0;
 }
 int vp8_update_entropy(VP8_PTR comp, int update)
@@ -2686,8 +2688,8 @@ static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
 #endif
     }
     // we may need to copy to a buffer so we can extend the image...
-    else if (cm->Width != cm->last_frame.y_width ||
-             cm->Height != cm->last_frame.y_height)
+    else if (cm->Width != cm->yv12_fb[cm->lst_fb_idx].y_width ||
+             cm->Height != cm->yv12_fb[cm->lst_fb_idx].y_height)
     {
         //vp8_yv12_copy_frame_ptr(sd, &cpi->scaled_source);
 #if HAVE_ARMV7
@@ -2840,7 +2842,7 @@ static void update_alt_ref_frame_and_stats(VP8_COMP *cpi)
     VP8_COMMON *cm = &cpi->common;

     // Update the golden frame buffer
-    vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->alt_ref_frame);
+    vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->alt_fb_idx]);

     // Select an interval before next GF or altref
     if (!cpi->auto_gold)
@@ -2882,7 +2884,7 @@ static void update_golden_frame_and_stats(VP8_COMP *cpi)
     if (cm->refresh_golden_frame)
     {
         // Update the golden frame buffer
-        vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->golden_frame);
+        vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->gld_fb_idx]);

         // Select an interval before next GF
         if (!cpi->auto_gold)
@@ -4317,11 +4319,11 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size, unsign

     if (cm->refresh_last_frame)
     {
-        vp8_swap_yv12_buffer(&cm->last_frame, &cm->new_frame);
-        cm->frame_to_show = &cm->last_frame;
+        vp8_swap_yv12_buffer(&cm->yv12_fb[cm->lst_fb_idx], &cm->yv12_fb[cm->new_fb_idx]);
+        cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx];
     }
     else
-        cm->frame_to_show = &cm->new_frame;
+        cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];

@@ -4371,43 +4373,48 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size, unsign
         }
     }
-
-    // At this point the new frame has been encoded coded.
-    // If any buffer copy / swaping is signalled it should be done here.
-    if (cm->frame_type == KEY_FRAME)
-    {
-        vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->golden_frame);
-        vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->alt_ref_frame);
-    }
-    else // For non key frames
     {
-        // Code to copy between reference buffers
-        if (cm->copy_buffer_to_arf)
+        YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx];
+        YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
+        YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx];
+        YV12_BUFFER_CONFIG *alt_yv12 = &cm->yv12_fb[cm->alt_fb_idx];
+        // At this point the new frame has been encoded coded.
+        // If any buffer copy / swaping is signalled it should be done here.
+        if (cm->frame_type == KEY_FRAME)
+        {
+            vp8_yv12_copy_frame_ptr(cm->frame_to_show, gld_yv12);
+            vp8_yv12_copy_frame_ptr(cm->frame_to_show, alt_yv12);
+        }
+        else // For non key frames
         {
-            if (cm->copy_buffer_to_arf == 1)
+            // Code to copy between reference buffers
+            if (cm->copy_buffer_to_arf)
             {
-                if (cm->refresh_last_frame)
-                    // We copy new_frame here because last and new buffers will already have been swapped if cm->refresh_last_frame is set.
-                    vp8_yv12_copy_frame_ptr(&cm->new_frame, &cm->alt_ref_frame);
-                else
-                    vp8_yv12_copy_frame_ptr(&cm->last_frame, &cm->alt_ref_frame);
+                if (cm->copy_buffer_to_arf == 1)
+                {
+                    if (cm->refresh_last_frame)
+                        // We copy new_frame here because last and new buffers will already have been swapped if cm->refresh_last_frame is set.
+                        vp8_yv12_copy_frame_ptr(new_yv12, alt_yv12);
+                    else
+                        vp8_yv12_copy_frame_ptr(lst_yv12, alt_yv12);
+                }
+                else if (cm->copy_buffer_to_arf == 2)
+                    vp8_yv12_copy_frame_ptr(gld_yv12, alt_yv12);
             }
-            else if (cm->copy_buffer_to_arf == 2)
-                vp8_yv12_copy_frame_ptr(&cm->golden_frame, &cm->alt_ref_frame);
-        }

-        if (cm->copy_buffer_to_gf)
-        {
-            if (cm->copy_buffer_to_gf == 1)
+            if (cm->copy_buffer_to_gf)
             {
-                if (cm->refresh_last_frame)
-                    // We copy new_frame here because last and new buffers will already have been swapped if cm->refresh_last_frame is set.
-                    vp8_yv12_copy_frame_ptr(&cm->new_frame, &cm->golden_frame);
-                else
-                    vp8_yv12_copy_frame_ptr(&cm->last_frame, &cm->golden_frame);
+                if (cm->copy_buffer_to_gf == 1)
+                {
+                    if (cm->refresh_last_frame)
+                        // We copy new_frame here because last and new buffers will already have been swapped if cm->refresh_last_frame is set.
+                        vp8_yv12_copy_frame_ptr(new_yv12, gld_yv12);
+                    else
+                        vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
+                }
+                else if (cm->copy_buffer_to_gf == 2)
+                    vp8_yv12_copy_frame_ptr(alt_yv12, gld_yv12);
             }
-            else if (cm->copy_buffer_to_gf == 2)
-                vp8_yv12_copy_frame_ptr(&cm->alt_ref_frame, &cm->golden_frame);
         }
     }

@@ -4623,10 +4630,10 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size, unsign
     {
         // Is this an alternate reference update
         if (cpi->common.refresh_alt_ref_frame)
-            vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->alt_ref_frame);
+            vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->alt_fb_idx]);

         if (cpi->common.refresh_golden_frame)
-            vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->golden_frame);
+            vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->gld_fb_idx]);
     }
     else
     {
@@ -4678,7 +4685,8 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size, unsign
         FILE *recon_file;

         sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
         recon_file = fopen(filename, "wb");
-        fwrite(cm->last_frame.buffer_alloc, cm->last_frame.frame_size, 1, recon_file);
+        fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
+               cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
         fclose(recon_file);
     }

diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index bb6348d..1947e81 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -460,36 +460,42 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec

     // set up all the refframe dependent pointers.
     if (cpi->ref_frame_flags & VP8_LAST_FLAG)
     {
+        YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
+
         vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[LAST_FRAME], &near_mv[LAST_FRAME], &best_ref_mv[LAST_FRAME], MDCounts[LAST_FRAME], LAST_FRAME, cpi->common.ref_frame_sign_bias);

-        y_buffer[LAST_FRAME] = cpi->common.last_frame.y_buffer + recon_yoffset;
-        u_buffer[LAST_FRAME] = cpi->common.last_frame.u_buffer + recon_uvoffset;
-        v_buffer[LAST_FRAME] = cpi->common.last_frame.v_buffer + recon_uvoffset;
+        y_buffer[LAST_FRAME] = lst_yv12->y_buffer + recon_yoffset;
+        u_buffer[LAST_FRAME] = lst_yv12->u_buffer + recon_uvoffset;
+        v_buffer[LAST_FRAME] = lst_yv12->v_buffer + recon_uvoffset;
     }
     else
         skip_mode[LAST_FRAME] = 1;

     if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
     {
+        YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];
+
         vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[GOLDEN_FRAME], &near_mv[GOLDEN_FRAME], &best_ref_mv[GOLDEN_FRAME], MDCounts[GOLDEN_FRAME], GOLDEN_FRAME, cpi->common.ref_frame_sign_bias);

-        y_buffer[GOLDEN_FRAME] = cpi->common.golden_frame.y_buffer + recon_yoffset;
-        u_buffer[GOLDEN_FRAME] = cpi->common.golden_frame.u_buffer + recon_uvoffset;
-        v_buffer[GOLDEN_FRAME] = cpi->common.golden_frame.v_buffer + recon_uvoffset;
+        y_buffer[GOLDEN_FRAME] = gld_yv12->y_buffer + recon_yoffset;
+        u_buffer[GOLDEN_FRAME] = gld_yv12->u_buffer + recon_uvoffset;
+        v_buffer[GOLDEN_FRAME] = gld_yv12->v_buffer + recon_uvoffset;
     }
     else
         skip_mode[GOLDEN_FRAME] = 1;

     if (cpi->ref_frame_flags & VP8_ALT_FLAG && cpi->source_alt_ref_active)
     {
+        YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];
+
         vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[ALTREF_FRAME], &near_mv[ALTREF_FRAME], &best_ref_mv[ALTREF_FRAME], MDCounts[ALTREF_FRAME], ALTREF_FRAME, cpi->common.ref_frame_sign_bias);

-        y_buffer[ALTREF_FRAME] = cpi->common.alt_ref_frame.y_buffer + recon_yoffset;
-        u_buffer[ALTREF_FRAME] = cpi->common.alt_ref_frame.u_buffer + recon_uvoffset;
-        v_buffer[ALTREF_FRAME] = cpi->common.alt_ref_frame.v_buffer + recon_uvoffset;
+        y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset;
+        u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset;
+        v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset;
     }
     else
         skip_mode[ALTREF_FRAME] = 1;

diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 65dbd8d..06a20c0 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -1578,18 +1578,21 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int

         if (x->e_mbd.mbmi.ref_frame == LAST_FRAME)
         {
+            YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
+
             if (!(cpi->ref_frame_flags & VP8_LAST_FLAG))
                 continue;

             lf_or_gf = 0; // Local last frame vs Golden frame flag

             // Set up pointers for this macro block into the previous frame recon buffer
-            x->e_mbd.pre.y_buffer = cpi->common.last_frame.y_buffer + recon_yoffset;
-            x->e_mbd.pre.u_buffer = cpi->common.last_frame.u_buffer + recon_uvoffset;
-            x->e_mbd.pre.v_buffer = cpi->common.last_frame.v_buffer + recon_uvoffset;
+            x->e_mbd.pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
+            x->e_mbd.pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
+            x->e_mbd.pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
         }
         else if (x->e_mbd.mbmi.ref_frame == GOLDEN_FRAME)
         {
+            YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];

             // not supposed to reference gold frame
             if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG))
@@ -1598,12 +1601,14 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int

             lf_or_gf = 1; // Local last frame vs Golden frame flag

             // Set up pointers for this macro block into the previous frame recon buffer
-            x->e_mbd.pre.y_buffer = cpi->common.golden_frame.y_buffer + recon_yoffset;
-            x->e_mbd.pre.u_buffer = cpi->common.golden_frame.u_buffer + recon_uvoffset;
-            x->e_mbd.pre.v_buffer = cpi->common.golden_frame.v_buffer + recon_uvoffset;
+            x->e_mbd.pre.y_buffer = gld_yv12->y_buffer + recon_yoffset;
+            x->e_mbd.pre.u_buffer = gld_yv12->u_buffer + recon_uvoffset;
+            x->e_mbd.pre.v_buffer = gld_yv12->v_buffer + recon_uvoffset;
         }
         else if (x->e_mbd.mbmi.ref_frame == ALTREF_FRAME)
         {
+            YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];
+
             // not supposed to reference alt ref frame
             if (!(cpi->ref_frame_flags & VP8_ALT_FLAG))
                 continue;
@@ -1614,9 +1619,9 @@ int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int

             lf_or_gf = 1; // Local last frame vs Golden frame flag

             // Set up pointers for this macro block into the previous frame recon buffer
-            x->e_mbd.pre.y_buffer = cpi->common.alt_ref_frame.y_buffer + recon_yoffset;
-            x->e_mbd.pre.u_buffer = cpi->common.alt_ref_frame.u_buffer + recon_uvoffset;
-            x->e_mbd.pre.v_buffer = cpi->common.alt_ref_frame.v_buffer + recon_uvoffset;
+            x->e_mbd.pre.y_buffer = alt_yv12->y_buffer + recon_yoffset;
+            x->e_mbd.pre.u_buffer = alt_yv12->u_buffer + recon_uvoffset;
+            x->e_mbd.pre.v_buffer = alt_yv12->v_buffer + recon_uvoffset;
         }

         vp8_find_near_mvs(&x->e_mbd,