From: Paul Wilkins
Date: Fri, 26 Sep 2014 16:20:01 +0000 (+0100)
Subject: Improve two pass VBR accuracy.
X-Git-Tag: v1.4.0~634^2~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0e1068a4bdc55c28c7b4f5918056a1f807167f2c;p=platform%2Fupstream%2Flibvpx.git

Improve two pass VBR accuracy.

Adjustments to the GF interval choice and minimum boost.
Adjustment to the calculation of the 2 pass worst q.

Compared to the 09/29 head there is a metrics hit on derf of
(-0.123%, -0.191%).

Compared to the September 29 head and the September 18 baseline, the
accuracy of the VBR rate control measured on the derf set is as
follows:

Mean error % / Mean abs(error %)
Sept 18 baseline  (-7.0% / 14.76%)
Sept 29 head      (-15.7% / 19.8%)
This check-in     (-1.5% / 14.4%)

The mean undershoot is reduced slightly, but the worst-case overshoot
on e.g. harbour/highway is increased. This will be addressed in a
later patch.

Change-Id: Iffd9b0ab7432a131c98fbaaa82d1e5b40be72b58
---
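For reference, a minimal sketch of how the mean error % and mean abs(error %)
figures above can be computed from per-clip target and actual bitrates. The
helper name and the sign convention (negative mean error indicating
undershoot) are assumptions for illustration, not taken from this patch:

#include <math.h>

// Hypothetical helper: average signed and absolute VBR rate error over a
// set of clips, as a percentage of each clip's target bitrate. A negative
// mean error is read here as undershoot.
static void mean_rate_errors(const double *target_kbps,
                             const double *actual_kbps, int num_clips,
                             double *mean_err_pct, double *mean_abs_err_pct) {
  double sum_err = 0.0, sum_abs_err = 0.0;
  int i;
  for (i = 0; i < num_clips; ++i) {
    const double err_pct =
        100.0 * (actual_kbps[i] - target_kbps[i]) / target_kbps[i];
    sum_err += err_pct;
    sum_abs_err += fabs(err_pct);
  }
  *mean_err_pct = sum_err / num_clips;
  *mean_abs_err_pct = sum_abs_err / num_clips;
}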
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 6b1a259..c5bcd54 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -1758,7 +1758,7 @@ static void rd_use_partition(VP9_COMP *cpi,
 
   // We must have chosen a partitioning and encoding or we'll fail later on.
   // No other opportunities for success.
-  if ( bsize == BLOCK_64X64)
+  if (bsize == BLOCK_64X64)
     assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX);
 
   if (do_recon) {
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 26412b0..adbb57b 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -39,9 +39,9 @@
 #define ARF_STATS_OUTPUT 0
 
 #define BOOST_FACTOR 12.5
-#define ERR_DIVISOR 100.0
-#define FACTOR_PT_LOW 0.5
-#define FACTOR_PT_HIGH 0.9
+#define ERR_DIVISOR 125.0
+#define FACTOR_PT_LOW 0.70
+#define FACTOR_PT_HIGH 0.90
 #define FIRST_PASS_Q 10.0
 #define GF_MAX_BOOST 96.0
 #define INTRA_MODE_PENALTY 1024
@@ -1057,7 +1057,7 @@ static double calc_correction_factor(double err_per_mb,
 
   // Adjustment based on actual quantizer to power term.
   const double power_term =
-      MIN(vp9_convert_qindex_to_q(q, bit_depth) * 0.0125 + pt_low, pt_high);
+      MIN(vp9_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
 
   // Calculate correction factor.
   if (power_term < 1.0)
@@ -1066,6 +1066,11 @@ static double calc_correction_factor(double err_per_mb,
   return fclamp(pow(error_term, power_term), 0.05, 5.0);
 }
 
+// Larger image formats are expected to be a little harder to code relatively
+// given the same prediction error score. This in part at least relates to the
+// increased size and hence coding cost of motion vectors.
+#define EDIV_SIZE_FACTOR 800
+
 static int get_twopass_worst_quality(const VP9_COMP *cpi,
                                      const FIRSTPASS_STATS *stats,
                                      int section_target_bandwidth) {
@@ -1079,8 +1084,10 @@ static int get_twopass_worst_quality(const VP9_COMP *cpi,
     const double section_err = stats->coded_error / stats->count;
     const double err_per_mb = section_err / num_mbs;
     const double speed_term = 1.0 + 0.04 * oxcf->speed;
+    const double ediv_size_correction = num_mbs / EDIV_SIZE_FACTOR;
     const int target_norm_bits_per_mb =
         ((uint64_t)section_target_bandwidth << BPER_MB_NORMBITS) / num_mbs;
+
     int q;
     int is_svc_upper_layer = 0;
     if (is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id > 0)
@@ -1090,7 +1097,7 @@ static int get_twopass_worst_quality(const VP9_COMP *cpi,
     // content at the given rate.
     for (q = rc->best_quality; q < rc->worst_quality; ++q) {
       const double factor =
-          calc_correction_factor(err_per_mb, ERR_DIVISOR,
+          calc_correction_factor(err_per_mb, ERR_DIVISOR - ediv_size_correction,
                                  is_svc_upper_layer ?
                                  SVC_FACTOR_PT_LOW : FACTOR_PT_LOW,
                                  FACTOR_PT_HIGH, q, cpi->common.bit_depth);
@@ -1735,7 +1742,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
     // bits to spare and are better with a smaller interval and smaller boost.
     // At high Q when there are few bits to spare we are better with a longer
     // interval to spread the cost of the GF.
-    active_max_gf_interval = 12 + MIN(4, (int_max_q / 32));
+    active_max_gf_interval = 12 + MIN(4, (int_max_q / 24));
     if (active_max_gf_interval > rc->max_gf_interval)
       active_max_gf_interval = rc->max_gf_interval;
   }
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 9b6c773..9f255b0 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -177,6 +177,9 @@ int vp9_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
   const double q = vp9_convert_qindex_to_q(qindex, bit_depth);
   int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
 
+  assert(correction_factor <= MAX_BPB_FACTOR &&
+         correction_factor >= MIN_BPB_FACTOR);
+
   // q based adjustment to baseline enumerator
   enumerator += (int)(enumerator * q) >> 12;
   return (int)(enumerator * correction_factor / q);
@@ -187,7 +190,8 @@ static int estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
                               vpx_bit_depth_t bit_depth) {
   const int bpm = (int)(vp9_rc_bits_per_mb(frame_type, q, correction_factor,
                                            bit_depth));
-  return ((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS;
+  return MAX(FRAME_OVERHEAD_BITS,
+             (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS);
 }
 
 int vp9_rc_clamp_pframe_target_size(const VP9_COMP *const cpi, int target) {
@@ -410,7 +414,7 @@ void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) {
                                          rate_correction_factor, cm->bit_depth);
 
   // Work out a size correction factor.
-  if (projected_size_based_on_q > 0)
+  if (projected_size_based_on_q > FRAME_OVERHEAD_BITS)
     correction_factor = (100 * cpi->rc.projected_frame_size) /
                             projected_size_based_on_q;
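
Note on the new EDIV_SIZE_FACTOR term: the error divisor passed to
calc_correction_factor() now shrinks slightly as the frame size in
macroblocks grows, which, all else being equal, leads the worst-q search to
settle on a somewhat higher q for larger formats. A standalone sketch of just
that arithmetic follows; the resolutions and 16x16 macroblock grid sizes are
illustrative values, not taken from this patch:

#define ERR_DIVISOR 125.0
#define EDIV_SIZE_FACTOR 800

// Mirrors the divisor adjustment in get_twopass_worst_quality():
// num_mbs / EDIV_SIZE_FACTOR is an integer division, so small formats keep
// the full ERR_DIVISOR while large formats get a reduced one.
static double effective_err_divisor(int mb_cols, int mb_rows) {
  const int num_mbs = mb_cols * mb_rows;
  const double ediv_size_correction = num_mbs / EDIV_SIZE_FACTOR;
  return ERR_DIVISOR - ediv_size_correction;
}

// Example values (assuming a 16x16 macroblock grid):
//   CIF   (22 x 18 = 396 MBs):    396 / 800 = 0  -> divisor 125.0
//   1080p (120 x 68 = 8160 MBs): 8160 / 800 = 10 -> divisor 115.0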