$(qexec)$$(LD) -shared $$(LDFLAGS) \
-Wl,--no-undefined -Wl,-soname,$$(SONAME) \
-Wl,--version-script,$$(SO_VERSION_SCRIPT) -o $$@ \
- $$(filter %.o,$$?)
+ $$(filter %.o,$$?) $$(extralibs)
endef
define lipo_lib_template
LIBVPX_SO := libvpx.so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH)
LIBS-$(BUILD_LIBVPX_SO) += $(BUILD_PFX)$(LIBVPX_SO)
$(BUILD_PFX)$(LIBVPX_SO): $(LIBVPX_OBJS) libvpx.ver
-$(BUILD_PFX)$(LIBVPX_SO): LDFLAGS += -lm -pthread
+$(BUILD_PFX)$(LIBVPX_SO): extralibs += -lm -pthread
$(BUILD_PFX)$(LIBVPX_SO): SONAME = libvpx.so.$(VERSION_MAJOR)
$(BUILD_PFX)$(LIBVPX_SO): SO_VERSION_SCRIPT = libvpx.ver
LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \
int dc_diff;
unsigned char segment_id; // Which set of segmentation parameters should be used for this MB
int force_no_skip;
-
+ int need_to_clamp_mvs;
B_MODE_INFO partition_bmi[16];
-
} MB_MODE_INFO;
VP8_COMMON *const pc = &pbi->common;
MACROBLOCKD *xd = &pbi->mb;
+ mbmi->need_to_clamp_mvs = 0;
vp8dx_bool_decoder_fill(bc);
// Distance of Mb to the various image edges.
break;
}
+ if (mv->col < xd->mb_to_left_edge
+ - LEFT_TOP_MARGIN
+ || mv->col > xd->mb_to_right_edge
+ + RIGHT_BOTTOM_MARGIN
+ || mv->row < xd->mb_to_top_edge
+ - LEFT_TOP_MARGIN
+ || mv->row > xd->mb_to_bottom_edge
+ + RIGHT_BOTTOM_MARGIN
+ )
+ mbmi->need_to_clamp_mvs = 1;
+
/* Fill (uniform) modes, mvs of jth subset.
Must do it here because ensuing subsets can
refer back to us via "left" or "above". */
read_mv(bc, mv, (const MV_CONTEXT *) mvc);
mv->row += best_mv.row;
mv->col += best_mv.col;
- /* Encoder should not produce invalid motion vectors, but since
- * arbitrary length MVs can be parsed from the bitstream, we
- * need to clamp them here in case we're reading bad data to
- * avoid a crash.
- */
-#if CONFIG_DEBUG
- assert(mv->col >= (xd->mb_to_left_edge - LEFT_TOP_MARGIN));
- assert(mv->col <= (xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN));
- assert(mv->row >= (xd->mb_to_top_edge - LEFT_TOP_MARGIN));
- assert(mv->row <= (xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN));
-#endif
- if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
- mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
- else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
- mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
-
- if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
- mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
- else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
- mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
+ /* Don't need to check this on NEARMV and NEARESTMV modes
+ * since those modes clamp the MV. The NEWMV mode does not,
+ * so signal to the prediction stage whether special
+ * handling may be required.
+ */
+ if (mv->col < xd->mb_to_left_edge - LEFT_TOP_MARGIN
+ || mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN
+ || mv->row < xd->mb_to_top_edge - LEFT_TOP_MARGIN
+ || mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN
+ )
+ mbmi->need_to_clamp_mvs = 1;
propagate_mv: /* same MV throughout */
{
assert(0);
#endif
}
-
}
else
{
/*
* Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
*
- * Use of this source code is governed by a BSD-style license
+ * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
+ * in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
}
}
+
+/* Clamp *mv so that motion compensation reads stay inside the extended
+ * ("unrestricted motion vector", UMV) border of the reference frame.
+ * Edge distances and MVs are in 1/8-pel units (pixel counts << 3, as
+ * the (19 << 3)/(16 << 3) constants below show).
+ */
+static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
+{
+ /* If the MV points so far into the UMV border that no visible pixels
+ * are used for reconstruction, the subpel part of the MV can be
+ * discarded and the MV limited to 16 pixels with equivalent results.
+ *
+ * This limit kicks in at 19 pixels for the top and left edges, for
+ * the 16 pixels plus 3 taps right of the central pixel when subpel
+ * filtering. The bottom and right edges use 16 pixels plus 2 pixels
+ * left of the central pixel when filtering.
+ */
+ if (mv->col < (xd->mb_to_left_edge - (19 << 3)))
+ mv->col = xd->mb_to_left_edge - (16 << 3);
+ else if (mv->col > xd->mb_to_right_edge + (18 << 3))
+ mv->col = xd->mb_to_right_edge + (16 << 3);
+
+ if (mv->row < (xd->mb_to_top_edge - (19 << 3)))
+ mv->row = xd->mb_to_top_edge - (16 << 3);
+ else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
+ mv->row = xd->mb_to_bottom_edge + (16 << 3);
+}
+
+
+/* Clamp every motion vector the current macroblock uses for prediction:
+ * the 16 per-subblock MVs for SPLITMV, otherwise the macroblock MV and
+ * its copy in block[16].
+ * NOTE(review): for SPLITMV only blocks 0..15 are clamped here while
+ * the caller saves/restores blocks 0..23 — presumably blocks 16..23 are
+ * rederived from the clamped luma MVs during prediction; confirm
+ * against the inter-prediction code.
+ */
+static void clamp_mvs(MACROBLOCKD *xd)
+{
+ if (xd->mbmi.mode == SPLITMV)
+ {
+ int i;
+
+ for (i=0; i<16; i++)
+ clamp_mv_to_umv_border(&xd->block[i].bmi.mv.as_mv, xd);
+ }
+ else
+ {
+ clamp_mv_to_umv_border(&xd->mbmi.mv.as_mv, xd);
+ clamp_mv_to_umv_border(&xd->block[16].bmi.mv.as_mv, xd);
+ }
+
+}
+
static void reconstruct_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
if (xd->frame_type == KEY_FRAME || xd->mbmi.ref_frame == INTRA_FRAME)
+/* Decode the residual for one macroblock and reconstruct it.
+ * Out-of-range (NEWMV) motion vectors are clamped only temporarily for
+ * the prediction step and the originals are restored afterwards, so
+ * subsequent entropy decoding still sees the unclamped values.
+ */
void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
int eobtotal = 0;
+ MV orig_mvs[24];
+ int i, do_clamp = xd->mbmi.need_to_clamp_mvs;
if (xd->mbmi.mb_skip_coeff)
{
eobtotal = vp8_decode_mb_tokens(pbi, xd);
}
- xd->mode_info_context->mbmi.dc_diff = 1;
-
- if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV && eobtotal == 0)
+ /* Perform temporary clamping of the MV to be used for prediction */
+ if (do_clamp)
{
- xd->mode_info_context->mbmi.dc_diff = 0;
- skip_recon_mb(pbi, xd);
- return;
+ if (xd->mbmi.mode == SPLITMV)
+ for (i=0; i<24; i++)
+ orig_mvs[i] = xd->block[i].bmi.mv.as_mv;
+ else
+ {
+ orig_mvs[0] = xd->mbmi.mv.as_mv;
+ orig_mvs[1] = xd->block[16].bmi.mv.as_mv;
+ }
+ clamp_mvs(xd);
}
- if (xd->segmentation_enabled)
- mb_init_dequantizer(pbi, xd);
+ xd->mode_info_context->mbmi.dc_diff = 1;
+
+ /* do/while(0) lets "break" replace the old early return so the MV
+ * restore below always runs */
+ do {
+ if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV && eobtotal == 0)
+ {
+ xd->mode_info_context->mbmi.dc_diff = 0;
+ skip_recon_mb(pbi, xd);
+ break;
+ }
- de_quantand_idct(pbi, xd);
- reconstruct_mb(pbi, xd);
+ if (xd->segmentation_enabled)
+ mb_init_dequantizer(pbi, xd);
+
+ de_quantand_idct(pbi, xd);
+ reconstruct_mb(pbi, xd);
+ } while(0);
+
+
+ /* Restore the original MV so as not to affect the entropy context. */
+ if (do_clamp)
+ {
+ if (xd->mbmi.mode == SPLITMV)
+ for (i=0; i<24; i++)
+ xd->block[i].bmi.mv.as_mv = orig_mvs[i];
+ else
+ {
+ xd->mbmi.mv.as_mv = orig_mvs[0];
+ xd->block[16].bmi.mv.as_mv = orig_mvs[1];
+ }
+ }
}
static int get_delta_q(vp8_reader *bc, int prev, int *q_update)
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
// Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
- vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi, 32); //sizeof(MB_MODE_INFO) );
+ // the partition_bmi array is unused in the decoder, so don't copy it.
+ vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
+ sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
{
/*
* Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
*
- * Use of this source code is governed by a BSD-style license
+ * Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
+ * in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
}
// Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
- vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi, 32); //sizeof(MB_MODE_INFO) );
+ // the partition_bmi array is unused in the decoder, so don't copy it.
+ vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
+ sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
{
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
void (*short_fdct4x4rd)(short *input, short *output, int pitch);
void (*short_fdct8x4rd)(short *input, short *output, int pitch);
- void (*vp8_short_fdct4x4_ptr)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch);
void (*quantize_b)(BLOCK *b, BLOCKD *d);
z->short_fdct4x4rd = x->short_fdct4x4rd;
z->short_fdct8x4rd = x->short_fdct8x4rd;
z->short_fdct8x4rd = x->short_fdct8x4rd;
- z->vp8_short_fdct4x4_ptr = x->vp8_short_fdct4x4_ptr;
z->short_walsh4x4 = x->short_walsh4x4;
z->quantize_b = x->quantize_b;
}
}
+ // Keep a globally available copy of this and the next frame's iiratio.
+ cpi->this_iiratio = this_frame_intra_error /
+ DOUBLE_DIVIDE_CHECK(this_frame_coded_error);
+ {
+ FIRSTPASS_STATS next_frame;
+ if ( lookup_next_frame_stats(cpi, &next_frame) != EOF )
+ {
+ cpi->next_iiratio = next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error);
+ }
+ }
+
// Set nominal per second bandwidth for this frame
cpi->target_bandwidth = cpi->per_frame_bandwidth * cpi->output_frame_rate;
if (cpi->target_bandwidth < 0)
cpi->mb.short_fdct4x4rd = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
}
- cpi->mb.vp8_short_fdct4x4_ptr = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4);
if (cpi->sf.improved_quant)
}
}
}
-
d->eob = eob + 1;
-
}
void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
+
+/* Quantize the 16 luma blocks, plus the 2nd-order block (index 24)
+ * when the mode has one, folding skippability into mb_skip_coeff.
+ * With a 2nd-order block a luma block may keep eob <= 1 and still be
+ * skippable — presumably because its DC coefficient is carried by the
+ * 2nd-order block; NOTE(review): confirm against the token packing.
+ */
void vp8_quantize_mby(MACROBLOCK *x)
{
int i;
+ int has_2nd_order = (x->e_mbd.mbmi.mode != B_PRED
+ && x->e_mbd.mbmi.mode != SPLITMV);
- if (x->e_mbd.mbmi.mode != B_PRED && x->e_mbd.mbmi.mode != SPLITMV)
+ for (i = 0; i < 16; i++)
{
- for (i = 0; i < 16; i++)
- {
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
- x->e_mbd.mbmi.mb_skip_coeff &= (x->e_mbd.block[i].eob < 2);
- }
+ x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->e_mbd.mbmi.mb_skip_coeff &=
+ (x->e_mbd.block[i].eob <= has_2nd_order);
+ }
+ if(has_2nd_order)
+ {
x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
x->e_mbd.mbmi.mb_skip_coeff &= (!x->e_mbd.block[24].eob);
-
- }
- else
- {
- for (i = 0; i < 16; i++)
- {
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
- x->e_mbd.mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
- }
}
}
+/* Quantize every block of the macroblock: 16 luma, 8 chroma, and the
+ * 2nd-order block (index 24) when present. Only a luma block (i < 16)
+ * with a 2nd-order block may keep eob <= 1 without clearing
+ * mb_skip_coeff; every other block must have eob == 0.
+ */
void vp8_quantize_mb(MACROBLOCK *x)
{
int i;
+ int has_2nd_order=(x->e_mbd.mbmi.mode != B_PRED
+ && x->e_mbd.mbmi.mode != SPLITMV);
x->e_mbd.mbmi.mb_skip_coeff = 1;
-
- if (x->e_mbd.mbmi.mode != B_PRED && x->e_mbd.mbmi.mode != SPLITMV)
+ for (i = 0; i < 24+has_2nd_order; i++)
{
- for (i = 0; i < 16; i++)
- {
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
- x->e_mbd.mbmi.mb_skip_coeff &= (x->e_mbd.block[i].eob < 2);
- }
-
- for (i = 16; i < 25; i++)
- {
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
- x->e_mbd.mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
- }
- }
- else
- {
- for (i = 0; i < 24; i++)
- {
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
- x->e_mbd.mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
- }
+ x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->e_mbd.mbmi.mb_skip_coeff &=
+ (x->e_mbd.block[i].eob <= (has_2nd_order && i<16));
}
-
}
if (cpi->zbin_over_quant > zbin_oqmax)
cpi->zbin_over_quant = zbin_oqmax;
- // Each over-run step is assumed to equate to approximately
- // 3% reduction in bitrate
+ // Adjust bits_per_mb_at_this_q estimate
bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q);
Factor += factor_adjustment;
}
+/* RDMULT boost, in units of 1/16, indexed by the next frame's
+ * intra/inter error ratio (applied further down as
+ * RDMULT += (RDMULT * rd_iifactor[next_iiratio]) >> 4).
+ */
-static int rd_iifactor [ 32 ] = { 16, 16, 16, 12, 8, 4, 2, 0,
+static int rd_iifactor [ 32 ] = { 4, 4, 3, 2, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
-
-
// The values in this table should be reviewed
static int sad_per_bit16lut[128] =
{
vp8_clear_system_state(); //__asm emms;
- cpi->RDMULT = (int)((0.00007 * (capped_q * capped_q * capped_q * capped_q)) - (0.0125 * (capped_q * capped_q * capped_q)) +
- (2.25 * (capped_q * capped_q)) - (12.5 * capped_q) + 25.0);
+ cpi->RDMULT = (int)( (0.0001 * (capped_q * capped_q * capped_q * capped_q))
+ -(0.0125 * (capped_q * capped_q * capped_q))
+ +(3.25 * (capped_q * capped_q))
+ -(12.5 * capped_q) + 50.0);
- if (cpi->RDMULT < 25)
- cpi->RDMULT = 25;
+ if (cpi->RDMULT < 50)
+ cpi->RDMULT = 50;
- if (cpi->pass == 2)
+ if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME))
{
- if (cpi->common.frame_type == KEY_FRAME)
- cpi->RDMULT += (cpi->RDMULT * rd_iifactor[0]) / 16;
- else if (cpi->next_iiratio > 31)
- cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) / 16;
+ if (cpi->next_iiratio > 31)
+ cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
else
- cpi->RDMULT += (cpi->RDMULT * rd_iifactor[cpi->next_iiratio]) / 16;
+ cpi->RDMULT += (cpi->RDMULT * rd_iifactor[cpi->next_iiratio]) >> 4;
}
// Extend rate multiplier along side quantizer zbin increases
if (cpi->zbin_over_quant > 0)
{
- // Extend rate multiplier along side quantizer zbin increases
- if (cpi->zbin_over_quant > 0)
- {
- double oq_factor = pow(1.006, cpi->zbin_over_quant);
+ double oq_factor = pow(1.006, cpi->zbin_over_quant);
- if (oq_factor > (1.0 + ((double)cpi->zbin_over_quant / 64.0)))
- oq_factor = (1.0 + (double)cpi->zbin_over_quant / 64.0);
+ if (oq_factor > (1.0 + ((double)cpi->zbin_over_quant / 64.0)))
+ oq_factor = (1.0 + (double)cpi->zbin_over_quant / 64.0);
- cpi->RDMULT = (int)(oq_factor * cpi->RDMULT);
- }
+ cpi->RDMULT = (int)(oq_factor * cpi->RDMULT);
}
cpi->mb.errorperbit = (cpi->RDMULT / 100);