/*
- * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
#include "extend.h"
#include "entropymode.h"
#include "quant_common.h"
-#include "segmentation_common.h"
+#include "segmentation.h"
#include "setupintrarecon.h"
#include "encodeintra.h"
#include "reconinter.h"
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif
-// The first four entries are dummy values
static const int qrounding_factors[129] =
{
- 56, 56, 56, 56, 56, 56, 56, 56,
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48,
- 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48
};
static const int qzbin_factors[129] =
{
- 64, 64, 64, 64, 80, 80, 80, 80,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
+ 80
+};
+
+static const int qrounding_factors_y2[129] =
+{
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48, 48,
+ 48
+};
+
+static const int qzbin_factors_y2[129] =
+{
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 84, 84, 84, 84, 84, 84, 84, 84,
+ 80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
80, 80, 80, 80, 80, 80, 80, 80,
- 80,
+ 80
};
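/* Editorial note, not part of the patch: the zbin and rounding factors
 * above are Q7 fractions of the quantizer step size. For example, with
 * quant_val = 20, a rounding factor of 48 gives
 * round = (48 * 20) >> 7 = 7 (48/128 = 0.375 of a step), while a zbin
 * factor of 84 gives zbin = ((84 * 20) + 64) >> 7 = 13 (~0.66 of a step;
 * the + 64 rounds to nearest in Q7). */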
-//#define EXACT_QUANT
+
+#define EXACT_QUANT
#ifdef EXACT_QUANT
-static void vp8cx_invert_quant(short *quant, short *shift, short d)
+static void vp8cx_invert_quant(int improved_quant, short *quant,
+ short *shift, short d)
{
- unsigned t;
- int l;
- t = d;
- for(l = 0; t > 1; l++)
- t>>=1;
- t = 1 + (1<<(16+l))/d;
- *quant = (short)(t - (1<<16));
- *shift = l;
+ if(improved_quant)
+ {
+ unsigned t;
+ int l;
+ t = d;
+ for(l = 0; t > 1; l++)
+ t>>=1;
+ t = 1 + (1<<(16+l))/d;
+ *quant = (short)(t - (1<<16));
+ *shift = l;
+ }
+ else
+ {
+ *quant = (1 << 16) / d;
+ *shift = 0;
+ }
}
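/* Editorial sketch, not part of the patch: for a divisor d the improved
 * path computes l = floor(log2(d)) and t = 1 + (1 << (16 + l)) / d, and
 * stores quant = t - (1 << 16) (fits in a short, possibly negative) with
 * shift = l, so that the quantizer can divide as, roughly,
 *
 *     y = (((x * quant) >> 16) + x) >> shift;    // ~= x / d
 *
 * e.g. d = 20 gives l = 4 and quant = -13107; for x = 100,
 * ((100 * -13107) >> 16) + 100 = -20 + 100 = 80, and 80 >> 4 = 5 = 100/20.
 * The else branch instead stores a plain truncated Q16 reciprocal with
 * shift = 0, cheaper to set up but less exact, presumably consumed in the
 * simpler y = (x * quant) >> 16 form (the same value the *_fast tables
 * below keep for the fast quantizer). */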
void vp8cx_init_quantizer(VP8_COMP *cpi)
{
- int r, c;
int i;
int quant_val;
int Q;
{
// dc values
quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
- vp8cx_invert_quant(cpi->Y1quant[Q][0] + 0,
- cpi->Y1quant_shift[Q][0] + 0, quant_val);
- cpi->Y1zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][0][0] = quant_val;
+ cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
+ vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
+ cpi->Y1quant_shift[Q] + 0, quant_val);
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
- vp8cx_invert_quant(cpi->Y2quant[Q][0] + 0,
- cpi->Y2quant_shift[Q][0] + 0, quant_val);
- cpi->Y2zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][0][0] = quant_val;
+ cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
+ vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
+ cpi->Y2quant_shift[Q] + 0, quant_val);
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
- vp8cx_invert_quant(cpi->UVquant[Q][0] + 0,
- cpi->UVquant_shift[Q][0] + 0, quant_val);
- cpi->UVzbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;
- cpi->UVround[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][0][0] = quant_val;
+ cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
+ vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
+ cpi->UVquant_shift[Q] + 0, quant_val);
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
// all the ac values
for (i = 1; i < 16; i++)
{
int rc = vp8_default_zig_zag1d[i];
- r = (rc >> 2);
- c = (rc & 3);
quant_val = vp8_ac_yquant(Q);
- vp8cx_invert_quant(cpi->Y1quant[Q][r] + c,
- cpi->Y1quant_shift[Q][r] + c, quant_val);
- cpi->Y1zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y1round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y1dequant[Q][r][c] = quant_val;
+ cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
+ vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
+ cpi->Y1quant_shift[Q] + rc, quant_val);
+ cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- vp8cx_invert_quant(cpi->Y2quant[Q][r] + c,
- cpi->Y2quant_shift[Q][r] + c, quant_val);
- cpi->Y2zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->Y2round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.Y2dequant[Q][r][c] = quant_val;
+ cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
+ vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
+ cpi->Y2quant_shift[Q] + rc, quant_val);
+ cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- vp8cx_invert_quant(cpi->UVquant[Q][r] + c,
- cpi->UVquant_shift[Q][r] + c, quant_val);
- cpi->UVzbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
- cpi->UVround[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;
- cpi->common.UVdequant[Q][r][c] = quant_val;
+ cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
+ vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
+ cpi->UVquant_shift[Q] + rc, quant_val);
+ cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
}
}
}
#else
-void vp8cx_init_quantizer(VP8_COMP *cpi)\r
-{\r
- int r, c;\r
- int i;\r
- int quant_val;\r
- int Q;\r
-\r
- int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};\r
-\r
- for (Q = 0; Q < QINDEX_RANGE; Q++)\r
- {\r
- // dc values\r
- quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);\r
- cpi->Y1quant[Q][0][0] = (1 << 16) / quant_val;\r
- cpi->Y1zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;\r
- cpi->Y1round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;\r
- cpi->common.Y1dequant[Q][0][0] = quant_val;\r
- cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;\r
-\r
- quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);\r
- cpi->Y2quant[Q][0][0] = (1 << 16) / quant_val;\r
- cpi->Y2zbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;\r
- cpi->Y2round[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;\r
- cpi->common.Y2dequant[Q][0][0] = quant_val;\r
- cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;\r
-\r
- quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);\r
- cpi->UVquant[Q][0][0] = (1 << 16) / quant_val;\r
- cpi->UVzbin[Q][0][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;;\r
- cpi->UVround[Q][0][0] = (qrounding_factors[Q] * quant_val) >> 7;\r
- cpi->common.UVdequant[Q][0][0] = quant_val;\r
- cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;\r
-\r
- // all the ac values = ;\r
- for (i = 1; i < 16; i++)\r
- {\r
- int rc = vp8_default_zig_zag1d[i];\r
- r = (rc >> 2);\r
- c = (rc & 3);\r
-\r
- quant_val = vp8_ac_yquant(Q);\r
- cpi->Y1quant[Q][r][c] = (1 << 16) / quant_val;\r
- cpi->Y1zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;\r
- cpi->Y1round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;\r
- cpi->common.Y1dequant[Q][r][c] = quant_val;\r
- cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;\r
-\r
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);\r
- cpi->Y2quant[Q][r][c] = (1 << 16) / quant_val;\r
- cpi->Y2zbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;\r
- cpi->Y2round[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;\r
- cpi->common.Y2dequant[Q][r][c] = quant_val;\r
- cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;\r
-\r
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);\r
- cpi->UVquant[Q][r][c] = (1 << 16) / quant_val;\r
- cpi->UVzbin[Q][r][c] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;\r
- cpi->UVround[Q][r][c] = (qrounding_factors[Q] * quant_val) >> 7;\r
- cpi->common.UVdequant[Q][r][c] = quant_val;\r
- cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;\r
- }\r
- }\r
-}\r
+void vp8cx_init_quantizer(VP8_COMP *cpi)
+{
+ int i;
+ int quant_val;
+ int Q;
+
+ int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
+
+ for (Q = 0; Q < QINDEX_RANGE; Q++)
+ {
+ // dc values
+ quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
+ cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
+ cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
+ cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
+ cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
+ cpi->UVquant[Q][0] = (1 << 16) / quant_val;
+ cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][0] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
+
+ // all the ac values
+ for (i = 1; i < 16; i++)
+ {
+ int rc = vp8_default_zig_zag1d[i];
+
+ quant_val = vp8_ac_yquant(Q);
+ cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
+ cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.Y1dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
+ cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
+ cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
+ cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
+ cpi->common.Y2dequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+
+ quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
+ cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
+ cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
+ cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
+ cpi->common.UVdequant[Q][rc] = quant_val;
+ cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
+ }
+ }
+}
#endif
void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
int i;
int QIndex;
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mbmi;
int zbin_extra;
// Select the baseline MB Q index.
{
// Abs Value
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
- QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
+ QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
// Delta Value
else
{
- QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
+ QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
}
}
QIndex = cpi->common.base_qindex;
// Y
- zbin_extra = (cpi->common.Y1dequant[QIndex][0][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
+ zbin_extra = (cpi->common.Y1dequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
for (i = 0; i < 16; i++)
{
x->block[i].quant = cpi->Y1quant[QIndex];
+ x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
x->block[i].zbin = cpi->Y1zbin[QIndex];
x->block[i].round = cpi->Y1round[QIndex];
}
// UV
- zbin_extra = (cpi->common.UVdequant[QIndex][0][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
+ zbin_extra = (cpi->common.UVdequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
for (i = 16; i < 24; i++)
{
x->block[i].quant = cpi->UVquant[QIndex];
+ x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
x->block[i].zbin = cpi->UVzbin[QIndex];
x->block[i].round = cpi->UVround[QIndex];
}
// Y2
- zbin_extra = (cpi->common.Y2dequant[QIndex][0][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
+ zbin_extra = (cpi->common.Y2dequant[QIndex][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
+ x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
x->block[24].quant = cpi->Y2quant[QIndex];
x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
x->block[24].zbin = cpi->Y2zbin[QIndex];
x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
x->block[24].zbin_extra = (short)zbin_extra;
+
+ /* save this macroblock QIndex for vp8_update_zbin_extra() */
+ x->q_index = QIndex;
+}
+void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ int i;
+ int QIndex = x->q_index;
+ int zbin_extra;
+
+ // Y
+ zbin_extra = (cpi->common.Y1dequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
+ for (i = 0; i < 16; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // UV
+ zbin_extra = (cpi->common.UVdequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
+ for (i = 16; i < 24; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // Y2
+ zbin_extra = (cpi->common.Y2dequant[QIndex][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
+ x->block[24].zbin_extra = (short)zbin_extra;
}
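/* Editorial note: zbin_extra is a Q7-scaled widening of the zero bin,
 * derived from the first AC dequant value. For example, with
 * Y1dequant[QIndex][1] = 20 and zbin_over_quant + zbin_mode_boost = 32,
 * zbin_extra = (20 * 32) >> 7 = 5. Recomputing only this term from the
 * q_index saved in vp8cx_mb_init_quantizer() lets the encoder adjust the
 * zero bin per macroblock without redoing the full quantizer setup. */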
void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
{
+ // Clear Zbin mode boost for default case
+ cpi->zbin_mode_boost = 0;
+
// vp8cx_init_quantizer() is first called in vp8_create_compressor(). A check is added here so that vp8cx_init_quantizer() is only called
// when these values are not all zero.
if (cpi->common.y1dc_delta_q | cpi->common.y2dc_delta_q | cpi->common.uvdc_delta_q | cpi->common.y2ac_delta_q | cpi->common.uvac_delta_q)
}
+/* activity_avg must be positive, or flat regions could get a zero weight
+ * (infinite lambda), which confounds analysis.
+ * This also avoids the need for divide by zero checks in
+ * vp8_activity_masking().
+ */
+#define VP8_ACTIVITY_AVG_MIN (64)
+
+/* This is used as a reference when computing the source variance for the
+ * purposes of activity masking.
+ * Eventually this should be replaced by custom no-reference routines,
+ * which will be faster.
+ */
+static const unsigned char VP8_VAR_OFFS[16]=
+{
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
+};
+
+unsigned int vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ unsigned int act;
+ unsigned int sse;
+ int sum;
+ unsigned int a;
+ unsigned int b;
+ /* TODO: This could also be done over smaller areas (8x8), but that would
+ * require extensive changes elsewhere, as lambda is assumed to be fixed
+ * over an entire MB in most of the code.
+ * Another option is to compute four 8x8 variances, and pick a single
+ * lambda using a non-linear combination (e.g., the smallest, or second
+ * smallest, etc.).
+ */
+ VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)(x->src.y_buffer,
+ x->src.y_stride, VP8_VAR_OFFS, 0, &sse, &sum);
+ /* This requires a full 32 bits of precision, since for a 16x16 block
+ * act = 256*sse - sum*sum is 256 times the pixel variance. */
+ act = (sse<<8) - sum*sum;
+ /* Drop 4 bits (with rounding) to give us some headroom to work with. */
+ act = (act + 8) >> 4;
+ /* If the region is flat, lower the activity some more. */
+ if (act < 8<<12)
+ act = act < 5<<12 ? act : 5<<12;
+ /* TODO: For non-flat regions, edge regions should receive less masking
+ * than textured regions, but identifying edge regions quickly and
+ * reliably enough is still a subject of experimentation.
+ * This will be most noticeable near edges with a complex shape (e.g.,
+ * text), but the 4x4 transform size should make this less of a problem
+ * than it would be for an 8x8 transform.
+ */
+ /* Apply the masking to the RD multiplier. */
+ a = act + 4*cpi->activity_avg;
+ b = 4*act + cpi->activity_avg;
+ x->rdmult = (unsigned int)(((INT64)x->rdmult*b + (a>>1))/a);
+ return act;
+}
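/* Editorial note: with a = act + 4*avg and b = 4*act + avg, the factor
 * b/a applied to rdmult is bounded in (1/4, 4). act == activity_avg
 * leaves rdmult unchanged; a busy MB with act = 4*avg scales it by 17/8,
 * tolerating more distortion where texture masks it, while a flat MB
 * with act = avg/4 scales it by 8/17, spending relatively more bits
 * where artifacts would be visible. The (a >> 1) term rounds to nearest.
 */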
+
+
static
void encode_mb_row(VP8_COMP *cpi,
int *segment_counts,
int *totalrate)
{
+ INT64 activity_sum = 0;
int i;
int recon_yoffset, recon_uvoffset;
int mb_col;
// reset above block coeffs
- xd->above_context[Y1CONTEXT] = cm->above_context[Y1CONTEXT];
- xd->above_context[UCONTEXT ] = cm->above_context[UCONTEXT ];
- xd->above_context[VCONTEXT ] = cm->above_context[VCONTEXT ];
- xd->above_context[Y2CONTEXT] = cm->above_context[Y2CONTEXT];
+ xd->above_context = cm->above_context;
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
cpi->tplist[mb_row].start = *tp;
//printf("Main mb_row = %d\n", mb_row);
+ // Distance of Mb to the top & bottom edges, specified in 1/8th pel
+ // units as they are always compared to values that are in 1/8th pel units
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
+
+ // Set up limit values for vertical motion vector components
+ // to prevent them extending beyond the UMV borders
+ x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
+ + (VP8BORDERINPIXELS - 16);
+
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
- // Distance of Mb to the various image edges.
- // These specified to 8th pel as they are always compared to values that are in 1/8th pel units
+ // Distance of Mb to the left & right edges, specified in
+ // 1/8th pel units as they are always compared to values
+ // that are in 1/8th pel units
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
- xd->mb_to_top_edge = -((mb_row * 16) << 3);
- xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
- // Set up limit values for motion vectors used to prevent them extending outside the UMV borders
+ // Set up limit values for horizontal motion vector components
+ // to prevent them extending beyond the UMV borders
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
- x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
- x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
- x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
+ x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
+ + (VP8BORDERINPIXELS - 16);
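/* Editorial note: VP8BORDERINPIXELS is the width of the extended (UMV)
 * border around the reconstructed frames. Limiting the MV components to
 * (mbs_to_edge * 16) + VP8BORDERINPIXELS - 16 keeps all 16x16 predictor
 * pixels inside the extended frame, with margin left for subpel taps. */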
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
xd->left_available = (mb_col != 0);
+ x->rddiv = cpi->RDDIV;
+ x->rdmult = cpi->RDMULT;
+
+ if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ activity_sum += vp8_activity_masking(cpi, x);
+
// Is segmentation enabled
// MB level adjustment to quantizer
if (xd->segmentation_enabled)
{
// Code to set segment id in xd->mode_info_context->mbmi.segment_id for current MB (with range checking)
if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
- xd->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
+ xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
else
- xd->mbmi.segment_id = 0;
+ xd->mode_info_context->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
}
else
- xd->mbmi.segment_id = 0; // Set to Segment 0 by default
+ xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
x->active_ptr = cpi->active_map + seg_map_index + mb_col;
for (b = 0; b < xd->mbmi.partition_count; b++)
{
- inter_b_modes[xd->mbmi.partition_bmi[b].mode] ++;
+ inter_b_modes[x->partition->bmi[b].mode] ++;
}
}
#endif
// Count of last ref frame 0,0 usage
- if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame == LAST_FRAME))
+ if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
// Special case code for cyclic refresh
// during vp8cx_encode_inter_macroblock()) back into the global segmentation map
if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
{
- cpi->segmentation_map[seg_map_index+mb_col] = xd->mbmi.segment_id;
+ cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
// If the block has been refreshed mark it as clean (the magnitude of the -ve value influences how long it will be before we consider another refresh):
// Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
// else mark it as dirty (1).
- if (xd->mbmi.segment_id)
+ if (xd->mode_info_context->mbmi.segment_id)
cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1;
- else if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame == LAST_FRAME))
+ else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
{
if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1)
cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0;
cpi->tplist[mb_row].stop = *tp;
- xd->gf_active_ptr++; // Increment pointer into gf useage flags structure for next mb
-
- // store macroblock mode info into context array
- vpx_memcpy(&xd->mode_info_context->mbmi, &xd->mbmi, sizeof(xd->mbmi));
+ x->gf_active_ptr++; // Increment pointer into gf usage flags structure for next mb
for (i = 0; i < 16; i++)
vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));
recon_uvoffset += 8;
// Keep track of segment usage
- segment_counts[xd->mbmi.segment_id] ++;
+ segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
// skip to next mb
xd->mode_info_context++;
+ x->partition_info++;
- xd->above_context[Y1CONTEXT] += 4;
- xd->above_context[UCONTEXT ] += 2;
- xd->above_context[VCONTEXT ] += 2;
- xd->above_context[Y2CONTEXT] ++;
+ xd->above_context++;
cpi->current_mb_col_main = mb_col;
}
// this is to account for the border
xd->mode_info_context++;
+ x->partition_info++;
+ x->activity_sum += activity_sum;
}
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
- int i;
TOKENEXTRA *tp = cpi->tok;
int segment_counts[MAX_MB_SEGMENTS];
int totalrate;
- if (cm->frame_type != KEY_FRAME)
+ // Set up prediction functions for all frame types so we can use MC in AltRef
+ if (cm->mcomp_filter_type == SIXTAP)
{
- if (cm->mcomp_filter_type == SIXTAP)
- {
- xd->subpixel_predict = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap4x4);
- xd->subpixel_predict8x4 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap8x4);
- xd->subpixel_predict8x8 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap8x8);
- xd->subpixel_predict16x16 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, sixtap16x16);
- }
- else
- {
- xd->subpixel_predict = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear4x4);
- xd->subpixel_predict8x4 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear8x4);
- xd->subpixel_predict8x8 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear8x8);
- xd->subpixel_predict16x16 = SUBPIX_INVOKE(&cpi->common.rtcd.subpix, bilinear16x16);
- }
+ xd->subpixel_predict = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap4x4);
+ xd->subpixel_predict8x4 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap8x4);
+ xd->subpixel_predict8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap8x8);
+ xd->subpixel_predict16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap16x16);
+ }
+ else
+ {
+ xd->subpixel_predict = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear4x4);
+ xd->subpixel_predict8x4 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear8x4);
+ xd->subpixel_predict8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear8x8);
+ xd->subpixel_predict16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear16x16);
}
- //else // Key Frame
- //{
- // For key frames make sure the intra ref frame probability value
- // is set to "all intra"
- //cpi->prob_intra_coded = 255;
- //}
-
-
- xd->gf_active_ptr = (signed char *)cm->gf_active_flags; // Point to base of GF active flags data structure
+ x->gf_active_ptr = (signed char *)cpi->gf_active_flags; // Point to base of GF active flags data structure
x->vector_range = 32;
totalrate = 0;
- xd->mode_info = cm->mi - 1;
+ x->partition_info = x->pi;
xd->mode_info_context = cm->mi;
xd->mode_info_stride = cm->mode_info_stride;
}
vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
- //vp8_initialize_rd_consts( cpi, vp8_dc_quant(cpi->avg_frame_qindex, cm->y1dc_delta_q) );
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
- //vp8cx_initialize_me_consts( cpi, cpi->avg_frame_qindex);
// Copy data over into macro block data structures.
vp8_setup_block_ptrs(x);
- x->rddiv = cpi->RDDIV;
- x->rdmult = cpi->RDMULT;
+ x->activity_sum = 0;
-#if 0
- // Experimental rd code
- // 2 Pass - Possibly set Rdmult based on last frame distortion + this frame target bits or other metrics
- // such as cpi->rate_correction_factor that indicate relative complexity.
- /*if ( cpi->pass == 2 && (cpi->last_frame_distortion > 0) && (cpi->target_bits_per_mb > 0) )
- {
- //x->rdmult = ((cpi->last_frame_distortion * 256)/cpi->common.MBs)/ cpi->target_bits_per_mb;
- x->rdmult = (int)(cpi->RDMULT * cpi->rate_correction_factor);
- }
- else
- x->rdmult = cpi->RDMULT; */
- //x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));
-#endif
+ xd->mode_info_context->mbmi.mode = DC_PRED;
+ xd->mode_info_context->mbmi.uv_mode = DC_PRED;
- xd->mbmi.mode = DC_PRED;
- xd->mbmi.uv_mode = DC_PRED;
-
- xd->left_context = cm->left_context;
+ xd->left_context = &cm->left_context;
vp8_zero(cpi->count_mb_ref_frame_usage)
vp8_zero(cpi->ymode_count)
x->mvc = cm->fc.mvc;
- // vp8_zero( entropy_stats)
- {
- ENTROPY_CONTEXT **p = cm->above_context;
- const size_t L = cm->mb_cols;
-
- vp8_zero_array(p [Y1CONTEXT], L * 4)
- vp8_zero_array(p [ UCONTEXT], L * 2)
- vp8_zero_array(p [ VCONTEXT], L * 2)
- vp8_zero_array(p [Y2CONTEXT], L)
- }
-
+ vpx_memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
{
struct vpx_usec_timer emr_timer;
else
{
#if CONFIG_MULTITHREAD
+ int i;
+
vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
{
- int i;
cpi->current_mb_col_main = -1;
for (i = 0; i < cpi->encoding_thread_count; i++)
x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
+ x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
if (mb_row < cm->mb_rows - 1)
//WaitForSingleObject(cpi->h_event_main, INFINITE);
totalrate += cpi->mb_row_ei[i].totalrate;
}
+ for (i = 0; i < cpi->encoding_thread_count; i++)
+ {
+ x->activity_sum += cpi->mb_row_ei[i].mb.activity_sum;
+ }
+
#endif
}
cpi->last_frame_distortion = cpi->frame_distortion;
#endif
+ /* Update the average activity for the next frame.
+ * This is feed-forward for now; it could also be saved in two-pass, or
+ * done during lookahead when that is eventually added.
+ */
+ cpi->activity_avg = (unsigned int)(x->activity_sum/cpi->common.MBs);
+ if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
+ cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
+
}
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
const MACROBLOCKD *xd = & x->e_mbd;
- const MB_PREDICTION_MODE m = xd->mbmi.mode;
- const MB_PREDICTION_MODE uvm = xd->mbmi.uv_mode;
+ const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
+ const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
#ifdef MODE_STATS
const int is_key = cpi->common.frame_type == KEY_FRAME;
int rateuv_tokenonly = 0;
int i;
- x->e_mbd.mbmi.ref_frame = INTRA_FRAME;
+ x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
#if !(CONFIG_REALTIME_ONLY)
error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
- x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
-
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
rate += rateuv;
if (Error4x4 < Error16x16)
{
rate += rate4x4;
- x->e_mbd.mbmi.mode = B_PRED;
+ x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
// get back the intra block modes
for (i = 0; i < 16; i++)
for (mode = DC_PRED; mode <= TM_PRED; mode ++)
{
- x->e_mbd.mbmi.mode = mode;
+ x->e_mbd.mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
else
Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
- x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
-
if (Error4x4 < Error16x16)
{
- x->e_mbd.mbmi.mode = B_PRED;
+ x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error4x4;
}
else
{
- x->e_mbd.mbmi.mode = best_mode;
+ x->e_mbd.mode_info_context->mbmi.mode = best_mode;
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error16x16;
}
extern int cnt_pm;
#endif
-extern void vp8_fix_contexts(VP8_COMP *cpi, MACROBLOCKD *x);
+extern void vp8_fix_contexts(MACROBLOCKD *x);
int vp8cx_encode_inter_macroblock
(
x->skip = 0;
if (xd->segmentation_enabled)
- x->encode_breakout = cpi->segment_encode_breakout[xd->mbmi.segment_id];
+ x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
else
x->encode_breakout = cpi->oxcf.encode_breakout;
if (cpi->sf.RD)
{
+ int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
+
+ /* Are we using the fast quantizer for the mode selection? */
+ if(cpi->sf.use_fastquant_for_pick)
+ {
+ cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb);
+
+ /* the fast quantizer does not use zbin_extra, so
+ * do not recalculate */
+ cpi->zbin_mode_boost_enabled = 0;
+ }
inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
+
+ /* switch back to the regular quantizer for the encode */
+ if (cpi->sf.improved_quant)
+ {
+ cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb);
+ }
+
+ /* restore cpi->zbin_mode_boost_enabled */
+ cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
+
}
else
#endif
#endif
// MB level adjustment to quantizer setup
- if (xd->segmentation_enabled || cpi->zbin_mode_boost_enabled)
+ if (xd->segmentation_enabled)
{
// If cyclic update enabled
if (cpi->cyclic_refresh_mode_enabled)
{
// Clear segment_id back to 0 if not coded (last frame 0,0)
- if ((xd->mbmi.segment_id == 1) &&
- ((xd->mbmi.ref_frame != LAST_FRAME) || (xd->mbmi.mode != ZEROMV)))
+ if ((xd->mode_info_context->mbmi.segment_id == 1) &&
+ ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
{
- xd->mbmi.segment_id = 0;
+ xd->mode_info_context->mbmi.segment_id = 0;
+
+ /* segment_id changed, so update */
+ vp8cx_mb_init_quantizer(cpi, x);
}
}
+ }
+ {
// Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to suppress noise
if (cpi->zbin_mode_boost_enabled)
{
- if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame != LAST_FRAME))
- cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ if ( xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME )
+ cpi->zbin_mode_boost = 0;
else
- cpi->zbin_mode_boost = 0;
+ {
+ if (xd->mode_info_context->mbmi.mode == ZEROMV)
+ {
+ if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
+ cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
+ else
+ cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
+ }
+ else if (xd->mode_info_context->mbmi.mode == SPLITMV)
+ cpi->zbin_mode_boost = 0;
+ else
+ cpi->zbin_mode_boost = MV_ZBIN_BOOST;
+ }
}
+ else
+ cpi->zbin_mode_boost = 0;
- vp8cx_mb_init_quantizer(cpi, x);
+ vp8_update_zbin_extra(cpi, x);
}
- cpi->count_mb_ref_frame_usage[xd->mbmi.ref_frame] ++;
+ cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
- if (xd->mbmi.ref_frame == INTRA_FRAME)
+ if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
- x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
-
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
- if (xd->mbmi.mode == B_PRED)
+ if (xd->mode_info_context->mbmi.mode == B_PRED)
{
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
}
int ref_fb_idx;
vp8_find_near_mvs(xd, xd->mode_info_context,
- &nearest, &nearby, &best_ref_mv, mdcounts, xd->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
+ &nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
vp8_build_uvmvs(xd, cpi->common.full_pixel);
- if (xd->mbmi.ref_frame == LAST_FRAME)
+ if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
- else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
+ else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
- if (xd->mbmi.mode == SPLITMV)
+ if (xd->mode_info_context->mbmi.mode == SPLITMV)
{
int i;
}
}
}
- else if (xd->mbmi.mode == NEWMV)
+ else if (xd->mode_info_context->mbmi.mode == NEWMV)
{
cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
}
- if (!x->skip && !x->e_mbd.mbmi.force_no_skip)
+ if (!x->skip && !x->e_mbd.mode_info_context->mbmi.force_no_skip)
{
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
- xd->mbmi.mb_skip_coeff = 0;
+ xd->mode_info_context->mbmi.mb_skip_coeff = 0;
}
else
{
if (cpi->common.mb_no_coeff_skip)
{
- if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV)
- xd->mbmi.dc_diff = 0;
+ if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
+ xd->mode_info_context->mbmi.dc_diff = 0;
else
- xd->mbmi.dc_diff = 1;
+ xd->mode_info_context->mbmi.dc_diff = 1;
- xd->mbmi.mb_skip_coeff = 1;
+ xd->mode_info_context->mbmi.mb_skip_coeff = 1;
cpi->skip_true_count ++;
- vp8_fix_contexts(cpi, xd);
+ vp8_fix_contexts(xd);
}
else
{
vp8_stuff_mb(cpi, xd, t);
- xd->mbmi.mb_skip_coeff = 0;
+ xd->mode_info_context->mbmi.mb_skip_coeff = 0;
cpi->skip_false_count ++;
}
}