oneshotq
multiple_arf
non420
+ alpha
ab4x4
comp_inter_joint_search
"
// FIXME(jkoleszar): allocate subsampled arrays for U/V once subsampling
// information is exposed at this level
mi_cols = mi_cols_aligned_to_sb(oci);
+#if CONFIG_ALPHA
+ // TODO(jkoleszar): Why is this * 2?
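+ // (Each plane is given 2 * mi_cols contexts in the pointer setup below,
+ //  so four planes need 8 * mi_cols just as three needed 6.)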
+ oci->above_context[0] = vpx_calloc(sizeof(ENTROPY_CONTEXT) * 8 * mi_cols, 1);
+#else
oci->above_context[0] = vpx_calloc(sizeof(ENTROPY_CONTEXT) * 6 * mi_cols, 1);
+#endif
if (!oci->above_context[0]) {
vp9_free_frame_buffers(oci);
return 1;
}
- oci->above_context[1] =
- oci->above_context[0] + sizeof(ENTROPY_CONTEXT) * 2 * mi_cols;
- oci->above_context[2] =
- oci->above_context[1] + sizeof(ENTROPY_CONTEXT) * 2 * mi_cols;
+ for (i = 1; i < MAX_MB_PLANE; i++)
+ oci->above_context[i] =
+ oci->above_context[0] + i * sizeof(ENTROPY_CONTEXT) * 2 * mi_cols;
oci->above_seg_context =
vpx_calloc(sizeof(PARTITION_CONTEXT) * mi_cols, 1);
convolve_fn_t predict[2][2][2]; // horiz, vert, avg
};
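+// The planes are Y, U and V, plus an optional fourth alpha plane.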
+#if CONFIG_ALPHA
+enum { MAX_MB_PLANE = 4 };
+#else
enum { MAX_MB_PLANE = 3 };
+#endif
struct buf_2d {
uint8_t *buf;
const int eb_y = dst->border + dst->y_height - src->y_height;
const int er_y = dst->border + dst->y_width - src->y_width;
- const int et_uv = dst->border >> 1;
- const int el_uv = dst->border >> 1;
- const int eb_uv = (dst->border >> 1) + dst->uv_height - src->uv_height;
- const int er_uv = (dst->border >> 1) + dst->uv_width - src->uv_width;
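+ // The border is halved only for dimensions that are actually subsampled
+ // relative to Y, so a 4:4:4 plane keeps the full Y border.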
+ const int et_uv = dst->border >> (dst->uv_height != dst->y_height);
+ const int el_uv = dst->border >> (dst->uv_width != dst->y_width);
+ const int eb_uv = et_uv + dst->uv_height - src->uv_height;
+ const int er_uv = el_uv + dst->uv_width - src->uv_width;
+
+#if CONFIG_ALPHA
+ const int et_a = dst->border >> (dst->alpha_height != dst->y_height);
+ const int el_a = dst->border >> (dst->alpha_width != dst->y_width);
+ const int eb_a = et_a + dst->alpha_height - src->alpha_height;
+ const int er_a = el_a + dst->alpha_width - src->alpha_width;
+
+ copy_and_extend_plane(src->alpha_buffer, src->alpha_stride,
+ dst->alpha_buffer, dst->alpha_stride,
+ src->alpha_width, src->alpha_height,
+ et_a, el_a, eb_a, er_a);
+#endif
copy_and_extend_plane(src->y_buffer, src->y_stride,
dst->y_buffer, dst->y_stride,
mb->plane[i].subsampling_x = i ? subsampling_x : 0;
mb->plane[i].subsampling_y = i ? subsampling_y : 0;
}
+#if CONFIG_ALPHA
+ // TODO(jkoleszar): Using the Y w/h for now
+ mb->plane[3].subsampling_x = 0;
+ mb->plane[3].subsampling_y = 0;
+#endif
}
DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][2]);
DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][2]);
+#if CONFIG_ALPHA
+ DECLARE_ALIGNED(16, int16_t, a_dequant[QINDEX_RANGE][2]);
+#endif
int width;
int height;
int y_dc_delta_q;
int uv_dc_delta_q;
int uv_ac_delta_q;
+#if CONFIG_ALPHA
+ int a_dc_delta_q;
+ int a_ac_delta_q;
+#endif
unsigned int frames_since_golden;
unsigned int frames_till_alt_ref_frame;
+ 0.0065 + 0.5);
int i;
- const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
- const int src_widths[3] = {src->y_width, src->uv_width, src->uv_width};
- const int src_heights[3] = {src->y_height, src->uv_height, src->uv_height};
-
- uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
- const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
+ const uint8_t *const srcs[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ const int src_strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ const int src_widths[4] = {src->y_width, src->uv_width, src->uv_width,
+ src->alpha_width};
+ const int src_heights[4] = {src->y_height, src->uv_height, src->uv_height,
+ src->alpha_height};
+
+ uint8_t *const dsts[4] = {dst->y_buffer, dst->u_buffer, dst->v_buffer,
+ dst->alpha_buffer};
+ const int dst_strides[4] = {dst->y_stride, dst->uv_stride, dst->uv_stride,
+ dst->alpha_stride};
for (i = 0; i < MAX_MB_PLANE; ++i)
vp9_post_proc_down_and_across(srcs[i], dsts[i],
+ 0.0065 + 0.5);
int i;
- const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
- const int src_widths[3] = {src->y_width, src->uv_width, src->uv_width};
- const int src_heights[3] = {src->y_height, src->uv_height, src->uv_height};
-
- uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
- const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
+ const uint8_t *const srcs[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ const int src_strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ const int src_widths[4] = {src->y_width, src->uv_width, src->uv_width,
+ src->alpha_width};
+ const int src_heights[4] = {src->y_height, src->uv_height, src->uv_height,
+ src->alpha_height};
+
+ uint8_t *const dsts[4] = {dst->y_buffer, dst->u_buffer, dst->v_buffer,
+ dst->alpha_buffer};
+ const int dst_strides[4] = {dst->y_stride, dst->uv_stride, dst->uv_stride,
+ dst->alpha_stride};
for (i = 0; i < MAX_MB_PLANE; ++i) {
const int src_stride = src_strides[i];
BLOCK_SIZE_TYPE bsize) {
struct build_inter_predictors_args args = {
xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
+#if CONFIG_ALPHA
+ {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
+ xd->plane[3].dst.buf},
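+ // U and V share a stride, so plane[1]'s stride also covers plane 2.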
+ {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride,
+ xd->plane[3].dst.stride},
+ {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf,
+ xd->plane[3].pre[0].buf},
+ {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf,
+ xd->plane[3].pre[1].buf}},
+ {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride,
+ xd->plane[3].pre[0].stride},
+ {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride,
+ xd->plane[3].pre[1].stride}},
+#else
{NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf},
{0, xd->plane[1].dst.stride, xd->plane[1].dst.stride},
{{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf},
{NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf}},
{{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride},
{0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride}},
+#endif
};
foreach_predicted_block_uv(xd, bsize, build_inter_predictors, &args);
}
static void setup_dst_planes(MACROBLOCKD *xd,
const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
- uint8_t *buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
+ uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
int i;
for (i = 0; i < MAX_MB_PLANE; ++i) {
for (i = 0; i < 2; ++i) {
const YV12_BUFFER_CONFIG *src = srcs[i];
if (src) {
- uint8_t* buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
+ uint8_t* buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
for (j = 0; j < MAX_MB_PLANE; ++j) {
struct macroblockd_plane *pd = &xd->plane[j];
void vp9_setup_src_planes(MACROBLOCK *x,
const YV12_BUFFER_CONFIG *src,
int mb_row, int mb_col) {
- setup_pred_plane(&x->plane[0].src,
- src->y_buffer, src->y_stride,
- mb_row, mb_col, NULL,
- x->e_mbd.plane[0].subsampling_x,
- x->e_mbd.plane[0].subsampling_y);
- setup_pred_plane(&x->plane[1].src,
- src->u_buffer, src->uv_stride,
- mb_row, mb_col, NULL,
- x->e_mbd.plane[1].subsampling_x,
- x->e_mbd.plane[1].subsampling_y);
- setup_pred_plane(&x->plane[2].src,
- src->v_buffer, src->uv_stride,
- mb_row, mb_col, NULL,
- x->e_mbd.plane[2].subsampling_x,
- x->e_mbd.plane[2].subsampling_y);
+ uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
+ src->alpha_buffer};
+ int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
+ src->alpha_stride};
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ setup_pred_plane(&x->plane[i].src,
+ buffers[i], strides[i],
+ mb_row, mb_col, NULL,
+ x->e_mbd.plane[i].subsampling_x,
+ x->e_mbd.plane[i].subsampling_y);
+ }
}
static void set_offsets(VP9_COMP *cpi,
const uint8_t *src = x->plane[plane].src.buf;
const int src_stride = x->plane[plane].src.stride;
- assert(plane < 3);
vp9_subtract_block(bh, bw,
x->plane[plane].src_diff, bw, src, src_stride,
xd->plane[plane].dst.buf, xd->plane[plane].dst.stride);
return NULL;
}
+#define USE_PARTIAL_COPY 0
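+// When 0, the active-map partial-copy path below is compiled out and every
+// push takes the full vp9_copy_and_extend_frame() path.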
int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
int64_t ts_start, int64_t ts_end, unsigned int flags,
unsigned char *active_map) {
struct lookahead_entry *buf;
+#if USE_PARTIAL_COPY
int row, col, active_end;
int mb_rows = (src->y_height + 15) >> 4;
int mb_cols = (src->y_width + 15) >> 4;
+#endif
if (ctx->sz + 1 > ctx->max_sz)
return 1;
ctx->sz++;
buf = pop(ctx, &ctx->write_idx);
+#if USE_PARTIAL_COPY
+ // TODO(jkoleszar): This is disabled for now, as
+ // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
+
// Only do this partial copy if the following conditions are all met:
// 1. Lookahead queue has a size of 1.
// 2. Active map is provided.
} else {
vp9_copy_and_extend_frame(src, &buf->img);
}
+#else
+ // Partial copy not implemented yet
+ vp9_copy_and_extend_frame(src, &buf->img);
+#endif
+
buf->ts_start = ts_start;
buf->ts_end = ts_end;
buf->flags = flags;
fwrite(src, s->uv_width, 1, yuv_rec_file);
src += s->uv_stride;
} while (--h);
+
+#if CONFIG_ALPHA
+ if (s->alpha_buffer) {
+ src = s->alpha_buffer;
+ h = s->alpha_height;
+ do {
+ fwrite(src, s->alpha_width, 1, yuv_rec_file);
+ src += s->alpha_stride;
+ } while (--h);
+ }
+#endif
+
fflush(yuv_rec_file);
}
#endif
const int out_h = dst_fb->y_crop_height;
int x, y, i;
- uint8_t *srcs[3] = {src_fb->y_buffer, src_fb->u_buffer, src_fb->v_buffer};
- int src_strides[3] = {src_fb->y_stride, src_fb->uv_stride, src_fb->uv_stride};
+ uint8_t *srcs[4] = {src_fb->y_buffer, src_fb->u_buffer, src_fb->v_buffer,
+ src_fb->alpha_buffer};
+ int src_strides[4] = {src_fb->y_stride, src_fb->uv_stride, src_fb->uv_stride,
+ src_fb->alpha_stride};
- uint8_t *dsts[3] = {dst_fb->y_buffer, dst_fb->u_buffer, dst_fb->v_buffer};
- int dst_strides[3] = {dst_fb->y_stride, dst_fb->uv_stride, dst_fb->uv_stride};
+ uint8_t *dsts[4] = {dst_fb->y_buffer, dst_fb->u_buffer, dst_fb->v_buffer,
+ dst_fb->alpha_buffer};
+ int dst_strides[4] = {dst_fb->y_stride, dst_fb->uv_stride, dst_fb->uv_stride,
+ dst_fb->alpha_stride};
for (y = 0; y < out_h; y += 16) {
for (x = 0; x < out_w; x += 16) {
DECLARE_ALIGNED(16, short, uv_zbin[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, uv_round[QINDEX_RANGE][16]);
+#if CONFIG_ALPHA
+ DECLARE_ALIGNED(16, short, a_quant[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, unsigned char, a_quant_shift[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, a_zbin[QINDEX_RANGE][16]);
+ DECLARE_ALIGNED(16, short, a_round[QINDEX_RANGE][16]);
+
+ DECLARE_ALIGNED(16, short, zrun_zbin_boost_a[QINDEX_RANGE][16]);
+#endif
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y[QINDEX_RANGE][16]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv[QINDEX_RANGE][16]);
int i;
int quant_val;
int quant_uv_val;
+#if CONFIG_ALPHA
+ int quant_alpha_val;
+#endif
int q;
static const int zbin_boost[16] = { 0, 0, 0, 8, 8, 8, 10, 12,
cpi->common.y_dequant[q][0] = quant_val;
cpi->zrun_zbin_boost_y[q][0] = (quant_val * zbin_boost[0]) >> 7;
-
quant_val = vp9_dc_quant(q, cpi->common.uv_dc_delta_q);
invert_quant(cpi->uv_quant[q] + 0, cpi->uv_quant_shift[q] + 0, quant_val);
cpi->uv_zbin[q][0] = ROUND_POWER_OF_TWO(qzbin_factor * quant_val, 7);
cpi->common.uv_dequant[q][0] = quant_val;
cpi->zrun_zbin_boost_uv[q][0] = (quant_val * zbin_boost[0]) >> 7;
+#if CONFIG_ALPHA
+ quant_val = vp9_dc_quant(q, cpi->common.a_dc_delta_q);
+ invert_quant(cpi->a_quant[q] + 0, cpi->a_quant_shift[q] + 0, quant_val);
+ cpi->a_zbin[q][0] = ROUND_POWER_OF_TWO(qzbin_factor * quant_val, 7);
+ cpi->a_round[q][0] = (qrounding_factor * quant_val) >> 7;
+ cpi->common.a_dequant[q][0] = quant_val;
+ cpi->zrun_zbin_boost_a[q][0] = (quant_val * zbin_boost[0]) >> 7;
+#endif
+
quant_val = vp9_ac_quant(q, 0);
cpi->common.y_dequant[q][1] = quant_val;
quant_uv_val = vp9_ac_quant(q, cpi->common.uv_ac_delta_q);
cpi->common.uv_dequant[q][1] = quant_uv_val;
+#if CONFIG_ALPHA
+ quant_alpha_val = vp9_ac_quant(q, cpi->common.a_ac_delta_q);
+ cpi->common.a_dequant[q][1] = quant_alpha_val;
+#endif
// all the remaining 4x4 ac values use the same quantizer
for (i = 1; i < 16; i++) {
int rc = vp9_default_zig_zag1d_4x4[i];
cpi->uv_round[q][rc] = (qrounding_factor * quant_uv_val) >> 7;
cpi->zrun_zbin_boost_uv[q][i] =
ROUND_POWER_OF_TWO(quant_uv_val * zbin_boost[i], 7);
+
+#if CONFIG_ALPHA
+ invert_quant(cpi->a_quant[q] + rc, cpi->a_quant_shift[q] + rc,
+ quant_alpha_val);
+ cpi->a_zbin[q][rc] =
+ ROUND_POWER_OF_TWO(qzbin_factor * quant_alpha_val, 7);
+ cpi->a_round[q][rc] = (qrounding_factor * quant_alpha_val) >> 7;
+ cpi->zrun_zbin_boost_a[q][i] =
+ ROUND_POWER_OF_TWO(quant_alpha_val * zbin_boost[i], 7);
+#endif
}
}
}
x->e_mbd.plane[i].dequant = cpi->common.uv_dequant[qindex];
}
+#if CONFIG_ALPHA
+ x->plane[3].quant = cpi->a_quant[qindex];
+ x->plane[3].quant_shift = cpi->a_quant_shift[qindex];
+ x->plane[3].zbin = cpi->a_zbin[qindex];
+ x->plane[3].round = cpi->a_round[qindex];
+ x->plane[3].zrun_zbin_boost = cpi->zrun_zbin_boost_a[qindex];
+ x->plane[3].zbin_extra = (int16_t)zbin_extra;
+ x->e_mbd.plane[3].dequant = cpi->common.a_dequant[qindex];
+#endif
+
x->skip_block = vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
/* save this macroblock QIndex for vp9_update_zbin_extra() */
dst[1].buf = src->u_buffer;
dst[2].buf = src->v_buffer;
dst[1].stride = dst[2].stride = src->uv_stride;
+#if CONFIG_ALPHA
+ dst[3].buf = src->alpha_buffer;
+ dst[3].stride = src->alpha_stride;
+#endif
// TODO(jkoleszar): Make scale factors per-plane data
for (i = 0; i < MAX_MB_PLANE; i++) {
DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 + 8 * 8 + 8 * 8);
// Save input state
- uint8_t *y_buffer = mbd->plane[0].pre[0].buf;
- uint8_t *u_buffer = mbd->plane[1].pre[0].buf;
- uint8_t *v_buffer = mbd->plane[2].pre[0].buf;
+ uint8_t* input_buffer[MAX_MB_PLANE];
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ input_buffer[i] = mbd->plane[i].pre[0].buf;
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
#if ALT_REF_MC_ENABLED
}
// Restore input state
- mbd->plane[0].pre[0].buf = y_buffer;
- mbd->plane[1].pre[0].buf = u_buffer;
- mbd->plane[2].pre[0].buf = v_buffer;
+ for (i = 0; i < MAX_MB_PLANE; i++)
+ mbd->plane[i].pre[0].buf = input_buffer[i];
}
void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
yv12->clrtype = REG_YUV;
+#if CONFIG_ALPHA
+ // For development purposes, force alpha to hold the same data as Y for now.
+ yv12->alpha_buffer = yv12->y_buffer;
+ yv12->alpha_width = yv12->y_width;
+ yv12->alpha_height = yv12->y_height;
+ yv12->alpha_stride = yv12->y_stride;
+#endif
return VPX_CODEC_OK;
}
const int uv_border_w = border >> ss_x;
const int uv_border_h = border >> ss_y;
const int uvplane_size = (uv_height + 2 * uv_border_h) * uv_stride;
+#if CONFIG_ALPHA
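+ // The alpha plane reuses the Y plane's dimensions and stride for now.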
+ const int alpha_width = aligned_width;
+ const int alpha_height = aligned_height;
+ const int alpha_stride = y_stride;
+ const int alpha_border_w = border;
+ const int alpha_border_h = border;
+ const int alpha_plane_size = (alpha_height + 2 * alpha_border_h) *
+ alpha_stride;
+ const int frame_size = yplane_size + 2 * uvplane_size +
+ alpha_plane_size;
+#else
const int frame_size = yplane_size + 2 * uvplane_size;
-
+#endif
if (!ybf->buffer_alloc) {
ybf->buffer_alloc = vpx_memalign(32, frame_size);
ybf->buffer_alloc_sz = frame_size;
ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size +
(uv_border_h * uv_stride) + uv_border_w;
+#if CONFIG_ALPHA
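+ // The alpha plane is laid out after Y and both chroma planes within the
+ // single allocation.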
+ ybf->alpha_width = alpha_width;
+ ybf->alpha_height = alpha_height;
+ ybf->alpha_stride = alpha_stride;
+ ybf->alpha_buffer = ybf->buffer_alloc + yplane_size + 2 * uvplane_size +
+ (alpha_border_h * alpha_stride) + alpha_border_w;
+#endif
ybf->corrupted = 0; /* assume not corrupted by errors */
return 0;
}