} while (--b_h);
}
+#if CONFIG_VP9_HIGHBITDEPTH
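+// 16-bit counterpart of build_mc_border() above: fills the b_w x b_h block
+// at |dst| from the w x h reference frame, replicating the nearest edge
+// pixels wherever the block at (x, y) extends beyond the frame.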
+static void high_build_mc_border(const uint8_t *src8, int src_stride,
+ uint16_t *dst, int dst_stride,
+ int x, int y, int b_w, int b_h,
+ int w, int h) {
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  // Get a pointer to the start of the real data for this row.
+  const uint16_t *ref_row = src - x - y * src_stride;
+
+ if (y >= h)
+ ref_row += (h - 1) * src_stride;
+ else if (y > 0)
+ ref_row += y * src_stride;
+
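+  // Fill one row of |dst| per iteration: |left| pixels replicated from the
+  // left frame edge, |copy| pixels copied from the source row, and |right|
+  // pixels replicated from the right frame edge.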
+ do {
+ int right = 0, copy;
+ int left = x < 0 ? -x : 0;
+
+ if (left > b_w)
+ left = b_w;
+
+ if (x + b_w > w)
+ right = x + b_w - w;
+
+ if (right > b_w)
+ right = b_w;
+
+ copy = b_w - left - right;
+
+ if (left)
+ vpx_memset16(dst, ref_row[0], left);
+
+ if (copy)
+ memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
+
+ if (right)
+ vpx_memset16(dst + left + copy, ref_row[w - 1], right);
+
+ dst += dst_stride;
+ ++y;
+
+ if (y > 0 && y < h)
+ ref_row += src_stride;
+ } while (--b_h);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
static void inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int subpel_x,
sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
}
+#if CONFIG_VP9_HIGHBITDEPTH
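+// High-bitdepth counterpart of inter_predictor(): looks up the convolve
+// function for the sub-pel offsets and reference index, and passes the bit
+// depth through to it.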
+static void high_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x,
+ const int subpel_y,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ int xs, int ys, int bd) {
+ sf->high_predict[subpel_x != 0][subpel_y != 0][ref](
+ src, src_stride, dst, dst_stride,
+ kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd);
+}
+
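+// High-bitdepth counterpart of vp9_build_inter_predictor(): converts the MV
+// to q4 precision, applies the scale factors, and runs the sub-pel filter.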
+void vp9_high_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const MV *src_mv,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ enum mv_precision precision,
+ int x, int y, int bd) {
+ const int is_q4 = precision == MV_PRECISION_Q4;
+ const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
+ is_q4 ? src_mv->col : src_mv->col * 2 };
+ MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
+ const int subpel_x = mv.col & SUBPEL_MASK;
+ const int subpel_y = mv.row & SUBPEL_MASK;
+
+ src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+
+ high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
+ sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
static INLINE int round_mv_comp_q4(int value) {
return (value < 0 ? value - 2 : value + 2) / 4;
}
return res;
}
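+// Like round_mv_comp_q4() above, but rounds the sum of two (rather than
+// four) sub-block MV components.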
+static INLINE int round_mv_comp_q2(int value) {
+ return (value < 0 ? value - 1 : value + 1) / 2;
+}
+
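+// Averages the MVs of the two given 4x4 sub-blocks; used below for planes
+// that are subsampled in only one dimension.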
+static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) {
+ MV res = { round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.row +
+ mi->bmi[block1].as_mv[idx].as_mv.row),
+ round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.col +
+ mi->bmi[block1].as_mv[idx].as_mv.col) };
+ return res;
+}
+
// TODO(jkoleszar): yet another mv clamping function :-(
MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
int bw, int bh, int ss_x, int ss_y) {
return clamped_mv;
}
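+// Returns the MV to use for a 4x4 sub-block of a sub-8x8 partition on the
+// given plane. Subsampled (chroma) planes average the luma sub-block MVs
+// they cover:
+//   ss_idx 0: no subsampling, use the sub-block's own MV
+//   ss_idx 1: subsampled in y only, average |block| and the block below it
+//   ss_idx 2: subsampled in x only, average |block| and the block right of it
+//   ss_idx 3: 4:2:0, average all four sub-block MVs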
+static MV average_split_mvs(const struct macroblockd_plane *pd,
+ const MODE_INFO *mi, int ref, int block) {
+ const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
+ MV res = {0, 0};
+ switch (ss_idx) {
+ case 0:
+ res = mi->bmi[block].as_mv[ref].as_mv;
+ break;
+ case 1:
+ res = mi_mv_pred_q2(mi, ref, block, block + 2);
+ break;
+ case 2:
+ res = mi_mv_pred_q2(mi, ref, block, block + 1);
+ break;
+ case 3:
+ res = mi_mv_pred_q4(mi, ref);
+ break;
+ default:
+      assert(ss_idx <= 3 && ss_idx >= 0);
+ }
+ return res;
+}
+
static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
int bw, int bh,
int x, int y, int w, int h,
int mi_x, int mi_y) {
struct macroblockd_plane *const pd = &xd->plane[plane];
- const MODE_INFO *mi = xd->mi[0];
+ const MODE_INFO *mi = xd->mi[0].src_mi;
const int is_compound = has_second_ref(&mi->mbmi);
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
int ref;
struct buf_2d *const pre_buf = &pd->pre[ref];
struct buf_2d *const dst_buf = &pd->dst;
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
-
- // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
- // same MV (the average of the 4 luma MVs) but we could do something
- // smarter for non-4:2:0. Just punt for now, pending the changes to get
- // rid of SPLITMV mode entirely.
const MV mv = mi->mbmi.sb_type < BLOCK_8X8
- ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
- : mi_mv_pred_q4(mi, ref))
+ ? average_split_mvs(pd, mi, ref, block)
: mi->mbmi.mv[ref].as_mv;
// TODO(jkoleszar): This clamping is done in the incorrect place for the
pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride
+ (scaled_mv.col >> SUBPEL_BITS);
+#if CONFIG_VP9_HIGHBITDEPTH
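+  // High-bitdepth frames store 16-bit samples; pick the predictor that
+  // matches the buffer's bit depth.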
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+ subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys,
+ xd->bd);
+ } else {
+ inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+ subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
+ }
+#else
inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
const int bw = 4 * num_4x4_w;
const int bh = 4 * num_4x4_h;
- if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
+ if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
int i = 0, x, y;
assert(bsize == BLOCK_8X8);
for (y = 0; y < num_4x4_h; ++y)
int x, int y, int w, int h,
int mi_x, int mi_y) {
struct macroblockd_plane *const pd = &xd->plane[plane];
- const MODE_INFO *mi = xd->mi[0];
+ const MODE_INFO *mi = xd->mi[0].src_mi;
const int is_compound = has_second_ref(&mi->mbmi);
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
int ref;
struct buf_2d *const pre_buf = &pd->pre[ref];
struct buf_2d *const dst_buf = &pd->dst;
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
-
- // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
- // same MV (the average of the 4 luma MVs) but we could do something
- // smarter for non-4:2:0. Just punt for now, pending the changes to get
- // rid of SPLITMV mode entirely.
const MV mv = mi->mbmi.sb_type < BLOCK_8X8
- ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
- : mi_mv_pred_q4(mi, ref))
+ ? average_split_mvs(pd, mi, ref, block)
: mi->mbmi.mv[ref].as_mv;
+
// TODO(jkoleszar): This clamping is done in the incorrect place for the
// scaling case. It needs to be done on the scaled MV, not the pre-scaling
// MV. Note however that it performs the subsampling aware scaling so
}
// Skip border extension if block is inside the frame.
- if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width ||
+ if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
// Extend the border.
- build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0 + 1,
- x0, y0, x1 - x0 + 1, y1 - y0 + 1, frame_width,
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      high_build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf_high,
+                           x1 - x0 + 1, x0, y0, x1 - x0 + 1, y1 - y0 + 1,
+                           frame_width, frame_height);
+      buf_stride = x1 - x0 + 1;
+      buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
+                y_pad * 3 * buf_stride + x_pad * 3;
+    } else {
+      build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0 + 1,
+                      x0, y0, x1 - x0 + 1, y1 - y0 + 1, frame_width,
+                      frame_height);
+      buf_stride = x1 - x0 + 1;
+      buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
+    }
+#else
+    build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0 + 1,
+                    x0, y0, x1 - x0 + 1, y1 - y0 + 1, frame_width,
frame_height);
buf_stride = x1 - x0 + 1;
buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
+ } else {
+ inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys);
+ }
+#else
inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
subpel_y, sf, w, h, ref, kernel, xs, ys);
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
const int bw = 4 * num_4x4_w;
const int bh = 4 * num_4x4_h;
- if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
+ if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
int i = 0, x, y;
assert(bsize == BLOCK_8X8);
for (y = 0; y < num_4x4_h; ++y)
}
}
-void vp9_setup_dst_planes(MACROBLOCKD *xd,
+void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
int i;
for (i = 0; i < MAX_MB_PLANE; ++i) {
- struct macroblockd_plane *const pd = &xd->plane[i];
+ struct macroblockd_plane *const pd = &planes[i];
setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL,
pd->subsampling_x, pd->subsampling_y);
}