return val;
}
-static int_mv32 mv_q3_to_q4_with_scaling(const int_mv *src_mv,
- const struct scale_factors *scale) {
- // returns mv * scale + offset
- int_mv32 result;
- const int32_t mv_row_q4 = src_mv->as_mv.row << 1;
- const int32_t mv_col_q4 = src_mv->as_mv.col << 1;
-
- result.as_mv.row = (mv_row_q4 * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT)
- + scale->y_offset_q4;
- result.as_mv.col = (mv_col_q4 * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT)
- + scale->x_offset_q4;
- return result;
+static MV32 mv_q3_to_q4_with_scaling(const MV *mv,
+ const struct scale_factors *scale) {
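+  // The mv is in q3 (1/8-pel) units: shift left by one to get q4 (1/16-pel),
+  // apply the fixed-point frame scale factor, then add the precomputed q4 offset.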
+ const MV32 res = {
+ ((mv->row << 1) * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT)
+ + scale->y_offset_q4,
+ ((mv->col << 1) * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT)
+ + scale->x_offset_q4
+ };
+ return res;
}
-static int_mv32 mv_q3_to_q4_without_scaling(const int_mv *src_mv,
- const struct scale_factors *scale) {
- // returns mv * scale + offset
- int_mv32 result;
-
- result.as_mv.row = src_mv->as_mv.row << 1;
- result.as_mv.col = src_mv->as_mv.col << 1;
- return result;
+static MV32 mv_q3_to_q4_without_scaling(const MV *mv,
+ const struct scale_factors *scale) {
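+  // No frame scaling: only the q3 (1/8-pel) to q4 (1/16-pel) conversion is
+  // needed; the scale factors and offsets are ignored.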
+ const MV32 res = {
+ mv->row << 1,
+ mv->col << 1
+ };
+ return res;
}
-static int32_t mv_component_q4_with_scaling(int mv_q4, int scale_fp,
- int offset_q4) {
- int32_t scaled_mv;
- // returns the scaled and offset value of the mv component.
- scaled_mv = (mv_q4 * scale_fp >> VP9_REF_SCALE_SHIFT) + offset_q4;
-
- return scaled_mv;
+static MV32 mv_q4_with_scaling(const MV *mv,
+ const struct scale_factors *scale) {
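+  // The mv is already in q4 (1/16-pel) units: apply the fixed-point frame
+  // scale factor and add the precomputed q4 offset.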
+ const MV32 res = {
+ (mv->row * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT) + scale->y_offset_q4,
+ (mv->col * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT) + scale->x_offset_q4
+ };
+ return res;
}
-static int32_t mv_component_q4_without_scaling(int mv_q4, int scale_fp,
- int offset_q4) {
- // returns the scaled and offset value of the mv component.
- (void)scale_fp;
- (void)offset_q4;
- return mv_q4;
+static MV32 mv_q4_without_scaling(const MV *mv,
+ const struct scale_factors *scale) {
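+  // No frame scaling: the q4 mv is returned unchanged.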
+ const MV32 res = {
+ mv->row,
+ mv->col
+ };
+ return res;
}
static void set_offsets_with_scaling(struct scale_factors *scale,
scale->scale_value_y = unscaled_value;
scale->set_scaled_offsets = set_offsets_without_scaling;
scale->scale_mv_q3_to_q4 = mv_q3_to_q4_without_scaling;
- scale->scale_mv_component_q4 = mv_component_q4_without_scaling;
+ scale->scale_mv_q4 = mv_q4_without_scaling;
} else {
scale->scale_value_x = scale_value_x_with_scaling;
scale->scale_value_y = scale_value_y_with_scaling;
scale->set_scaled_offsets = set_offsets_with_scaling;
scale->scale_mv_q3_to_q4 = mv_q3_to_q4_with_scaling;
- scale->scale_mv_component_q4 = mv_component_q4_with_scaling;
+ scale->scale_mv_q4 = mv_q4_with_scaling;
}
// TODO(agrange): Investigate the best choice of functions to use here
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
- const int_mv *mv_q3,
+ const int_mv *src_mv,
const struct scale_factors *scale,
int w, int h, int weight,
- const struct subpix_fn_table *subpix) {
- int_mv32 mv = scale->scale_mv_q3_to_q4(mv_q3, scale);
- src += (mv.as_mv.row >> 4) * src_stride + (mv.as_mv.col >> 4);
- scale->predict[!!(mv.as_mv.col & 15)][!!(mv.as_mv.row & 15)][weight](
- src, src_stride, dst, dst_stride,
- subpix->filter_x[mv.as_mv.col & 15], scale->x_step_q4,
- subpix->filter_y[mv.as_mv.row & 15], scale->y_step_q4,
- w, h);
-}
-
-void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int_mv *mv_q4,
- const struct scale_factors *scale,
- int w, int h, int weight,
- const struct subpix_fn_table *subpix) {
- const int scaled_mv_row_q4 = scale->scale_mv_component_q4(mv_q4->as_mv.row,
- scale->y_scale_fp,
- scale->y_offset_q4);
- const int scaled_mv_col_q4 = scale->scale_mv_component_q4(mv_q4->as_mv.col,
- scale->x_scale_fp,
- scale->x_offset_q4);
- const int subpel_x = scaled_mv_col_q4 & 15;
- const int subpel_y = scaled_mv_row_q4 & 15;
-
- src += (scaled_mv_row_q4 >> 4) * src_stride + (scaled_mv_col_q4 >> 4);
+ const struct subpix_fn_table *subpix,
+ enum mv_precision precision) {
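+  // Scale the mv to q4 precision, first converting from q3 (1/8-pel) when the
+  // caller passes MV_PRECISION_Q3.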
+ const MV32 mv = precision == MV_PRECISION_Q4
+ ? scale->scale_mv_q4(&src_mv->as_mv, scale)
+ : scale->scale_mv_q3_to_q4(&src_mv->as_mv, scale);
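+  // The low 4 bits of each q4 component select the sub-pel filter phase; the
+  // remaining bits give the full-pel offset applied to the source pointer.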
+ const int subpel_x = mv.col & 15;
+ const int subpel_y = mv.row & 15;
+
+ src += (mv.row >> 4) * src_stride + (mv.col >> 4);
scale->predict[!!subpel_x][!!subpel_y][weight](
src, src_stride, dst, dst_stride,
subpix->filter_x[subpel_x], scale->x_step_q4,
xd->mb_to_bottom_edge);
scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
- vp9_build_inter_predictor_q4(pre, pre_stride,
- dst, arg->dst_stride[plane],
- &clamped_mv, &xd->scale_factor[which_mv],
- 4 << pred_w, 4 << pred_h, which_mv,
- &xd->subpix);
+ vp9_build_inter_predictor(pre, pre_stride,
+ dst, arg->dst_stride[plane],
+ &clamped_mv, &xd->scale_factor[which_mv],
+ 4 << pred_w, 4 << pred_h, which_mv,
+ &xd->subpix, MV_PRECISION_Q4);
}
}
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
xd->plane[0].dst.stride,
&xd->mode_info_context->bmi[i].as_mv[0],
&xd->scale_factor[0],
- 4 * bw, 4 * bh, 0 /* no avg */, &xd->subpix);
+ 4 * bw, 4 * bh, 0 /* no avg */, &xd->subpix,
+ MV_PRECISION_Q3);
// TODO(debargha): Make this work properly with the
// implicit-compoundinter-weight experiment when implicit
dst, xd->plane[0].dst.stride,
&xd->mode_info_context->bmi[i].as_mv[1],
&xd->scale_factor[1], 4 * bw, 4 * bh, 1,
- &xd->subpix);
+ &xd->subpix, MV_PRECISION_Q3);
}
vp9_subtract_block(4 * bh, 4 * bw, src_diff, 8,
int64_t dist;
cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
pd->dst.buf, pd->dst.stride, &sse);
+
model_rd_from_var_lapndz(sse, bw * bh, pd->dequant[1] >> 3, &rate, &dist);
rate_sum += rate;
&frame_mv[refs[!id]],
&xd->scale_factor[!id],
pw, ph, 0,
- &xd->subpix);
+ &xd->subpix, MV_PRECISION_Q3);
// Compound motion search on first ref frame.
if (id)