/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>

#include "./vp9_rtcd.h"

#include "vpx_mem/vpx_mem.h"

#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"

#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"

typedef struct {
  uint8_t *data;
  int stride;
  int in_use;
} PRED_BUFFER;

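// Real-time (non-RD) variant of vp9_find_mv_refs(): scan the spatial
// neighbors only, fill mv_ref_list for the given reference frame, update the
// mode context, and return a constant-motion flag the caller uses to skip
// the NEARMV/ZEROMV checks.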
static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      const TileInfo *const tile,
                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                      int_mv *mv_ref_list,
                      int mi_row, int mi_col) {
  const int *ref_sign_bias = cm->ref_frame_sign_bias;
  int i, refmv_count = 0;

  const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];

  int different_ref_found = 0;
  int context_counter = 0;
  int const_motion = 0;

  // Blank the reference vector list
  vpx_memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);

  // The nearest 2 blocks are treated differently
  // if the size < 8x8 we get the mv from the bmi substructure,
  // and we also need to keep a mode count.
  for (i = 0; i < 2; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
                                                   xd->mi_stride].src_mi;
      const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
      // Keep counts for entropy encoding.
      context_counter += mode_2_counter[candidate->mode];
      different_ref_found = 1;

      if (candidate->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, -1));
    }
  }

  const_motion = 1;

  // Check the rest of the neighbors in much the same way
  // as before except we don't need to keep track of sub blocks or
  // mode counts.
  for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *
                                                    xd->mi_stride].src_mi->mbmi;
      different_ref_found = 1;

      if (candidate->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(candidate->mv[0]);
    }
  }

  // Since we couldn't find 2 mvs from the same reference frame
  // go back through the neighbors and find motion vectors from
  // different reference frames.
  if (different_ref_found && !refmv_count) {
    for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
      const POSITION *mv_ref = &mv_ref_search[i];
      if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
        const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row
                                              * xd->mi_stride].src_mi->mbmi;

        // If the candidate is INTRA we don't want to consider its mv.
        IF_DIFF_REF_FRAME_ADD_MV(candidate);
      }
    }
  }

 Done:

  mi->mbmi.mode_context[ref_frame] = counter_to_context[context_counter];

  // Clamp vectors
  for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
    clamp_mv_ref(&mv_ref_list[i].as_mv, xd);

  return const_motion;
}

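// Full-pel motion search plus optional sub-pel refinement for NEWMV.
// Returns 0 (so the caller can drop NEWMV early) when this reference looks
// much worse than LAST_FRAME or when the motion vector rate alone already
// exceeds best_rd_sofar.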
static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int_mv *tmp_mv, int *rate_mv,
                                  int64_t best_rd_sofar) {
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
  const int step_param = cpi->sf.mv.fullpel_search_step_param;
  const int sadpb = x->sadperbit16;
  MV mvp_full;
  const int ref = mbmi->ref_frame[0];
  const MV ref_mv = mbmi->ref_mvs[ref][0].as_mv;
  int dis;
  int rate_mode;
  const int tmp_col_min = x->mv_col_min;
  const int tmp_col_max = x->mv_col_max;
  const int tmp_row_min = x->mv_row_min;
  const int tmp_row_max = x->mv_row_max;
  int rv = 0;
  int sad_list[5];
  const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi,
                                                                        ref);
  if (cpi->common.show_frame &&
      (x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[LAST_FRAME])
    return rv;

  if (scaled_ref_frame) {
    int i;
    // Swap out the reference frame for a version that's been scaled to
    // match the resolution of the current frame, allowing the existing
    // motion search code to be used without additional modifications.
    for (i = 0; i < MAX_MB_PLANE; i++)
      backup_yv12[i] = xd->plane[i].pre[0];
    vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
  }
  vp9_set_mv_search_range(x, &ref_mv);

  assert(x->mv_best_ref_index[ref] <= 2);
  if (x->mv_best_ref_index[ref] < 2)
    mvp_full = mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
  else
    mvp_full = x->pred_mv[ref];

  mvp_full.col >>= 3;
  mvp_full.row >>= 3;

  vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
                        cond_sad_list(cpi, sad_list),
                        &ref_mv, &tmp_mv->as_mv, INT_MAX, 0);

  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;

  // calculate the bit cost on motion vector
  mvp_full.row = tmp_mv->as_mv.row * 8;
  mvp_full.col = tmp_mv->as_mv.col * 8;

  *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv,
                             x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);

  rate_mode = cpi->inter_mode_cost[mbmi->mode_context[ref]]
                                  [INTER_OFFSET(NEWMV)];
  rv = !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) >
         best_rd_sofar);

  if (rv) {
    cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
                                 cpi->common.allow_high_precision_mv,
                                 x->errorperbit,
                                 &cpi->fn_ptr[bsize],
                                 cpi->sf.mv.subpel_force_stop,
                                 cpi->sf.mv.subpel_iters_per_step,
                                 cond_sad_list(cpi, sad_list),
                                 x->nmvjointcost, x->mvcost,
                                 &dis, &x->pred_sse[ref], NULL, 0, 0);
    x->pred_mv[ref] = tmp_mv->as_mv;
  }

  if (scaled_ref_frame) {
    int i;
    for (i = 0; i < MAX_MB_PLANE; i++)
      xd->plane[i].pre[0] = backup_yv12[i];
  }
  return rv;
}

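// Model the rate/distortion of the luma residual without a transform loop:
// feed the prediction error variance through the Laplacian model
// (vp9_model_rd_from_var_lapndz) for the DC and AC parts separately, and set
// the transform size and skip_txfm hint as side effects.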
static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
                              MACROBLOCK *x, MACROBLOCKD *xd,
                              int *out_rate_sum, int64_t *out_dist_sum,
                              unsigned int *var_y, unsigned int *sse_y) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const uint32_t dc_quant = pd->dequant[0];
  const uint32_t ac_quant = pd->dequant[1];
  unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
                                           pd->dst.buf, pd->dst.stride, &sse);
  *var_y = var;
  *sse_y = sse;

  if (sse < dc_quant * dc_quant >> 6)
    x->skip_txfm[0] = 1;
  else if (var < ac_quant * ac_quant >> 6)
    x->skip_txfm[0] = 2;
  else
    x->skip_txfm[0] = 0;

  if (cpi->common.tx_mode == TX_MODE_SELECT) {
    if (sse > (var << 2))
      xd->mi[0].src_mi->mbmi.tx_size =
          MIN(max_txsize_lookup[bsize],
              tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    else
      xd->mi[0].src_mi->mbmi.tx_size = TX_8X8;
  } else {
    xd->mi[0].src_mi->mbmi.tx_size =
        MIN(max_txsize_lookup[bsize],
            tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  }

  vp9_model_rd_from_var_lapndz(sse - var, 1 << num_pels_log2_lookup[bsize],
                               dc_quant >> 3, &rate, &dist);
  *out_rate_sum = rate >> 1;
  *out_dist_sum = dist << 3;

  vp9_model_rd_from_var_lapndz(var, 1 << num_pels_log2_lookup[bsize],
                               ac_quant >> 3, &rate, &dist);
  *out_rate_sum += rate;
  *out_dist_sum += dist << 4;
}

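// Minimal free-list over the small pool of prediction buffers used when
// cpi->sf.reuse_inter_pred_sby is enabled.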
static int get_pred_buffer(PRED_BUFFER *p, int len) {
  int i;

  for (i = 0; i < len; i++) {
    if (!p[i].in_use) {
      p[i].in_use = 1;
      return i;
    }
  }
  return -1;
}

static void free_pred_buffer(PRED_BUFFER *p) {
  if (p != NULL)
    p->in_use = 0;
}

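// Encode breakout: if the prediction error for all three planes is already
// below thresholds derived from the dequant values, mark the block as
// skippable and charge only the mode signalling cost.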
static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x,
                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
                                 MV_REFERENCE_FRAME ref_frame,
                                 PREDICTION_MODE this_mode,
                                 unsigned int var_y, unsigned int sse_y,
                                 struct buf_2d yv12_mb[][MAX_MB_PLANE],
                                 int *rate, int64_t *dist) {
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;

  const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
  unsigned int var = var_y, sse = sse_y;
  // Skipping threshold for ac.
  unsigned int thresh_ac;
  // Skipping threshold for dc.
  unsigned int thresh_dc;
  if (x->encode_breakout > 0) {
    // Set a maximum for threshold to avoid big PSNR loss in low bit rate
    // case. Use extreme low threshold for static frames to limit
    // skipping.
    const unsigned int max_thresh = 36000;
    // The encode_breakout input
    const unsigned int min_thresh =
        MIN(((unsigned int)x->encode_breakout << 4), max_thresh);

    // Calculate threshold according to dequant value.
    thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) / 9;
    thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);

    // Adjust ac threshold according to partition size.
    thresh_ac >>=
        8 - (b_width_log2(bsize) + b_height_log2(bsize));

    thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
  } else {
    thresh_ac = 0;
    thresh_dc = 0;
  }

  // Y skipping condition checking for ac and dc.
  if (var <= thresh_ac && (sse - var) <= thresh_dc) {
    unsigned int sse_u, sse_v;
    unsigned int var_u, var_v;

    // Skip UV prediction unless breakout is zero (lossless) to save
    // computation with low impact on the result
    if (x->encode_breakout == 0) {
      xd->plane[1].pre[0] = yv12_mb[ref_frame][1];
      xd->plane[2].pre[0] = yv12_mb[ref_frame][2];
      vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
    }

    var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf,
                                    x->plane[1].src.stride,
                                    xd->plane[1].dst.buf,
                                    xd->plane[1].dst.stride, &sse_u);

    // U skipping condition checking
    if ((var_u * 4 <= thresh_ac) && (sse_u - var_u <= thresh_dc)) {
      var_v = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf,
                                      x->plane[2].src.stride,
                                      xd->plane[2].dst.buf,
                                      xd->plane[2].dst.stride, &sse_v);

      // V skipping condition checking
      if ((var_v * 4 <= thresh_ac) && (sse_v - var_v <= thresh_dc)) {
        x->skip = 1;

        // The cost of skip bit needs to be added.
        *rate = cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
                                    [INTER_OFFSET(this_mode)];

        // More on this part of rate
        // rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);

        // Scaling factor for SSE from spatial domain to frequency
        // domain is 16. Adjust distortion accordingly.
        // TODO(yunqingwang): In this function, only y-plane dist is
        // calculated.
        *dist = (sse << 4);  // + ((sse_u + sse_v) << 4);

        // *disable_skip = 1;
      }
    }
  }
}

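// Per-transform-block worker for the intra estimate at the end of
// vp9_pick_inter_mode(): called through
// vp9_foreach_transformed_block_in_plane(), it predicts one block and
// accumulates the modeled rate/distortion in the args struct.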
struct estimate_block_intra_args {
  VP9_COMP *cpi;
  MACROBLOCK *x;
  PREDICTION_MODE mode;
  int rate;
  int64_t dist;
};

static void estimate_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                                 TX_SIZE tx_size, void *arg) {
  struct estimate_block_intra_args* const args = arg;
  VP9_COMP *const cpi = args->cpi;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
  uint8_t *const src_buf_base = p->src.buf;
  uint8_t *const dst_buf_base = pd->dst.buf;
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  int i, j;
  int rate;
  int64_t dist;
  unsigned int var_y, sse_y;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);

  assert(plane == 0);
  (void) plane;

  p->src.buf = &src_buf_base[4 * (j * src_stride + i)];
  pd->dst.buf = &dst_buf_base[4 * (j * dst_stride + i)];
  // Use source buffer as an approximation for the fully reconstructed buffer.
  vp9_predict_intra_block(xd, block >> (2 * tx_size),
                          b_width_log2(plane_bsize),
                          tx_size, args->mode,
                          p->src.buf, src_stride,
                          pd->dst.buf, dst_stride,
                          0, 0, 0);
  // This procedure assumes zero offset from p->src.buf and pd->dst.buf.
  model_rd_for_sb_y(cpi, bsize_tx, x, xd, &rate, &dist, &var_y, &sse_y);
  p->src.buf = src_buf_base;
  pd->dst.buf = dst_buf_base;
  args->rate += rate;
  args->dist += dist;
}

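// Maps (reference frame, inter mode) to the THR_MODES index used to look up
// the per-mode rd thresholds in cpi->rd.threshes.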
static const THR_MODES mode_idx[MAX_REF_FRAMES - 1][INTER_MODES] = {
  {THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV},
  {THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG},
  {THR_NEARESTA, THR_NEARA, THR_ZEROA, THR_NEWA},
};

// TODO(jingning) placeholder for inter-frame non-RD mode decision.
// this needs various further optimizations. to be continued..
void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
                         const TileInfo *const tile,
                         int mi_row, int mi_col,
                         int *returnrate,
                         int64_t *returndistortion,
                         BLOCK_SIZE bsize,
                         PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  struct macroblockd_plane *const pd = &xd->plane[0];
  PREDICTION_MODE best_mode = ZEROMV;
  MV_REFERENCE_FRAME ref_frame, best_ref_frame = LAST_FRAME;
  TX_SIZE best_tx_size = MIN(max_txsize_lookup[bsize],
                             tx_mode_to_biggest_tx_size[cm->tx_mode]);
  INTERP_FILTER best_pred_filter = EIGHTTAP;
  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  int64_t best_rd = INT64_MAX;
  int64_t this_rd = INT64_MAX;
  uint8_t skip_txfm = 0;
  int rate = INT_MAX;
  int64_t dist = INT64_MAX;
  // var_y and sse_y are saved to be used in skipping checking
  unsigned int var_y = UINT_MAX;
  unsigned int sse_y = UINT_MAX;

  const int intra_cost_penalty =
      20 * vp9_dc_quant(cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
  const int64_t inter_mode_thresh = RDCOST(x->rdmult, x->rddiv,
                                           intra_cost_penalty, 0);
  const int intra_mode_cost = 50;

  const int8_t segment_id = mbmi->segment_id;
  const int *const rd_threshes = cpi->rd.threshes[segment_id][bsize];
  const int *const rd_thresh_freq_fact = cpi->rd.thresh_freq_fact[bsize];
  INTERP_FILTER filter_ref = cm->interp_filter;
  const int bsl = mi_width_log2(bsize);
  const int pred_filter_search = cm->interp_filter == SWITCHABLE ?
      (((mi_row + mi_col) >> bsl) +
       get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
  int const_motion[MAX_REF_FRAMES] = { 0 };
  const int bh = num_4x4_blocks_high_lookup[bsize] << 2;
  const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
  // For speed 6, the result of interp filter is reused later in actual encoding
  // process.
  // tmp[3] points to dst buffer, and the other 3 point to allocated buffers.
  PRED_BUFFER tmp[4];
  DECLARE_ALIGNED_ARRAY(16, uint8_t, pred_buf, 3 * 64 * 64);
  struct buf_2d orig_dst = pd->dst;
  PRED_BUFFER *best_pred = NULL;
  PRED_BUFFER *this_mode_pred = NULL;

  if (cpi->sf.reuse_inter_pred_sby) {
    int i;
    for (i = 0; i < 3; i++) {
      tmp[i].data = &pred_buf[bw * bh * i];
      tmp[i].stride = bw;
      tmp[i].in_use = 0;
    }
    tmp[3].data = pd->dst.buf;
    tmp[3].stride = pd->dst.stride;
    tmp[3].in_use = 0;
  }

  x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  x->skip = 0;

  // initialize mode decisions
  *returnrate = INT_MAX;
  *returndistortion = INT64_MAX;
  vpx_memset(mbmi, 0, sizeof(MB_MODE_INFO));
  mbmi->sb_type = bsize;
  mbmi->ref_frame[0] = NONE;
  mbmi->ref_frame[1] = NONE;
  mbmi->tx_size = MIN(max_txsize_lookup[bsize],
                      tx_mode_to_biggest_tx_size[cm->tx_mode]);
  mbmi->interp_filter = cm->interp_filter == SWITCHABLE ?
                        EIGHTTAP : cm->interp_filter;
  mbmi->segment_id = segment_id;

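  // For each available reference frame: set up the prediction block and MV
  // candidates, then evaluate NEARESTMV .. NEWMV and keep the mode with the
  // lowest modeled rd cost.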
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    PREDICTION_MODE this_mode;
    x->pred_mv_sad[ref_frame] = INT_MAX;
    frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
    frame_mv[ZEROMV][ref_frame].as_int = 0;

    if (xd->up_available)
      filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
    else if (xd->left_available)
      filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;

    if (cpi->ref_frame_flags & flag_list[ref_frame]) {
      const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
      int_mv *const candidates = mbmi->ref_mvs[ref_frame];
      const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
      vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
                           sf, sf);

      if (!cm->error_resilient_mode)
        vp9_find_mv_refs(cm, xd, tile, xd->mi[0].src_mi, ref_frame,
                         candidates, mi_row, mi_col);
      else
        const_motion[ref_frame] = mv_refs_rt(cm, xd, tile, xd->mi[0].src_mi,
                                             ref_frame, candidates,
                                             mi_row, mi_col);

      vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                            &frame_mv[NEARESTMV][ref_frame],
                            &frame_mv[NEARMV][ref_frame]);

      if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8)
        vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
                    ref_frame, bsize);
    } else {
      continue;
    }

    // Select prediction reference frames.
    xd->plane[0].pre[0] = yv12_mb[ref_frame][0];

    clamp_mv2(&frame_mv[NEARESTMV][ref_frame].as_mv, xd);
    clamp_mv2(&frame_mv[NEARMV][ref_frame].as_mv, xd);

    mbmi->ref_frame[0] = ref_frame;

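    // Mode loop: cheap pruning first (constant-motion flag, mode mask, rd
    // threshold), then build the prediction, model its rd cost, and compare
    // against the running best.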
    for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
      int rate_mv = 0;
      int mode_rd_thresh;

      if (const_motion[ref_frame] &&
          (this_mode == NEARMV || this_mode == ZEROMV))
        continue;

      if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode)))
        continue;

      mode_rd_thresh =
          rd_threshes[mode_idx[ref_frame -
                               LAST_FRAME][INTER_OFFSET(this_mode)]];
      if (rd_less_than_thresh(best_rd, mode_rd_thresh,
                              rd_thresh_freq_fact[this_mode]))
        continue;

      if (this_mode == NEWMV) {
        if (this_rd < (int64_t)(1 << num_pels_log2_lookup[bsize]))
          continue;
        if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
                                    &frame_mv[NEWMV][ref_frame],
                                    &rate_mv, best_rd))
          continue;
      }

      if (this_mode != NEARESTMV &&
          frame_mv[this_mode][ref_frame].as_int ==
              frame_mv[NEARESTMV][ref_frame].as_int)
        continue;

      mbmi->mode = this_mode;
      mbmi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;

      // Search for the best prediction filter type, when the resulting
      // motion vector is at sub-pixel accuracy level for luma component, i.e.,
      // the last three bits are all zeros.
      if (cpi->sf.reuse_inter_pred_sby) {
        if (this_mode == NEARESTMV) {
          this_mode_pred = &tmp[3];
        } else {
          this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
          pd->dst.buf = this_mode_pred->data;
          pd->dst.stride = bw;
        }
      }

      if ((this_mode == NEWMV || filter_ref == SWITCHABLE) &&
          pred_filter_search &&
          ((mbmi->mv[0].as_mv.row & 0x07) != 0 ||
           (mbmi->mv[0].as_mv.col & 0x07) != 0)) {
        int pf_rate[3];
        int64_t pf_dist[3];
        unsigned int pf_var[3];
        unsigned int pf_sse[3];
        TX_SIZE pf_tx_size[3];
        int64_t best_cost = INT64_MAX;
        INTERP_FILTER best_filter = SWITCHABLE, filter;
        PRED_BUFFER *current_pred = this_mode_pred;

        for (filter = EIGHTTAP; filter <= EIGHTTAP_SHARP; ++filter) {
          int64_t cost;
          mbmi->interp_filter = filter;
          vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
          model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[filter],
                            &pf_dist[filter], &pf_var[filter], &pf_sse[filter]);
          cost = RDCOST(x->rdmult, x->rddiv,
                        vp9_get_switchable_rate(cpi) + pf_rate[filter],
                        pf_dist[filter]);
          pf_tx_size[filter] = mbmi->tx_size;
          if (cost < best_cost) {
            best_filter = filter;
            best_cost = cost;
            skip_txfm = x->skip_txfm[0];

            if (cpi->sf.reuse_inter_pred_sby) {
              if (this_mode_pred != current_pred) {
                free_pred_buffer(this_mode_pred);
                this_mode_pred = current_pred;
              }

              if (filter < EIGHTTAP_SHARP) {
                current_pred = &tmp[get_pred_buffer(tmp, 3)];
                pd->dst.buf = current_pred->data;
                pd->dst.stride = bw;
              }
            }
          }
        }

        if (cpi->sf.reuse_inter_pred_sby && this_mode_pred != current_pred)
          free_pred_buffer(current_pred);

        mbmi->interp_filter = best_filter;
        mbmi->tx_size = pf_tx_size[mbmi->interp_filter];
        rate = pf_rate[mbmi->interp_filter];
        dist = pf_dist[mbmi->interp_filter];
        var_y = pf_var[mbmi->interp_filter];
        sse_y = pf_sse[mbmi->interp_filter];
        x->skip_txfm[0] = skip_txfm;
      } else {
        mbmi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP: filter_ref;
        vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
        model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
      }

      rate += rate_mv;
      rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
                                  [INTER_OFFSET(this_mode)];
      this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist);

      // Skipping checking: test to see if this block can be reconstructed by
      // prediction only.
      if (cpi->allow_encode_breakout) {
        encode_breakout_test(cpi, x, bsize, mi_row, mi_col, ref_frame,
                             this_mode, var_y, sse_y, yv12_mb, &rate, &dist);
        if (x->skip) {
          rate += rate_mv;
          this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
        }
      }

#if CONFIG_VP9_TEMPORAL_DENOISING
      if (cpi->oxcf.noise_sensitivity > 0) {
        vp9_denoiser_update_frame_stats(mbmi, sse_y, this_mode, ctx);
      }
#else
      (void)ctx;
#endif

      if (this_rd < best_rd || x->skip) {
        best_rd = this_rd;
        *returnrate = rate;
        *returndistortion = dist;
        best_mode = this_mode;
        best_pred_filter = mbmi->interp_filter;
        best_tx_size = mbmi->tx_size;
        best_ref_frame = ref_frame;
        skip_txfm = x->skip_txfm[0];

        if (cpi->sf.reuse_inter_pred_sby) {
          free_pred_buffer(best_pred);

          best_pred = this_mode_pred;
        }
      } else {
        if (cpi->sf.reuse_inter_pred_sby)
          free_pred_buffer(this_mode_pred);
      }

      if (x->skip)
        break;
    }
    // If the current reference frame is valid and we found a usable mode,
    // we are done.
    if (best_rd < INT64_MAX)
      break;
  }

  // If best prediction is not in dst buf, then copy the prediction block from
  // temp buf to dst buf.
  if (best_pred != NULL && cpi->sf.reuse_inter_pred_sby &&
      best_pred->data != orig_dst.buf) {
    pd->dst = orig_dst;
    vp9_convolve_copy(best_pred->data, bw, pd->dst.buf, pd->dst.stride, NULL, 0,
                      NULL, 0, bw, bh);
  }

  mbmi->mode = best_mode;
  mbmi->interp_filter = best_pred_filter;
  mbmi->tx_size = best_tx_size;
  mbmi->ref_frame[0] = best_ref_frame;
  mbmi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int;
  xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
  x->skip_txfm[0] = skip_txfm;

  // Perform intra prediction search, if the best SAD is above a certain
  // threshold.
  if (!x->skip && best_rd > inter_mode_thresh &&
      bsize <= cpi->sf.max_intra_bsize) {
    PREDICTION_MODE this_mode;
    struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 };
    const TX_SIZE intra_tx_size =
        MIN(max_txsize_lookup[bsize],
            tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);

    if (cpi->sf.reuse_inter_pred_sby) {
      pd->dst.buf = tmp[0].data;
      pd->dst.stride = bw;
    }

    for (this_mode = DC_PRED; this_mode <= DC_PRED; ++this_mode) {
      const TX_SIZE saved_tx_size = mbmi->tx_size;
      args.mode = this_mode;
      args.rate = 0;
      args.dist = 0;
      mbmi->tx_size = intra_tx_size;
      vp9_foreach_transformed_block_in_plane(xd, bsize, 0,
                                             estimate_block_intra, &args);
      mbmi->tx_size = saved_tx_size;
      rate = args.rate;
      dist = args.dist;
      rate += cpi->mbmode_cost[this_mode];
      rate += intra_cost_penalty;
      this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist);

      if (this_rd + intra_mode_cost < best_rd) {
        best_rd = this_rd;
        *returnrate = rate;
        *returndistortion = dist;
        mbmi->mode = this_mode;
        mbmi->tx_size = intra_tx_size;
        mbmi->ref_frame[0] = INTRA_FRAME;
        mbmi->uv_mode = this_mode;
        mbmi->mv[0].as_int = INVALID_MV;
      } else {
        x->skip_txfm[0] = skip_txfm;
      }
    }
    if (cpi->sf.reuse_inter_pred_sby)
      pd->dst = orig_dst;
  }
}