Refactor RD to take same codepath for single and comp pred
author: Daniel Kang <ddkang@google.com>
Thu, 9 Aug 2012 23:07:41 +0000 (16:07 -0700)
committer: Daniel Kang <ddkang@google.com>
Fri, 10 Aug 2012 16:40:10 +0000 (09:40 -0700)
Change-Id: Id38baf1b89648ef534e28be72f583137871f920c

12 files changed:
vp8/common/blockd.h
vp8/common/debugmodes.c
vp8/common/findnearmv.c
vp8/common/findnearmv.h
vp8/common/implicit_segmentation.c
vp8/common/reconinter.c
vp8/common/reconinter.h
vp8/decoder/decodemv.c
vp8/encoder/bitstream.c
vp8/encoder/encodeframe.c
vp8/encoder/onyx_if.c
vp8/encoder/rdopt.c

index a0fe46c..de4ff52 100644 (file)
@@ -266,7 +266,7 @@ typedef struct {
 #endif
   MV_REFERENCE_FRAME ref_frame, second_ref_frame;
   TX_SIZE txfm_size;
-  int_mv mv, second_mv;
+  int_mv mv[2]; // for each reference frame used
 #if CONFIG_NEWBESTREFMV
   int_mv ref_mv, second_ref_mv;
 #endif
index 32e947b..a588942 100644 (file)
@@ -108,7 +108,8 @@ void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols, int f
 
   for (mb_row = 0; mb_row < rows; mb_row++) {
     for (mb_col = 0; mb_col < cols; mb_col++) {
-      fprintf(mvs, "%5d:%-5d", mi[mb_index].mbmi.mv.as_mv.row / 2, mi[mb_index].mbmi.mv.as_mv.col / 2);
+      fprintf(mvs, "%5d:%-5d", mi[mb_index].mbmi.mv[0].as_mv.row / 2,
+          mi[mb_index].mbmi.mv[0].as_mv.col / 2);
 
       mb_index++;
     }
index 303893d..15a21b4 100644 (file)
@@ -68,9 +68,9 @@ void vp8_find_near_mvs
 
   /* Process above */
   if (above->mbmi.ref_frame != INTRA_FRAME) {
-    if (above->mbmi.mv.as_int) {
+    if (above->mbmi.mv[0].as_int) {
       ++ mv;
-      mv->as_int = above->mbmi.mv.as_int;
+      mv->as_int = above->mbmi.mv[0].as_int;
       mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame],
               refframe, mv, ref_frame_sign_bias);
 #if CONFIG_NEWBESTREFMV
@@ -83,9 +83,9 @@ void vp8_find_near_mvs
 
   /* Process left */
   if (left->mbmi.ref_frame != INTRA_FRAME) {
-    if (left->mbmi.mv.as_int) {
+    if (left->mbmi.mv[0].as_int) {
       int_mv this_mv;
-      this_mv.as_int = left->mbmi.mv.as_int;
+      this_mv.as_int = left->mbmi.mv[0].as_int;
       mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame],
               refframe, &this_mv, ref_frame_sign_bias);
 #if CONFIG_NEWBESTREFMV
@@ -103,14 +103,14 @@ void vp8_find_near_mvs
   /* Process above left or the one from last frame */
   if (aboveleft->mbmi.ref_frame != INTRA_FRAME ||
       (lf_here->mbmi.ref_frame == LAST_FRAME && refframe == LAST_FRAME)) {
-    if (aboveleft->mbmi.mv.as_int) {
+    if (aboveleft->mbmi.mv[0].as_int) {
       third = aboveleft;
 #if CONFIG_NEWBESTREFMV
       ref_mv[2].as_int = aboveleft->mbmi.mv.as_int;
       mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame],
               refframe, (ref_mv+2), ref_frame_sign_bias);
 #endif
-    } else if (lf_here->mbmi.mv.as_int) {
+    } else if (lf_here->mbmi.mv[0].as_int) {
       third = lf_here;
     }
 #if CONFIG_NEWBESTREFMV
@@ -122,7 +122,7 @@ void vp8_find_near_mvs
 #endif
     if (third) {
       int_mv this_mv;
-      this_mv.as_int = third->mbmi.mv.as_int;
+      this_mv.as_int = third->mbmi.mv[0].as_int;
       mv_bias(ref_frame_sign_bias[third->mbmi.ref_frame],
               refframe, &this_mv, ref_frame_sign_bias);
 
index 3bb2024..eff00c4 100644 (file)
@@ -96,7 +96,7 @@ static int left_block_mv(const MODE_INFO *cur_mb, int b) {
     --cur_mb;
 
     if (cur_mb->mbmi.mode != SPLITMV)
-      return cur_mb->mbmi.mv.as_int;
+      return cur_mb->mbmi.mv[0].as_int;
     b += 4;
   }
 
@@ -109,7 +109,7 @@ static int left_block_second_mv(const MODE_INFO *cur_mb, int b) {
     --cur_mb;
 
     if (cur_mb->mbmi.mode != SPLITMV)
-      return cur_mb->mbmi.second_ref_frame ? cur_mb->mbmi.second_mv.as_int : cur_mb->mbmi.mv.as_int;
+      return cur_mb->mbmi.second_ref_frame ? cur_mb->mbmi.mv[1].as_int : cur_mb->mbmi.mv[0].as_int;
     b += 4;
   }
 
@@ -122,7 +122,7 @@ static int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride) {
     cur_mb -= mi_stride;
 
     if (cur_mb->mbmi.mode != SPLITMV)
-      return cur_mb->mbmi.mv.as_int;
+      return cur_mb->mbmi.mv[0].as_int;
     b += 16;
   }
 
@@ -135,7 +135,7 @@ static int above_block_second_mv(const MODE_INFO *cur_mb, int b, int mi_stride)
     cur_mb -= mi_stride;
 
     if (cur_mb->mbmi.mode != SPLITMV)
-      return cur_mb->mbmi.second_ref_frame ? cur_mb->mbmi.second_mv.as_int : cur_mb->mbmi.mv.as_int;
+      return cur_mb->mbmi.second_ref_frame ? cur_mb->mbmi.mv[1].as_int : cur_mb->mbmi.mv[0].as_int;
     b += 16;
   }
 
index 24d4ec8..2b9b7ad 100644 (file)
@@ -139,7 +139,7 @@ void segment_via_mode_info(VP8_COMMON *oci, int how) {
           n = mi[mb_index].mbmi.mode;
           break;
         case SEGMENT_MV:
-          n = mi[mb_index].mbmi.mv.as_int;
+          n = mi[mb_index].mbmi.mv[0].as_int;
           if (mi[mb_index].mbmi.ref_frame == INTRA_FRAME)
             n = -9999999;
           break;
@@ -243,7 +243,8 @@ void segment_via_mode_info(VP8_COMMON *oci, int how) {
       printf("            ");
       for (j = 0; j < oci->mb_cols; j++, mb_index++) {
         // printf("%3d",mi[mb_index].mbmi.mode );
-        printf("%4d:%4d", mi[mb_index].mbmi.mv.as_mv.row, mi[mb_index].mbmi.mv.as_mv.col);
+        printf("%4d:%4d", mi[mb_index].mbmi.mv[0].as_mv.row,
+            mi[mb_index].mbmi.mv[0].as_mv.col);
       }
       printf("\n");
       ++mb_index;
index 2cd4507..b174d6a 100644 (file)
@@ -611,7 +611,7 @@ void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
   int pre_stride = xd->block[0].pre_stride;
   int_mv ymv;
 
-  ymv.as_int = xd->mode_info_context->mbmi.mv.as_int;
+  ymv.as_int = xd->mode_info_context->mbmi.mv[0].as_int;
 
   if (xd->mode_info_context->mbmi.need_to_clamp_mvs)
     clamp_mv_to_umv_border(&ymv.as_mv, xd);
@@ -662,7 +662,7 @@ void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *x,
   int_mv _o16x16mv;
   int_mv _16x16mv;
 
-  _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;
+  _16x16mv.as_int = x->mode_info_context->mbmi.mv[0].as_int;
 
   if (x->mode_info_context->mbmi.need_to_clamp_mvs)
     clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
@@ -767,7 +767,7 @@ void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *x,
   unsigned char *ptr_base = x->second_pre.y_buffer;
   int pre_stride = x->block[0].pre_stride;
 
-  _16x16mv.as_int = x->mode_info_context->mbmi.second_mv.as_int;
+  _16x16mv.as_int = x->mode_info_context->mbmi.mv[1].as_int;
 
   if (x->mode_info_context->mbmi.need_to_clamp_secondmv)
     clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
@@ -825,7 +825,7 @@ void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *x,
 
   int pre_stride = x->block[0].pre_stride;
 
-  _16x16mv.as_int = x->mode_info_context->mbmi.second_mv.as_int;
+  _16x16mv.as_int = x->mode_info_context->mbmi.mv[1].as_int;
 
   if (x->mode_info_context->mbmi.need_to_clamp_secondmv)
     clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
index 1542e93..2340ccd 100644 (file)
@@ -30,6 +30,13 @@ extern void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *x,
                                                    int dst_ystride,
                                                    int dst_uvstride);
 
+extern void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *x,
+                                                    unsigned char *dst_y,
+                                                    int dst_ystride);
+extern void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *x,
+                                                     unsigned char *dst_u,
+                                                     unsigned char *dst_v,
+                                                     int dst_uvstride);
 extern void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x,
                                                    unsigned char *dst_y,
                                                    unsigned char *dst_u,
index 2e0049d..f8cf837 100644 (file)
@@ -867,7 +867,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
       }
 
       mv->as_int = mi->bmi[15].as_mv.first.as_int;
-      mbmi->second_mv.as_int = mi->bmi[15].as_mv.second.as_int;
+      mbmi->mv[1].as_int = mi->bmi[15].as_mv.second.as_int;
 
       break;  /* done with SPLITMV */
 
@@ -877,8 +877,8 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
         vp8_clamp_mv(mv, mb_to_left_edge, mb_to_right_edge,
                      mb_to_top_edge, mb_to_bottom_edge);
         if (mbmi->second_ref_frame) {
-          mbmi->second_mv.as_int = nearby_second.as_int;
-          vp8_clamp_mv(&mbmi->second_mv, mb_to_left_edge, mb_to_right_edge,
+          mbmi->mv[1].as_int = nearby_second.as_int;
+          vp8_clamp_mv(&mbmi->mv[1], mb_to_left_edge, mb_to_right_edge,
                        mb_to_top_edge, mb_to_bottom_edge);
         }
         break;
@@ -889,8 +889,8 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
         vp8_clamp_mv(mv, mb_to_left_edge, mb_to_right_edge,
                      mb_to_top_edge, mb_to_bottom_edge);
         if (mbmi->second_ref_frame) {
-          mbmi->second_mv.as_int = nearest_second.as_int;
-          vp8_clamp_mv(&mbmi->second_mv, mb_to_left_edge, mb_to_right_edge,
+          mbmi->mv[1].as_int = nearest_second.as_int;
+          vp8_clamp_mv(&mbmi->mv[1], mb_to_left_edge, mb_to_right_edge,
                        mb_to_top_edge, mb_to_bottom_edge);
         }
         break;
@@ -898,7 +898,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
       case ZEROMV:
         mv->as_int = 0;
         if (mbmi->second_ref_frame)
-          mbmi->second_mv.as_int = 0;
+          mbmi->mv[1].as_int = 0;
         break;
 
       case NEWMV:
@@ -926,22 +926,20 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
                                                       mb_to_bottom_edge);
         if (mbmi->second_ref_frame) {
           if (xd->allow_high_precision_mv) {
-            read_mv_hp(bc, &mbmi->second_mv.as_mv,
-                       (const MV_CONTEXT_HP *) mvc_hp);
-            cm->fc.MVcount_hp[0][mv_max_hp + (mbmi->second_mv.as_mv.row)]++;
-            cm->fc.MVcount_hp[1][mv_max_hp + (mbmi->second_mv.as_mv.col)]++;
+            read_mv_hp(bc, &mbmi->mv[1].as_mv, (const MV_CONTEXT_HP *) mvc_hp);
+            cm->fc.MVcount_hp[0][mv_max_hp + (mbmi->mv[1].as_mv.row)]++;
+            cm->fc.MVcount_hp[1][mv_max_hp + (mbmi->mv[1].as_mv.col)]++;
           } else {
-            read_mv(bc, &mbmi->second_mv.as_mv, (const MV_CONTEXT *) mvc);
-            cm->fc.MVcount[0][mv_max + (mbmi->second_mv.as_mv.row >> 1)]++;
-            cm->fc.MVcount[1][mv_max + (mbmi->second_mv.as_mv.col >> 1)]++;
+            read_mv(bc, &mbmi->mv[1].as_mv, (const MV_CONTEXT *) mvc);
+            cm->fc.MVcount[0][mv_max + (mbmi->mv[1].as_mv.row >> 1)]++;
+            cm->fc.MVcount[1][mv_max + (mbmi->mv[1].as_mv.col >> 1)]++;
           }
-          mbmi->second_mv.as_mv.row += best_mv_second.as_mv.row;
-          mbmi->second_mv.as_mv.col += best_mv_second.as_mv.col;
-          mbmi->need_to_clamp_secondmv |= vp8_check_mv_bounds(&mbmi->second_mv,
-                                                              mb_to_left_edge,
-                                                              mb_to_right_edge,
-                                                              mb_to_top_edge,
-                                                              mb_to_bottom_edge);
+          mbmi->mv[1].as_mv.row += best_mv_second.as_mv.row;
+          mbmi->mv[1].as_mv.col += best_mv_second.as_mv.col;
+          mbmi->need_to_clamp_secondmv |=
+            vp8_check_mv_bounds(&mbmi->mv[1],
+                                mb_to_left_edge, mb_to_right_edge,
+                                mb_to_top_edge, mb_to_bottom_edge);
         }
         break;
       default:
@@ -952,7 +950,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
     }
   } else {
     /* required for left and above block mv */
-    mbmi->mv.as_int = 0;
+    mbmi->mv[0].as_int = 0;
 
     if (segfeature_active(xd, mbmi->segment_id, SEG_LVL_MODE))
       mbmi->mode = (MB_PREDICTION_MODE)
index 103391c..6a08eb8 100644 (file)
@@ -1007,20 +1007,16 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
                 active_section = 5;
 #endif
 
-                if (xd->allow_high_precision_mv) {
-                  write_mv_hp(w, &mi->mv.as_mv, &best_mv, mvc_hp);
-                } else {
-                  write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
-                }
+                if (xd->allow_high_precision_mv)
+                  write_mv_hp(w, &mi->mv[0].as_mv, &best_mv, mvc_hp);
+                else
+                  write_mv(w, &mi->mv[0].as_mv, &best_mv, mvc);
 
                 if (mi->second_ref_frame) {
-                  if (xd->allow_high_precision_mv) {
-                    write_mv_hp(w, &mi->second_mv.as_mv,
-                                &best_second_mv, mvc_hp);
-                  } else {
-                    write_mv(w, &mi->second_mv.as_mv,
-                             &best_second_mv, mvc);
-                  }
+                  if (xd->allow_high_precision_mv)
+                    write_mv_hp(w, &mi->mv[1].as_mv, &best_second_mv, mvc_hp);
+                  else
+                    write_mv(w, &mi->mv[1].as_mv, &best_second_mv, mvc);
                 }
                 break;
               case SPLITMV: {
index d7a93ac..5352de7 100644 (file)
@@ -392,8 +392,8 @@ static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
     vpx_memcpy(x->partition_info, &ctx->partition_info,
                sizeof(PARTITION_INFO));
 
-    mbmi->mv.as_int = x->partition_info->bmi[15].mv.as_int;
-    mbmi->second_mv.as_int = x->partition_info->bmi[15].second_mv.as_int;
+    mbmi->mv[0].as_int = x->partition_info->bmi[15].mv.as_int;
+    mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
   }
 
   if (cpi->common.frame_type == KEY_FRAME) {
index 854e318..80e9f49 100644 (file)
@@ -3574,7 +3574,7 @@ static void encode_frame_to_data_rate
       for (mb_row = 0; mb_row < cm->mb_rows + 1; mb_row ++) {
         for (mb_col = 0; mb_col < cm->mb_cols + 1; mb_col ++) {
           if (tmp->mbmi.ref_frame != INTRA_FRAME)
-            cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int = tmp->mbmi.mv.as_int;
+            cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int = tmp->mbmi.mv[0].as_int;
 
           cpi->lf_ref_frame_sign_bias[mb_col + mb_row * (cm->mode_info_stride + 1)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
           cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] = tmp->mbmi.ref_frame;
index 9375e13..0c16653 100644 (file)
@@ -495,8 +495,8 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd) {
 
   unsigned int sse1 = 0;
   unsigned int sse2 = 0;
-  int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row;
-  int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col;
+  int mv_row = x->e_mbd.mode_info_context->mbmi.mv[0].as_mv.row;
+  int mv_col = x->e_mbd.mode_info_context->mbmi.mv[0].as_mv.col;
   int offset;
   int pre_stride = x->e_mbd.block[16].pre_stride;
 
@@ -1571,7 +1571,7 @@ int vp8_cost_mv_ref(VP8_COMP *cpi,
 
 void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
   x->e_mbd.mode_info_context->mbmi.mode = mb;
-  x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int;
+  x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
 }
 
 static int labels2mode(MACROBLOCK *x, int const *labelings, int which_label,
@@ -2193,19 +2193,19 @@ void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
 
     // read in 3 nearby block's MVs from current frame as prediction candidates.
     if (above->mbmi.ref_frame != INTRA_FRAME) {
-      near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
+      near_mvs[vcnt].as_int = above->mbmi.mv[0].as_int;
       mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
       near_ref[vcnt] =  above->mbmi.ref_frame;
     }
     vcnt++;
     if (left->mbmi.ref_frame != INTRA_FRAME) {
-      near_mvs[vcnt].as_int = left->mbmi.mv.as_int;
+      near_mvs[vcnt].as_int = left->mbmi.mv[0].as_int;
       mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
       near_ref[vcnt] =  left->mbmi.ref_frame;
     }
     vcnt++;
     if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
-      near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int;
+      near_mvs[vcnt].as_int = aboveleft->mbmi.mv[0].as_int;
       mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
       near_ref[vcnt] =  aboveleft->mbmi.ref_frame;
     }
@@ -2381,26 +2381,26 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
     }
   } else if (mbmi->mode == NEWMV) {
     if (x->e_mbd.allow_high_precision_mv) {
-      cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv.as_mv.row
+      cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[0].as_mv.row
                                       - best_ref_mv->as_mv.row)]++;
-      cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv.as_mv.col
+      cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[0].as_mv.col
                                       - best_ref_mv->as_mv.col)]++;
       if (mbmi->second_ref_frame) {
-        cpi->MVcount_hp[0][mv_max_hp + (mbmi->second_mv.as_mv.row
+        cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[1].as_mv.row
                                         - second_best_ref_mv->as_mv.row)]++;
-        cpi->MVcount_hp[1][mv_max_hp + (mbmi->second_mv.as_mv.col
+        cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[1].as_mv.col
                                         - second_best_ref_mv->as_mv.col)]++;
       }
     } else
     {
-      cpi->MVcount[0][mv_max + ((mbmi->mv.as_mv.row
+      cpi->MVcount[0][mv_max + ((mbmi->mv[0].as_mv.row
                                  - best_ref_mv->as_mv.row) >> 1)]++;
-      cpi->MVcount[1][mv_max + ((mbmi->mv.as_mv.col
+      cpi->MVcount[1][mv_max + ((mbmi->mv[0].as_mv.col
                                  - best_ref_mv->as_mv.col) >> 1)]++;
       if (mbmi->second_ref_frame) {
-        cpi->MVcount[0][mv_max + ((mbmi->second_mv.as_mv.row
+        cpi->MVcount[0][mv_max + ((mbmi->mv[1].as_mv.row
                                    - second_best_ref_mv->as_mv.row) >> 1)]++;
-        cpi->MVcount[1][mv_max + ((mbmi->second_mv.as_mv.col
+        cpi->MVcount[1][mv_max + ((mbmi->mv[1].as_mv.col
                                    - second_best_ref_mv->as_mv.col) >> 1)]++;
       }
     }
@@ -2669,7 +2669,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
   MB_MODE_INFO best_mbmode;
   PARTITION_INFO best_partition;
   int_mv best_ref_mv, second_best_ref_mv;
-  int_mv mode_mv[MB_MODE_COUNT];
   MB_PREDICTION_MODE this_mode;
   MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
   int i, best_mode_index = 0;
@@ -2713,22 +2712,21 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
   int saddone = 0;
   int sr = 0;  // search range got from mv_pred(). It uses step_param levels. (0-7)
 
-  int_mv frame_nearest_mv[4];
-  int_mv frame_near_mv[4];
+  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
   int_mv frame_best_ref_mv[4];
-  int_mv mc_search_result[4];
   int frame_mdcounts[4][4];
   unsigned char *y_buffer[4], *u_buffer[4], *v_buffer[4];
 
   unsigned int ref_costs[MAX_REF_FRAMES];
   int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1];
 
+  vpx_memset(&frame_mv, 0, sizeof(frame_mv));
   vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
   vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
   vpx_memset(&x->mb_context[xd->mb_index], 0, sizeof(PICK_MODE_CONTEXT));
 
-  for (i = 0; i < 4; i++)
-    mc_search_result[i].as_int = INVALID_MV;
+  for (i = 0; i < MAX_REF_FRAMES; i++)
+    frame_mv[NEWMV][i].as_int = INVALID_MV;
 
   for (i = 0; i < BLOCK_MAX_SEGMENTS - 1; i++) {
     int j, k;
@@ -2740,8 +2738,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
 
   if (cpi->ref_frame_flags & VP8_LAST_FLAG) {
     setup_buffer_inter(cpi, x, cpi->common.lst_fb_idx, LAST_FRAME,
-                       recon_yoffset, recon_uvoffset, frame_nearest_mv,
-                       frame_near_mv, frame_best_ref_mv,
+                       recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
+                       frame_mv[NEARMV], frame_best_ref_mv,
 #if CONFIG_NEWBESTREFMV
                        ref_mv,
 #endif
@@ -2750,8 +2748,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
 
   if (cpi->ref_frame_flags & VP8_GOLD_FLAG) {
     setup_buffer_inter(cpi, x, cpi->common.gld_fb_idx, GOLDEN_FRAME,
-                       recon_yoffset, recon_uvoffset, frame_nearest_mv,
-                       frame_near_mv, frame_best_ref_mv,
+                       recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
+                       frame_mv[NEARMV], frame_best_ref_mv,
 #if CONFIG_NEWBESTREFMV
                        ref_mv,
 #endif
@@ -2760,8 +2758,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
 
   if (cpi->ref_frame_flags & VP8_ALT_FLAG) {
     setup_buffer_inter(cpi, x, cpi->common.alt_fb_idx, ALTREF_FRAME,
-                       recon_yoffset, recon_uvoffset, frame_nearest_mv,
-                       frame_near_mv, frame_best_ref_mv,
+                       recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
+                       frame_mv[NEARMV], frame_best_ref_mv,
 #if CONFIG_NEWBESTREFMV
                        ref_mv,
 #endif
@@ -2772,8 +2770,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
 
   x->skip = 0;
 
-  vpx_memset(mode_mv, 0, sizeof(mode_mv));
-
   mbmi->ref_frame = INTRA_FRAME;
 
   /* Initialize zbin mode boost for uv costing */
@@ -2893,8 +2889,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
       x->e_mbd.pre.y_buffer = y_buffer[ref];
       x->e_mbd.pre.u_buffer = u_buffer[ref];
       x->e_mbd.pre.v_buffer = v_buffer[ref];
-      mode_mv[NEARESTMV] = frame_nearest_mv[ref];
-      mode_mv[NEARMV] = frame_near_mv[ref];
       best_ref_mv = frame_best_ref_mv[ref];
       vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
     }
@@ -3086,237 +3080,192 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
         vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), is_comp_pred);
       mbmi->mode = this_mode;
     }
-    // Single prediction inter
-    else if (!mbmi->second_ref_frame) {
+    else {
+      const int is_comp_pred = x->e_mbd.mode_info_context->mbmi.second_ref_frame != 0;
+      const int num_refs = is_comp_pred ? 2 : 1;
+      int flag;
+      int refs[2] = {x->e_mbd.mode_info_context->mbmi.ref_frame,
+                     x->e_mbd.mode_info_context->mbmi.second_ref_frame};
+      int_mv cur_mv[2];
       switch (this_mode) {
-        case NEWMV: {
-          int bestsme = INT_MAX;
-          int further_steps, step_param = cpi->sf.first_step;
-          int sadpb = x->sadperbit16;
-          int_mv mvp_full;
+        case NEWMV:
+          if (is_comp_pred) {
+            if (frame_mv[NEWMV][refs[0]].as_int == INVALID_MV ||
+                frame_mv[NEWMV][refs[1]].as_int == INVALID_MV)
+              continue;
+            rate2 += vp8_mv_bit_cost(&frame_mv[NEWMV][refs[0]],
+                                     &frame_best_ref_mv[refs[0]],
+                                     XMVCOST, 96,
+                                     x->e_mbd.allow_high_precision_mv);
+            rate2 += vp8_mv_bit_cost(&frame_mv[NEWMV][refs[1]],
+                                     &frame_best_ref_mv[refs[1]],
+                                     XMVCOST, 96,
+                                     x->e_mbd.allow_high_precision_mv);
+          } else {
+            int bestsme = INT_MAX;
+            int further_steps, step_param = cpi->sf.first_step;
+            int sadpb = x->sadperbit16;
+            int_mv mvp_full, tmp_mv;
 
-          int tmp_col_min = x->mv_col_min;
-          int tmp_col_max = x->mv_col_max;
-          int tmp_row_min = x->mv_row_min;
-          int tmp_row_max = x->mv_row_max;
+            int tmp_col_min = x->mv_col_min;
+            int tmp_col_max = x->mv_col_max;
+            int tmp_row_min = x->mv_row_min;
+            int tmp_row_max = x->mv_row_max;
 
-          vp8_clamp_mv_min_max(x, &best_ref_mv);
+            vp8_clamp_mv_min_max(x, &best_ref_mv);
 
-          if (!saddone) {
-            vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
-            saddone = 1;
-          }
+            if (!saddone) {
+              vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
+              saddone = 1;
+            }
 
-          vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
-                      mbmi->ref_frame, cpi->common.ref_frame_sign_bias,
-                      &sr, &near_sadidx[0]);
+            vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
+                        mbmi->ref_frame, cpi->common.ref_frame_sign_bias,
+                        &sr, &near_sadidx[0]);
 
-          mvp_full.as_mv.col = mvp.as_mv.col >> 3;
-          mvp_full.as_mv.row = mvp.as_mv.row >> 3;
+            mvp_full.as_mv.col = mvp.as_mv.col >> 3;
+            mvp_full.as_mv.row = mvp.as_mv.row >> 3;
 
-          // adjust search range according to sr from mv prediction
-          step_param = MAX(step_param, sr);
+            // adjust search range according to sr from mv prediction
+            step_param = MAX(step_param, sr);
 
-          // Further step/diamond searches as necessary
-          further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
+            // Further step/diamond searches as necessary
+            further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
 
-          bestsme = vp8_full_pixel_diamond(cpi, x, b, d, &mvp_full, step_param,
-                                           sadpb, further_steps, 1,
-                                           &cpi->fn_ptr[BLOCK_16X16],
-                                           &best_ref_mv, &mode_mv[NEWMV]);
-          d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
-
-          x->mv_col_min = tmp_col_min;
-          x->mv_col_max = tmp_col_max;
-          x->mv_row_min = tmp_row_min;
-          x->mv_row_max = tmp_row_max;
-
-          if (bestsme < INT_MAX) {
-            int dis; /* TODO: use dis in distortion calculation later. */
-            unsigned int sse;
-            cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first, &best_ref_mv,
-                                         x->errorperbit,
-                                         &cpi->fn_ptr[BLOCK_16X16],
-                                         XMVCOST, &dis, &sse);
-          }
-          mc_search_result[mbmi->ref_frame].as_int = d->bmi.as_mv.first.as_int;
+            bestsme = vp8_full_pixel_diamond(cpi, x, b, d, &mvp_full, step_param,
+                                             sadpb, further_steps, 1,
+                                             &cpi->fn_ptr[BLOCK_16X16],
+                                             &best_ref_mv, &tmp_mv);
 
-          mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
+            x->mv_col_min = tmp_col_min;
+            x->mv_col_max = tmp_col_max;
+            x->mv_row_min = tmp_row_min;
+            x->mv_row_max = tmp_row_max;
 
-          // Add the new motion vector cost to our rolling cost variable
-          rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
-                                   XMVCOST, 96,
-                                   x->e_mbd.allow_high_precision_mv);
-        }
+            if (bestsme < INT_MAX) {
+              int dis; /* TODO: use dis in distortion calculation later. */
+              unsigned int sse;
+              cpi->find_fractional_mv_step(x, b, d, &tmp_mv, &best_ref_mv,
+                                           x->errorperbit,
+                                           &cpi->fn_ptr[BLOCK_16X16],
+                                           XMVCOST, &dis, &sse);
+            }
+            d->bmi.as_mv.first.as_int = tmp_mv.as_int;
+            frame_mv[NEWMV][refs[0]].as_int = d->bmi.as_mv.first.as_int;
 
+            // Add the new motion vector cost to our rolling cost variable
+            rate2 += vp8_mv_bit_cost(&tmp_mv, &best_ref_mv,
+                                     XMVCOST, 96,
+                                     x->e_mbd.allow_high_precision_mv);
+          }
+          break;
         case NEARESTMV:
         case NEARMV:
-          // Clip "next_nearest" so that it does not extend to far out of image
-          vp8_clamp_mv2(&mode_mv[this_mode], xd);
-
-          // Do not bother proceeding if the vector (from newmv,nearest or near) is 0,0 as this should then be coded using the zeromv mode.
-          if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) &&
-              (mode_mv[this_mode].as_int == 0)) {
+          flag = 0;
+          // Do not bother proceeding if the vector (from newmv, nearest or
+          // near) is 0,0 as this should then be coded using the zeromv mode.
+          for (i = 0; i < num_refs; ++i)
+            if (frame_mv[this_mode][refs[i]].as_int == 0)
+              flag = 1;
+          if (flag)
             continue;
-          }
-
         case ZEROMV:
-          // Trap vectors that reach beyond the UMV borders
-          // Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
-          // because of the lack of break statements in the previous two cases.
-          if (mv_check_bounds(x, &mode_mv[this_mode]))
-            continue;
-
-          vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
+        default:
+          break;
+      }
+      flag = 0;
+      for (i = 0; i < num_refs; ++i) {
+        cur_mv[i] = frame_mv[this_mode][refs[i]];
+        // Clip "next_nearest" so that it does not extend to far out of image
+        vp8_clamp_mv2(&cur_mv[i], xd);
+        if (mv_check_bounds(x, &cur_mv[i]))
+          flag = 1;
+        mv_check_bounds(x, &cur_mv[i]);
+        x->e_mbd.mode_info_context->mbmi.mv[i].as_int = cur_mv[i].as_int;
+      }
+      if (flag)
+        continue;
 
 #if CONFIG_PRED_FILTER
-          // Filtered prediction:
-          mbmi->pred_filter_enabled =
-            vp8_mode_order[mode_index].pred_filter_flag;
-          rate2 += vp8_cost_bit(cpi->common.prob_pred_filter_off,
-                                mbmi->pred_filter_enabled);
+      // Filtered prediction:
+      xd->mode_info_context->mbmi.pred_filter_enabled =
+        vp8_mode_order[mode_index].pred_filter_flag;
+      rate2 += vp8_cost_bit(cpi->common.prob_pred_filter_off,
+                            xd->mode_info_context->mbmi.pred_filter_enabled);
 #endif
 #if CONFIG_SWITCHABLE_INTERP
-          if (cpi->common.mcomp_filter_type == SWITCHABLE)
-            rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
-                [get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
-                [vp8_switchable_interp_map[mbmi->interp_filter]];
+      if (cpi->common.mcomp_filter_type == SWITCHABLE)
+        rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
+            [get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
+            [vp8_switchable_interp_map[
+            x->e_mbd.mode_info_context->mbmi.interp_filter]];
 #endif
 
-          vp8_build_1st_inter16x16_predictors_mby(&x->e_mbd,
-                                                  xd->predictor, 16);
-
-          compmode_cost =
-            vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), 0);
-
-          // Add in the Mv/mode cost
-          rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
-
-          if (cpi->active_map_enabled && x->active_ptr[0] == 0)
-            x->skip = 1;
-          else if (x->encode_breakout) {
-            unsigned int sse, var;
-            int threshold = (xd->block[0].dequant[1]
-                             * xd->block[0].dequant[1] >> 4);
-
-            if (threshold < x->encode_breakout)
-              threshold = x->encode_breakout;
-
-            var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
-                  (*(b->base_src), b->src_stride,
-                   x->e_mbd.predictor, 16, &sse);
-
-            if (sse < threshold) {
-              unsigned int q2dc = xd->block[24].dequant[0];
-              /* If there is no codeable 2nd order dc
-                 or a very small uniform pixel change change */
-              if ((sse - var < q2dc *q2dc >> 4) ||
-                  (sse / 2 > var && sse - var < 64)) {
-                // Check u and v to make sure skip is ok
-                int sse2 =  VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
-                if (sse2 * 2 < threshold) {
-                  x->skip = 1;
-                  distortion2 = sse + sse2;
-                  rate2 = 500;
-
-                  /* for best_yrd calculation */
-                  rate_uv = 0;
-                  distortion_uv = sse2;
-
-                  disable_skip = 1;
-                  this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
-
-                  break;
-                }
-              }
-            }
-          }
-
-          vp8_build_1st_inter16x16_predictors_mbuv(&x->e_mbd,
-                                                   &xd->predictor[256],
-                                                   &xd->predictor[320], 8);
-          inter_mode_cost(cpi, x, this_mode, &rate2, &distortion2,
-                          &rate_y, &distortion, &rate_uv, &distortion_uv);
-          mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
-          break;
-
-        default:
-          break;
-      }
-    } else { /* mbmi->second_ref_frame != 0 */
-      int ref1 = mbmi->ref_frame;
-      int ref2 = mbmi->second_ref_frame;
-
-      mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
-      switch (this_mode) {
-        case NEWMV:
-          if (mc_search_result[ref1].as_int == INVALID_MV ||
-              mc_search_result[ref2].as_int == INVALID_MV)
-            continue;
-          mbmi->mv.as_int        = mc_search_result[ref1].as_int;
-          mbmi->second_mv.as_int = mc_search_result[ref2].as_int;
-          rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
-                                   &frame_best_ref_mv[ref1],
-                                   XMVCOST, 96,
-                                   x->e_mbd.allow_high_precision_mv);
-          rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
-                                   &frame_best_ref_mv[ref2],
-                                   XMVCOST, 96,
-                                   x->e_mbd.allow_high_precision_mv);
-          break;
-        case ZEROMV:
-          mbmi->mv.as_int        = 0;
-          mbmi->second_mv.as_int = 0;
-          break;
-        case NEARMV:
-          if (frame_near_mv[ref1].as_int == 0 ||
-              frame_near_mv[ref2].as_int == 0)
-            continue;
-          mbmi->mv.as_int        = frame_near_mv[ref1].as_int;
-          mbmi->second_mv.as_int = frame_near_mv[ref2].as_int;
-          break;
-        case NEARESTMV:
-          if (frame_nearest_mv[ref1].as_int == 0 ||
-              frame_nearest_mv[ref2].as_int == 0)
-            continue;
-          mbmi->mv.as_int        = frame_nearest_mv[ref1].as_int;
-          mbmi->second_mv.as_int = frame_nearest_mv[ref2].as_int;
-          break;
-        default:
-          break;
-      }
-
       /* We don't include the cost of the second reference here, because there are only
        * three options: Last/Golden, ARF/Last or Golden/ARF, or in other words if you
        * present them in that order, the second one is always known if the first is known */
-      compmode_cost =
-        vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), 1);
-
-      /* Add in the Mv/mode cost */
+      compmode_cost = vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP),
+                                   is_comp_pred);
       rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
 
-      vp8_clamp_mv2(&mbmi->mv, xd);
-      vp8_clamp_mv2(&mbmi->second_mv, xd);
-      if (mv_check_bounds(x, &mbmi->mv))
-        continue;
-      if (mv_check_bounds(x, &mbmi->second_mv))
-        continue;
-
-      /* build first and second prediction */
       vp8_build_1st_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor,
                                               16);
+      if (is_comp_pred)
+        vp8_build_2nd_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor,
+                                                16);
+
+      if (cpi->active_map_enabled && x->active_ptr[0] == 0)
+        x->skip = 1;
+      else if (x->encode_breakout) {
+        unsigned int sse, var;
+        int threshold = (xd->block[0].dequant[1]
+                         * xd->block[0].dequant[1] >> 4);
+
+        if (threshold < x->encode_breakout)
+          threshold = x->encode_breakout;
+
+        var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+              (*(b->base_src), b->src_stride,
+               x->e_mbd.predictor, 16, &sse);
+
+        if (sse < threshold) {
+          unsigned int q2dc = xd->block[24].dequant[0];
+          /* If there is no codeable 2nd order dc
+             or a very small uniform pixel change */
+          if ((sse - var < q2dc *q2dc >> 4) ||
+              (sse / 2 > var && sse - var < 64)) {
+            // Check u and v to make sure skip is ok
+            int sse2 =  VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+            if (sse2 * 2 < threshold) {
+              x->skip = 1;
+              distortion2 = sse + sse2;
+              rate2 = 500;
+
+              /* for best_yrd calculation */
+              rate_uv = 0;
+              distortion_uv = sse2;
+
+              disable_skip = 1;
+              this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+
+              break;
+            }
+          }
+        }
+      }
+
       vp8_build_1st_inter16x16_predictors_mbuv(&x->e_mbd, &xd->predictor[256],
                                                &xd->predictor[320], 8);
-      /* do second round and average the results */
-      vp8_build_2nd_inter16x16_predictors_mb(&x->e_mbd, x->e_mbd.predictor,
-                                             &x->e_mbd.predictor[256],
-                                             &x->e_mbd.predictor[320], 16, 8);
-
+      if (is_comp_pred)
+        vp8_build_2nd_inter16x16_predictors_mbuv(&x->e_mbd,
+                                                 &xd->predictor[256],
+                                                 &xd->predictor[320], 8);
       inter_mode_cost(cpi, x, this_mode, &rate2, &distortion2,
                       &rate_y, &distortion, &rate_uv, &distortion_uv);
-
-      /* don't bother w/ skip, we would never have come here if skip were enabled */
-      mbmi->mode = this_mode;
+      if (is_comp_pred)
+        mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
+      else
+        mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
     }
 
     if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
@@ -3439,7 +3388,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
             else
               mbmi->uv_mode = uv_intra_mode;
             /* required for left and above block mv */
-            mbmi->mv.as_int = 0;
+            mbmi->mv[0].as_int = 0;
           }
 
           other_cost += ref_costs[mbmi->ref_frame];
@@ -3559,7 +3508,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
       (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
     mbmi->mode = ZEROMV;
     mbmi->ref_frame = ALTREF_FRAME;
-    mbmi->mv.as_int = 0;
+    mbmi->mv[0].as_int = 0;
     mbmi->uv_mode = DC_PRED;
     mbmi->mb_skip_coeff =
       (cpi->common.mb_no_coeff_skip) ? 1 : 0;
@@ -3600,8 +3549,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
 
     vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
 
-    mbmi->mv.as_int = x->partition_info->bmi[15].mv.as_int;
-    mbmi->second_mv.as_int = x->partition_info->bmi[15].second_mv.as_int;
+    mbmi->mv[0].as_int = x->partition_info->bmi[15].mv.as_int;
+    mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
   }
 
   if (best_single_rd == INT64_MAX)