[turbofan] Add support for shifted and rotated operands on ARM64.
author      baptiste.afsa@arm.com <baptiste.afsa@arm.com>
            Tue, 14 Oct 2014 09:28:53 +0000 (09:28 +0000)
committer   baptiste.afsa@arm.com <baptiste.afsa@arm.com>
            Tue, 14 Oct 2014 09:28:53 +0000 (09:28 +0000)
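
This lets the instruction selector fold a shift or rotate by an immediate into
the second operand of the ARM64 logical and add/sub instructions (rotated
operands are only matched for the logical ones), so a single data-processing
instruction is emitted instead of a separate shift followed by the operation.
As an illustrative sketch (register names and shift amount are hypothetical,
not taken from this CL), the generated code now corresponds to a single
macro-assembler call such as:

    __ Add(w0, w1, Operand(w2, LSL, 3));  // add w0, w1, w2, lsl #3

rather than a shift into a temporary register followed by a plain Add.
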
R=bmeurer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/642923003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24591 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/compiler/arm64/code-generator-arm64.cc
src/compiler/arm64/instruction-codes-arm64.h
src/compiler/arm64/instruction-selector-arm64.cc
test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc

diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index fe62d5991039212ce1a0a60c114d366efc2044f8..c4a7d0e17c8f2c4389b2f89b5bb974315a29a117 100644
@@ -46,10 +46,54 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
 
   Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
 
+  Operand InputOperand2_32(int index) {
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        return InputOperand32(index);
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
+      case kMode_MRI:
+      case kMode_MRR:
+        break;
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
+  Operand InputOperand2_64(int index) {
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        return InputOperand64(index);
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
+      case kMode_MRI:
+      case kMode_MRR:
+        break;
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
   MemOperand MemoryOperand(int* first_index) {
     const int index = *first_index;
     switch (AddressingModeField::decode(instr_->opcode())) {
       case kMode_None:
+      case kMode_Operand2_R_LSL_I:
+      case kMode_Operand2_R_LSR_I:
+      case kMode_Operand2_R_ASR_I:
+      case kMode_Operand2_R_ROR_I:
         break;
       case kMode_MRI:
         *first_index += 2;
@@ -179,27 +223,28 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
     case kArm64Add:
-      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Add32:
       if (FlagsModeField::decode(opcode) != kFlags_none) {
         __ Adds(i.OutputRegister32(), i.InputRegister32(0),
-                i.InputOperand32(1));
+                i.InputOperand2_32(1));
       } else {
-        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+        __ Add(i.OutputRegister32(), i.InputRegister32(0),
+               i.InputOperand2_32(1));
       }
       break;
     case kArm64And:
-      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64And32:
-      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Bic:
-      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Bic32:
-      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Mul:
       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -285,38 +330,39 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ Neg(i.OutputRegister32(), i.InputOperand32(0));
       break;
     case kArm64Or:
-      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Or32:
-      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Orn:
-      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Orn32:
-      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Eor:
-      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Eor32:
-      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Eon:
-      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Eon32:
-      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Sub:
-      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Sub32:
       if (FlagsModeField::decode(opcode) != kFlags_none) {
         __ Subs(i.OutputRegister32(), i.InputRegister32(0),
-                i.InputOperand32(1));
+                i.InputOperand2_32(1));
       } else {
-        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+        __ Sub(i.OutputRegister32(), i.InputRegister32(0),
+               i.InputOperand2_32(1));
       }
       break;
     case kArm64Lsl:
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index 0f0d1d2ac8e97b411adbf657fa64aa4d3832913c..4d192901b05b94af14f6f7e60d9c4d3a9cbc54f9 100644
@@ -113,9 +113,13 @@ namespace compiler {
 // I = immediate (handle, external, int32)
 // MRI = [register + immediate]
 // MRR = [register + register]
-#define TARGET_ADDRESSING_MODE_LIST(V) \
-  V(MRI) /* [%r0 + K] */               \
-  V(MRR) /* [%r0 + %r1] */
+#define TARGET_ADDRESSING_MODE_LIST(V)  \
+  V(MRI)              /* [%r0 + K] */   \
+  V(MRR)              /* [%r0 + %r1] */ \
+  V(Operand2_R_LSL_I) /* %r0 LSL K */   \
+  V(Operand2_R_LSR_I) /* %r0 LSR K */   \
+  V(Operand2_R_ASR_I) /* %r0 ASR K */   \
+  V(Operand2_R_ROR_I) /* %r0 ROR K */
 
 }  // namespace internal
 }  // namespace compiler
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 47860070183c15b820478e0f1b0fac530d461398..893c00508efcfce8427801c2911d79c0f0ddcddd 100644
@@ -113,6 +113,51 @@ static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
 }
 
 
+template <typename Matcher>
+static bool TryMatchShift(InstructionSelector* selector, Node* node,
+                          InstructionCode* opcode, IrOpcode::Value shift_opcode,
+                          ImmediateMode imm_mode,
+                          AddressingMode addressing_mode) {
+  if (node->opcode() != shift_opcode) return false;
+  Arm64OperandGenerator g(selector);
+  Matcher m(node);
+  if (g.CanBeImmediate(m.right().node(), imm_mode)) {
+    *opcode |= AddressingModeField::encode(addressing_mode);
+    return true;
+  }
+  return false;
+}
+
+
+static bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
+                             InstructionCode* opcode, bool try_ror) {
+  return TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Shl, kShift32Imm,
+                                          kMode_Operand2_R_LSL_I) ||
+         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Shr, kShift32Imm,
+                                          kMode_Operand2_R_LSR_I) ||
+         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Sar, kShift32Imm,
+                                          kMode_Operand2_R_ASR_I) ||
+         (try_ror && TryMatchShift<Int32BinopMatcher>(
+                         selector, node, opcode, IrOpcode::kWord32Ror,
+                         kShift32Imm, kMode_Operand2_R_ROR_I)) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Shl, kShift64Imm,
+                                          kMode_Operand2_R_LSL_I) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Shr, kShift64Imm,
+                                          kMode_Operand2_R_LSR_I) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Sar, kShift64Imm,
+                                          kMode_Operand2_R_ASR_I) ||
+         (try_ror && TryMatchShift<Int64BinopMatcher>(
+                         selector, node, opcode, IrOpcode::kWord64Ror,
+                         kShift64Imm, kMode_Operand2_R_ROR_I));
+}
+
+
 // Shared routine for multiple binary operations.
 template <typename Matcher>
 static void VisitBinop(InstructionSelector* selector, Node* node,
@@ -124,9 +169,32 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
   size_t input_count = 0;
   InstructionOperand* outputs[2];
   size_t output_count = 0;
+  bool try_ror_operand = true;
+
+  if (m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() || m.IsInt64Sub()) {
+    try_ror_operand = false;
+  }
 
-  inputs[input_count++] = g.UseRegister(m.left().node());
-  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+  if (g.CanBeImmediate(m.right().node(), operand_mode)) {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseImmediate(m.right().node());
+  } else if (TryMatchAnyShift(selector, m.right().node(), &opcode,
+                              try_ror_operand)) {
+    Matcher m_shift(m.right().node());
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m_shift.left().node());
+    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+  } else if (m.HasProperty(Operator::kCommutative) &&
+             TryMatchAnyShift(selector, m.left().node(), &opcode,
+                              try_ror_operand)) {
+    Matcher m_shift(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+    inputs[input_count++] = g.UseRegister(m_shift.left().node());
+    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
 
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 6f0388907c063f010af14b4333387823345c965f..b4c31457182c80476dea307a3d1fe4788c7b24f8 100644
@@ -30,6 +30,17 @@ std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
 }
 
 
+struct Shift {
+  MachInst2 mi;
+  AddressingMode mode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Shift& shift) {
+  return os << shift.mi;
+}
+
+
 // Helper to build Int32Constant or Int64Constant depending on the given
 // machine type.
 Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
@@ -139,15 +150,23 @@ static const MachInst2 kOvfAddSubInstructions[] = {
 
 
 // ARM64 shift instructions.
-static const MachInst2 kShiftInstructions[] = {
-    {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
-    {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
-    {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
-    {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
-    {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
-    {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
-    {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
-    {&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64}};
+static const Shift kShiftInstructions[] = {
+    {{&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
+     kMode_Operand2_R_LSL_I},
+    {{&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
+     kMode_Operand2_R_LSL_I},
+    {{&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
+     kMode_Operand2_R_LSR_I},
+    {{&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
+     kMode_Operand2_R_LSR_I},
+    {{&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
+     kMode_Operand2_R_ASR_I},
+    {{&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
+     kMode_Operand2_R_ASR_I},
+    {{&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
+     kMode_Operand2_R_ROR_I},
+    {{&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64},
+     kMode_Operand2_R_ROR_I}};
 
 
 // ARM64 Mul/Div instructions.
@@ -296,6 +315,46 @@ TEST_P(InstructionSelectorLogicalTest, Immediate) {
 }
 
 
+TEST_P(InstructionSelectorLogicalTest, ShiftByImmediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test 64-bit shifted operands with 64-bit instructions.
+    if (shift.mi.machine_type != type) continue;
+
+    TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+      StreamBuilder m(this, type, type, type);
+      m.Return((m.*dpi.constructor)(
+          m.Parameter(0),
+          (m.*shift.mi.constructor)(m.Parameter(1),
+                                    BuildConstant(m, type, imm))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+
+    TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+      StreamBuilder m(this, type, type, type);
+      m.Return((m.*dpi.constructor)(
+          (m.*shift.mi.constructor)(m.Parameter(1),
+                                    BuildConstant(m, type, imm)),
+          m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
 INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
                         ::testing::ValuesIn(kLogicalInstructions));
 
@@ -356,6 +415,37 @@ TEST_P(InstructionSelectorAddSubTest, NegImmediateOnRight) {
 }
 
 
+TEST_P(InstructionSelectorAddSubTest, ShiftByImmediateOnRight) {
+  const AddSub dpi = GetParam();
+  const MachineType type = dpi.mi.machine_type;
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test 64-bit shifted operands with 64-bit instructions.
+    if (shift.mi.machine_type != type) continue;
+
+    if ((shift.mi.arch_opcode == kArm64Ror32) ||
+        (shift.mi.arch_opcode == kArm64Ror)) {
+      // Not supported by add/sub instructions.
+      continue;
+    }
+
+    TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+      StreamBuilder m(this, type, type, type);
+      m.Return((m.*dpi.mi.constructor)(
+          m.Parameter(0),
+          (m.*shift.mi.constructor)(m.Parameter(1),
+                                    BuildConstant(m, type, imm))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
 INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
                         ::testing::ValuesIn(kAddSubInstructions));
 
@@ -455,6 +545,51 @@ TEST_F(InstructionSelectorTest, AddNegImmediateOnLeft) {
 }
 
 
+TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
+  // 32-bit add.
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test relevant shifted operands.
+    if (shift.mi.machine_type != kMachInt32) continue;
+    if (shift.mi.arch_opcode == kArm64Ror32) continue;
+
+    TRACED_FORRANGE(int, imm, 0, 31) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return((m.Int32Add)(
+          (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+          m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+
+  // 64-bit add.
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test relevant shifted operands.
+    if (shift.mi.machine_type != kMachInt64) continue;
+    if (shift.mi.arch_opcode == kArm64Ror) continue;
+
+    TRACED_FORRANGE(int, imm, 0, 63) {
+      StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+      m.Return((m.Int64Add)(
+          (m.*shift.mi.constructor)(m.Parameter(1), m.Int64Constant(imm)),
+          m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Data processing controlled branches.
 
@@ -818,32 +953,31 @@ TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
 // Shift instructions.
 
 
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorShiftTest;
+typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
 
 
 TEST_P(InstructionSelectorShiftTest, Parameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
+  const Shift shift = GetParam();
+  const MachineType type = shift.mi.machine_type;
   StreamBuilder m(this, type, type, type);
-  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  m.Return((m.*shift.mi.constructor)(m.Parameter(0), m.Parameter(1)));
   Stream s = m.Build();
   ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(shift.mi.arch_opcode, s[0]->arch_opcode());
   EXPECT_EQ(2U, s[0]->InputCount());
   EXPECT_EQ(1U, s[0]->OutputCount());
 }
 
 
 TEST_P(InstructionSelectorShiftTest, Immediate) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
+  const Shift shift = GetParam();
+  const MachineType type = shift.mi.machine_type;
   TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
     StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    m.Return((m.*shift.mi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.mi.arch_opcode, s[0]->arch_opcode());
     EXPECT_EQ(2U, s[0]->InputCount());
     EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
     EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));