case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
+ case kArm64Sbfx32:
+ __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
+ i.InputInt5(2));
+ break;
case kArm64Ubfx:
- __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
+ __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
+ i.InputInt6(2));
break;
case kArm64Ubfx32:
- __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
- i.InputInt8(2));
+ __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
+ i.InputInt5(2));
break;
case kArm64Bfi:
__ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
}
+namespace {
+
+bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
+ Arm64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+ // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
+ // OP is >>> or >> and (K & 0x1f) != 0.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() && m.right().HasValue() &&
+ (mleft.right().Value() & 0x1f) != 0 &&
+ (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
+ DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
+ ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
+
+ int right_val = m.right().Value() & 0x1f;
+ DCHECK_NE(right_val, 0);
+
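+ // The extract always starts at bit 0 and keeps the (32 - shift) bits that
+ // survive both shifts, e.g. (x << 24) >> 24 becomes Sbfx32(x, 0, 8) and
+ // (x << 24) >>> 24 becomes Ubfx32(x, 0, 8).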
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0),
+ g.TempImmediate(32 - right_val));
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+
void InstructionSelector::VisitWord32Shr(Node* node) {
- Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
uint32_t lsb = m.right().Value();
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
if ((mask_msb + mask_width + lsb) == 32) {
+ Arm64OperandGenerator g(this);
DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
Emit(kArm64Ubfx32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
return;
}
}
+ } else if (TryEmitBitfieldExtract32(this, node)) {
+ return;
}
VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
void InstructionSelector::VisitWord32Sar(Node* node) {
- Arm64OperandGenerator g(this);
- Int32BinopMatcher m(node);
- // Select Sxth/Sxtb for (x << K) >> K where K is 16 or 24.
- if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(16) && m.right().Is(16)) {
- Emit(kArm64Sxth32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()));
- return;
- } else if (mleft.right().Is(24) && m.right().Is(24)) {
- Emit(kArm64Sxtb32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()));
- return;
- }
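+ // Patterns previously matched here as Sxtb32/Sxth32, e.g. (x << 24) >> 24,
+ // are handled by TryEmitBitfieldExtract32, which selects an equivalent
+ // Sbfx32 with a zero bit position.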
+ if (TryEmitBitfieldExtract32(this, node)) {
+ return;
}
VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
- {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
StreamBuilder m(this, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);
- Node* const r =
- m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+ Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(shift)),
+ m.Int32Constant(shift));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Sxtb32, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Sbfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
- {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
StreamBuilder m(this, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);
- Node* const r =
- m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
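+ // Shift amounts are only used modulo 32, so shift + 32 and shift + 64
+ // should still match the same Sbfx32 pattern.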
+ Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(shift + 32)),
+ m.Int32Constant(shift + 64));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Sxth32, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Sbfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32Shl) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32Shr(m.Word32Shl(p0, m.Int32Constant(shift)),
+ m.Int32Constant(shift));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
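+ // As above, only the low five bits of the shift count matter, so
+ // shift + 32 and shift + 64 should still select Ubfx32.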
+ Node* const r = m.Word32Shr(m.Word32Shl(p0, m.Int32Constant(shift + 32)),
+ m.Int32Constant(shift + 64));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));