namespace bits {
// CountPopulation32(value) returns the number of bits set in |value|.
-inline uint32_t CountPopulation32(uint32_t value) {
+inline unsigned CountPopulation32(uint32_t value) {
#if V8_HAS_BUILTIN_POPCOUNT
return __builtin_popcount(value);
#else
value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
- return value;
+ return static_cast<unsigned>(value);
+#endif
+}
+
+
+// CountPopulation64(value) returns the number of bits set in |value|.
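+// For example, CountPopulation64(0xf0f0) returns 8.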
+inline unsigned CountPopulation64(uint64_t value) {
+#if V8_HAS_BUILTIN_POPCOUNT
+ return __builtin_popcountll(value);
+#else
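+  // Fallback: sum the population counts of the low and high 32-bit halves.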
+ return CountPopulation32(static_cast<uint32_t>(value)) +
+ CountPopulation32(static_cast<uint32_t>(value >> 32));
#endif
}
// CountLeadingZeros32(value) returns the number of zero bits preceding the
// most significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns 32.
-inline uint32_t CountLeadingZeros32(uint32_t value) {
+inline unsigned CountLeadingZeros32(uint32_t value) {
#if V8_HAS_BUILTIN_CLZ
return value ? __builtin_clz(value) : 32;
#elif V8_CC_MSVC
unsigned long result; // NOLINT(runtime/int)
if (!_BitScanReverse(&result, value)) return 32;
- return static_cast<uint32_t>(31 - result);
+ return static_cast<unsigned>(31 - result);
#else
value = value | (value >> 1);
value = value | (value >> 2);
}
+// CountLeadingZeros64(value) returns the number of zero bits preceding the
+// most significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns 64.
+inline unsigned CountLeadingZeros64(uint64_t value) {
+#if V8_HAS_BUILTIN_CLZ
+ return value ? __builtin_clzll(value) : 64;
+#else
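+  // Fallback: propagate the most significant 1 bit into every lower position;
+  // the bits still clear afterwards are exactly the leading zeros, so count
+  // them via the complement.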
+ value = value | (value >> 1);
+ value = value | (value >> 2);
+ value = value | (value >> 4);
+ value = value | (value >> 8);
+ value = value | (value >> 16);
+ value = value | (value >> 32);
+ return CountPopulation64(~value);
+#endif
+}
+
+
// CountTrailingZeros32(value) returns the number of zero bits following the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns 32.
-inline uint32_t CountTrailingZeros32(uint32_t value) {
+inline unsigned CountTrailingZeros32(uint32_t value) {
#if V8_HAS_BUILTIN_CTZ
return value ? __builtin_ctz(value) : 32;
#elif V8_CC_MSVC
unsigned long result; // NOLINT(runtime/int)
if (!_BitScanForward(&result, value)) return 32;
- return static_cast<uint32_t>(result);
+ return static_cast<unsigned>(result);
#else
if (value == 0) return 32;
unsigned count = 0;
}
+// CountTrailingZeros64(value) returns the number of zero bits following the
+// least significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns 64.
+inline unsigned CountTrailingZeros64(uint64_t value) {
+#if V8_HAS_BUILTIN_CTZ
+ return value ? __builtin_ctzll(value) : 64;
+#else
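+  // Fallback: |value ^ (value - 1)| sets the least significant 1 bit and all
+  // bits below it; the loop counts how many right shifts leave that mask
+  // non-zero, which equals the number of trailing zeros.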
+ if (value == 0) return 64;
+ unsigned count = 0;
+ for (value ^= value - 1; value >>= 1; ++count)
+ ;
+ return count;
+#endif
+}
+
+
// Returns true iff |value| is a power of 2.
inline bool IsPowerOfTwo32(uint32_t value) {
return value && !(value & (value - 1));
}
+int64_t RandomNumberGenerator::NextInt64() {
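+  // Assemble a 64-bit result from two independent 32-bit samples.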
+ uint64_t lo = bit_cast<unsigned>(Next(32));
+ uint64_t hi = bit_cast<unsigned>(Next(32));
+ return lo | (hi << 32);
+}
+
+
void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
for (size_t n = 0; n < buflen; ++n) {
static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
// (exclusive), is pseudorandomly generated and returned.
double NextDouble() WARN_UNUSED_RESULT;
+ // Returns the next pseudorandom, uniformly distributed int64 value from this
+ // random number generator's sequence. The general contract of |NextInt64()|
+ // is that one 64-bit int value is pseudorandomly generated and returned.
+ // All 2^64 possible integer values are produced with (approximately) equal
+ // probability.
+ int64_t NextInt64() WARN_UNUSED_RESULT;
+
// Fills the elements of a specified array of bytes with random numbers.
void NextBytes(void* buffer, size_t buflen);
__ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
}
break;
- case kArm64Shl:
+ case kArm64Lsl:
ASSEMBLE_SHIFT(Lsl, 64);
break;
- case kArm64Shl32:
+ case kArm64Lsl32:
ASSEMBLE_SHIFT(Lsl, 32);
break;
- case kArm64Shr:
+ case kArm64Lsr:
ASSEMBLE_SHIFT(Lsr, 64);
break;
- case kArm64Shr32:
+ case kArm64Lsr32:
ASSEMBLE_SHIFT(Lsr, 32);
break;
- case kArm64Sar:
+ case kArm64Asr:
ASSEMBLE_SHIFT(Asr, 64);
break;
- case kArm64Sar32:
+ case kArm64Asr32:
ASSEMBLE_SHIFT(Asr, 32);
break;
case kArm64Ror:
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
+ case kArm64Ubfx:
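+      // Unsigned bitfield extract: input 1 is the lsb and input 2 the width
+      // of the field to copy, zero-extended, into the output register.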
+ __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kArm64Ubfx32:
+ __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
case kArm64Claim: {
int words = MiscField::decode(instr->opcode());
__ Claim(words);
V(Arm64Not32) \
V(Arm64Neg) \
V(Arm64Neg32) \
- V(Arm64Shl) \
- V(Arm64Shl32) \
- V(Arm64Shr) \
- V(Arm64Shr32) \
- V(Arm64Sar) \
- V(Arm64Sar32) \
+ V(Arm64Lsl) \
+ V(Arm64Lsl32) \
+ V(Arm64Lsr) \
+ V(Arm64Lsr32) \
+ V(Arm64Asr) \
+ V(Arm64Asr32) \
V(Arm64Ror) \
V(Arm64Ror32) \
V(Arm64Mov32) \
V(Arm64Sxtw) \
+ V(Arm64Ubfx) \
+ V(Arm64Ubfx32) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePairZero) \
void InstructionSelector::VisitWord32And(Node* node) {
+ Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
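+      // For example, And(Shr(x, 8), 0xff) becomes Ubfx32(x, 8, 8), extracting
+      // bits [15:8] of x.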
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 31)) {
+ Emit(kArm64Ubfx32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
VisitLogical<Int32BinopMatcher>(
this, node, &m, kArm64And32, CanCover(node, m.left().node()),
CanCover(node, m.right().node()), kLogical32Imm);
void InstructionSelector::VisitWord64And(Node* node) {
+ Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint64_t mask_width = base::bits::CountPopulation64(mask);
+ uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask));
+
+ // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 63)) {
+ Emit(kArm64Ubfx, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
VisitLogical<Int64BinopMatcher>(
this, node, &m, kArm64And, CanCover(node, m.left().node()),
CanCover(node, m.right().node()), kLogical64Imm);
void InstructionSelector::VisitWord32Shl(Node* node) {
- VisitRRO(this, kArm64Shl32, node, kShift32Imm);
+ VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}
void InstructionSelector::VisitWord64Shl(Node* node) {
- VisitRRO(this, kArm64Shl, node, kShift64Imm);
+ VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
- VisitRRO(this, kArm64Shr32, node, kShift32Imm);
+ Arm64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ int32_t lsb = m.right().Value();
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+      // Select Ubfx for Shr(And(x, mask), imm) where the bits selected by the
+      // mask are shifted down into the least-significant bits of the result.
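+      // For example, Shr(And(x, 0xff00), 8) becomes Ubfx32(x, 8, 8).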
+ if ((mask_msb + mask_width + lsb) == 32) {
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kArm64Ubfx32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
void InstructionSelector::VisitWord64Shr(Node* node) {
- VisitRRO(this, kArm64Shr, node, kShift64Imm);
+ Arm64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ int64_t lsb = m.right().Value();
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+      // Select Ubfx for Shr(And(x, mask), imm) where the bits selected by the
+      // mask are shifted down into the least-significant bits of the result.
+ uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint64_t mask_width = base::bits::CountPopulation64(mask);
+ uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kArm64Ubfx, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kArm64Lsr, node, kShift64Imm);
}
void InstructionSelector::VisitWord32Sar(Node* node) {
- VisitRRO(this, kArm64Sar32, node, kShift32Imm);
+ VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
void InstructionSelector::VisitWord64Sar(Node* node) {
- VisitRRO(this, kArm64Sar, node, kShift64Imm);
+ VisitRRO(this, kArm64Asr, node, kShift64Imm);
}
}
+TEST(Bits, CountPopulation64) {
+ EXPECT_EQ(0u, CountPopulation64(0));
+ EXPECT_EQ(1u, CountPopulation64(1));
+ EXPECT_EQ(2u, CountPopulation64(0x8000000000000001));
+ EXPECT_EQ(8u, CountPopulation64(0x11111111));
+ EXPECT_EQ(16u, CountPopulation64(0xf0f0f0f0));
+ EXPECT_EQ(24u, CountPopulation64(0xfff0f0ff));
+ EXPECT_EQ(32u, CountPopulation64(0xffffffff));
+ EXPECT_EQ(16u, CountPopulation64(0x1111111111111111));
+ EXPECT_EQ(32u, CountPopulation64(0xf0f0f0f0f0f0f0f0));
+ EXPECT_EQ(48u, CountPopulation64(0xfff0f0fffff0f0ff));
+ EXPECT_EQ(64u, CountPopulation64(0xffffffffffffffff));
+}
+
+
TEST(Bits, CountLeadingZeros32) {
EXPECT_EQ(32u, CountLeadingZeros32(0));
EXPECT_EQ(31u, CountLeadingZeros32(1));
}
+TEST(Bits, CountLeadingZeros64) {
+ EXPECT_EQ(64u, CountLeadingZeros64(0));
+ EXPECT_EQ(63u, CountLeadingZeros64(1));
+ TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+ EXPECT_EQ(63u - shift, CountLeadingZeros64(V8_UINT64_C(1) << shift));
+ }
+ EXPECT_EQ(36u, CountLeadingZeros64(0x0f0f0f0f));
+ EXPECT_EQ(4u, CountLeadingZeros64(0x0f0f0f0f00000000));
+}
+
+
TEST(Bits, CountTrailingZeros32) {
EXPECT_EQ(32u, CountTrailingZeros32(0));
EXPECT_EQ(31u, CountTrailingZeros32(0x80000000));
}
+TEST(Bits, CountTrailingZeros64) {
+ EXPECT_EQ(64u, CountTrailingZeros64(0));
+ EXPECT_EQ(63u, CountTrailingZeros64(0x8000000000000000));
+ TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+ EXPECT_EQ(shift, CountTrailingZeros64(V8_UINT64_C(1) << shift));
+ }
+ EXPECT_EQ(4u, CountTrailingZeros64(0xf0f0f0f0));
+ EXPECT_EQ(36u, CountTrailingZeros64(0xf0f0f0f000000000));
+}
+
+
TEST(Bits, IsPowerOfTwo32) {
EXPECT_FALSE(IsPowerOfTwo32(0U));
TRACED_FORRANGE(uint32_t, shift, 0, 31) {
// ARM64 shift instructions.
static const MachInst2 kShiftInstructions[] = {
- {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Shl32, kMachInt32},
- {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Shl, kMachInt64},
- {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Shr32, kMachInt32},
- {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Shr, kMachInt64},
- {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Sar32, kMachInt32},
- {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Sar, kMachInt64},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
+ {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
+ {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
+ {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
{&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
{&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64}};
}
}
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
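+      // The bits of the mask below |lsb| are junk: the Shr discards whatever
+      // they select, so the selector should still emit a Ubfx.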
+ uint32_t jnk = rng()->NextInt();
+ jnk >>= 32 - lsb;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(lsb)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk >>= 32 - lsb;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(lsb)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 63) {
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk >>= 64 - lsb;
+ uint64_t msk =
+ ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
+ m.Int64Constant(lsb)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 1, 63) {
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk >>= 64 - lsb;
+ uint64_t msk =
+ ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
+ m.Int64Constant(lsb)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
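+      // A contiguous mask of |width| low-order bits matches the Ubfx pattern
+      // in VisitWord32And.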
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+ m.Int32Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32And(m.Int32Constant(msk),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(lsb)),
+ m.Int64Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ StreamBuilder m(this, kMachInt64, kMachInt64);
+ m.Return(m.Word64And(m.Int64Constant(msk),
+ m.Word64Shr(m.Parameter(0), m.Int64Constant(lsb))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8