From f0452e2193b5fe2eec03799e008d81064c7b7c59 Mon Sep 17 00:00:00 2001
From: "bmeurer@chromium.org" <bmeurer@chromium.org>
Date: Thu, 9 Oct 2014 09:18:31 +0000
Subject: [PATCH] [turbofan] Add support for ARM64 Ubfx

Support selecting Ubfx for shift-mask and mask-shift operations. Also,
rename the shifts to match the instruction names.

BUG=
R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/633123002

Patch from Martyn Capewell.

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24482 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/base/bits.h                                  |  56 ++++++-
 src/base/utils/random-number-generator.cc        |   7 +
 src/base/utils/random-number-generator.h         |   7 +
 src/compiler/arm64/code-generator-arm64.cc       |  20 ++-
 src/compiler/arm64/instruction-codes-arm64.h     |  14 +-
 .../arm64/instruction-selector-arm64.cc          |  96 ++++++++++-
 test/unittests/base/bits-unittest.cc             |  37 +++++
 .../instruction-selector-arm64-unittest.cc       | 150 +++++++++++++++++-
 8 files changed, 357 insertions(+), 30 deletions(-)

diff --git a/src/base/bits.h b/src/base/bits.h
index e6a733a45..d681ba8ba 100644
--- a/src/base/bits.h
+++ b/src/base/bits.h
@@ -19,7 +19,7 @@ namespace base {
 namespace bits {
 
 // CountPopulation32(value) returns the number of bits set in |value|.
-inline uint32_t CountPopulation32(uint32_t value) {
+inline unsigned CountPopulation32(uint32_t value) {
 #if V8_HAS_BUILTIN_POPCOUNT
   return __builtin_popcount(value);
 #else
@@ -28,20 +28,31 @@ inline uint32_t CountPopulation32(uint32_t value) {
   value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
   value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
   value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
-  return value;
+  return static_cast<unsigned>(value);
 #endif
 }
 
 
+// CountPopulation64(value) returns the number of bits set in |value|.
+inline unsigned CountPopulation64(uint64_t value) {
+#if V8_HAS_BUILTIN_POPCOUNT
+  return __builtin_popcountll(value);
+#else
+  return CountPopulation32(static_cast<uint32_t>(value)) +
+         CountPopulation32(static_cast<uint32_t>(value >> 32));
+#endif
+}
+
+
 // CountLeadingZeros32(value) returns the number of zero bits following the most
 // significant 1 bit in |value| if |value| is non-zero, otherwise it returns 32.
-inline uint32_t CountLeadingZeros32(uint32_t value) {
+inline unsigned CountLeadingZeros32(uint32_t value) {
 #if V8_HAS_BUILTIN_CLZ
   return value ? __builtin_clz(value) : 32;
 #elif V8_CC_MSVC
   unsigned long result;  // NOLINT(runtime/int)
   if (!_BitScanReverse(&result, value)) return 32;
-  return static_cast<uint32_t>(31 - result);
+  return static_cast<unsigned>(31 - result);
 #else
   value = value | (value >> 1);
   value = value | (value >> 2);
@@ -53,16 +64,33 @@ inline uint32_t CountLeadingZeros32(uint32_t value) {
 }
 
 
+// CountLeadingZeros64(value) returns the number of zero bits following the most
+// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 64.
+inline unsigned CountLeadingZeros64(uint64_t value) {
+#if V8_HAS_BUILTIN_CLZ
+  return value ? __builtin_clzll(value) : 64;
+#else
+  value = value | (value >> 1);
+  value = value | (value >> 2);
+  value = value | (value >> 4);
+  value = value | (value >> 8);
+  value = value | (value >> 16);
+  value = value | (value >> 32);
+  return CountPopulation64(~value);
+#endif
+}
+
+
 // CountTrailingZeros32(value) returns the number of zero bits preceding the
 // least significant 1 bit in |value| if |value| is non-zero, otherwise it
 // returns 32.
-inline uint32_t CountTrailingZeros32(uint32_t value) {
+inline unsigned CountTrailingZeros32(uint32_t value) {
 #if V8_HAS_BUILTIN_CTZ
   return value ? __builtin_ctz(value) : 32;
 #elif V8_CC_MSVC
   unsigned long result;  // NOLINT(runtime/int)
   if (!_BitScanForward(&result, value)) return 32;
-  return static_cast<uint32_t>(result);
+  return static_cast<unsigned>(result);
 #else
   if (value == 0) return 32;
   unsigned count = 0;
@@ -73,6 +101,22 @@ inline uint32_t CountTrailingZeros32(uint32_t value) {
 }
 
 
+// CountTrailingZeros64(value) returns the number of zero bits preceding the
+// least significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns 64.
+inline unsigned CountTrailingZeros64(uint64_t value) {
+#if V8_HAS_BUILTIN_CTZ
+  return value ? __builtin_ctzll(value) : 64;
+#else
+  if (value == 0) return 64;
+  unsigned count = 0;
+  for (value ^= value - 1; value >>= 1; ++count)
+    ;
+  return count;
+#endif
+}
+
+
 // Returns true iff |value| is a power of 2.
 inline bool IsPowerOfTwo32(uint32_t value) {
   return value && !(value & (value - 1));
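The #else fallbacks added above are standard bit tricks: CountLeadingZeros64 smears the highest set bit into every lower position, so popcounting the complement counts exactly the leading zeros. A minimal standalone sketch (plain C++ with local names, not the v8::base::bits API) that checks this over every single-bit input:

```cpp
#include <cassert>
#include <cstdint>

// Kernighan population count: clears the lowest set bit each iteration.
unsigned Pop64(uint64_t v) {
  unsigned n = 0;
  for (; v != 0; v &= v - 1) ++n;
  return n;
}

// Smear the highest 1 bit downward, then count the zeros that remain above
// it -- the same shape as the #else branch of CountLeadingZeros64.
unsigned Clz64(uint64_t v) {
  v |= v >> 1;
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;
  v |= v >> 32;
  return Pop64(~v);
}

int main() {
  assert(Clz64(0) == 64);
  assert(Pop64(0xf0f0f0f0f0f0f0f0u) == 32);
  for (unsigned s = 0; s < 64; ++s) {
    assert(Clz64(uint64_t{1} << s) == 63 - s);
  }
  return 0;
}
```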
diff --git a/src/base/utils/random-number-generator.cc b/src/base/utils/random-number-generator.cc
index a1ec9d718..29a48ffb0 100644
--- a/src/base/utils/random-number-generator.cc
+++ b/src/base/utils/random-number-generator.cc
@@ -102,6 +102,13 @@ double RandomNumberGenerator::NextDouble() {
 }
 
 
+int64_t RandomNumberGenerator::NextInt64() {
+  uint64_t lo = bit_cast<unsigned>(Next(32));
+  uint64_t hi = bit_cast<unsigned>(Next(32));
+  return lo | (hi << 32);
+}
+
+
 void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
   for (size_t n = 0; n < buflen; ++n) {
     static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));

diff --git a/src/base/utils/random-number-generator.h b/src/base/utils/random-number-generator.h
index 479423d65..d1294f266 100644
--- a/src/base/utils/random-number-generator.h
+++ b/src/base/utils/random-number-generator.h
@@ -68,6 +68,13 @@ class RandomNumberGenerator FINAL {
   // (exclusive), is pseudorandomly generated and returned.
   double NextDouble() WARN_UNUSED_RESULT;
 
+  // Returns the next pseudorandom, uniformly distributed int64 value from this
+  // random number generator's sequence. The general contract of |NextInt64()|
+  // is that one 64-bit int value is pseudorandomly generated and returned.
+  // All 2^64 possible integer values are produced with (approximately) equal
+  // probability.
+  int64_t NextInt64() WARN_UNUSED_RESULT;
+
   // Fills the elements of a specified array of bytes with random numbers.
   void NextBytes(void* buffer, size_t buflen);
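NextInt64() splices two 32-bit draws into one 64-bit value; the bit_cast through unsigned is what keeps a negative Next(32) result from sign-extending into the upper half. A sketch of the same composition, with Next32() as a hypothetical stand-in for the generator's private Next(32):

```cpp
#include <cstdint>

// Stand-in for the private Next(32): any source of 32 random bits
// returned as an int.
extern int Next32();

int64_t NextInt64Sketch() {
  // Going through uint32_t zero-extends; assigning a negative int directly
  // to uint64_t would sign-extend and smear 1s over the high half.
  uint64_t lo = static_cast<uint32_t>(Next32());
  uint64_t hi = static_cast<uint32_t>(Next32());
  return static_cast<int64_t>(lo | (hi << 32));
}
```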
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index c041e1536..da229d85d 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -319,22 +319,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
       }
       break;
-    case kArm64Shl:
+    case kArm64Lsl:
       ASSEMBLE_SHIFT(Lsl, 64);
       break;
-    case kArm64Shl32:
+    case kArm64Lsl32:
       ASSEMBLE_SHIFT(Lsl, 32);
       break;
-    case kArm64Shr:
+    case kArm64Lsr:
       ASSEMBLE_SHIFT(Lsr, 64);
       break;
-    case kArm64Shr32:
+    case kArm64Lsr32:
       ASSEMBLE_SHIFT(Lsr, 32);
       break;
-    case kArm64Sar:
+    case kArm64Asr:
       ASSEMBLE_SHIFT(Asr, 64);
       break;
-    case kArm64Sar32:
+    case kArm64Asr32:
       ASSEMBLE_SHIFT(Asr, 32);
       break;
     case kArm64Ror:
@@ -349,6 +349,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
+    case kArm64Ubfx:
+      __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      break;
+    case kArm64Ubfx32:
+      __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
+              i.InputInt8(2));
+      break;
     case kArm64Claim: {
       int words = MiscField::decode(instr->opcode());
       __ Claim(words);

diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index b8484b7d5..0f0d1d2ac 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -54,16 +54,18 @@ namespace compiler {
   V(Arm64Not32)                    \
   V(Arm64Neg)                      \
   V(Arm64Neg32)                    \
-  V(Arm64Shl)                      \
-  V(Arm64Shl32)                    \
-  V(Arm64Shr)                      \
-  V(Arm64Shr32)                    \
-  V(Arm64Sar)                      \
-  V(Arm64Sar32)                    \
+  V(Arm64Lsl)                      \
+  V(Arm64Lsl32)                    \
+  V(Arm64Lsr)                      \
+  V(Arm64Lsr32)                    \
+  V(Arm64Asr)                      \
+  V(Arm64Asr32)                    \
   V(Arm64Ror)                      \
   V(Arm64Ror32)                    \
   V(Arm64Mov32)                    \
   V(Arm64Sxtw)                     \
+  V(Arm64Ubfx)                     \
+  V(Arm64Ubfx32)                   \
   V(Arm64Claim)                    \
   V(Arm64Poke)                     \
   V(Arm64PokePairZero)             \
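The two new cases emit UBFX (unsigned bitfield extract): take |width| bits starting at bit |lsb| and place them, zero-extended, at bit 0. A behavioural model in plain C++ (UbfxModel32 is illustrative, not a V8 helper):

```cpp
#include <cassert>
#include <cstdint>

// Model of the 32-bit form, assuming 0 <= lsb, 1 <= width and
// lsb + width <= 32 (the encodable range).
uint32_t UbfxModel32(uint32_t x, unsigned lsb, unsigned width) {
  // width == 32 is special-cased: 1u << 32 would be undefined behaviour.
  uint32_t mask = (width == 32) ? ~0u : ((1u << width) - 1u);
  return (x >> lsb) & mask;
}

int main() {
  assert(UbfxModel32(0x00000a50, 4, 8) == 0xa5);  // field at bits [11:4]
  assert(UbfxModel32(0xffffffff, 0, 32) == 0xffffffff);
  return 0;
}
```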
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 3a2a820ae..2c5b2ff37 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -355,7 +355,29 @@ static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
 
 void InstructionSelector::VisitWord32And(Node* node) {
+  Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);
+  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+      m.right().HasValue()) {
+    uint32_t mask = m.right().Value();
+    uint32_t mask_width = base::bits::CountPopulation32(mask);
+    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+      // The mask must be contiguous, and occupy the least-significant bits.
+      DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask));
+
+      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
+      // significant bits.
+      Int32BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 31)) {
+        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+        return;
+      }
+      // Other cases fall through to the normal And operation.
+    }
+  }
   VisitLogical<Int32BinopMatcher>(
       this, node, &m, kArm64And32, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kLogical32Imm);
 }
@@ -363,7 +385,29 @@ void InstructionSelector::VisitWord32And(Node* node) {
 
 void InstructionSelector::VisitWord64And(Node* node) {
+  Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);
+  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+      m.right().HasValue()) {
+    uint64_t mask = m.right().Value();
+    uint64_t mask_width = base::bits::CountPopulation64(mask);
+    uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
+    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+      // The mask must be contiguous, and occupy the least-significant bits.
+      DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask));
+
+      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
+      // significant bits.
+      Int64BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 63)) {
+        Emit(kArm64Ubfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+        return;
+      }
+      // Other cases fall through to the normal And operation.
+    }
+  }
   VisitLogical<Int64BinopMatcher>(
       this, node, &m, kArm64And, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kLogical64Imm);
 }
@@ -403,32 +447,72 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
 
 
 void InstructionSelector::VisitWord32Shl(Node* node) {
-  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
+  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Shl(Node* node) {
-  VisitRRO(this, kArm64Shl, node, kShift64Imm);
+  VisitRRO(this, kArm64Lsl, node, kShift64Imm);
 }
 
 
 void InstructionSelector::VisitWord32Shr(Node* node) {
-  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    int32_t lsb = m.right().Value();
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+      uint32_t mask_width = base::bits::CountPopulation32(mask);
+      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
+      // shifted into the least-significant bits.
+      if ((mask_msb + mask_width + lsb) == 32) {
+        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+             g.TempImmediate(mask_width));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Shr(Node* node) {
-  VisitRRO(this, kArm64Shr, node, kShift64Imm);
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    int64_t lsb = m.right().Value();
+    Int64BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
+      // shifted into the least-significant bits.
+      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+      uint64_t mask_width = base::bits::CountPopulation64(mask);
+      uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
+      if ((mask_msb + mask_width + lsb) == 64) {
+        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+        Emit(kArm64Ubfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+             g.TempImmediate(mask_width));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kArm64Lsr, node, kShift64Imm);
 }
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
-  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
+  VisitRRO(this, kArm64Asr32, node, kShift32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
-  VisitRRO(this, kArm64Sar, node, kShift64Imm);
+  VisitRRO(this, kArm64Asr, node, kShift64Imm);
 }
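Both matchers reduce to the same question: after accounting for the shift, is the mask one contiguous run of 1s in the right place? Restated as standalone predicates for the 32-bit case (Popcount32/Clz32 are local stand-ins for the base::bits helpers; a non-empty check is added on both paths here):

```cpp
#include <cstdint>

unsigned Popcount32(uint32_t v) {
  unsigned n = 0;
  for (; v != 0; v &= v - 1) ++n;
  return n;
}

unsigned Clz32(uint32_t v) {
  unsigned n = 0;
  for (uint32_t b = 0x80000000u; b != 0 && (v & b) == 0; b >>= 1) ++n;
  return n;
}

// And(Shr(x, lsb), mask) -> Ubfx(x, lsb, width): the mask must be a
// non-empty run of 1s touching bit 0 (e.g. 0x000000ff).
bool MatchesShiftMask(uint32_t mask, unsigned* width) {
  *width = Popcount32(mask);
  return *width != 0 && Clz32(mask) + *width == 32;
}

// Shr(And(x, mask), lsb) -> Ubfx(x, lsb, width): bits below lsb are
// discarded by the shift, so they are normalized away first; what remains
// must be a run of 1s starting exactly at lsb (e.g. mask 0x3fc, lsb 2).
bool MatchesMaskShift(uint32_t mask, unsigned lsb, unsigned* width) {
  mask = (mask >> lsb) << lsb;
  *width = Popcount32(mask);
  return *width != 0 && Clz32(mask) + *width + lsb == 32;
}
```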
diff --git a/test/unittests/base/bits-unittest.cc b/test/unittests/base/bits-unittest.cc
index 06c118358..caedae240 100644
--- a/test/unittests/base/bits-unittest.cc
+++ b/test/unittests/base/bits-unittest.cc
@@ -28,6 +28,21 @@ TEST(Bits, CountPopulation32) {
 }
 
 
+TEST(Bits, CountPopulation64) {
+  EXPECT_EQ(0u, CountPopulation64(0));
+  EXPECT_EQ(1u, CountPopulation64(1));
+  EXPECT_EQ(2u, CountPopulation64(0x8000000000000001));
+  EXPECT_EQ(8u, CountPopulation64(0x11111111));
+  EXPECT_EQ(16u, CountPopulation64(0xf0f0f0f0));
+  EXPECT_EQ(24u, CountPopulation64(0xfff0f0ff));
+  EXPECT_EQ(32u, CountPopulation64(0xffffffff));
+  EXPECT_EQ(16u, CountPopulation64(0x1111111111111111));
+  EXPECT_EQ(32u, CountPopulation64(0xf0f0f0f0f0f0f0f0));
+  EXPECT_EQ(48u, CountPopulation64(0xfff0f0fffff0f0ff));
+  EXPECT_EQ(64u, CountPopulation64(0xffffffffffffffff));
+}
+
+
 TEST(Bits, CountLeadingZeros32) {
   EXPECT_EQ(32u, CountLeadingZeros32(0));
   EXPECT_EQ(31u, CountLeadingZeros32(1));
@@ -38,6 +53,17 @@ TEST(Bits, CountLeadingZeros32) {
 }
 
 
+TEST(Bits, CountLeadingZeros64) {
+  EXPECT_EQ(64u, CountLeadingZeros64(0));
+  EXPECT_EQ(63u, CountLeadingZeros64(1));
+  TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+    EXPECT_EQ(63u - shift, CountLeadingZeros64(V8_UINT64_C(1) << shift));
+  }
+  EXPECT_EQ(36u, CountLeadingZeros64(0x0f0f0f0f));
+  EXPECT_EQ(4u, CountLeadingZeros64(0x0f0f0f0f00000000));
+}
+
+
 TEST(Bits, CountTrailingZeros32) {
   EXPECT_EQ(32u, CountTrailingZeros32(0));
   EXPECT_EQ(31u, CountTrailingZeros32(0x80000000));
@@ -48,6 +74,17 @@ TEST(Bits, CountTrailingZeros32) {
 }
 
 
+TEST(Bits, CountTrailingZeros64) {
+  EXPECT_EQ(64u, CountTrailingZeros64(0));
+  EXPECT_EQ(63u, CountTrailingZeros64(0x8000000000000000));
+  TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+    EXPECT_EQ(shift, CountTrailingZeros64(V8_UINT64_C(1) << shift));
+  }
+  EXPECT_EQ(4u, CountTrailingZeros64(0xf0f0f0f0));
+  EXPECT_EQ(36u, CountTrailingZeros64(0xf0f0f0f000000000));
+}
+
+
 TEST(Bits, IsPowerOfTwo32) {
   EXPECT_FALSE(IsPowerOfTwo32(0U));
   TRACED_FORRANGE(uint32_t, shift, 0, 31) {
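The TRACED_FORRANGE loops above rest on the single-bit identities clz(1 << s) == 63 - s and ctz(1 << s) == s. A quick standalone cross-check of the equivalent property, assuming the GCC/Clang builtins that the header selects when available:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Walk every single-bit value: 1 << s for s in [0, 63].
  for (uint64_t x = 1; x != 0; x <<= 1) {
    assert(__builtin_popcountll(x) == 1);
    assert(__builtin_clzll(x) + __builtin_ctzll(x) == 63);
  }
  assert(__builtin_clzll(3) + __builtin_ctzll(3) == 62);  // two bits set
  return 0;
}
```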
diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index dc74a7ba6..6f0388907 100644
--- a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -140,12 +140,12 @@ static const MachInst2 kOvfAddSubInstructions[] = {
 
 // ARM64 shift instructions.
 static const MachInst2 kShiftInstructions[] = {
-    {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Shl32, kMachInt32},
-    {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Shl, kMachInt64},
-    {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Shr32, kMachInt32},
-    {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Shr, kMachInt64},
-    {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Sar32, kMachInt32},
-    {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Sar, kMachInt64},
+    {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
+    {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
+    {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
+    {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
+    {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
     {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
     {&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64}};
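The Shr tests in the next hunk build their masks as a contiguous width-bit run at lsb, plus random junk strictly below bit lsb; the junk is legal because the shift discards those bits, which is exactly what the selector's (mask >> lsb) << lsb normalization accounts for. A restatement of that construction (TestMask32 is a local helper, with |random| standing in for rng()->NextInt()):

```cpp
#include <cstdint>

// Requires 1 <= lsb <= 31 and 1 <= width <= 32 - lsb so that both shifts
// are well defined.
uint32_t TestMask32(unsigned lsb, unsigned width, uint32_t random) {
  uint32_t jnk = random >> (32 - lsb);  // junk confined to bits [lsb-1:0]
  return ((0xffffffffu >> (32 - width)) << lsb) | jnk;
}
// e.g. lsb = 4, width = 8: a 1-run over bits [11:4], plus junk in [3:0].
```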
@@ -1487,6 +1487,144 @@ TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
   }
 }
 
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
+  TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      uint32_t jnk = rng()->NextInt();
+      jnk >>= 32 - lsb;
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+                           m.Int32Constant(lsb)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      uint32_t jnk = rng()->NextInt();
+      jnk >>= 32 - lsb;
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+                           m.Int32Constant(lsb)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
+  TRACED_FORRANGE(int32_t, lsb, 1, 63) {
+    TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+      uint64_t jnk = rng()->NextInt64();
+      jnk >>= 64 - lsb;
+      uint64_t msk =
+          ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt64, kMachInt64);
+      m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
+                           m.Int64Constant(lsb)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 1, 63) {
+    TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+      uint64_t jnk = rng()->NextInt64();
+      jnk >>= 64 - lsb;
+      uint64_t msk =
+          ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt64, kMachInt64);
+      m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
+                           m.Int64Constant(lsb)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
+  TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 31) {
+      uint32_t msk = (1 << width) - 1;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+                           m.Int32Constant(msk)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 31) {
+      uint32_t msk = (1 << width) - 1;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(m.Int32Constant(msk),
+                           m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
+  TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 31) {
+      uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+      StreamBuilder m(this, kMachInt64, kMachInt64);
+      m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(lsb)),
+                           m.Int64Constant(msk)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 31) {
+      uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+      StreamBuilder m(this, kMachInt64, kMachInt64);
+      m.Return(m.Word64And(m.Int64Constant(msk),
+                           m.Word64Shr(m.Parameter(0), m.Int64Constant(lsb))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+    }
+  }
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
-- 
2.34.1
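Putting both matched shapes together on a concrete mask: with mask 0x3fc (an 8-bit run at bit 2), Word32Shr(Word32And(x, 0x3fc), 2) and Word32And(Word32Shr(x, 2), 0xff) compute the same field, and both now select to a single Ubfx32 with lsb 2 and width 8. A check of the arithmetic under the earlier sketch model:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xdeadbeef;
  uint32_t via_mask_shift = (x & 0x3fcu) >> 2;  // Shr(And(x, mask), lsb)
  uint32_t via_shift_mask = (x >> 2) & 0xffu;   // And(Shr(x, lsb), mask)
  uint32_t ubfx = (x >> 2) & ((1u << 8) - 1);   // Ubfx(x, 2, 8) model
  assert(via_mask_shift == via_shift_mask);
  assert(via_shift_mask == ubfx);
  return 0;
}
```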