From 91ba01532f8b99946544d76803c781550c3a0692 Mon Sep 17 00:00:00 2001
From: "m.m.capewell@googlemail.com" <m.m.capewell@googlemail.com>
Date: Wed, 2 Jul 2014 09:52:23 +0000
Subject: [PATCH] ARM64: Reland faster immediate check

Improve the code used to check for encodable logical immediates, fix
some corner cases associated with moving kWMinInt into W registers, and
add tests.

BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/364653003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22148 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm64/assembler-arm64.cc        | 250 ++++++++++++++++++++++++++----------
 src/arm64/macro-assembler-arm64.cc  |  14 +-
 src/arm64/utils-arm64.cc            |   5 +
 src/arm64/utils-arm64.h             |   1 +
 test/cctest/test-assembler-arm64.cc |  19 ++-
 5 files changed, 216 insertions(+), 73 deletions(-)

diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index b349421..ce4f451 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -2104,6 +2104,15 @@ void Assembler::MoveWide(const Register& rd,
                          uint64_t imm,
                          int shift,
                          MoveWideImmediateOp mov_op) {
+  // Ignore the top 32 bits of an immediate if we're moving to a W register.
+  if (rd.Is32Bits()) {
+    // Check that the top 32 bits are zero (a positive 32-bit number) or top
+    // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
+    ASSERT(((imm >> kWRegSizeInBits) == 0) ||
+           ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
+    imm &= kWRegMask;
+  }
+
   if (shift >= 0) {
     // Explicit shift specified.
     ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
@@ -2509,91 +2518,198 @@ bool Assembler::IsImmLogical(uint64_t value,
   ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
   ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
 
+  bool negate = false;
+
   // Logical immediates are encoded using parameters n, imm_s and imm_r using
   // the following table:
   //
-  //  N   imms    immr    size        S             R
-  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
-  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
-  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
-  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
-  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
-  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
+  //    N   imms    immr    size        S             R
+  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
+  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
+  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
+  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
+  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
+  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
   // (s bits must not be all set)
   //
-  // A pattern is constructed of size bits, where the least significant S+1
-  // bits are set. The pattern is rotated right by R, and repeated across a
-  // 32 or 64-bit value, depending on destination register width.
+  // A pattern is constructed of size bits, where the least significant S+1 bits
+  // are set. The pattern is rotated right by R, and repeated across a 32 or
+  // 64-bit value, depending on destination register width.
   //
-  // To test if an arbitary immediate can be encoded using this scheme, an
-  // iterative algorithm is used.
+  // Put another way: the basic format of a logical immediate is a single
+  // contiguous stretch of 1 bits, repeated across the whole word at intervals
+  // given by a power of 2. To identify them quickly, we first locate the
+  // lowest stretch of 1 bits, then the next 1 bit above that; that combination
+  // is different for every logical immediate, so it gives us all the
+  // information we need to identify the only logical immediate that our input
+  // could be, and then we simply check if that's the value we actually have.
   //
-  // TODO(mcapewel) This code does not consider using X/W register overlap to
-  // support 64-bit immediates where the top 32-bits are zero, and the bottom
-  // 32-bits are an encodable logical immediate.
+  // (The rotation parameter does give the possibility of the stretch of 1 bits
+  // going 'round the end' of the word. To deal with that, we observe that in
+  // any situation where that happens the bitwise NOT of the value is also a
+  // valid logical immediate. So we simply invert the input whenever its low bit
+  // is set, and then we know that the rotated case can't arise.)
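+  //
+  // For example, 0xc003c003c003c003 is such a wrapped case: a stretch of four
+  // 1 bits, rotated so that it straddles each 16-bit boundary. Its bitwise
+  // NOT, 0x3ffc3ffc3ffc3ffc, is an ordinary non-wrapped logical immediate,
+  // and that is the form the code below actually analyses.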
 
-  // 1. If the value has all set or all clear bits, it can't be encoded.
-  if ((value == 0) || (value == 0xffffffffffffffffUL) ||
-      ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
-    return false;
+  if (value & 1) {
+    // If the low bit is 1, negate the value, and set a flag to remember that we
+    // did (so that we can adjust the return values appropriately).
+    negate = true;
+    value = ~value;
   }
 
-  unsigned lead_zero = CountLeadingZeros(value, width);
-  unsigned lead_one = CountLeadingZeros(~value, width);
-  unsigned trail_zero = CountTrailingZeros(value, width);
-  unsigned trail_one = CountTrailingZeros(~value, width);
-  unsigned set_bits = CountSetBits(value, width);
-
-  // The fixed bits in the immediate s field.
-  // If width == 64 (X reg), start at 0xFFFFFF80.
-  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
-  // widths won't be executed.
-  int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
-  int imm_s_mask = 0x3F;
-
-  for (;;) {
-    // 2. If the value is two bits wide, it can be encoded.
-    if (width == 2) {
-      *n = 0;
-      *imm_s = 0x3C;
-      *imm_r = (value & 3) - 1;
-      return true;
-    }
+  if (width == kWRegSizeInBits) {
+    // To handle 32-bit logical immediates, the very easiest thing is to repeat
+    // the input value twice to make a 64-bit word. The correct encoding of that
+    // as a logical immediate will also be the correct encoding of the 32-bit
+    // value.
 
-    *n = (width == 64) ? 1 : 0;
-    *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
-    if ((lead_zero + set_bits) == width) {
-      *imm_r = 0;
-    } else {
-      *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
-    }
+    // The most-significant 32 bits may not be zero (ie. negate is true) so
+    // shift the value left before duplicating it.
+    value <<= kWRegSizeInBits;
+    value |= value >> kWRegSizeInBits;
+  }
 
-    // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
-    // the bit width of the value, it can be encoded.
-    if (lead_zero + trail_zero + set_bits == width) {
-      return true;
+  // The basic analysis idea: imagine our input word looks like this.
+  //
+  //    0011111000111110001111100011111000111110001111100011111000111110
+  //                                                          c  b    a
+  //                                                          |<--d-->|
+  //
+  // We find the lowest set bit (as an actual power-of-2 value, not its index)
+  // and call it a. Then we add a to our original number, which wipes out the
+  // bottommost stretch of set bits and replaces it with a 1 carried into the
+  // next zero bit. Then we look for the new lowest set bit, which is in
+  // position b, and subtract it, so now our number is just like the original
+  // but with the lowest stretch of set bits completely gone. Now we find the
+  // lowest set bit again, which is position c in the diagram above. Then we'll
+  // measure the distance d between bit positions a and c (using CLZ), and that
+  // tells us that the only valid logical immediate that could possibly be equal
+  // to this number is the one in which a stretch of bits running from a to just
+  // below b is replicated every d bits.
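+  //
+  // For example, with value == 0x00f000f000f000f0: a == 1 << 4, b == 1 << 8
+  // and c == 1 << 20, so d == 16, matching the pattern's 16-bit repeat
+  // interval.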
+  uint64_t a = LargestPowerOf2Divisor(value);
+  uint64_t value_plus_a = value + a;
+  uint64_t b = LargestPowerOf2Divisor(value_plus_a);
+  uint64_t value_plus_a_minus_b = value_plus_a - b;
+  uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);
+
+  int d, clz_a, out_n;
+  uint64_t mask;
+
+  if (c != 0) {
+    // The general case, in which there is more than one stretch of set bits.
+    // Compute the repeat distance d, and set up a bitmask covering the basic
+    // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
+    // of these cases the N bit of the output will be zero.
+    clz_a = CountLeadingZeros(a, kXRegSizeInBits);
+    int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
+    d = clz_a - clz_c;
+    mask = ((V8_UINT64_C(1) << d) - 1);
+    out_n = 0;
+  } else {
+    // Handle degenerate cases.
+    //
+    // If any of those 'find lowest set bit' operations didn't find a set bit at
+    // all, then the word will have been zero thereafter, so in particular the
+    // last lowest_set_bit operation will have returned zero. So we can test for
+    // all the special case conditions in one go by seeing if c is zero.
+    if (a == 0) {
+      // The input was zero (or all 1 bits, which will come to here too after we
+      // inverted it at the start of the function), for which we just return
+      // false.
+      return false;
+    } else {
+      // Otherwise, if c was zero but a was not, then there's just one stretch
+      // of set bits in our word, meaning that we have the trivial case of
+      // d == 64 and only one 'repetition'. Set up all the same variables as in
+      // the general case above, and set the N bit in the output.
+      clz_a = CountLeadingZeros(a, kXRegSizeInBits);
+      d = 64;
+      mask = ~V8_UINT64_C(0);
+      out_n = 1;
     }
+  }
 
-    // 4. If the sum of leading ones, trailing ones and unset bits in the
-    // value is equal to the bit width of the value, it can be encoded.
-    if (lead_one + trail_one + (width - set_bits) == width) {
-      return true;
-    }
+  // If the repeat period d is not a power of two, it can't be encoded.
+  if (!IS_POWER_OF_TWO(d)) {
+    return false;
+  }
 
-    // 5. If the most-significant half of the bitwise value is equal to the
-    // least-significant half, return to step 2 using the least-significant
-    // half of the value.
-    uint64_t mask = (1UL << (width >> 1)) - 1;
-    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
-      width >>= 1;
-      set_bits >>= 1;
-      imm_s_fixed >>= 1;
-      continue;
-    }
+  if (((b - a) & ~mask) != 0) {
+    // If the bit stretch (b - a) does not fit within the mask derived from the
+    // repeat period, then fail.
+    return false;
+  }
 
-    // 6. Otherwise, the value can't be encoded.
+  // The only possible option is b - a repeated every d bits. Now we're going to
+  // actually construct the valid logical immediate derived from that
+  // specification, and see if it equals our original input.
+  //
+  // To repeat a value every d bits, we multiply it by a number of the form
+  // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
+  // be derived using a table lookup on CLZ(d).
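+  //
+  // For example, d == 8 gives CLZ(d) == 60, so multiplier_idx == 3 below and
+  // the multiplier is 0x0101010101010101; a stretch value of 0xf0 then
+  // replicates as 0xf0 * 0x0101010101010101 == 0xf0f0f0f0f0f0f0f0.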
+  static const uint64_t multipliers[] = {
+    0x0000000000000001UL,
+    0x0000000100000001UL,
+    0x0001000100010001UL,
+    0x0101010101010101UL,
+    0x1111111111111111UL,
+    0x5555555555555555UL,
+  };
+  int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
+  // Ensure that the index to the multipliers array is within bounds.
+  ASSERT((multiplier_idx >= 0) &&
+         (static_cast<size_t>(multiplier_idx) <
+          (sizeof(multipliers) / sizeof(multipliers[0]))));
+  uint64_t multiplier = multipliers[multiplier_idx];
+  uint64_t candidate = (b - a) * multiplier;
+
+  if (value != candidate) {
+    // The candidate pattern doesn't match our input value, so fail.
     return false;
   }
+
+  // We have a match! This is a valid logical immediate, so now we have to
+  // construct the bits and pieces of the instruction encoding that generates
+  // it.
+
+  // Count the set bits in our basic stretch. The special case of clz(0) == -1
+  // makes the answer come out right for stretches that reach the very top of
+  // the word (e.g. numbers like 0xffffc00000000000).
+  int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
+  int s = clz_a - clz_b;
+
+  // Decide how many bits to rotate right by, to put the low bit of that basic
+  // stretch in position a.
+  int r;
+  if (negate) {
+    // If we inverted the input right at the start of this function, here's
+    // where we compensate: the number of set bits becomes the number of clear
+    // bits, and the rotation count is based on position b rather than position
+    // a (since b is the location of the 'lowest' 1 bit after inversion).
+    s = d - s;
+    r = (clz_b + 1) & (d - 1);
+  } else {
+    r = (clz_a + 1) & (d - 1);
+  }
+
+  // Now we're done, except for having to encode the S output in such a way that
+  // it gives both the number of set bits and the length of the repeated
+  // segment. The s field is encoded like this:
+  //
+  //     imms    size        S
+  //    ssssss    64    UInt(ssssss)
+  //    0sssss    32    UInt(sssss)
+  //    10ssss    16    UInt(ssss)
+  //    110sss     8    UInt(sss)
+  //    1110ss     4    UInt(ss)
+  //    11110s     2    UInt(s)
+  //
+  // So we 'or' (-d << 1) with our computed s to form imms.
+  *n = out_n;
+  *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+  *imm_r = r;
+
+  return true;
 }
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 343a9e3..fc93e8f 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -64,17 +64,23 @@ void MacroAssembler::LogicalMacro(const Register& rd,
   } else if (operand.IsImmediate()) {
     int64_t immediate = operand.ImmediateValue();
     unsigned reg_size = rd.SizeInBits();
-    ASSERT(rd.Is64Bits() || is_uint32(immediate));
 
     // If the operation is NOT, invert the operation and immediate.
     if ((op & NOT) == NOT) {
       op = static_cast<LogicalOp>(op & ~NOT);
       immediate = ~immediate;
-      if (rd.Is32Bits()) {
-        immediate &= kWRegMask;
-      }
     }
 
+    // Ignore the top 32 bits of an immediate if we're moving to a W register.
+    if (rd.Is32Bits()) {
+      // Check that the top 32 bits are consistent.
+      ASSERT(((immediate >> kWRegSizeInBits) == 0) ||
+             ((immediate >> kWRegSizeInBits) == -1));
+      immediate &= kWRegMask;
+    }
+
+    ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
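+    // For example, the new bitwise_wide_imm test's Orr(w12, w0, kWMinInt)
+    // arrives here with immediate == 0xffffffff80000000 (kWMinInt sign
+    // extended to 64 bits); the block above reduces it to 0x80000000 before
+    // the logical immediate encoding is attempted.
+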
     // Special cases for all set or all clear immediates.
     if (immediate == 0) {
       switch (op) {
diff --git a/src/arm64/utils-arm64.cc b/src/arm64/utils-arm64.cc
index 0cb4ea5..bde4968 100644
--- a/src/arm64/utils-arm64.cc
+++ b/src/arm64/utils-arm64.cc
@@ -78,6 +78,11 @@ int CountSetBits(uint64_t value, int width) {
 }
 
 
+uint64_t LargestPowerOf2Divisor(uint64_t value) {
+  return value & -value;
+}
+
+
 int MaskToBit(uint64_t mask) {
   ASSERT(CountSetBits(mask, 64) == 1);
   return CountTrailingZeros(mask, 64);
diff --git a/src/arm64/utils-arm64.h b/src/arm64/utils-arm64.h
index 3c040c6..8494983 100644
--- a/src/arm64/utils-arm64.h
+++ b/src/arm64/utils-arm64.h
@@ -57,6 +57,7 @@ int CountLeadingZeros(uint64_t value, int width);
 int CountLeadingSignBits(int64_t value, int width);
 int CountTrailingZeros(uint64_t value, int width);
 int CountSetBits(uint64_t value, int width);
+uint64_t LargestPowerOf2Divisor(uint64_t value);
 int MaskToBit(uint64_t mask);
 
diff --git a/test/cctest/test-assembler-arm64.cc b/test/cctest/test-assembler-arm64.cc
index 8d641bd..3ac3669 100644
--- a/test/cctest/test-assembler-arm64.cc
+++ b/test/cctest/test-assembler-arm64.cc
@@ -426,6 +426,9 @@ TEST(mov_imm_w) {
   __ Mov(w4, 0x00001234L);
   __ Mov(w5, 0x12340000L);
   __ Mov(w6, 0x12345678L);
+  __ Mov(w7, (int32_t)0x80000000);
+  __ Mov(w8, (int32_t)0xffff0000);
+  __ Mov(w9, kWMinInt);
   END();
 
   RUN();
@@ -437,6 +440,9 @@
   ASSERT_EQUAL_64(0x00001234L, x4);
   ASSERT_EQUAL_64(0x12340000L, x5);
   ASSERT_EQUAL_64(0x12345678L, x6);
+  ASSERT_EQUAL_64(0x80000000L, x7);
+  ASSERT_EQUAL_64(0xffff0000L, x8);
+  ASSERT_EQUAL_32(kWMinInt, w9);
 
   TEARDOWN();
 }
@@ -588,6 +594,9 @@ TEST(bitwise_wide_imm) {
 
   __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
   __ Orr(w11, w1, Operand(0x90abcdef));
+
+  __ Orr(w12, w0, kWMinInt);
+  __ Eor(w13, w0, kWMinInt);
   END();
 
   RUN();
@@ -596,6 +605,8 @@
   ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
   ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
   ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
+  ASSERT_EQUAL_32(kWMinInt, w12);
+  ASSERT_EQUAL_32(kWMinInt, w13);
 
   TEARDOWN();
 }
@@ -3362,8 +3373,10 @@ TEST(add_sub_wide_imm) {
   __ Add(w12, w0, Operand(0x12345678));
   __ Add(w13, w1, Operand(0xffffffff));
 
-  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
+  __ Add(w18, w0, Operand(kWMinInt));
+  __ Sub(w19, w0, Operand(kWMinInt));
 
+  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
   __ Sub(w21, w0, Operand(0x12345678));
 
   END();
@@ -3375,8 +3388,10 @@
   ASSERT_EQUAL_32(0x12345678, w12);
   ASSERT_EQUAL_64(0x0, x13);
 
-  ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
+  ASSERT_EQUAL_32(kWMinInt, w18);
+  ASSERT_EQUAL_32(kWMinInt, w19);
 
+  ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
   ASSERT_EQUAL_32(-0x12345678, w21);
 
   TEARDOWN();
-- 
2.7.4
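
As a standalone illustration of the algorithm above (an editor's sketch, not
part of the patch), the acceptance check can be mirrored in a few lines of
portable C++, minus the n/imm_s/imm_r outputs. The names
IsEncodableLogicalImmediate and LowestSetBit are invented for this sketch, and
__builtin_ctzll assumes a GCC/Clang toolchain:

#include <cstdint>

// Isolate the lowest set bit, exactly as LargestPowerOf2Divisor does above.
static uint64_t LowestSetBit(uint64_t x) { return x & (0 - x); }

// Returns true if value is a (possibly rotated) repeating run of contiguous
// ones, i.e. encodable as an ARM64 64-bit logical immediate.
bool IsEncodableLogicalImmediate(uint64_t value) {
  // A run that wraps 'round the end' of the word becomes an ordinary run
  // under bitwise NOT, as in the patch.
  if (value & 1) value = ~value;
  if (value == 0) return false;  // Catches 0 and (inverted) ~0: not encodable.
  uint64_t a = LowestSetBit(value);          // Bottom of the lowest run.
  uint64_t b = LowestSetBit(value + a);      // First 1 bit above that run.
  uint64_t c = LowestSetBit(value + a - b);  // Bottom of the next run up.
  if (c == 0) return true;  // A single contiguous run is always encodable.
  int d = __builtin_ctzll(c) - __builtin_ctzll(a);  // Candidate repeat period.
  if (d & (d - 1)) return false;             // Period must be a power of two.
  uint64_t run = b - a;                      // The lowest run, in position.
  if (run & ~((uint64_t{1} << d) - 1)) return false;  // Run must fit in d bits.
  // Replicate the run every d bits and require an exact match, as the
  // multiplier table does in the patch.
  uint64_t candidate = 0;
  for (int i = 0; i < 64; i += d) candidate |= run << i;
  return candidate == value;
}

For instance, this accepts 0x00f000f000f000f0 and 0xc003c003c003c003 but
rejects 0x0123456789abcdef, matching the examples worked through in the
comments above.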