2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
39 namespace ARMRegisters {
// General-purpose register aliases following the ARM/Thumb ABI names.
// (The surrounding enum members are elided in this excerpt.)
48 r7, wr = r7, // thumb work register
50 r9, sb = r9, // static base
51 r10, sl = r10, // stack limit
52 r11, fp = r11, // frame pointer
127 } FPDoubleRegisterID;
// Maps a double-precision VFP register to the first single-precision register
// aliasing it; the index is doubled (d<n> overlays s<2n> and s<2n+1>).
164 inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
167 return (FPSingleRegisterID)(reg << 1);
// Inverse of asSingle(): halves the index to recover the containing
// double-precision register.
170 inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
173 return (FPDoubleRegisterID)(reg >> 1);
177 class ARMv7Assembler;
// Represents an immediate operand for Thumb instructions, in one of three
// forms (see ThumbImmediateType below): invalid, Thumb-2 "encoded" (modified
// immediate), or a plain 16-bit value.
178 class ARMThumbImmediate {
179 friend class ARMv7Assembler;
// Discriminator for how m_value is to be interpreted.
181 typedef uint8_t ThumbImmediateType;
182 static const ThumbImmediateType TypeInvalid = 0;
183 static const ThumbImmediateType TypeEncoded = 1;
184 static const ThumbImmediateType TypeUInt16 = 2;
194 // If this is an encoded immediate, then it may describe a shift, or a pattern.
// Shift form: 7 explicitly-stored bits of a byte (top bit implicit) rotated
// right by shiftAmount — see makeEncodedImm below.
196 unsigned shiftValue7 : 7;
197 unsigned shiftAmount : 5;
// Pattern form: an 8-bit byte replicated across the word, with 'pattern'
// selecting which byte lanes it occupies (values 0-3 used by makeEncodedImm).
200 unsigned immediate : 8;
201 unsigned pattern : 4;
203 } ThumbImmediateValue;
205 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
216 ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
218 if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
219 value >>= N; /* if any were set, lose the bottom N */
220 else /* if none of the top N bits are set, */
221 zeros += N; /* then we have identified N leading zeros */
// Counts the leading zero bits of 'value' by binary search: repeatedly halve
// the window width (16, 8, 4, 2, 1), shifting away the low half when the top
// half contains a set bit and accumulating zeros when it does not.
// Returns 32 for a zero input.
static int32_t countLeadingZeros(uint32_t value)
{
    if (!value)
        return 32;

    int32_t zeros = 0;
    for (int width = 16; width; width >>= 1) {
        if (value & ~((1u << width) - 1))
            value >>= width;
        else
            zeros += width;
    }
    return zeros;
}
// Default constructor: yields an invalid immediate (isValid() is false).
239 : m_type(TypeInvalid)
// Constructs an immediate of the given type from an already-populated value.
244 ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
// Constructs a TypeUInt16 immediate directly from a raw 16-bit value.
250 ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
253 // Make sure this constructor is only reached with type TypeUInt16;
254 // this extra parameter makes the code a little clearer by making it
255 // explicit at call sites which type is being constructed.
256 ASSERT_UNUSED(type, type == TypeUInt16);
258 m_value.asInt = value;
// Attempts to represent 'value' as a Thumb-2 modified ("encoded") immediate:
// either a small constant, a byte shifted/rotated into position, or a byte
// replicated across the word. Returns an invalid immediate if no encoding fits.
262 static ARMThumbImmediate makeEncodedImm(uint32_t value)
264 ThumbImmediateValue encoding;
267 // okay, these are easy.
269 encoding.immediate = value;
270 encoding.pattern = 0;
271 return ARMThumbImmediate(TypeEncoded, encoding);
274 int32_t leadingZeros = countLeadingZeros(value);
275 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
276 ASSERT(leadingZeros < 24);
278 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
279 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
280 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
281 int32_t rightShiftAmount = 24 - leadingZeros;
282 if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
283 // Shift the value down to the low byte position. The assign to
284 // shiftValue7 drops the implicit top bit.
285 encoding.shiftValue7 = value >> rightShiftAmount;
286 // The encoded shift amount is the magnitude of a right rotate.
287 encoding.shiftAmount = 8 + leadingZeros;
288 return ARMThumbImmediate(TypeEncoded, encoding);
// Replicated-byte patterns: all four bytes equal (0xNNNNNNNN).
294 if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
295 encoding.immediate = bytes.byte0;
296 encoding.pattern = 3;
297 return ARMThumbImmediate(TypeEncoded, encoding);
// Even bytes set, odd bytes zero (0x00NN00NN).
300 if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
301 encoding.immediate = bytes.byte0;
302 encoding.pattern = 1;
303 return ARMThumbImmediate(TypeEncoded, encoding);
// Odd bytes set, even bytes zero (0xNN00NN00).
306 if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
307 encoding.immediate = bytes.byte1;
308 encoding.pattern = 2;
309 return ARMThumbImmediate(TypeEncoded, encoding);
// No modified-immediate encoding exists for this value.
312 return ARMThumbImmediate();
315 static ARMThumbImmediate makeUInt12(int32_t value)
317 return (!(value & 0xfffff000))
318 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
319 : ARMThumbImmediate();
// Prefers the 12-bit unsigned form; falls back to a Thumb-2 encoded immediate.
322 static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
324 // If this is not a 12-bit unsigned int, try making an encoded immediate.
325 return (!(value & 0xfffff000))
326 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
327 : makeEncodedImm(value);
330 // The 'make' methods, above, return a !isValid() value if the argument
331 // cannot be represented as the requested type. This method is called
332 // 'get' since the argument can always be represented.
// NOTE(review): despite the comment above, the method below is spelled
// 'make', not 'get' — confirm which name the comment intended.
333 static ARMThumbImmediate makeUInt16(uint16_t value)
335 return ARMThumbImmediate(TypeUInt16, value);
// True for any immediate successfully produced by the make* factories above.
340 return m_type != TypeInvalid;
343 uint16_t asUInt16() const { return m_value.asInt; }
345 // These methods rely on the format of encoded byte values.
// NOTE(review): isUInt3..isUInt8 only mask the low 16 bits and do not check
// m_type, unlike isUInt9..isUInt16 below — presumably callers only use the
// narrow predicates on TypeUInt16 immediates; confirm at call sites.
346 bool isUInt3() { return !(m_value.asInt & 0xfff8); }
347 bool isUInt4() { return !(m_value.asInt & 0xfff0); }
348 bool isUInt5() { return !(m_value.asInt & 0xffe0); }
349 bool isUInt6() { return !(m_value.asInt & 0xffc0); }
350 bool isUInt7() { return !(m_value.asInt & 0xff80); }
351 bool isUInt8() { return !(m_value.asInt & 0xff00); }
352 bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
353 bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
354 bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
355 bool isUInt16() { return m_type == TypeUInt16; }
// The getUIntN accessors assert the matching predicate and return the raw bits.
356 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
357 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
358 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
359 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
360 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
361 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
362 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
363 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
364 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
365 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
367 bool isEncodedImm() { return m_type == TypeEncoded; }
// Discriminator and raw storage; see ThumbImmediateType / ThumbImmediateValue.
370 ThumbImmediateType m_type;
371 ThumbImmediateValue m_value;
// RRX shares ROR's encoding (it is ROR with a zero shift amount field).
380 SRType_RRX = SRType_ROR
// Packs a shift type and 5-bit amount into the split fields used by 32-bit
// Thumb-2 data-processing encodings; lo4()/hi4() yield the two nibbles that
// the formatter places into the instruction halves.
383 class ShiftTypeAndAmount {
384 friend class ARMv7Assembler;
// Default constructor: type 0 with zero amount (i.e. no shift applied).
389 m_u.type = (ARMShiftType)0;
393 ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
// Shift amounts are at most 5 bits wide.
396 m_u.amount = amount & 31;
399 unsigned lo4() { return m_u.lo4; }
400 unsigned hi4() { return m_u.hi4; }
415 class ARMv7Assembler {
417 typedef ARMRegisters::RegisterID RegisterID;
418 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
419 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
420 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
// Mapping from ARM condition names to the equivalent x86-style mnemonics.
422 // (HS, LO, HI, LS) -> (AE, B, A, BE)
423 // (VS, VC) -> (O, NO)
427 ConditionHS, ConditionCS = ConditionHS,
428 ConditionLO, ConditionCC = ConditionLO,
// Jump/link enums pack a worst-case byte size (upper bits) together with a
// small index (low 3 bits); JUMP_ENUM_SIZE recovers the size in bytes.
443 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
444 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
445 enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
446 JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
447 JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
448 JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
449 JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
// Concrete link forms, from shortest (one halfword) to a full conditional BX
// sequence; sizes are in multiples of 16-bit Thumb halfwords.
452 LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
453 LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
454 LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
455 LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
456 LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
457 LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
458 LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
459 LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
// Records one unresolved jump: source offset, target offset, and how the jump
// may be encoded once distances are known. The link type starts out invalid
// and is fixed up exactly once via setLinkType().
464 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
466 data.realTypes.m_from = from;
467 data.realTypes.m_to = to;
468 data.realTypes.m_type = type;
469 data.realTypes.m_linkType = LinkInvalid;
470 data.realTypes.m_condition = condition;
// Assignment copies the three raw machine words of the union rather than the
// individual bitfield members — presumably to avoid slow member-wise bitfield
// copies; COMPILE_ASSERT below guarantees both views are the same size.
472 void operator=(const LinkRecord& other)
474 data.copyTypes.content[0] = other.data.copyTypes.content[0];
475 data.copyTypes.content[1] = other.data.copyTypes.content[1];
476 data.copyTypes.content[2] = other.data.copyTypes.content[2];
478 intptr_t from() const { return data.realTypes.m_from; }
479 void setFrom(intptr_t from) { data.realTypes.m_from = from; }
480 intptr_t to() const { return data.realTypes.m_to; }
481 JumpType type() const { return data.realTypes.m_type; }
482 JumpLinkType linkType() const { return data.realTypes.m_linkType; }
483 void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
484 Condition condition() const { return data.realTypes.m_condition; }
// Bitfields keep the record compact (m_from squeezed into 31 bits).
488 intptr_t m_from : 31;
491 JumpLinkType m_linkType : 8;
492 Condition m_condition : 16;
497 COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
// Assembler constructor fragment: watchpoint bookkeeping starts out-of-range.
502 : m_indexOfLastWatchpoint(INT_MIN)
503 , m_indexOfTailOfLastWatchpoint(INT_MIN)
// True for registers (sp, pc) that most 32-bit Thumb-2 encodings must not use.
510 bool BadReg(RegisterID reg)
512 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
// Splits a single-precision VFP register number into the 4-bit high field and
// the separate low bit used by VFP encodings.
// NOTE(review): the conditional guarding the low-bit OR (register parity
// check) appears to be elided from this excerpt — confirm against the
// unabridged source before editing.
515 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
517 uint32_t rdMask = (rdNum >> 1) << highBitsShift;
519 rdMask |= 1 << lowBitShift;
// Double-precision variant: low 4 bits plus a separate top bit.
// NOTE(review): the guard for the high-bit OR is likewise elided here.
523 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
525 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
527 rdMask |= 1 << highBitShift;
// 16-bit Thumb opcodes: each constant is the fixed (high) bit pattern of the
// instruction halfword; register and immediate fields are OR'ed in by the
// formatter's oneWordOp* helpers. The _Tn suffix is the ARM ARM encoding
// variant number.
532 OP_ADD_reg_T1 = 0x1800,
533 OP_SUB_reg_T1 = 0x1A00,
534 OP_ADD_imm_T1 = 0x1C00,
535 OP_SUB_imm_T1 = 0x1E00,
536 OP_MOV_imm_T1 = 0x2000,
537 OP_CMP_imm_T1 = 0x2800,
538 OP_ADD_imm_T2 = 0x3000,
539 OP_SUB_imm_T2 = 0x3800,
540 OP_AND_reg_T1 = 0x4000,
541 OP_EOR_reg_T1 = 0x4040,
542 OP_TST_reg_T1 = 0x4200,
543 OP_RSB_imm_T1 = 0x4240,
544 OP_CMP_reg_T1 = 0x4280,
545 OP_ORR_reg_T1 = 0x4300,
546 OP_MVN_reg_T1 = 0x43C0,
547 OP_ADD_reg_T2 = 0x4400,
548 OP_MOV_reg_T1 = 0x4600,
// Load/store with register offset.
551 OP_STR_reg_T1 = 0x5000,
552 OP_STRH_reg_T1 = 0x5200,
553 OP_STRB_reg_T1 = 0x5400,
554 OP_LDRSB_reg_T1 = 0x5600,
555 OP_LDR_reg_T1 = 0x5800,
556 OP_LDRH_reg_T1 = 0x5A00,
557 OP_LDRB_reg_T1 = 0x5C00,
558 OP_LDRSH_reg_T1 = 0x5E00,
// Load/store with immediate offset, and SP-relative forms.
559 OP_STR_imm_T1 = 0x6000,
560 OP_LDR_imm_T1 = 0x6800,
561 OP_STRB_imm_T1 = 0x7000,
562 OP_LDRB_imm_T1 = 0x7800,
563 OP_STRH_imm_T1 = 0x8000,
564 OP_LDRH_imm_T1 = 0x8800,
565 OP_STR_imm_T2 = 0x9000,
566 OP_LDR_imm_T2 = 0x9800,
567 OP_ADD_SP_imm_T1 = 0xA800,
568 OP_ADD_SP_imm_T2 = 0xB000,
569 OP_SUB_SP_imm_T1 = 0xB080,
// 32-bit Thumb-2 opcodes: these constants are the FIRST halfword of each
// encoding; operand fields are merged in by the twoWordOp* formatter helpers.
578 OP_AND_reg_T2 = 0xEA00,
579 OP_TST_reg_T2 = 0xEA10,
580 OP_ORR_reg_T2 = 0xEA40,
581 OP_ORR_S_reg_T2 = 0xEA50,
// The shift-immediate forms share one first halfword (MOV-shifted-register);
// they are distinguished by the shift-type bits carried in the second word.
582 OP_ASR_imm_T1 = 0xEA4F,
583 OP_LSL_imm_T1 = 0xEA4F,
584 OP_LSR_imm_T1 = 0xEA4F,
585 OP_ROR_imm_T1 = 0xEA4F,
586 OP_MVN_reg_T2 = 0xEA6F,
587 OP_EOR_reg_T2 = 0xEA80,
588 OP_ADD_reg_T3 = 0xEB00,
589 OP_ADD_S_reg_T3 = 0xEB10,
590 OP_SUB_reg_T2 = 0xEBA0,
591 OP_SUB_S_reg_T2 = 0xEBB0,
592 OP_CMP_reg_T2 = 0xEBB0,
// VFP data transfer and data processing (first halfwords).
593 OP_VMOV_CtoD = 0xEC00,
594 OP_VMOV_DtoC = 0xEC10,
599 OP_VMOV_CtoS = 0xEE00,
600 OP_VMOV_StoC = 0xEE10,
// Several VFP ops share the 0xEEB0 first halfword and are distinguished by
// their second-halfword patterns below.
607 OP_VCVT_FPIVFP = 0xEEB0,
609 OP_VMOV_IMM_T2 = 0xEEB0,
612 OP_VSQRT_T1 = 0xEEB0,
613 OP_VCVTSD_T1 = 0xEEB0,
614 OP_VCVTDS_T1 = 0xEEB0,
// Data-processing with modified immediate / plain 12-bit immediate.
617 OP_AND_imm_T1 = 0xF000,
619 OP_ORR_imm_T1 = 0xF040,
620 OP_MOV_imm_T2 = 0xF040,
622 OP_EOR_imm_T1 = 0xF080,
623 OP_ADD_imm_T3 = 0xF100,
624 OP_ADD_S_imm_T3 = 0xF110,
627 OP_SUB_imm_T3 = 0xF1A0,
628 OP_SUB_S_imm_T3 = 0xF1B0,
629 OP_CMP_imm_T2 = 0xF1B0,
630 OP_RSB_imm_T2 = 0xF1C0,
631 OP_RSB_S_imm_T2 = 0xF1D0,
632 OP_ADD_imm_T4 = 0xF200,
633 OP_MOV_imm_T3 = 0xF240,
634 OP_SUB_imm_T4 = 0xF2A0,
// 32-bit load/store forms.
638 OP_STRB_imm_T3 = 0xF800,
639 OP_STRB_reg_T2 = 0xF800,
640 OP_LDRB_imm_T3 = 0xF810,
641 OP_LDRB_reg_T2 = 0xF810,
642 OP_STRH_imm_T3 = 0xF820,
643 OP_STRH_reg_T2 = 0xF820,
644 OP_LDRH_reg_T2 = 0xF830,
645 OP_LDRH_imm_T3 = 0xF830,
646 OP_STR_imm_T4 = 0xF840,
647 OP_STR_reg_T2 = 0xF840,
648 OP_LDR_imm_T4 = 0xF850,
649 OP_LDR_reg_T2 = 0xF850,
650 OP_STRB_imm_T2 = 0xF880,
651 OP_LDRB_imm_T2 = 0xF890,
652 OP_STRH_imm_T2 = 0xF8A0,
653 OP_LDRH_imm_T2 = 0xF8B0,
654 OP_STR_imm_T3 = 0xF8C0,
655 OP_LDR_imm_T3 = 0xF8D0,
656 OP_LDRSB_reg_T2 = 0xF910,
657 OP_LDRSH_reg_T2 = 0xF930,
658 OP_LSL_reg_T2 = 0xFA00,
659 OP_LSR_reg_T2 = 0xFA20,
660 OP_ASR_reg_T2 = 0xFA40,
661 OP_ROR_reg_T2 = 0xFA60,
663 OP_SMULL_T1 = 0xFB80,
// SECOND halfwords ("...b" suffix) paired with the VFP first halfwords above.
667 OP_VADD_T2b = 0x0A00,
671 OP_VMOV_IMM_T2b = 0x0A00,
672 OP_VMOV_T2b = 0x0A40,
673 OP_VMUL_T2b = 0x0A00,
676 OP_VMOV_StoCb = 0x0A10,
677 OP_VMOV_CtoSb = 0x0A10,
678 OP_VMOV_DtoCb = 0x0A10,
679 OP_VMOV_CtoDb = 0x0A10,
681 OP_VABS_T2b = 0x0A40,
683 OP_VCVT_FPIVFPb = 0x0A40,
684 OP_VNEG_T2b = 0x0A40,
685 OP_VSUB_T2b = 0x0A40,
686 OP_VSQRT_T1b = 0x0A40,
687 OP_VCVTSD_T1b = 0x0A40,
688 OP_VCVTDS_T1b = 0x0A40,
// Packs four 4-bit operand fields for the second halfword of 32-bit ops.
695 FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
714 class ARMInstructionFormatter;
// Computes one condition bit of an IT-instruction mask: for a "then" slot the
// bit copies the condition's low bit; for an "else" slot it is inverted.
717 bool ifThenElseConditionBit(Condition condition, bool isIf)
719 return isIf ? (condition & 1) : !(condition & 1);
// The ifThenElse overloads build the firstcond:mask byte of an IT instruction
// covering 4, 3, 2, or 1 following instruction(s). The mask's trailing
// terminator bit is set in elided lines; the AL condition only permits an
// all-"then" block (checked by the power-of-two ASSERTs).
721 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
723 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
724 | (ifThenElseConditionBit(condition, inst3if) << 2)
725 | (ifThenElseConditionBit(condition, inst4if) << 1)
727 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
728 return (condition << 4) | mask;
730 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
732 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
733 | (ifThenElseConditionBit(condition, inst3if) << 2)
735 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
736 return (condition << 4) | mask;
738 uint8_t ifThenElse(Condition condition, bool inst2if)
740 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
742 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
743 return (condition << 4) | mask;
// Single-instruction IT: the mask initialization is elided in this excerpt.
746 uint8_t ifThenElse(Condition condition)
749 return (condition << 4) | mask;
// ADC (immediate): rd = rn + imm + carry, Thumb-2 encoded immediate only.
754 void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
756 // Rd can only be SP if Rn is also SP.
757 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
758 ASSERT(rd != ARMRegisters::pc);
759 ASSERT(rn != ARMRegisters::pc);
760 ASSERT(imm.isEncodedImm());
762 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
// ADD (immediate): picks the densest available encoding — SP-relative 16-bit
// forms, low-register 16-bit forms, then the 32-bit T3/T4 encodings.
765 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
767 // Rd can only be SP if Rn is also SP.
768 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
769 ASSERT(rd != ARMRegisters::pc);
770 ASSERT(rn != ARMRegisters::pc);
771 ASSERT(imm.isValid());
// SP-relative adds encode the offset in words, hence the alignment check
// and the >> 2 below.
773 if (rn == ARMRegisters::sp) {
774 ASSERT(!(imm.getUInt16() & 3));
775 if (!(rd & 8) && imm.isUInt10()) {
776 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
778 } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
779 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
// Low registers: the T1 encoding carries the 3-bit immediate in the slot the
// formatter treats as a register, hence the RegisterID cast.
782 } else if (!((rd | rn) & 8)) {
784 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
786 } else if ((rd == rn) && imm.isUInt8()) {
787 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
// Fall back to 32-bit: encoded immediate (T3) or plain 12-bit (T4).
792 if (imm.isEncodedImm())
793 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
795 ASSERT(imm.isUInt12());
796 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
// ADD (register, shifted): 32-bit T3 encoding.
800 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
802 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
803 ASSERT(rd != ARMRegisters::pc);
804 ASSERT(rn != ARMRegisters::pc);
806 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
809 // NOTE: In an IT block, add doesn't modify the flags register.
810 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
813 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
815 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
816 else if (!((rd | rn | rm) & 8))
817 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
819 add(rd, rn, rm, ShiftTypeAndAmount());
// ADDS: flag-setting variants of the above.
822 // Not allowed in an IT (if then) block.
823 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
825 // Rd can only be SP if Rn is also SP.
826 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
827 ASSERT(rd != ARMRegisters::pc);
828 ASSERT(rn != ARMRegisters::pc);
829 ASSERT(imm.isEncodedImm());
831 if (!((rd | rn) & 8)) {
833 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
835 } else if ((rd == rn) && imm.isUInt8()) {
836 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
841 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
844 // Not allowed in an IT (if then) block?
845 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
847 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
848 ASSERT(rd != ARMRegisters::pc);
849 ASSERT(rn != ARMRegisters::pc);
851 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
854 // Not allowed in an IT (if then) block.
855 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
857 if (!((rd | rn | rm) & 8))
858 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
860 add_S(rd, rn, rm, ShiftTypeAndAmount());
// AND — prefixed ARM_ to avoid colliding with the C++ alternative token 'and'.
863 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
867 ASSERT(imm.isEncodedImm());
868 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
871 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
876 m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// AND (register): 16-bit T1 is two-operand, so it applies only when rd
// coincides with one source (AND is commutative, so either order works).
879 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
881 if ((rd == rn) && !((rd | rm) & 8))
882 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
883 else if ((rd == rm) && !((rd | rn) & 8))
884 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
886 ARM_and(rd, rn, rm, ShiftTypeAndAmount());
// ASR (immediate / register).
889 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
893 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
894 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
897 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
902 m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// Unconditional branch (32-bit T4 encoding); returns a label at the branch so
// it can be linked to its target later.
905 // Only allowed in IT (if then) block if last instruction.
906 ALWAYS_INLINE AssemblerLabel b()
908 m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
909 return m_formatter.label();
// Branch with link and exchange (register).
912 // Only allowed in IT (if then) block if last instruction.
913 ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
915 ASSERT(rm != ARMRegisters::pc);
916 m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
917 return m_formatter.label();
// Branch and exchange (register).
920 // Only allowed in IT (if then) block if last instruction.
921 ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
923 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
924 return m_formatter.label();
// Software breakpoint with an 8-bit comment field.
927 void bkpt(uint8_t imm = 0)
929 m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
// Count leading zeros: note rm is encoded in both halfwords of CLZ.
932 ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
936 m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
// CMN (immediate): compare-negative; 0xf in the Rd slot per the encoding.
939 ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
941 ASSERT(rn != ARMRegisters::pc);
942 ASSERT(imm.isEncodedImm());
944 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
// CMP (immediate): 16-bit T1 when rn is low and imm fits in 8 bits.
947 ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
949 ASSERT(rn != ARMRegisters::pc);
950 ASSERT(imm.isEncodedImm());
952 if (!(rn & 8) && imm.isUInt8())
953 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
955 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
958 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
960 ASSERT(rn != ARMRegisters::pc);
962 m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
965 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
968 cmp(rn, rm, ShiftTypeAndAmount())
970 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
973 // xor is not spelled with an 'e'. :-(
974 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
978 ASSERT(imm.isEncodedImm());
979 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
982 // xor is not spelled with an 'e'. :-(
983 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
988 m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// EOR (register): 16-bit T1 is two-operand; EOR is commutative so either
// source may coincide with rd.
991 // xor is not spelled with an 'e'. :-(
992 void eor(RegisterID rd, RegisterID rn, RegisterID rm)
994 if ((rd == rn) && !((rd | rm) & 8))
995 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
996 else if ((rd == rm) && !((rd | rn) & 8))
997 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
999 eor(rd, rn, rm, ShiftTypeAndAmount());
// IT (If-Then): emits the firstcond:mask byte built by ifThenElse() for a
// block of 1-4 conditional instructions.
1002 ALWAYS_INLINE void it(Condition cond)
1004 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
1007 ALWAYS_INLINE void it(Condition cond, bool inst2if)
1009 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
1012 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
1014 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
1017 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
1019 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
// LDR (immediate): word load, choosing the shortest of the T1/T2 (16-bit)
// and T3 (32-bit) encodings. Word offsets are encoded in units of 4 bytes.
1022 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1023 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1025 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1026 ASSERT(imm.isUInt12());
1028 if (!((rt | rn) & 8) && imm.isUInt7())
1029 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1030 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1031 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1033 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
// Always emits the 32-bit T3 form, so the instruction has a fixed width that
// can be patched later.
1036 ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
1038 ASSERT(rn != ARMRegisters::pc);
1039 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
// Always emits the 16-bit T1 form; callers must satisfy its constraints.
1042 ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1044 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1045 ASSERT(imm.isUInt7());
1046 ASSERT(!((rt | rn) & 8));
1047 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1050 // If index is set, this is a regular offset or a pre-indexed load;
1051 // if index is not set then it is a post-index load.
1053 // If wback is set rn is updated - this is a pre or post index load,
1054 // if wback is not set this is a regular offset memory access.
1056 // (-255 <= offset <= 255)
1058 // _tmp = _reg + offset
1059 // REG[rt] = MEM[index ? _tmp : _reg]   (fixed: this is a load, not a store)
1060 // if (wback) REG[rn] = _tmp
1061 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1063 ASSERT(rt != ARMRegisters::pc);
1064 ASSERT(rn != ARMRegisters::pc);
1065 ASSERT(index || wback);
1066 ASSERT(!wback | (rt != rn));
// Pack the T4 imm12 field: imm8 | W (writeback) | U (add) | P (index) | 1.
// NOTE(review): 'add' is computed in lines elided from this excerpt —
// presumably cleared for negative offsets after negating; confirm.
1073 ASSERT((offset & ~0xff) == 0);
1075 offset |= (wback << 8);
1076 offset |= (add << 9);
1077 offset |= (index << 10);
1078 offset |= (1 << 11);
1080 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
// LDR (register offset, optionally shifted).
1083 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1084 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1086 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1087 ASSERT(!BadReg(rm));
1090 if (!shift && !((rt | rn | rm) & 8))
1091 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
1093 m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
// LDRH (immediate): halfword load; 16-bit T1 offsets are in units of 2 bytes.
1096 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1097 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1099 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1100 ASSERT(imm.isUInt12());
1102 if (!((rt | rn) & 8) && imm.isUInt6())
1103 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
1105 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
1108 // If index is set, this is a regular offset or a pre-indexed load;
1109 // if index is not set then it is a post-index load.
1111 // If wback is set rn is updated - this is a pre or post index load,
1112 // if wback is not set this is a regular offset memory access.
1114 // (-255 <= offset <= 255)
1116 // _tmp = _reg + offset
1117 // REG[rt] = MEM[index ? _tmp : _reg]   (fixed: this is a load, not a store)
1118 // if (wback) REG[rn] = _tmp
1119 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1121 ASSERT(rt != ARMRegisters::pc);
1122 ASSERT(rn != ARMRegisters::pc);
1123 ASSERT(index || wback);
1124 ASSERT(!wback | (rt != rn));
// imm12 packing mirrors ldr() above: imm8 | W | U | P | 1.
1131 ASSERT((offset & ~0xff) == 0);
1133 offset |= (wback << 8);
1134 offset |= (add << 9);
1135 offset |= (index << 10);
1136 offset |= (1 << 11);
1138 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
// LDRH (register offset, optionally shifted).
1141 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1143 ASSERT(!BadReg(rt)); // Memory hint
1144 ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
1145 ASSERT(!BadReg(rm));
1148 if (!shift && !((rt | rn | rm) & 8))
1149 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
1151 m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// LDRB (immediate): byte load; the T1 5-bit offset needs no scaling.
1154 void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1156 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1157 ASSERT(imm.isUInt12());
1159 if (!((rt | rn) & 8) && imm.isUInt5())
1160 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
1162 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
// LDRB with pre/post-index and writeback; see the ldr() overload above for
// the meaning of 'index'/'wback' and the imm12 bit packing (imm8|W|U|P|1).
1165 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1167 ASSERT(rt != ARMRegisters::pc);
1168 ASSERT(rn != ARMRegisters::pc);
1169 ASSERT(index || wback);
1170 ASSERT(!wback | (rt != rn));
1178 ASSERT(!(offset & ~0xff));
1180 offset |= (wback << 8);
1181 offset |= (add << 9);
1182 offset |= (index << 10);
1183 offset |= (1 << 11);
1185 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
// LDRB (register offset, optionally shifted).
1188 ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1190 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1191 ASSERT(!BadReg(rm));
1194 if (!shift && !((rt | rn | rm) & 8))
1195 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
1197 m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
// LDRSB (register offset): sign-extending byte load.
1200 void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1202 ASSERT(rn != ARMRegisters::pc);
1203 ASSERT(!BadReg(rm));
1206 if (!shift && !((rt | rn | rm) & 8))
1207 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
1209 m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
// LDRSH (register offset): sign-extending halfword load.
1212 void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1214 ASSERT(rn != ARMRegisters::pc);
1215 ASSERT(!BadReg(rm));
1218 if (!shift && !((rt | rn | rm) & 8))
1219 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
1221 m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// LSL (immediate / register).
1224 void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1226 ASSERT(!BadReg(rd));
1227 ASSERT(!BadReg(rm));
1228 ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
1229 m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1232 ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
1234 ASSERT(!BadReg(rd));
1235 ASSERT(!BadReg(rn));
1236 ASSERT(!BadReg(rm));
1237 m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// LSR (immediate / register).
1240 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1242 ASSERT(!BadReg(rd));
1243 ASSERT(!BadReg(rm));
1244 ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
1245 m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1248 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
1250 ASSERT(!BadReg(rd));
1251 ASSERT(!BadReg(rn));
1252 ASSERT(!BadReg(rm));
1253 m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// MOVW (T3): loads a raw 16-bit immediate; the imm4 field from m_value is
// passed explicitly since this form bypasses the encoded-immediate path.
1256 ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
1258 ASSERT(imm.isValid());
1259 ASSERT(!imm.isEncodedImm());
1260 ASSERT(!BadReg(rd));
1262 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
// MOV (immediate): shortest of T1 (low reg, 8-bit), T2 (encoded), T3 (16-bit).
1265 ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
1267 ASSERT(imm.isValid());
1268 ASSERT(!BadReg(rd));
1270 if ((rd < 8) && imm.isUInt8())
1271 m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
1272 else if (imm.isEncodedImm())
1273 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
// MOV (register).
1278 ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
1280 m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
// MOVT: writes a 16-bit immediate to the top halfword of rd.
1283 ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
1285 ASSERT(imm.isUInt16());
1286 ASSERT(!BadReg(rd));
1287 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
// MVN (immediate / shifted register / register).
1290 ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
1292 ASSERT(imm.isEncodedImm());
1293 ASSERT(!BadReg(rd));
1295 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
1298 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
1300 ASSERT(!BadReg(rd));
1301 ASSERT(!BadReg(rm));
1302 m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1305 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
1307 if (!((rd | rm) & 8))
1308 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1310 mvn(rd, rm, ShiftTypeAndAmount());
// NEG: synthesized as a reverse-subtract from zero (body line elided here).
1313 ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
1315 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1319 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1321 ASSERT(!BadReg(rd));
1322 ASSERT(!BadReg(rn));
1323 ASSERT(imm.isEncodedImm());
1324 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
1327 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1329 ASSERT(!BadReg(rd));
1330 ASSERT(!BadReg(rn));
1331 ASSERT(!BadReg(rm));
1332 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1335 void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1337 if ((rd == rn) && !((rd | rm) & 8))
1338 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1339 else if ((rd == rm) && !((rd | rn) & 8))
1340 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1342 orr(rd, rn, rm, ShiftTypeAndAmount());
1345 ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1347 ASSERT(!BadReg(rd));
1348 ASSERT(!BadReg(rn));
1349 ASSERT(!BadReg(rm));
1350 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1353 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
1355 if ((rd == rn) && !((rd | rm) & 8))
1356 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1357 else if ((rd == rm) && !((rd | rn) & 8))
1358 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1360 orr_S(rd, rn, rm, ShiftTypeAndAmount());
1363 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1365 ASSERT(!BadReg(rd));
1366 ASSERT(!BadReg(rm));
1367 ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
1368 m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1371 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
1373 ASSERT(!BadReg(rd));
1374 ASSERT(!BadReg(rn));
1375 ASSERT(!BadReg(rm));
1376 m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1379 ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
1381 ASSERT(!BadReg(rdLo));
1382 ASSERT(!BadReg(rdHi));
1383 ASSERT(!BadReg(rn));
1384 ASSERT(!BadReg(rm));
1385 ASSERT(rdLo != rdHi);
1386 m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
1389 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Word store with unsigned-immediate offset; picks the shortest encoding:
// 16-bit T1 (low regs, word-aligned uint7), 16-bit T2 (sp-relative uint10),
// else the 32-bit T3 form with a full uint12 offset.
1390 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1392 ASSERT(rt != ARMRegisters::pc);
1393 ASSERT(rn != ARMRegisters::pc);
1394 ASSERT(imm.isUInt12());
1396 if (!((rt | rn) & 8) && imm.isUInt7())
1397 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1398 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1399 m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1401 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
1404 // If index is set, this is a regular offset or a pre-indexed store;
1405 // if index is not set then is is a post-index store.
1407 // If wback is set rn is updated - this is a pre or post index store,
1408 // if wback is not set this is a regular offset memory access.
1410 // (-255 <= offset <= 255)
1412 // _tmp = _reg + offset
1413 // MEM[index ? _tmp : _reg] = REG[rt]
1414 // if (wback) REG[rn] = _tmp
// Word store, pre/post-indexed T4 form. The wback/add/index/U bits are
// packed into the 12-bit immediate field.
// NOTE(review): the lines computing 'add' (and negating a negative offset)
// are missing from this excerpt; 'add' is otherwise undefined here.
// NOTE(review): '!wback | (rt != rn)' uses bitwise '|' -- harmless on bools
// but '||' was presumably intended.
1415 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1417 ASSERT(rt != ARMRegisters::pc);
1418 ASSERT(rn != ARMRegisters::pc);
1419 ASSERT(index || wback);
1420 ASSERT(!wback | (rt != rn));
1427 ASSERT((offset & ~0xff) == 0);
1429 offset |= (wback << 8);
1430 offset |= (add << 9);
1431 offset |= (index << 10);
1432 offset |= (1 << 11);
1434 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1437 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Word store with register offset (optionally shifted): 16-bit T1 for low
// registers and no shift, else the 32-bit T2 form.
1438 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1440 ASSERT(rn != ARMRegisters::pc);
1441 ASSERT(!BadReg(rm));
1444 if (!shift && !((rt | rn | rm) & 8))
1445 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
1447 m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1450 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Byte store with immediate offset (same encoding-selection pattern as str).
1451 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1453 ASSERT(rt != ARMRegisters::pc);
1454 ASSERT(rn != ARMRegisters::pc);
1455 ASSERT(imm.isUInt12());
1457 if (!((rt | rn) & 8) && imm.isUInt7())
1458 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
1460 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
1463 // If index is set, this is a regular offset or a pre-indexed store;
1464 // if index is not set then is is a post-index store.
1466 // If wback is set rn is updated - this is a pre or post index store,
1467 // if wback is not set this is a regular offset memory access.
1469 // (-255 <= offset <= 255)
1471 // _tmp = _reg + offset
1472 // MEM[index ? _tmp : _reg] = REG[rt]
1473 // if (wback) REG[rn] = _tmp
// Byte store, pre/post-indexed. Same bit-packing and same missing-'add'
// caveat as the indexed str() above.
1474 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1476 ASSERT(rt != ARMRegisters::pc);
1477 ASSERT(rn != ARMRegisters::pc);
1478 ASSERT(index || wback);
1479 ASSERT(!wback | (rt != rn));
1486 ASSERT((offset & ~0xff) == 0);
1488 offset |= (wback << 8);
1489 offset |= (add << 9);
1490 offset |= (index << 10);
1491 offset |= (1 << 11);
1493 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1496 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Byte store with register offset (optionally shifted).
1497 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1499 ASSERT(rn != ARMRegisters::pc);
1500 ASSERT(!BadReg(rm));
1503 if (!shift && !((rt | rn | rm) & 8))
1504 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
1506 m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1509 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Halfword store with immediate offset.
// NOTE(review): the T1 path shifts getUInt7() by 2 as in str/strb, but a
// halfword store scales by 2 in the ISA -- confirm against the full file
// whether '>> 1' was intended here (this matches the upstream source as-is).
1510 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1512 ASSERT(rt != ARMRegisters::pc);
1513 ASSERT(rn != ARMRegisters::pc);
1514 ASSERT(imm.isUInt12());
1516 if (!((rt | rn) & 8) && imm.isUInt7())
1517 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
1519 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
1522 // If index is set, this is a regular offset or a pre-indexed store;
1523 // if index is not set then is is a post-index store.
1525 // If wback is set rn is updated - this is a pre or post index store,
1526 // if wback is not set this is a regular offset memory access.
1528 // (-255 <= offset <= 255)
1530 // _tmp = _reg + offset
1531 // MEM[index ? _tmp : _reg] = REG[rt]
1532 // if (wback) REG[rn] = _tmp
// Halfword store, pre/post-indexed. Same bit-packing as above.
1533 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1535 ASSERT(rt != ARMRegisters::pc);
1536 ASSERT(rn != ARMRegisters::pc);
1537 ASSERT(index || wback);
1538 ASSERT(!wback | (rt != rn));
1545 ASSERT(!(offset & ~0xff));
1547 offset |= (wback << 8);
1548 offset |= (add << 9);
1549 offset |= (index << 10);
1550 offset |= (1 << 11);
1552 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1555 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Halfword store with register offset (optionally shifted).
1556 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1558 ASSERT(rn != ARMRegisters::pc);
1559 ASSERT(!BadReg(rm));
1562 if (!shift && !((rt | rn | rm) & 8))
1563 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
1565 m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// SUB immediate with shortest-encoding selection: sp-adjust T1 (word-aligned
// uint9), low-register T1 (uint3) / T2 (rd==rn, uint8), 32-bit T3 for
// modified-immediates, else 32-bit T4 for plain uint12.
// NOTE(review): several braces/else lines dropped from this excerpt.
1568 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1570 // Rd can only be SP if Rn is also SP.
1571 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1572 ASSERT(rd != ARMRegisters::pc);
1573 ASSERT(rn != ARMRegisters::pc);
1574 ASSERT(imm.isValid());
1576 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1577 ASSERT(!(imm.getUInt16() & 3));
1578 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1580 } else if (!((rd | rn) & 8)) {
1581 if (imm.isUInt3()) {
1582 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1584 } else if ((rd == rn) && imm.isUInt8()) {
1585 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1590 if (imm.isEncodedImm())
1591 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
1593 ASSERT(imm.isUInt12());
1594 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
// Reverse subtract: rd = imm - rn. Uses the 16-bit RSB (negate) form when
// the immediate is zero and both registers are low.
1598 ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1600 ASSERT(rd != ARMRegisters::pc);
1601 ASSERT(rn != ARMRegisters::pc);
1602 ASSERT(imm.isValid());
1603 ASSERT(imm.isUInt12());
1605 if (!((rd | rn) & 8) && !imm.getUInt12())
1606 m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
1608 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
// SUB (register) with explicit shift on rm (32-bit T2 encoding).
1611 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1613 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1614 ASSERT(rd != ARMRegisters::pc);
1615 ASSERT(rn != ARMRegisters::pc);
1616 ASSERT(!BadReg(rm));
1617 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1620 // NOTE: In an IT block, add doesn't modify the flags register.
// SUB (register): 16-bit T1 for all-low registers, else shifted T2 form.
1621 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1623 if (!((rd | rn | rm) & 8))
1624 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1626 sub(rd, rn, rm, ShiftTypeAndAmount());
1629 // Not allowed in an IT (if then) block.
// Flag-setting SUB immediate; the 16-bit forms set flags outside an IT
// block, so they are shared with the non-S variant. Falls back to the
// 32-bit S-suffixed T3 encoding.
1630 void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1632 // Rd can only be SP if Rn is also SP.
1633 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1634 ASSERT(rd != ARMRegisters::pc);
1635 ASSERT(rn != ARMRegisters::pc);
1636 ASSERT(imm.isValid());
1638 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1639 ASSERT(!(imm.getUInt16() & 3));
1640 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1642 } else if (!((rd | rn) & 8)) {
1643 if (imm.isUInt3()) {
1644 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1646 } else if ((rd == rn) && imm.isUInt8()) {
1647 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1652 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
// Flag-setting reverse subtract: rd = imm - rn.
1655 ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1657 ASSERT(rd != ARMRegisters::pc);
1658 ASSERT(rn != ARMRegisters::pc);
1659 ASSERT(imm.isValid());
1660 ASSERT(imm.isUInt12());
1662 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
1665 // Not allowed in an IT (if then) block?
// Flag-setting SUB (register) with explicit shift.
1666 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1668 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1669 ASSERT(rd != ARMRegisters::pc);
1670 ASSERT(rn != ARMRegisters::pc);
1671 ASSERT(!BadReg(rm));
1672 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1675 // Not allowed in an IT (if then) block.
// Flag-setting SUB (register): 16-bit T1 when all registers are low.
1676 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1678 if (!((rd | rn | rm) & 8))
1679 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1681 sub_S(rd, rn, rm, ShiftTypeAndAmount());
// TST with modified-immediate: sets flags from rn AND imm; 0xf in the Rd
// field marks "no destination".
1684 ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
1686 ASSERT(!BadReg(rn));
1687 ASSERT(imm.isEncodedImm());
1689 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
// TST (register) with explicit shift on rm.
1692 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1694 ASSERT(!BadReg(rn));
1695 ASSERT(!BadReg(rm));
1696 m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
// TST (register): shifted form or 16-bit T1 for low registers.
// NOTE(review): the condition selecting between these two lines was dropped
// from this excerpt (presumably a high-register check).
1699 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
1702 tst(rn, rm, ShiftTypeAndAmount());
1704 m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
// UBFX: unsigned bit-field extract of 'width' bits starting at 'lsb'.
// The lsb is split across the imm3 (bits 4..2) and imm2 (bits 1..0) fields;
// the width field encodes width-1.
1707 ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
1710 ASSERT((width >= 1) && (width <= 32));
1711 ASSERT((lsb + width) <= 32);
1712 m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
// --- VFP (floating point) emitters. The bool argument to vfpOp selects
// double-precision (true) vs single-precision (false) encoding. ---
// Double-precision add.
1715 void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1717 m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
// Double-precision compare (sets FPSCR flags; read back via vmrs).
1720 void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1722 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
// Compare against zero (VCMP #0.0 variant, operand field 5).
1725 void vcmpz(FPDoubleRegisterID rd)
1727 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
// int32 -> double conversion.
1730 void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1732 // boolean values are 64bit (toInt, unsigned, roundZero)
1733 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
// double -> int32 conversion, round toward zero.
1736 void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1738 // boolean values are 64bit (toInt, unsigned, roundZero)
1739 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
// double -> uint32 conversion, round toward zero.
1742 void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1744 // boolean values are 64bit (toInt, unsigned, roundZero)
1745 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
// Double-precision divide.
1748 void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1750 m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
// Load double from [rn + imm].
1753 void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1755 m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
// Load single from [rn + imm] (legacy FLDS mnemonic).
1758 void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1760 m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
// Move single-precision register to core register.
1763 void vmov(RegisterID rd, FPSingleRegisterID rn)
1765 ASSERT(!BadReg(rd));
1766 m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
// Move core register to single-precision register.
1769 void vmov(FPSingleRegisterID rd, RegisterID rn)
1771 ASSERT(!BadReg(rn));
1772 m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
// Move double to a core-register pair (rd1 = low word, rd2 = high word);
// '| 16' sets the extra operand bit in the VFPOperand encoding.
1775 void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
1777 ASSERT(!BadReg(rd1));
1778 ASSERT(!BadReg(rd2));
1779 m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
// Move a core-register pair into a double register.
1782 void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
1784 ASSERT(!BadReg(rn1));
1785 ASSERT(!BadReg(rn2));
1786 m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
// Double-to-double register move.
1789 void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
1791 m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
// VMRS: copy FPSCR flags; with reg == pc (the default) the flags transfer
// straight into APSR (the special "r15" form).
1794 void vmrs(RegisterID reg = ARMRegisters::pc)
1796 ASSERT(reg != ARMRegisters::sp);
1797 m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
// Double-precision multiply.
1800 void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1802 m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
// Store double to [rn + imm].
1805 void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1807 m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
// Store single to [rn + imm] (legacy FSTS mnemonic).
1810 void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1812 m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
// Double-precision subtract.
1815 void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1817 m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
// Double-precision absolute value.
1820 void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1822 m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
// Double-precision negate.
1825 void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1827 m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
// Double-precision square root.
1830 void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1832 m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
// single -> double widening conversion.
1835 void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1837 m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
// double -> single narrowing conversion.
1840 void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1842 m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
// NOTE(review): this line is the body of a nop() emitter whose signature was
// dropped from this excerpt (16-bit NOP, T1).
1847 m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
// Raw current-position label; does not respect watchpoint padding.
1850 AssemblerLabel labelIgnoringWatchpoints()
1852 return m_formatter.label();
// Label for a watchpoint site; records the watchpoint index and reserves
// maxJumpReplacementSize() bytes after it so the site can later be patched
// with a full jump.
1855 AssemblerLabel labelForWatchpoint()
1857 AssemblerLabel result = m_formatter.label();
1858 if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
1860 m_indexOfLastWatchpoint = result.m_offset;
1861 m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
// Ordinary label; pads (loop body dropped from excerpt -- presumably emits
// nops) until past the tail of the last watchpoint.
1865 AssemblerLabel label()
1867 AssemblerLabel result = m_formatter.label();
1868 while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
1870 result = m_formatter.label();
// Pad until the buffer is aligned (padding emission dropped from excerpt).
1875 AssemblerLabel align(int alignment)
1877 while (!m_formatter.isAligned(alignment))
// Translate a label to its address within relocated code.
1883 static void* getRelocatedAddress(void* code, AssemblerLabel label)
1885 ASSERT(label.isSet());
1886 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
// Byte distance between two labels (b - a).
1889 static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
1891 return b.m_offset - a.m_offset;
// Look up the link-compaction offset recorded for 'location' (see
// recordLinkOffsets); reads the int32 slot just before the location.
1894 int executableOffsetFor(int location)
1898 return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
// Bytes saved by shrinking a jump of 'jumpType' to 'jumpLinkType'.
1901 int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
1903 // Assembler admin methods:
// Sort predicate for link records: ascending source ('from') offset.
// NOTE(review): the two signatures below are the two arms of a
// TIZEN_BUILD_THUMB_GCC #if/#else; the #else/#endif lines were dropped from
// this excerpt.
1905 #if ENABLE(TIZEN_BUILD_THUMB_GCC)
1906 static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
1908 static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
1911 return a.from() < b.from();
// A jump may be compacted only if it is not one of the fixed-size variants.
1914 bool canCompact(JumpType jumpType)
1916 // The following cannot be compacted:
1917 // JumpFixed: represents custom jump sequence
1918 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
1919 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
1920 return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
// Choose the shortest branch encoding that can reach 'to' from 'from',
// avoiding sequences that would hit the Cortex-A8 branch erratum (see
// canBeJumpT3/T4). Probes T1/T3/conditional-T4 for conditional jumps, then
// T2/T4 for unconditional, falling back to a BX-based long sequence.
// NOTE(review): several early-return lines are missing from this excerpt.
1923 JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
1925 if (jumpType == JumpFixed)
1928 // for patchable jump we must leave space for the longest code sequence
1929 if (jumpType == JumpNoConditionFixedSize)
1931 if (jumpType == JumpConditionFixedSize)
1932 return LinkConditionalBX;
1934 const int paddingSize = JUMP_ENUM_SIZE(jumpType);
1935 bool mayTriggerErrata = false;
1937 if (jumpType == JumpCondition) {
1938 // 2-byte conditional T1
1939 const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
1940 if (canBeJumpT1(jumpT1Location, to))
1942 // 4-byte conditional T3
1943 const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
1944 if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) {
1945 if (!mayTriggerErrata)
1948 // 4-byte conditional T4 with IT
1949 const uint16_t* conditionalJumpT4Location =
1950 reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
1951 if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) {
1952 if (!mayTriggerErrata)
1953 return LinkConditionalJumpT4;
1956 // 2-byte unconditional T2
1957 const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
1958 if (canBeJumpT2(jumpT2Location, to))
1960 // 4-byte unconditional T4
1961 const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
1962 if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) {
1963 if (!mayTriggerErrata)
1966 // use long jump sequence
1970 ASSERT(jumpType == JumpCondition);
1971 return LinkConditionalBX;
// Convenience overload: compute, stash on the record, and return.
1974 JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
1976 JumpLinkType linkType = computeJumpType(record.type(), from, to);
1977 record.setLinkType(linkType);
// Record the compaction 'offset' for every int32 slot in
// [regionStart, regionEnd); read back by executableOffsetFor(). The offsets
// are written over the (already-consumed) instruction buffer.
1981 void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
1983 int32_t ptr = regionStart / sizeof(int32_t);
1984 const int32_t end = regionEnd / sizeof(int32_t);
1985 int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
1987 offsets[ptr++] = offset;
// Returns the pending jumps, sorted by source offset so the linker can walk
// them in address order.
1990 Vector<LinkRecord>& jumpsToLink()
1992 std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
1993 return m_jumpsToLink;
// Patch one jump according to its previously computed link type.
// NOTE(review): the 'case'/'break' lines for several arms were dropped from
// this excerpt; the dispatch below is incomplete as shown.
1996 void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
1998 switch (record.linkType()) {
2000 linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2003 linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
2006 linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2009 linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
2011 case LinkConditionalJumpT4:
2012 linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2014 case LinkConditionalBX:
2015 linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2018 linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
2021 ASSERT_NOT_REACHED();
// Raw access to the (unlinked) instruction buffer and its size.
2026 void* unlinkedCode() { return m_formatter.data(); }
2027 size_t codeSize() const { return m_formatter.codeSize(); }
// Offset immediately after a call site, i.e. its return address.
2029 static unsigned getCallReturnOffset(AssemblerLabel call)
2031 ASSERT(call.isSet());
2032 return call.m_offset;
2035 // Linking & patching:
2037 // 'link' and 'patch' methods are for use on unprotected code - such as the code
2038 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
2039 // code has been finalized it is (platform support permitting) within a non-
2040 // writable region of memory; to modify the code in an execute-only execuable
2041 // pool the 'repatch' and 'relink' methods should be used.
// Queue an intra-buffer jump to be resolved at finalization.
2043 void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
2046 ASSERT(from.isSet());
2047 m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
// Resolve a jump in already-copied (still writable) code.
2050 static void linkJump(void* code, AssemblerLabel from, void* to)
2052 ASSERT(from.isSet());
2054 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
2055 linkJumpAbsolute(location, to);
// Point a call site at 'to'. The target must have its Thumb bit set
// (asserted: low bit of 'to' is 1); the movw/movt pair sits just before the
// recorded offset, hence the '- 1'.
2058 static void linkCall(void* code, AssemblerLabel from, void* to)
2060 ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
2061 ASSERT(from.isSet());
2062 ASSERT(reinterpret_cast<intptr_t>(to) & 1);
2064 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
// Write a pointer constant into a movw/movt pair (no cache flush).
2067 static void linkPointer(void* code, AssemblerLabel where, void* value)
2069 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
// Repoint a jump in finalized code; flushes the 5-halfword jump sequence.
2072 static void relinkJump(void* from, void* to)
2074 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2075 ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
2077 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
2079 cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
// Repoint a call in finalized code (with cache flush); target must be Thumb.
2082 static void relinkCall(void* from, void* to)
2084 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2085 ASSERT(reinterpret_cast<intptr_t>(to) & 1);
2087 setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
// Read the pointer a call site currently targets.
2090 static void* readCallTarget(void* from)
2092 return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
// Rewrite an int32 constant in a movw/movt pair (with cache flush).
2095 static void repatchInt32(void* where, int32_t value)
2097 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2099 setInt32(where, value, true);
// Rewrite the offset of a compact (T4-style indexed) load/store; packs the
// add/index bits into the instruction's low 12 bits as in the str/ldr
// emitters above.
// NOTE(review): the lines computing 'add' / negating the offset were
// dropped from this excerpt.
2102 static void repatchCompact(void* where, int32_t offset)
2104 ASSERT(offset >= -255 && offset <= 255);
2112 offset |= (add << 9);
2113 offset |= (1 << 10);
2114 offset |= (1 << 11);
2116 uint16_t* location = reinterpret_cast<uint16_t*>(where);
2117 location[1] &= ~((1 << 12) - 1);
2118 location[1] |= offset;
2119 cacheFlush(location, sizeof(uint16_t) * 2);
// Rewrite a pointer constant in finalized code (with cache flush).
2122 static void repatchPointer(void* where, void* value)
2124 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2126 setPointer(where, value, true);
// Read a pointer constant from a movw/movt pair.
2129 static void* readPointer(void* where)
2131 return reinterpret_cast<void*>(readInt32(where));
// Overwrite the (nop-padded) instruction at 'instructionStart' with an
// unconditional T4 jump to 'to'. If the 4-byte branch would end at a
// ...0x002 address (Cortex-A8 erratum trigger), a 16-bit NOP is planted
// first to shift it.
// NOTE(review): the branch consuming 'spansTwo4K' is only partially visible
// in this excerpt.
2134 static void replaceWithJump(void* instructionStart, void* to)
2136 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2137 ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
2138 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2140 // Ensure that we're not in one of those errata-triggering thingies. If we are, then
2142 bool spansTwo4K = ((reinterpret_cast<intptr_t>(ptr) & 0xfff) == 0x002);
2145 ptr[-2] = OP_NOP_T1;
2149 linkJumpT4(ptr, to);
2150 cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
// Worst-case byte size of a planted replaceable jump (return value dropped
// from this excerpt).
2153 static ptrdiff_t maxJumpReplacementSize()
// Convert an ADD-immediate at 'instructionStart' back into an LDR-immediate
// (T3), moving the Rd field bits into the LDR's Rt position.
2158 static void replaceWithLoad(void* instructionStart)
2160 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2161 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2162 switch (ptr[0] & 0xFFF0) {
2166 ASSERT(!(ptr[1] & 0xF000));
2168 ptr[0] |= OP_LDR_imm_T3;
2169 ptr[1] |= (ptr[1] & 0x0F00) << 4;
2171 cacheFlush(ptr, sizeof(uint16_t) * 2);
2174 ASSERT_NOT_REACHED();
// Inverse of replaceWithLoad: turn an LDR-immediate back into an
// ADD-immediate (T3) address computation.
// NOTE(review): case labels and breaks were dropped from this excerpt in
// both switches above/below.
2178 static void replaceWithAddressComputation(void* instructionStart)
2180 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2181 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2182 switch (ptr[0] & 0xFFF0) {
2184 ASSERT(!(ptr[1] & 0x0F00));
2186 ptr[0] |= OP_ADD_imm_T3;
2187 ptr[1] |= (ptr[1] & 0xF000) >> 4;
2189 cacheFlush(ptr, sizeof(uint16_t) * 2);
2194 ASSERT_NOT_REACHED();
// Debug-only buffer offset passthrough.
2198 unsigned debugOffset() { return m_formatter.debugOffset(); }
// Invalidate the instruction cache over [code, code+size) after patching.
// Platform-conditional: Darwin sys_cache_control, Tizen page-by-page ARM
// syscall loop, a raw inline-asm cacheflush syscall, CacheRangeFlush, or
// QNX msync. NOTE(review): the #elif/#endif structure and the inline-asm
// bodies were dropped from this excerpt; only fragments remain.
2200 static void cacheFlush(void* code, size_t size)
2203 sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
2204 #elif ENABLE(TIZEN_JSC_CACHEFLUSH_PAGE_BY_PAGE)
// Flush one page at a time to bound the per-syscall range.
2205 uintptr_t currentPage = reinterpret_cast<uintptr_t>(code) & ~(pageSize() - 1);
2206 uintptr_t lastPage = (reinterpret_cast<uintptr_t>(code) + size) & ~(pageSize() - 1);
2218 : "r" (currentPage), "r" (currentPage + pageSize())
2219 : "r0", "r1", "r2");
2220 currentPage += pageSize();
2221 } while (lastPage >= currentPage);
2233 : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
2234 : "r0", "r1", "r2");
2236 CacheRangeFlush(code, size, CACHE_SYNC_ALL);
2238 #if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
2239 msync(code, size, MS_INVALIDATE_ICACHE);
2245 #error "The cacheFlush support is missing on this platform."
2250 // VFP operations commonly take one or more 5-bit operands, typically representing a
2251 // floating point register number. This will commonly be encoded in the instruction
2252 // in two parts, with one single bit field, and one 4-bit field. In the case of
2253 // double precision operands the high bit of the register number will be encoded
2254 // separately, and for single precision operands the high bit of the register number
2255 // will be encoded individually.
2256 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2257 // field to be encoded together in the instruction (the low 4-bits of a double
2258 // register number, or the high 4-bits of a single register number), and bit 4
2259 // contains the bit value to be encoded individually.
// NOTE(review): the 'struct VFPOperand {' line and the member-initializer /
// accessor signatures ('bits1()', 'bits4()', 'uint32_t m_value;') were
// dropped from this excerpt; only bodies remain below.
2261 explicit VFPOperand(uint32_t value)
2264 ASSERT(!(m_value & ~0x1f));
2267 VFPOperand(FPDoubleRegisterID reg)
2272 VFPOperand(RegisterID reg)
// Single registers rotate the low bit to the top: the instruction encodes
// the high 4 bits together and the low bit separately.
2277 VFPOperand(FPSingleRegisterID reg)
2278 : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
// Accessor bodies: the separately-encoded bit, then the 4-bit field.
2284 return m_value >> 4;
2289 return m_value & 0xf;
// Build the VCVT operand encoding opc2/op from the conversion direction,
// signedness and rounding mode. Rounding can only be specified when
// converting TO integer (asserted).
2295 VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
2297 // Cannot specify rounding when converting to float.
2298 ASSERT(toInteger || !isRoundZero);
2302 // opc2 indicates both toInteger & isUnsigned.
2303 op |= isUnsigned ? 0x4 : 0x5;
2304 // 'op' field in instruction is isRoundZero
2308 ASSERT(!isRoundZero);
2309 // 'op' field in instruction is isUnsigned
2313 return VFPOperand(op);
// Rewrite the 32-bit constant loaded by a movw/movt pair ending at 'code'.
// The pair occupies the four halfwords before 'code'; the destination
// register nibble is preserved from the existing second halfwords.
2316 static void setInt32(void* code, uint32_t value, bool flush)
2318 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2319 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2321 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
2322 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
2323 location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2324 location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
2325 location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2326 location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
2329 cacheFlush(location - 4, 4 * sizeof(uint16_t));
// Decode the 32-bit constant from a movw/movt pair ending at 'code'
// (inverse of setInt32). NOTE(review): the '<< 16' shift of the high half
// was dropped from this excerpt.
2332 static int32_t readInt32(void* code)
2334 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2335 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2337 ARMThumbImmediate lo16;
2338 ARMThumbImmediate hi16;
2339 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
2340 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
2341 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
2342 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
2343 uint32_t result = hi16.asUInt16();
2345 result |= lo16.asUInt16();
2346 return static_cast<int32_t>(result);
// Rewrite the word-scaled 5-bit offset of a previously planted LDR_imm_T1
// (imm5 sits at bits 10..6 of the halfword).
2349 static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
2351 // Requires us to have planted a LDR_imm_T1
2352 ASSERT(imm.isValid());
2353 ASSERT(imm.isUInt7());
2354 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2355 location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2356 location[0] |= (imm.getUInt7() >> 2) << 6;
2357 cacheFlush(location, sizeof(uint16_t));
// Pointer flavor of setInt32.
2360 static void setPointer(void* code, void* value, bool flush)
2362 setInt32(code, reinterpret_cast<uint32_t>(value), flush);
// --- Instruction-recognition predicates: each masks the halfword(s) at
// 'address' against the fixed bits of one encoding. ---
// 32-bit unconditional branch, B (T4).
2365 static bool isB(void* address)
2367 uint16_t* instruction = static_cast<uint16_t*>(address);
2368 return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
// 16-bit BX (branch to register).
2371 static bool isBX(void* address)
2373 uint16_t* instruction = static_cast<uint16_t*>(address);
2374 return (instruction[0] & 0xff87) == OP_BX;
// 32-bit MOVW (MOV immediate, T3).
2377 static bool isMOV_imm_T3(void* address)
2379 uint16_t* instruction = static_cast<uint16_t*>(address);
2380 return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
// 32-bit MOVT.
2383 static bool isMOVT(void* address)
2385 uint16_t* instruction = static_cast<uint16_t*>(address);
2386 return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
// 16-bit NOP (T1) -- exact match.
2389 static bool isNOP_T1(void* address)
2391 uint16_t* instruction = static_cast<uint16_t*>(address);
2392 return instruction[0] == OP_NOP_T1;
// 32-bit NOP (T2) -- exact match of both halfwords.
2395 static bool isNOP_T2(void* address)
2397 uint16_t* instruction = static_cast<uint16_t*>(address);
2398 return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
// Range check for the 16-bit conditional branch B (T1): signed 9-bit
// displacement, verified by a sign-extension round trip.
// NOTE(review): the 'relative -= 4' pipeline-offset adjustment lines appear
// to have been dropped from this excerpt (the comments reference them).
2401 static bool canBeJumpT1(const uint16_t* instruction, const void* target)
2403 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2404 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2406 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2407 // It does not appear to be documented in the ARM ARM (big surprise), but
2408 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2409 // less than the actual displacement.
2411 return ((relative << 23) >> 23) == relative;
// Range check for the 16-bit unconditional branch B (T2): signed 12-bit
// displacement.
2414 static bool canBeJumpT2(const uint16_t* instruction, const void* target)
2416 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2417 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2419 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2420 // It does not appear to be documented in the ARM ARM (big surprise), but
2421 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2422 // less than the actual displacement.
2424 return ((relative << 20) >> 20) == relative;
// Range check for the 32-bit conditional branch B (T3): signed 21-bit
// displacement, plus detection of the Cortex-A8 erratum (a 32-bit branch
// spanning two 4KiB regions whose target lies in the first region).
2427 static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
2429 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2430 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2432 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2433 // From Cortex-A8 errata:
2434 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
2435 // the target of the branch falls within the first region it is
2436 // possible for the processor to incorrectly determine the branch
2437 // instruction, and it is also possible in some cases for the processor
2438 // to enter a deadlock state.
2439 // The instruction is spanning two pages if it ends at an address ending 0x002
2440 bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
2441 mayTriggerErrata = spansTwo4K;
2442 // The target is in the first page if the jump branch back by [3..0x1002] bytes
2443 bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
2444 bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
2445 return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
2448 static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
2450 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2451 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2453 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2454 // From Cortex-A8 errata:
2455 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
2456 // the target of the branch falls within the first region it is
2457 // possible for the processor to incorrectly determine the branch
2458 // instruction, and it is also possible in some cases for the processor
2459 // to enter a deadlock state.
2460 // The instruction is spanning two pages if it ends at an address ending 0x002
2461 bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
2462 mayTriggerErrata = spansTwo4K;
2463 // The target is in the first page if the jump branch back by [3..0x1002] bytes
2464 bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
2465 bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
2466 return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
2469 void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
2471 // FIMXE: this should be up in the MacroAssembler layer. :-(
2472 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2473 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2474 ASSERT(canBeJumpT1(instruction, target));
2476 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2477 // It does not appear to be documented in the ARM ARM (big surprise), but
2478 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2479 // less than the actual displacement.
2482 // All branch offsets should be an even distance.
2483 ASSERT(!(relative & 1));
2484 instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
2487 static void linkJumpT2(uint16_t* instruction, void* target)
2489 // FIMXE: this should be up in the MacroAssembler layer. :-(
2490 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2491 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2492 ASSERT(canBeJumpT2(instruction, target));
2494 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2495 // It does not appear to be documented in the ARM ARM (big surprise), but
2496 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2497 // less than the actual displacement.
2500 // All branch offsets should be an even distance.
2501 ASSERT(!(relative & 1));
2502 instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
2505 void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
2507 // FIMXE: this should be up in the MacroAssembler layer. :-(
2508 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2509 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2511 UNUSED_PARAM(scratch);
2512 ASSERT(canBeJumpT3(instruction, target, scratch));
2514 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2516 // All branch offsets should be an even distance.
2517 ASSERT(!(relative & 1));
2518 instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
2519 instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
2522 static void linkJumpT4(uint16_t* instruction, void* target)
2524 // FIMXE: this should be up in the MacroAssembler layer. :-(
2525 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2526 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2528 UNUSED_PARAM(scratch);
2529 ASSERT(canBeJumpT4(instruction, target, scratch));
2531 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2532 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2534 relative ^= 0xC00000;
2536 // All branch offsets should be an even distance.
2537 ASSERT(!(relative & 1));
2538 instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
2539 instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
2542 void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
2544 // FIMXE: this should be up in the MacroAssembler layer. :-(
2545 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2546 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2548 instruction[-3] = ifThenElse(cond) | OP_IT;
2549 linkJumpT4(instruction, target);
2552 static void linkBX(uint16_t* instruction, void* target)
2554 // FIMXE: this should be up in the MacroAssembler layer. :-(
2555 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2556 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2558 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2559 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2560 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2561 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2562 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2563 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2564 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2565 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2568 void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
2570 // FIMXE: this should be up in the MacroAssembler layer. :-(
2571 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2572 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2574 linkBX(instruction, target);
2575 instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
2578 static void linkJumpAbsolute(uint16_t* instruction, void* target)
2580 // FIMXE: this should be up in the MacroAssembler layer. :-(
2581 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2582 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2584 ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
2585 || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
2588 if (canBeJumpT4(instruction, target, scratch)) {
2589 // There may be a better way to fix this, but right now put the NOPs first, since in the
2590 // case of an conditional branch this will be coming after an ITTT predicating *three*
2591 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2592 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2593 // actually be the second half of a 2-word op.
2594 instruction[-5] = OP_NOP_T1;
2595 instruction[-4] = OP_NOP_T2a;
2596 instruction[-3] = OP_NOP_T2b;
2597 linkJumpT4(instruction, target);
2599 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2600 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2601 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2602 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2603 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2604 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2605 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2606 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2610 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2612 return op | (imm.m_value.i << 10) | imm.m_value.imm4;
2615 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
2617 result.m_value.i = (value >> 10) & 1;
2618 result.m_value.imm4 = value & 15;
2621 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2623 return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
2626 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
2628 result.m_value.imm3 = (value >> 12) & 7;
2629 result.m_value.imm8 = value & 255;
2632 class ARMInstructionFormatter {
2634 ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
2636 m_buffer.putShort(op | (rd << 8) | imm);
2639 ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
2641 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
2644 ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
2646 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
2649 ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
2651 m_buffer.putShort(op | imm);
2654 ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
2656 m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
2659 ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
2661 m_buffer.putShort(op | imm);
2664 ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
2666 m_buffer.putShort(op | (reg1 << 3) | reg2);
2669 ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
2671 m_buffer.putShort(op | reg);
2672 m_buffer.putShort(ff.m_u.value);
2675 ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
2677 m_buffer.putShort(op);
2678 m_buffer.putShort(ff.m_u.value);
2681 ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
2683 m_buffer.putShort(op1);
2684 m_buffer.putShort(op2);
2687 ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
2689 ARMThumbImmediate newImm = imm;
2690 newImm.m_value.imm4 = imm4;
2692 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
2693 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
2696 ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
2698 m_buffer.putShort(op | reg1);
2699 m_buffer.putShort((reg2 << 12) | imm);
2702 ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
2704 m_buffer.putShort(op | reg1);
2705 m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
2708 // Formats up instructions of the pattern:
2709 // 111111111B11aaaa:bbbb222SA2C2cccc
2710 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2711 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2712 ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
2714 ASSERT(!(op1 & 0x004f));
2715 ASSERT(!(op2 & 0xf1af));
2716 m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
2717 m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
2720 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2721 // (i.e. +/-(0..255) 32-bit words)
2722 ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
2730 uint32_t offset = imm;
2731 ASSERT(!(offset & ~0x3fc));
2734 m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
2735 m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
// Administrative methods:

// Number of bytes of code emitted into the buffer so far.
size_t codeSize() const { return m_buffer.codeSize(); }
// Label identifying the current (end-of-buffer) position.
AssemblerLabel label() const { return m_buffer.label(); }
// True if the current buffer position is aligned to 'alignment' bytes.
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
// Raw pointer to the buffer's storage (delegates to AssemblerBuffer).
void* data() const { return m_buffer.data(); }
// Debug-only offset reported by the underlying buffer.
unsigned debugOffset() { return m_buffer.debugOffset(); }
// Byte buffer that all of the emitters above write into.
AssemblerBuffer m_buffer;
// Jump records accumulated during assembly, to be resolved when the code
// is linked. NOTE(review): semantics inferred from the name — confirm
// against the record/link methods elsewhere in this class.
Vector<LinkRecord> m_jumpsToLink;
// NOTE(review): presumably per-jump offset deltas used when branches are
// compacted at link time — confirm against the linking code.
Vector<int32_t> m_offsets;
// NOTE(review): presumably buffer indices bracketing the most recently
// emitted watchpoint — confirm against the watchpoint-emission code.
int m_indexOfLastWatchpoint;
int m_indexOfTailOfLastWatchpoint;
2759 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2761 #endif // ARMAssembler_h