2 * Copyright (C) 2009, 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
32 #include "AssemblerBufferWithConstantPool.h"
33 #include "JITCompilationEffort.h"
34 #include <wtf/Assertions.h>
37 typedef uint32_t ARMWord;
39 namespace ARMRegisters {
44 r3, S0 = r3, /* Same as thumb assembler. */
67 d7, SD0 = d7, /* Same as thumb assembler. */
94 } // namespace ARMRegisters
98 typedef ARMRegisters::RegisterID RegisterID;
99 typedef ARMRegisters::FPRegisterID FPRegisterID;
100 typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
101 typedef SegmentedVector<AssemblerLabel, 64> Jumps;
104 : m_indexOfTailOfLastWatchpoint(1)
108 // ARM conditional constants
110 EQ = 0x00000000, // Zero
111 NE = 0x10000000, // Non-zero
127 // ARM instruction constants
147 VMOV_F64 = 0x0eb00b40,
148 VADD_F64 = 0x0e300b00,
149 VDIV_F64 = 0x0e800b00,
150 VSUB_F64 = 0x0e300b40,
151 VMUL_F64 = 0x0e200b00,
152 VCMP_F64 = 0x0eb40b40,
153 VSQRT_F64 = 0x0eb10bc0,
154 VABS_F64 = 0x0eb00bc0,
155 VNEG_F64 = 0x0eb10b40,
161 VMOV_VFP64 = 0x0c400a10,
162 VMOV_ARM64 = 0x0c500a10,
163 VMOV_VFP32 = 0x0e000a10,
164 VMOV_ARM32 = 0x0e100a10,
165 VCVT_F64_S32 = 0x0eb80bc0,
166 VCVT_S32_F64 = 0x0ebd0b40,
167 VCVT_U32_F64 = 0x0ebc0b40,
168 VCVT_F32_F64 = 0x0eb70bc0,
169 VCVT_F64_F32 = 0x0eb70ac0,
170 VMRS_APSR = 0x0ef1fa10,
174 #if WTF_ARM_ARCH_AT_LEAST(7)
183 OP2_IMM_HALF = (1 << 22),
184 OP2_INV_IMM = (1 << 26),
186 OP2_OFSREG = (1 << 25),
187 // Data transfer flags.
195 enum DataTransferTypeA {
196 LoadUint32 = 0x05000000 | DT_LOAD,
197 LoadUint8 = 0x05400000 | DT_LOAD,
198 StoreUint32 = 0x05000000,
199 StoreUint8 = 0x05400000,
202 enum DataTransferTypeB {
203 LoadUint16 = 0x010000b0 | DT_LOAD,
204 LoadInt16 = 0x010000f0 | DT_LOAD,
205 LoadInt8 = 0x010000d0 | DT_LOAD,
206 StoreUint16 = 0x010000b0,
209 enum DataTransferTypeFloat {
210 LoadFloat = 0x0d000a00 | DT_LOAD,
211 LoadDouble = 0x0d000b00 | DT_LOAD,
212 StoreFloat = 0x0d000a00,
213 StoreDouble = 0x0d000b00,
216 // Masks of ARM instructions
218 BRANCH_MASK = 0x00ffffff,
220 SDT_MASK = 0x0c000000,
221 SDT_OFFSET_MASK = 0xfff,
225 BOFFSET_MIN = -0x00800000,
226 BOFFSET_MAX = 0x007fffff,
232 padForAlign16 = 0x0000,
233 padForAlign32 = 0xe12fff7f // 'bkpt 0xffff' instruction.
236 static const ARMWord INVALID_IMM = 0xf0000000;
237 static const ARMWord InvalidBranchTarget = 0xffffffff;
238 static const int DefaultPrefetching = 2;
// Instruction formatting
242 void emitInst(ARMWord op, int rd, int rn, ARMWord op2)
244 ASSERT(((op2 & ~OP2_IMM) <= 0xfff) || (((op2 & ~OP2_IMM_HALF) <= 0xfff)));
245 m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
248 void emitDoublePrecisionInst(ARMWord op, int dd, int dn, int dm)
250 ASSERT((dd >= 0 && dd <= 31) && (dn >= 0 && dn <= 31) && (dm >= 0 && dm <= 31));
251 m_buffer.putInt(op | ((dd & 0xf) << 12) | ((dd & 0x10) << (22 - 4))
252 | ((dn & 0xf) << 16) | ((dn & 0x10) << (7 - 4))
253 | (dm & 0xf) | ((dm & 0x10) << (5 - 4)));
256 void emitSinglePrecisionInst(ARMWord op, int sd, int sn, int sm)
258 ASSERT((sd >= 0 && sd <= 31) && (sn >= 0 && sn <= 31) && (sm >= 0 && sm <= 31));
259 m_buffer.putInt(op | ((sd >> 1) << 12) | ((sd & 0x1) << 22)
260 | ((sn >> 1) << 16) | ((sn & 0x1) << 7)
261 | (sm >> 1) | ((sm & 0x1) << 5));
264 void and_r(int rd, int rn, ARMWord op2, Condition cc = AL)
266 emitInst(static_cast<ARMWord>(cc) | AND, rd, rn, op2);
269 void ands_r(int rd, int rn, ARMWord op2, Condition cc = AL)
271 emitInst(static_cast<ARMWord>(cc) | AND | SET_CC, rd, rn, op2);
274 void eor_r(int rd, int rn, ARMWord op2, Condition cc = AL)
276 emitInst(static_cast<ARMWord>(cc) | EOR, rd, rn, op2);
279 void eors_r(int rd, int rn, ARMWord op2, Condition cc = AL)
281 emitInst(static_cast<ARMWord>(cc) | EOR | SET_CC, rd, rn, op2);
284 void sub_r(int rd, int rn, ARMWord op2, Condition cc = AL)
286 emitInst(static_cast<ARMWord>(cc) | SUB, rd, rn, op2);
289 void subs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
291 emitInst(static_cast<ARMWord>(cc) | SUB | SET_CC, rd, rn, op2);
294 void rsb_r(int rd, int rn, ARMWord op2, Condition cc = AL)
296 emitInst(static_cast<ARMWord>(cc) | RSB, rd, rn, op2);
299 void rsbs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
301 emitInst(static_cast<ARMWord>(cc) | RSB | SET_CC, rd, rn, op2);
304 void add_r(int rd, int rn, ARMWord op2, Condition cc = AL)
306 emitInst(static_cast<ARMWord>(cc) | ADD, rd, rn, op2);
309 void adds_r(int rd, int rn, ARMWord op2, Condition cc = AL)
311 emitInst(static_cast<ARMWord>(cc) | ADD | SET_CC, rd, rn, op2);
314 void adc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
316 emitInst(static_cast<ARMWord>(cc) | ADC, rd, rn, op2);
319 void adcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
321 emitInst(static_cast<ARMWord>(cc) | ADC | SET_CC, rd, rn, op2);
324 void sbc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
326 emitInst(static_cast<ARMWord>(cc) | SBC, rd, rn, op2);
329 void sbcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
331 emitInst(static_cast<ARMWord>(cc) | SBC | SET_CC, rd, rn, op2);
334 void rsc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
336 emitInst(static_cast<ARMWord>(cc) | RSC, rd, rn, op2);
339 void rscs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
341 emitInst(static_cast<ARMWord>(cc) | RSC | SET_CC, rd, rn, op2);
344 void tst_r(int rn, ARMWord op2, Condition cc = AL)
346 emitInst(static_cast<ARMWord>(cc) | TST | SET_CC, 0, rn, op2);
349 void teq_r(int rn, ARMWord op2, Condition cc = AL)
351 emitInst(static_cast<ARMWord>(cc) | TEQ | SET_CC, 0, rn, op2);
354 void cmp_r(int rn, ARMWord op2, Condition cc = AL)
356 emitInst(static_cast<ARMWord>(cc) | CMP | SET_CC, 0, rn, op2);
359 void cmn_r(int rn, ARMWord op2, Condition cc = AL)
361 emitInst(static_cast<ARMWord>(cc) | CMN | SET_CC, 0, rn, op2);
364 void orr_r(int rd, int rn, ARMWord op2, Condition cc = AL)
366 emitInst(static_cast<ARMWord>(cc) | ORR, rd, rn, op2);
369 void orrs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
371 emitInst(static_cast<ARMWord>(cc) | ORR | SET_CC, rd, rn, op2);
374 void mov_r(int rd, ARMWord op2, Condition cc = AL)
376 emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARMRegisters::r0, op2);
379 #if WTF_ARM_ARCH_AT_LEAST(7)
380 void movw_r(int rd, ARMWord op2, Condition cc = AL)
382 ASSERT((op2 | 0xf0fff) == 0xf0fff);
383 m_buffer.putInt(static_cast<ARMWord>(cc) | MOVW | RD(rd) | op2);
386 void movt_r(int rd, ARMWord op2, Condition cc = AL)
388 ASSERT((op2 | 0xf0fff) == 0xf0fff);
389 m_buffer.putInt(static_cast<ARMWord>(cc) | MOVT | RD(rd) | op2);
393 void movs_r(int rd, ARMWord op2, Condition cc = AL)
395 emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARMRegisters::r0, op2);
398 void bic_r(int rd, int rn, ARMWord op2, Condition cc = AL)
400 emitInst(static_cast<ARMWord>(cc) | BIC, rd, rn, op2);
403 void bics_r(int rd, int rn, ARMWord op2, Condition cc = AL)
405 emitInst(static_cast<ARMWord>(cc) | BIC | SET_CC, rd, rn, op2);
408 void mvn_r(int rd, ARMWord op2, Condition cc = AL)
410 emitInst(static_cast<ARMWord>(cc) | MVN, rd, ARMRegisters::r0, op2);
413 void mvns_r(int rd, ARMWord op2, Condition cc = AL)
415 emitInst(static_cast<ARMWord>(cc) | MVN | SET_CC, rd, ARMRegisters::r0, op2);
418 void mul_r(int rd, int rn, int rm, Condition cc = AL)
420 m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
423 void muls_r(int rd, int rn, int rm, Condition cc = AL)
425 m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | SET_CC | RN(rd) | RS(rn) | RM(rm));
428 void mull_r(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
430 m_buffer.putInt(static_cast<ARMWord>(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
433 void vmov_f64_r(int dd, int dm, Condition cc = AL)
435 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VMOV_F64, dd, 0, dm);
438 void vadd_f64_r(int dd, int dn, int dm, Condition cc = AL)
440 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VADD_F64, dd, dn, dm);
443 void vdiv_f64_r(int dd, int dn, int dm, Condition cc = AL)
445 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VDIV_F64, dd, dn, dm);
448 void vsub_f64_r(int dd, int dn, int dm, Condition cc = AL)
450 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VSUB_F64, dd, dn, dm);
453 void vmul_f64_r(int dd, int dn, int dm, Condition cc = AL)
455 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VMUL_F64, dd, dn, dm);
458 void vcmp_f64_r(int dd, int dm, Condition cc = AL)
460 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCMP_F64, dd, 0, dm);
463 void vsqrt_f64_r(int dd, int dm, Condition cc = AL)
465 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VSQRT_F64, dd, 0, dm);
468 void vabs_f64_r(int dd, int dm, Condition cc = AL)
470 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VABS_F64, dd, 0, dm);
473 void vneg_f64_r(int dd, int dm, Condition cc = AL)
475 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VNEG_F64, dd, 0, dm);
478 void ldr_imm(int rd, ARMWord imm, Condition cc = AL)
480 m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | LoadUint32 | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm, true);
483 void ldr_un_imm(int rd, ARMWord imm, Condition cc = AL)
485 m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | LoadUint32 | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm);
488 void dtr_u(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
490 emitInst(static_cast<ARMWord>(cc) | transferType | DT_UP, rd, rb, op2);
493 void dtr_ur(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
495 emitInst(static_cast<ARMWord>(cc) | transferType | DT_UP | OP2_OFSREG, rd, rb, rm);
498 void dtr_d(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
500 emitInst(static_cast<ARMWord>(cc) | transferType, rd, rb, op2);
503 void dtr_dr(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
505 emitInst(static_cast<ARMWord>(cc) | transferType | OP2_OFSREG, rd, rb, rm);
508 void dtrh_u(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
510 emitInst(static_cast<ARMWord>(cc) | transferType | DT_UP, rd, rb, op2);
513 void dtrh_ur(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
515 emitInst(static_cast<ARMWord>(cc) | transferType | DT_UP, rd, rn, rm);
518 void dtrh_d(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
520 emitInst(static_cast<ARMWord>(cc) | transferType, rd, rb, op2);
523 void dtrh_dr(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
525 emitInst(static_cast<ARMWord>(cc) | transferType, rd, rn, rm);
528 void fdtr_u(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
530 ASSERT(op2 <= 0xff && rd <= 15);
531 /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
532 m_buffer.putInt(static_cast<ARMWord>(cc) | DT_UP | type | (rd << 12) | RN(rb) | op2);
535 void fdtr_d(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
537 ASSERT(op2 <= 0xff && rd <= 15);
538 /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
539 m_buffer.putInt(static_cast<ARMWord>(cc) | type | (rd << 12) | RN(rb) | op2);
542 void push_r(int reg, Condition cc = AL)
544 ASSERT(ARMWord(reg) <= 0xf);
545 m_buffer.putInt(static_cast<ARMWord>(cc) | StoreUint32 | DT_WB | RN(ARMRegisters::sp) | RD(reg) | 0x4);
548 void pop_r(int reg, Condition cc = AL)
550 ASSERT(ARMWord(reg) <= 0xf);
551 m_buffer.putInt(static_cast<ARMWord>(cc) | (LoadUint32 ^ DT_PRE) | DT_UP | RN(ARMRegisters::sp) | RD(reg) | 0x4);
554 inline void poke_r(int reg, Condition cc = AL)
556 dtr_d(StoreUint32, ARMRegisters::sp, 0, reg, cc);
559 inline void peek_r(int reg, Condition cc = AL)
561 dtr_u(LoadUint32, reg, ARMRegisters::sp, 0, cc);
564 void vmov_vfp64_r(int sm, int rt, int rt2, Condition cc = AL)
567 m_buffer.putInt(static_cast<ARMWord>(cc) | VMOV_VFP64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
570 void vmov_arm64_r(int rt, int rt2, int sm, Condition cc = AL)
573 m_buffer.putInt(static_cast<ARMWord>(cc) | VMOV_ARM64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
576 void vmov_vfp32_r(int sn, int rt, Condition cc = AL)
579 emitSinglePrecisionInst(static_cast<ARMWord>(cc) | VMOV_VFP32, rt << 1, sn, 0);
582 void vmov_arm32_r(int rt, int sn, Condition cc = AL)
585 emitSinglePrecisionInst(static_cast<ARMWord>(cc) | VMOV_ARM32, rt << 1, sn, 0);
588 void vcvt_f64_s32_r(int dd, int sm, Condition cc = AL)
590 ASSERT(!(sm & 0x1)); // sm must be divisible by 2
591 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_F64_S32, dd, 0, (sm >> 1));
594 void vcvt_s32_f64_r(int sd, int dm, Condition cc = AL)
596 ASSERT(!(sd & 0x1)); // sd must be divisible by 2
597 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_S32_F64, (sd >> 1), 0, dm);
600 void vcvt_u32_f64_r(int sd, int dm, Condition cc = AL)
602 ASSERT(!(sd & 0x1)); // sd must be divisible by 2
603 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_U32_F64, (sd >> 1), 0, dm);
606 void vcvt_f64_f32_r(int dd, int sm, Condition cc = AL)
608 ASSERT(dd <= 15 && sm <= 15);
609 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_F64_F32, dd, 0, sm);
612 void vcvt_f32_f64_r(int dd, int sm, Condition cc = AL)
614 ASSERT(dd <= 15 && sm <= 15);
615 emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_F32_F64, dd, 0, sm);
618 void vmrs_apsr(Condition cc = AL)
620 m_buffer.putInt(static_cast<ARMWord>(cc) | VMRS_APSR);
623 void clz_r(int rd, int rm, Condition cc = AL)
625 m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm));
628 void bkpt(ARMWord value)
630 m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
635 m_buffer.putInt(NOP);
638 void bx(int rm, Condition cc = AL)
640 emitInst(static_cast<ARMWord>(cc) | BX, 0, 0, RM(rm));
643 AssemblerLabel blx(int rm, Condition cc = AL)
645 emitInst(static_cast<ARMWord>(cc) | BLX, 0, 0, RM(rm));
646 return m_buffer.label();
649 static ARMWord lsl(int reg, ARMWord value)
651 ASSERT(reg <= ARMRegisters::pc);
652 ASSERT(value <= 0x1f);
653 return reg | (value << 7) | 0x00;
656 static ARMWord lsr(int reg, ARMWord value)
658 ASSERT(reg <= ARMRegisters::pc);
659 ASSERT(value <= 0x1f);
660 return reg | (value << 7) | 0x20;
663 static ARMWord asr(int reg, ARMWord value)
665 ASSERT(reg <= ARMRegisters::pc);
666 ASSERT(value <= 0x1f);
667 return reg | (value << 7) | 0x40;
670 static ARMWord lsl_r(int reg, int shiftReg)
672 ASSERT(reg <= ARMRegisters::pc);
673 ASSERT(shiftReg <= ARMRegisters::pc);
674 return reg | (shiftReg << 8) | 0x10;
677 static ARMWord lsr_r(int reg, int shiftReg)
679 ASSERT(reg <= ARMRegisters::pc);
680 ASSERT(shiftReg <= ARMRegisters::pc);
681 return reg | (shiftReg << 8) | 0x30;
684 static ARMWord asr_r(int reg, int shiftReg)
686 ASSERT(reg <= ARMRegisters::pc);
687 ASSERT(shiftReg <= ARMRegisters::pc);
688 return reg | (shiftReg << 8) | 0x50;
693 size_t codeSize() const
695 return m_buffer.codeSize();
698 void ensureSpace(int insnSpace, int constSpace)
700 m_buffer.ensureSpace(insnSpace, constSpace);
703 int sizeOfConstantPool()
705 return m_buffer.sizeOfConstantPool();
708 AssemblerLabel labelIgnoringWatchpoints()
710 m_buffer.ensureSpaceForAnyInstruction();
711 return m_buffer.label();
714 AssemblerLabel labelForWatchpoint()
716 m_buffer.ensureSpaceForAnyInstruction(maxJumpReplacementSize() / sizeof(ARMWord));
717 AssemblerLabel result = m_buffer.label();
718 if (result.m_offset != (m_indexOfTailOfLastWatchpoint - maxJumpReplacementSize()))
720 m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
724 AssemblerLabel label()
726 AssemblerLabel result = labelIgnoringWatchpoints();
727 while (result.m_offset + 1 < m_indexOfTailOfLastWatchpoint) {
729 // The available number of instructions are ensured by labelForWatchpoint.
730 result = m_buffer.label();
735 AssemblerLabel align(int alignment)
737 while (!m_buffer.isAligned(alignment))
738 mov_r(ARMRegisters::r0, ARMRegisters::r0);
743 AssemblerLabel loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
745 ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
746 m_jumps.append(m_buffer.codeSize() | (useConstantPool & 0x1));
747 ldr_un_imm(rd, InvalidBranchTarget, cc);
748 return m_buffer.label();
751 AssemblerLabel jmp(Condition cc = AL, int useConstantPool = 0)
753 return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
756 PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&, void* ownerUID, JITCompilationEffort);
758 unsigned debugOffset() { return m_buffer.debugOffset(); }
760 // DFG assembly helpers for moving data between fp and registers.
761 void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
763 vmov_arm64_r(rd1, rd2, rn);
766 void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
768 vmov_vfp64_r(rd, rn1, rn2);
773 static ARMWord* getLdrImmAddress(ARMWord* insn)
776 if ((*insn & 0x0f7f0000) != 0x051f0000) {
778 ASSERT((*insn & 0x012fff30) == 0x012fff30);
782 // Must be an ldr ..., [pc +/- imm]
783 ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
785 ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetching * sizeof(ARMWord);
787 return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
788 return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
791 static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
793 // Must be an ldr ..., [pc +/- imm]
794 ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
797 return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
798 return getLdrImmAddress(insn);
801 static void patchPointerInternal(intptr_t from, void* to)
803 ARMWord* insn = reinterpret_cast<ARMWord*>(from);
804 ARMWord* addr = getLdrImmAddress(insn);
805 *addr = reinterpret_cast<ARMWord>(to);
808 static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
810 value = (value << 1) + 1;
811 ASSERT(!(value & ~0xfff));
812 return (load & ~0xfff) | value;
815 static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
818 static void* readPointer(void* from)
820 ARMWord* instruction = reinterpret_cast<ARMWord*>(from);
821 ARMWord* address = getLdrImmAddress(instruction);
822 return *reinterpret_cast<void**>(address);
827 static void linkPointer(void* code, AssemblerLabel from, void* to)
829 patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
832 static void repatchInt32(void* where, int32_t to)
834 patchPointerInternal(reinterpret_cast<intptr_t>(where), reinterpret_cast<void*>(to));
837 static void repatchCompact(void* where, int32_t value)
839 ARMWord* instruction = reinterpret_cast<ARMWord*>(where);
840 ASSERT((*instruction & 0x0f700000) == LoadUint32);
842 *instruction = (*instruction & 0xff7ff000) | DT_UP | value;
844 *instruction = (*instruction & 0xff7ff000) | -value;
845 cacheFlush(instruction, sizeof(ARMWord));
848 static void repatchPointer(void* from, void* to)
850 patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
854 static intptr_t getAbsoluteJumpAddress(void* base, int offset = 0)
856 return reinterpret_cast<intptr_t>(base) + offset - sizeof(ARMWord);
859 void linkJump(AssemblerLabel from, AssemblerLabel to)
861 ARMWord* insn = reinterpret_cast<ARMWord*>(getAbsoluteJumpAddress(m_buffer.data(), from.m_offset));
862 ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
863 *addr = static_cast<ARMWord>(to.m_offset);
866 static void linkJump(void* code, AssemblerLabel from, void* to)
868 patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
871 static void relinkJump(void* from, void* to)
873 patchPointerInternal(getAbsoluteJumpAddress(from), to);
876 static void linkCall(void* code, AssemblerLabel from, void* to)
878 patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
881 static void relinkCall(void* from, void* to)
883 patchPointerInternal(getAbsoluteJumpAddress(from), to);
886 static void* readCallTarget(void* from)
888 return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from))));
891 static void replaceWithJump(void* instructionStart, void* to)
893 ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart) - 1;
894 intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + DefaultPrefetching * sizeof(ARMWord));
896 if (!(difference & 1)) {
898 if ((difference <= BOFFSET_MAX && difference >= BOFFSET_MIN)) {
900 instruction[0] = B | AL | (difference & BRANCH_MASK);
901 cacheFlush(instruction, sizeof(ARMWord));
907 instruction[0] = LoadUint32 | AL | RN(ARMRegisters::pc) | RD(ARMRegisters::pc) | 4;
908 instruction[1] = reinterpret_cast<ARMWord>(to);
909 cacheFlush(instruction, sizeof(ARMWord) * 2);
912 static ptrdiff_t maxJumpReplacementSize()
914 return sizeof(ARMWord) * 2;
917 static void replaceWithLoad(void* instructionStart)
919 ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
920 cacheFlush(instruction, sizeof(ARMWord));
922 ASSERT((*instruction & 0x0ff00000) == 0x02800000 || (*instruction & 0x0ff00000) == 0x05900000);
923 if ((*instruction & 0x0ff00000) == 0x02800000) {
924 *instruction = (*instruction & 0xf00fffff) | 0x05900000;
925 cacheFlush(instruction, sizeof(ARMWord));
929 static void replaceWithAddressComputation(void* instructionStart)
931 ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
932 cacheFlush(instruction, sizeof(ARMWord));
934 ASSERT((*instruction & 0x0ff00000) == 0x02800000 || (*instruction & 0x0ff00000) == 0x05900000);
935 if ((*instruction & 0x0ff00000) == 0x05900000) {
936 *instruction = (*instruction & 0xf00fffff) | 0x02800000;
937 cacheFlush(instruction, sizeof(ARMWord));
941 // Address operations
943 static void* getRelocatedAddress(void* code, AssemblerLabel label)
945 return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
948 // Address differences
950 static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
952 return b.m_offset - a.m_offset;
955 static unsigned getCallReturnOffset(AssemblerLabel call)
957 return call.m_offset;
962 static ARMWord getOp2(ARMWord imm);
964 // Fast case if imm is known to be between 0 and 0xff
965 static ARMWord getOp2Byte(ARMWord imm)
968 return OP2_IMM | imm;
971 static ARMWord getOp2Half(ARMWord imm)
974 return OP2_IMM_HALF | (imm & 0x0f) | ((imm & 0xf0) << 4);
977 #if WTF_ARM_ARCH_AT_LEAST(7)
978 static ARMWord getImm16Op2(ARMWord imm)
981 return (imm & 0xf000) << 4 | (imm & 0xfff);
985 ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
986 void moveImm(ARMWord imm, int dest);
987 ARMWord encodeComplexImm(ARMWord imm, int dest);
989 // Memory load/store helpers
991 void dataTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, int32_t offset);
992 void baseIndexTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
993 void dataTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, int32_t offset);
994 void baseIndexTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
995 void dataTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, int32_t offset);
996 void baseIndexTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
998 // Constant pool hnadlers
1000 static ARMWord placeConstantPoolBarrier(int offset)
1002 offset = (offset - sizeof(ARMWord)) >> 2;
1003 ASSERT((offset <= BOFFSET_MAX && offset >= BOFFSET_MIN));
1004 return AL | B | (offset & BRANCH_MASK);
1007 #if OS(LINUX) && COMPILER(RVCT)
1008 static __asm void cacheFlush(void* code, size_t);
1010 static void cacheFlush(void* code, size_t size)
1012 #if OS(LINUX) && COMPILER(GCC)
1013 uintptr_t currentPage = reinterpret_cast<uintptr_t>(code) & ~(pageSize() - 1);
1014 uintptr_t lastPage = (reinterpret_cast<uintptr_t>(code) + size) & ~(pageSize() - 1);
1020 "mov r7, #0xf0000\n"
1021 "add r7, r7, #0x2\n"
1026 : "r" (currentPage), "r" (currentPage + pageSize())
1027 : "r0", "r1", "r2");
1028 currentPage += pageSize();
1029 } while (lastPage >= currentPage);
1031 CacheRangeFlush(code, size, CACHE_SYNC_ALL);
1032 #elif OS(QNX) && ENABLE(ASSEMBLER_WX_EXCLUSIVE)
1036 msync(code, size, MS_INVALIDATE_ICACHE);
1038 #error "The cacheFlush support is missing on this platform."
1044 static ARMWord RM(int reg)
1046 ASSERT(reg <= ARMRegisters::pc);
1050 static ARMWord RS(int reg)
1052 ASSERT(reg <= ARMRegisters::pc);
1056 static ARMWord RD(int reg)
1058 ASSERT(reg <= ARMRegisters::pc);
1062 static ARMWord RN(int reg)
1064 ASSERT(reg <= ARMRegisters::pc);
1068 static ARMWord getConditionalField(ARMWord i)
1070 return i & 0xf0000000;
1073 int genInt(int reg, ARMWord imm, bool positive);
1077 uint32_t m_indexOfTailOfLastWatchpoint;
1082 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
1084 #endif // ARMAssembler_h