1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
39 #if V8_TARGET_ARCH_ARM
41 #include "arm/assembler-arm-inl.h"
42 #include "macro-assembler.h"
43 #include "serialize.h"
// Static CPU-feature state shared by all assemblers in the process.
bool CpuFeatures::initialized_ = false;
// Bit set of CpuFeature flags known to be usable (build-implied + probed).
unsigned CpuFeatures::supported_ = 0;
// Features found only by runtime probing (not implied by the build).
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
// Conservative default; Probe() lowers this to 32 for Cortex-A5/A9 cores.
unsigned CpuFeatures::cache_line_size_ = 64;
// External reference to the supported-features bit set, so generated code
// can test CPU features at runtime.  Probe() must have run first.
ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) {
    answer |= 1u << ARMv7;
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) {
    // VFPv3 implies ARMv7 (ARM DDI 0406B, A1-6), so set both bits together.
    answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) {
    answer |= 1u << VFP32DREGS;
#endif  // CAN_USE_VFP32DREGS
  // Unaligned accesses are only safe when ARMv7 is available.
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
// Maps an allocatable double-register index to its printable name, skipping
// over the reserved registers (kDoubleRegZero .. kScratchDoubleReg).
const char* DwVfpRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < NumAllocatableRegisters());
  // The reserved registers must be a single contiguous range.
  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
         kNumReservedRegisters - 1);
  if (index >= kDoubleRegZero.code())
    index += kNumReservedRegisters;
  return VFPRegisters::Name(index, true);
// Determines the usable CPU features: the build-implied minimum plus,
// unless we might serialize a snapshot, whatever runtime probing finds.
void CpuFeatures::Probe(bool serializer_enabled) {
  uint64_t standard_features = static_cast<unsigned>(
      OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
  // Once set, supported_ may only ever grow beyond the build-implied set.
  ASSERT(supported_ == 0 ||
         (supported_ & standard_features) == standard_features);
  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (serializer_enabled) {
    // No probing for features if we might serialize (generate snapshot).

  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
  if (FLAG_enable_vfp3) {
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;

  if (FLAG_enable_neon) {
    supported_ |= 1u << NEON;

  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
    supported_ |= static_cast<uint64_t>(1) << ARMv7;

  if (FLAG_enable_sudiv) {
    supported_ |= static_cast<uint64_t>(1) << SUDIV;

  if (FLAG_enable_movw_movt) {
    supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;

  if (FLAG_enable_32dregs) {
    supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;

  if (FLAG_enable_unaligned_accesses) {
    supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;

  // Probe for additional features not already known to be available.
  if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;

  if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
    found_by_runtime_probing_only_ |= 1u << NEON;

  if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;

  if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;

  if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
      && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;

  // Use movw/movt for QUALCOMM ARMv7 cores.
  if (cpu.implementer() == CPU::QUALCOMM &&
      cpu.architecture() >= 7 &&
      FLAG_enable_movw_movt) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == CPU::ARM &&
      (cpu.part() == CPU::ARM_CORTEX_A5 ||
       cpu.part() == CPU::ARM_CORTEX_A9)) {
    cache_line_size_ = 32;

  if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;

  // Merge probed features into the supported set.
  supported_ |= found_by_runtime_probing_only_;

  // Assert that VFP3 implies ARMv7.
  ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
// Prints a one-line description of the compile-time target configuration
// (architecture, fpu, thumb mode, float ABI) for --print-target style output.
void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_test = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if defined CAN_USE_ARMV7_INSTRUCTIONS
# if defined __ARM_NEON__
# elif defined CAN_USE_VFP3_INSTRUCTIONS
# if (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
  // Native build: the float ABI is decided at runtime by the OS probe.
  arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";
  // Simulator build: configuration comes from the CAN_USE_* defines.
  arm_test = " simulator";
# if defined CAN_USE_VFP3_INSTRUCTIONS
# if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3-d16";
# if USE_EABI_HARDFLOAT == 1
  arm_float_abi = "hard";
  arm_float_abi = "softfp";
  printf("target%s %s%s%s %s\n",
         arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
// Prints the runtime-detected feature set, one flag per feature, followed by
// the effective EABI hard-float setting.
void CpuFeatures::PrintFeatures() {
      "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
      "MOVW_MOVT_IMMEDIATE_LOADS=%d",
      CpuFeatures::IsSupported(ARMv7),
      CpuFeatures::IsSupported(VFP3),
      CpuFeatures::IsSupported(VFP32DREGS),
      CpuFeatures::IsSupported(NEON),
      CpuFeatures::IsSupported(SUDIV),
      CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
      CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
  // Native build: ask the OS; otherwise fall back to the build-time define.
  bool eabi_hardfloat = OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
  bool eabi_hardfloat = true;
  bool eabi_hardfloat = false;
  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// No relocations need to be applied when code moves on ARM.
const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on ARM means that it is a movw/movt instruction, or is an
  // out of line constant pool entry.  These only occur if
  // FLAG_enable_ool_constant_pool is true.
  return FLAG_enable_ool_constant_pool;
// Returns true if the instruction at pc_ loads from the constant pool:
// an ldr via pp (out-of-line pool) or via pc (in-line pool).
bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_ool_constant_pool) {
    return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
  return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  // Indicate that code has changed.  Required so the CPU does not execute
  // stale instructions from the instruction cache.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    // Heap objects are referenced through the handle location so the GC can
    // update the embedded pointer; mark it for relocation.
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
// Register operand shifted by an immediate (e.g. "r1, LSL #3").
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));

  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;

  if ((shift_op == ROR) && (shift_imm == 0)) {
    // ROR #0 is functionally equivalent to LSL #0 and this allow us to encode
    // RRX as ROR #0 (See below).
  } else if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);


// Register operand shifted by another register (e.g. "r1, LSL r2").
// RRX has no register-shift form.
Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);

  shift_op_ = shift_op;
// [rn, #offset] with the given addressing mode.
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {


// [rn, rm] register-offset form.
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {


// [rn, rm, shift #shift_imm] scaled-register-offset form.
MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));

  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;


// NEON memory operand; rm_ encodes the write-back mode: pc means no
// write-back (Offset), sp means post-increment by the access size.
NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
  ASSERT((am == Offset) || (am == PostIndex));
  rm_ = (am == Offset) ? pc : sp;


// NEON memory operand with register post-increment.
NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {


// Translates a byte alignment into the 2-bit NEON align field.
void NeonMemOperand::SetAlignment(int align) {


// Encodes a list of 1-4 consecutive double registers for vld/vst.
NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
  switch (registers_count) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// These patterns are matched by the Is*/Get* peephole helpers below.

// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
        kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
// ldr rd, [pp, #offset]  (pp = r8, the out-of-line constant pool pointer)
const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blx reg
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
// mov/mvn: the two are one opcode bit apart, so xor-ing with the flip
// constant converts one into the other (same applies to the pairs below).
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
// Sets up an assembler over the given buffer (or an internally allocated one
// when buffer is NULL) with empty constant-pool bookkeeping.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(),
      positions_recorder_(this) {
  // Relocation info grows downward from the end of the buffer.
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  num_pending_32_bit_reloc_info_ = 0;
  num_pending_64_bit_reloc_info_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  // In-line constant pools are unavailable when the out-of-line pool is on.
  constant_pool_available_ = !FLAG_enable_ool_constant_pool;
  constant_pool_full_ = false;
  ClearRecordedAstId();


Assembler::~Assembler() {
  // All const-pool blocking scopes must have been closed.
  ASSERT(const_pool_blocked_nesting_ == 0);
// Finalizes code generation and fills in the code descriptor.  Any pending
// in-line constant pool entries are emitted first.
void Assembler::GetCode(CodeDesc* desc) {
  if (!FLAG_enable_ool_constant_pool) {
    // Emit constant pool if necessary.
    CheckConstPool(true, false);
    ASSERT(num_pending_32_bit_reloc_info_ == 0);
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written downward from the buffer end.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();


// Pads with instructions until pc_offset() is a multiple of m (m a power of
// two, at least instruction-sized).
void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
// Extracts the condition field from an instruction.
Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);


// True for b/bl/blx-style branch instructions.
bool Assembler::IsBranch(Instr instr) {
  return (instr & (B27 | B25)) == (B27 | B25);


int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // with 4 to get the offset in bytes.
  return ((instr & kImm24Mask) << 8) >> 6;


// True for ldr with register base and immediate offset.
bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);


// True for vldr with register base and immediate offset.
bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);


int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;  // U bit: add vs. subtract offset.
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;


int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff8Mask;  // Zero extended offset.
  return positive ? offset : -offset;
// Rewrites the 12-bit immediate offset (and U bit) of an ldr instruction.
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;


// Rewrites the scaled 8-bit immediate offset of a vldr instruction.
Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  ASSERT((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);


// True for str with register base and immediate offset.
bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;


// Rewrites the 12-bit immediate offset (and U bit) of a str instruction.
Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;


// True for add with immediate operand.
bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  ASSERT(is_uint12(offset));
  return (instr & ~kOff12Mask) | offset;


// Register field extractors (destination, first operand, second operand).
Register Assembler::GetRd(Instr instr) {
  reg.code_ = Instruction::RdValue(instr);


Register Assembler::GetRn(Instr instr) {
  reg.code_ = Instruction::RnValue(instr);


Register Assembler::GetRm(Instr instr) {
  reg.code_ = Instruction::RmValue(instr);
// The predicates below match specific instruction shapes by masking out the
// register/offset fields and comparing against the patterns defined above.

bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);


bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCMask) == kLdrPCPattern;


bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp +/- offset_12].
  return (instr & kLdrPpMask) == kLdrPpPattern;


bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;


bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pp +/- offset_10].
  return (instr & kVldrDPpMask) == kVldrDPpPattern;


bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==


bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==


bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  ASSERT(IsCmpImmediate(instr));


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return instr & kOff12Mask;
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
//
// The linked labels form a link chain by making the branch offset
// in the instruction steam to point to the previous branch
// instruction using the same label.
//
// The link chain is terminated by a branch offset pointing to the
// same instruction.

// Returns the target position encoded at pos: either the previous link in a
// label's chain or the resolved branch target.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign-extend the 24-bit immediate and convert to a byte offset (<< 2).
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
  // Branch offsets are relative to pc + 8 (kPcLoadDelta).
  return pos + kPcLoadDelta + imm26;
// Resolves the link or branch at pos so that it refers to target_pos.  For
// label-load sequences the destination register is re-materialized with the
// shortest mov/movw-movt (ARMv7) or mov/orr (pre-ARMv7) sequence that fits.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    ASSERT(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // Here are the instructions we need to emit:
    //   For ARMv7: target24 => target16_1:target16_0
    //      movw dst, #target16_0
    //      movt dst, #target16_1
    //   For ARMv6: target24 => target8_2:target8_1:target8_0
    //      mov dst, #target8_0
    //      orr dst, dst, #target8_1 << 8
    //      orr dst, dst, #target8_2 << 16

    // We extract the destination register from the emitted nop instruction.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    ASSERT(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov
      // instruction.
      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
  // Otherwise this is a real branch: recompute the 26-bit pc-relative offset.
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
    ASSERT((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
// Debug helper: prints a label's state and, for linked labels, walks the link
// chain printing each branch that references it.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        if (cond == kSpecialCondition) {
          if ((instr & B24) != 0)
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
          PrintF("%s%s\n", b, c);
    // Label is in neither bound, linked, nor unused state.
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
// Binds L to pos, patching every instruction in its link chain to target pos.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());


// Advances L to the previous instruction in its link chain.
void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
    // Branch target points to the same instruction. This is the end of the
    // link chain.
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to a complementary one with an immediate that does fit.
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
  // imm32 must be unsigned.
  // Try every even rotation to express imm32 as an 8-bit value rotated right.
  for (int rot = 0; rot < 16; rot++) {
    // NOTE(review): for rot == 0 this computes imm32 >> 32, which is
    // undefined behavior in C++ (shift count equals the operand width);
    // a rotate helper or a rot == 0 special case would be well-defined.
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      // mov <-> mvn with the bitwise complement.
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
    } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
      if (CpuFeatures::IsSupported(ARMv7)) {
        // mov -> movw for 16-bit immediates on ARMv7.
        if (imm32 < 0x10000) {
          *instr ^= kMovwLeaveCCFlip;
          *instr |= EncodeMovwImmediate(imm32);
          *rotate_imm = *immed_8 = 0;  // Not used for movw.
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      // cmp <-> cmn with the negated immediate.
      if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
        // add <-> sub with the negated immediate.
        if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
      } else if (alu_insn == AND ||
        // and <-> bic with the complemented immediate.
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_output_reloc_info(Isolate* isolate,
                                     const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    // External references must stay patchable when code size must be stable
    // or when serializing a snapshot.
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return Serializer::enabled(isolate);
  } else if (RelocInfo::IsNone(rmode_)) {
// Decides whether a 32-bit immediate should be materialized with a
// movw/movt sequence (true) or loaded from the constant pool (false).
static bool use_mov_immediate_load(Isolate* isolate,
                                   const Assembler* assembler) {
  if (assembler != NULL && !assembler->can_use_constant_pool()) {
    // If there is no constant pool available, we must use an mov immediate.
    // TODO(rmcilroy): enable ARMv6 support.
    ASSERT(CpuFeatures::IsSupported(ARMv7));
  } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
             (assembler == NULL || !assembler->predictable_code_size())) {
    // Prefer movw / movt to constant pool if it is more efficient on the CPU.
  } else if (x.must_output_reloc_info(isolate, assembler)) {
    // Prefer constant pool if data is likely to be patched.
  // Otherwise, use immediate load if movw / movt is available.
  return CpuFeatures::IsSupported(ARMv7);
// Returns true if this operand, used with the given instruction, can be
// encoded in a single ARM instruction (no extra mov/ldr needed).
bool Operand::is_single_instruction(Isolate* isolate,
                                    const Assembler* assembler,
                                    Instr instr) const {
  // Register operands always encode directly.
  if (rm_.is_valid()) return true;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(isolate, assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // constant pool is required. For a mov instruction not setting the
    // condition code additional instruction conventions can be used.
    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
      // A single pc-relative ldr from the pool still counts as one
      // instruction; a movw/movt pair does not.
      return !use_mov_immediate_load(isolate, *this, assembler);
    // If this is not a mov or mvn instruction there will always an additional
    // instructions - either mov or ldr. The mov might actually be two
    // instructions mov or movw followed by movt so including the actual
    // instruction two or three instructions will be generated.
  // No use of constant pool and the immediate operand can be encoded as a
  // single shifter operand.
// Materializes the 32-bit immediate in x into rd, via movw/movt or a
// constant-pool load, recording relocation info when required.
void Assembler::move_32_bit_immediate(Register rd,
  RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
  if (x.must_output_reloc_info(isolate(), this)) {
    RecordRelocInfo(rinfo);

  if (use_mov_immediate_load(isolate(), x, this)) {
    // pc cannot be the destination of movw/movt; build the value in ip first.
    Register target = rd.code() == pc.code() ? ip : rd;
    // TODO(rmcilroy): add ARMv6 support for immediate loads.
    ASSERT(CpuFeatures::IsSupported(ARMv7));
    if (!FLAG_enable_ool_constant_pool &&
        x.must_output_reloc_info(isolate(), this)) {
      // Make sure the movw/movt doesn't get separated.
      BlockConstPoolFor(2);
    // Emit movw (raw encoding) followed by movt for the high half.
    emit(cond | 0x30*B20 | target.code()*B12 |
         EncodeMovwImmediate(x.imm32_ & 0xffff));
    movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    ASSERT(can_use_constant_pool());
    ConstantPoolAddEntry(rinfo);
    // Load from the pool: base is pp (out-of-line pool) or pc (in-line pool).
    ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
// Emits a data-processing instruction (addressing mode 1).  Immediates that
// cannot be encoded as a shifter operand are first loaded into ip (or the
// instruction is rewritten via fits_shifter / move_32_bit_immediate).
void Assembler::addrmod1(Instr instr,
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate operand.
    uint32_t rotate_imm;
    if (x.must_output_reloc_info(isolate(), this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate-shifted register operand.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
    // Register-shifted register operand; pc is not allowed anywhere here.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
// Emits a word/byte load-store (addressing mode 2) instruction. Offsets that
// do not fit in 12 bits are moved into ip and re-emitted as a register offset.
1214 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
1215 ASSERT((instr & ~(kCondMask | B | L)) == B26);
1217 if (!x.rm_.is_valid()) {
1218 // Immediate offset.
1219 int offset_12 = x.offset_;
// Negative offsets are encoded as a positive magnitude (U bit presumably
// cleared in the am_ bits — elided here; confirm against full source).
1220 if (offset_12 < 0) {
1221 offset_12 = -offset_12;
1224 if (!is_uint12(offset_12)) {
1225 // Immediate offset cannot be encoded, load it first to register ip
1226 // rn (and rd in a load) should never be ip, or will be trashed.
1227 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
1228 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
1229 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
1232 ASSERT(offset_12 >= 0); // no masking needed
1235 // Register offset (shift_imm_ and shift_op_ are 0) or scaled
1236 // register offset the constructors make sure than both shift_imm_
1237 // and shift_op_ are initialized.
1238 ASSERT(!x.rm_.is(pc));
1239 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
1241 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
1242 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
// Emits a miscellaneous load-store (addressing mode 3: halfword, signed byte,
// doubleword) instruction. Offsets beyond 8 bits and scaled register offsets
// are first materialized in ip, since this encoding supports neither.
1246 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
1247 ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
1248 ASSERT(x.rn_.is_valid());
1250 if (!x.rm_.is_valid()) {
1251 // Immediate offset.
1252 int offset_8 = x.offset_;
// Encode a negative offset as its positive magnitude.
1254 offset_8 = -offset_8;
1257 if (!is_uint8(offset_8)) {
1258 // Immediate offset cannot be encoded, load it first to register ip
1259 // rn (and rd in a load) should never be ip, or will be trashed.
1260 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
1261 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
1262 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
1265 ASSERT(offset_8 >= 0); // no masking needed
// Split the 8-bit immediate across the imm4H/imm4L fields.
1266 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
1267 } else if (x.shift_imm_ != 0) {
1268 // Scaled register offset not supported, load index first
1269 // rn (and rd in a load) should never be ip, or will be trashed.
1270 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
1271 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
1272 Instruction::ConditionField(instr));
1273 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
// Plain (unscaled) register offset.
1277 ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
1278 instr |= x.rm_.code();
1280 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
1281 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
// Emits a load/store-multiple (addressing mode 4) instruction: base register
// |rn| plus the register list bitmask |rl|.
1285 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
1286 ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
1289 emit(instr | rn.code()*B16 | rl);
// Emits a coprocessor load/store (addressing mode 5) instruction for
// coprocessor register |crd|. Only immediate word-aligned offsets are
// supported; unindexed addressing is handled by a separate ldc overload.
1293 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
1294 // Unindexed addressing is not encoded by this function.
1295 ASSERT_EQ((B27 | B26),
1296 (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
1297 ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
1299 int offset_8 = x.offset_;
1300 ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
// Negative offsets are encoded as a positive magnitude.
1303 offset_8 = -offset_8;
1306 ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
1307 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
1309 // Post-indexed addressing requires W == 1; different than in addrmod2/3.
1313 ASSERT(offset_8 >= 0); // no masking needed
1314 emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
// Returns the branch offset (in bytes, relative to pc + kPcLoadDelta) to use
// for a branch to label |L|. Unbound labels are threaded onto a link chain
// rooted at the label, to be patched when the label is bound.
1318 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
1320 if (L->is_bound()) {
1321 target_pos = L->pos();
1323 if (L->is_linked()) {
1324 // Point to previous instruction that uses the link.
1325 target_pos = L->pos();
1327 // First entry of the link chain points to itself.
1328 target_pos = pc_offset();
// Extend the chain: the label now points at this (about-to-be-emitted) branch.
1330 L->link_to(pc_offset());
1333 // Block the emission of the constant pool, since the branch instruction must
1334 // be emitted at the pc offset recorded by the label.
1335 BlockConstPoolFor(1);
// kPcLoadDelta accounts for the ARM pipeline's pc-read-ahead.
1336 return target_pos - (pc_offset() + kPcLoadDelta);
1340 // Branch instructions.
// b: unconditional/conditional branch; the byte offset is encoded as a signed
// 24-bit word offset.
1341 void Assembler::b(int branch_offset, Condition cond) {
1342 ASSERT((branch_offset & 3) == 0);
1343 int imm24 = branch_offset >> 2;
1344 ASSERT(is_int24(imm24));
1345 emit(cond | B27 | B25 | (imm24 & kImm24Mask));
1348 // Dead code is a good location to emit the constant pool.
1349 CheckConstPool(false, false);
// bl: branch with link (call); records source positions for the debugger.
1354 void Assembler::bl(int branch_offset, Condition cond) {
1355 positions_recorder()->WriteRecordedPositions();
1356 ASSERT((branch_offset & 3) == 0);
1357 int imm24 = branch_offset >> 2;
1358 ASSERT(is_int24(imm24));
1359 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
// blx (immediate form): branch with link and exchange to Thumb; the H bit
// carries bit 1 of the halfword-aligned offset.
1363 void Assembler::blx(int branch_offset) { // v5 and above
1364 positions_recorder()->WriteRecordedPositions();
1365 ASSERT((branch_offset & 1) == 0);
1366 int h = ((branch_offset & 2) >> 1)*B24;
1367 int imm24 = branch_offset >> 2;
1368 ASSERT(is_int24(imm24));
1369 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
// blx (register form): indirect call through |target|.
1373 void Assembler::blx(Register target, Condition cond) { // v5 and above
1374 positions_recorder()->WriteRecordedPositions();
1375 ASSERT(!target.is(pc));
1376 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
// bx: indirect branch through |target| (no link).
1380 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
1381 positions_recorder()->WriteRecordedPositions();
1382 ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
1383 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
1387 // Data-processing instructions.
// Each of the wrappers below ORs its opcode (and S bit where applicable) into
// the instruction word and delegates operand encoding to addrmod1. The
// compare/test instructions (tst/teq/cmp/cmn) always set the condition flags
// and discard the result, hence the fixed S and the dummy r0 destination.
1389 void Assembler::and_(Register dst, Register src1, const Operand& src2,
1390 SBit s, Condition cond) {
1391 addrmod1(cond | AND | s, src1, dst, src2);
1395 void Assembler::eor(Register dst, Register src1, const Operand& src2,
1396 SBit s, Condition cond) {
1397 addrmod1(cond | EOR | s, src1, dst, src2);
1401 void Assembler::sub(Register dst, Register src1, const Operand& src2,
1402 SBit s, Condition cond) {
1403 addrmod1(cond | SUB | s, src1, dst, src2);
1407 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1408 SBit s, Condition cond) {
1409 addrmod1(cond | RSB | s, src1, dst, src2);
1413 void Assembler::add(Register dst, Register src1, const Operand& src2,
1414 SBit s, Condition cond) {
1415 addrmod1(cond | ADD | s, src1, dst, src2);
1419 void Assembler::adc(Register dst, Register src1, const Operand& src2,
1420 SBit s, Condition cond) {
1421 addrmod1(cond | ADC | s, src1, dst, src2);
1425 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1426 SBit s, Condition cond) {
1427 addrmod1(cond | SBC | s, src1, dst, src2);
1431 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1432 SBit s, Condition cond) {
1433 addrmod1(cond | RSC | s, src1, dst, src2);
1437 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
1438 addrmod1(cond | TST | S, src1, r0, src2);
1442 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
1443 addrmod1(cond | TEQ | S, src1, r0, src2);
1447 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
1448 addrmod1(cond | CMP | S, src1, r0, src2);
// cmp with a raw (pre-encoded) 12-bit immediate field, bypassing fits_shifter.
1452 void Assembler::cmp_raw_immediate(
1453 Register src, int raw_immediate, Condition cond) {
1454 ASSERT(is_uint12(raw_immediate));
1455 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
1459 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
1460 addrmod1(cond | CMN | S, src1, r0, src2);
1464 void Assembler::orr(Register dst, Register src1, const Operand& src2,
1465 SBit s, Condition cond) {
1466 addrmod1(cond | ORR | s, src1, dst, src2);
// mov: copies the operand into |dst|; the first (elided-condition) position
// recording supports the debugger when dst is pc (a return-like move).
1470 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
1472 positions_recorder()->WriteRecordedPositions();
1474 // Don't allow nop instructions in the form mov rn, rn to be generated using
1475 // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
1476 // or MarkCode(int/NopMarkerTypes) pseudo instructions.
1477 ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
1478 addrmod1(cond | MOV | s, r0, dst, src);
// Loads |dst| with the offset of |label| from the start of the generated Code
// object. For bound labels this is a plain mov; for unbound labels a
// patchable instruction sequence carrying the link-chain position is emitted.
// NOTE(review): numbered excerpt — the emitted placeholder instructions
// themselves are elided below; see the full source before editing.
1482 void Assembler::mov_label_offset(Register dst, Label* label) {
1483 if (label->is_bound()) {
1484 mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
1486 // Emit the link to the label in the code stream followed by extra nop
1488 // If the label is not linked, then start a new link chain by linking it to
1489 // itself, emitting pc_offset().
1490 int link = label->is_linked() ? label->pos() : pc_offset();
1491 label->link_to(pc_offset());
1493 // When the label is bound, these instructions will be patched with a
1494 // sequence of movw/movt or mov/orr/orr instructions. They will load the
1495 // destination register with the position of the label from the beginning
1498 // The link will be extracted from the first instruction and the destination
1499 // register from the second.
1508 // When the label gets bound: target_at extracts the link and target_at_put
1509 // patches the instructions.
1510 ASSERT(is_uint24(link));
// Keep the patchable sequence contiguous: no constant pool in the middle.
1511 BlockConstPoolScope block_const_pool(this);
1514 if (!CpuFeatures::IsSupported(ARMv7)) {
// movw: loads a 16-bit immediate into the low half of |reg| (zero-extending),
// routed through mov() so non-ARMv7 targets can fall back to other encodings.
1521 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
1522 ASSERT(immediate < 0x10000);
1523 // May use movw if supported, but on unsupported platforms will try to use
1524 // equivalent rotated immed_8 value and other tricks before falling back to a
1525 // constant pool load.
1526 mov(reg, Operand(immediate), LeaveCC, cond);
// movt: writes a 16-bit immediate into the top half of |reg|, leaving the low
// half unchanged.
1530 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
1531 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
// bic: dst = src1 AND NOT(src2); mvn: dst = NOT(src). Both via addrmod1.
1535 void Assembler::bic(Register dst, Register src1, const Operand& src2,
1536 SBit s, Condition cond) {
1537 addrmod1(cond | BIC | s, src1, dst, src2);
1541 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
1542 addrmod1(cond | MVN | s, r0, dst, src);
1546 // Multiply instructions.
// mla: dst = src1 * src2 + srcA (multiply-accumulate).
1547 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1548 SBit s, Condition cond) {
1549 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1550 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1551 src2.code()*B8 | B7 | B4 | src1.code());
// mls: dst = srcA - src1 * src2 (multiply-subtract).
1555 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
1557 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1558 emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
1559 src2.code()*B8 | B7 | B4 | src1.code());
// sdiv: signed divide; requires the SUDIV CPU feature.
1563 void Assembler::sdiv(Register dst, Register src1, Register src2,
1565 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1566 ASSERT(IsEnabled(SUDIV));
1567 emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
1568 src2.code()*B8 | B4 | src1.code());
// mul: dst = src1 * src2 (low 32 bits).
1572 void Assembler::mul(Register dst, Register src1, Register src2,
1573 SBit s, Condition cond) {
1574 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1575 // dst goes in bits 16-19 for this instruction!
1576 emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
// smlal: signed 64-bit multiply-accumulate into the dstH:dstL pair.
1580 void Assembler::smlal(Register dstL,
1586 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1587 ASSERT(!dstL.is(dstH));
1588 emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1589 src2.code()*B8 | B7 | B4 | src1.code());
// smull: signed 64-bit multiply into the dstH:dstL pair.
1593 void Assembler::smull(Register dstL,
1599 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1600 ASSERT(!dstL.is(dstH));
1601 emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1602 src2.code()*B8 | B7 | B4 | src1.code());
// umlal: unsigned 64-bit multiply-accumulate into the dstH:dstL pair.
1606 void Assembler::umlal(Register dstL,
1612 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1613 ASSERT(!dstL.is(dstH));
1614 emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1615 src2.code()*B8 | B7 | B4 | src1.code());
// umull: unsigned 64-bit multiply into the dstH:dstL pair.
1619 void Assembler::umull(Register dstL,
1625 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1626 ASSERT(!dstL.is(dstH));
1627 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1628 src2.code()*B8 | B7 | B4 | src1.code());
1632 // Miscellaneous arithmetic instructions.
// clz: writes the count of leading zero bits of |src| into |dst|.
1633 void Assembler::clz(Register dst, Register src, Condition cond) {
1635 ASSERT(!dst.is(pc) && !src.is(pc));
1636 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1637 15*B8 | CLZ | src.code());
1641 // Saturating instructions.
1643 // Unsigned saturate.
// usat: saturates the (optionally shifted) source to an unsigned value of
// |satpos| bits; only LSL and ASR shifts are legal for this encoding (ARMv7).
1644 void Assembler::usat(Register dst,
1649 ASSERT(CpuFeatures::IsSupported(ARMv7));
1650 ASSERT(!dst.is(pc) && !src.rm_.is(pc));
1651 ASSERT((satpos >= 0) && (satpos <= 31));
1652 ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1653 ASSERT(src.rs_.is(no_reg));
// The sh bit distinguishes ASR (sh=1, set in the elided branch) from LSL.
1656 if (src.shift_op_ == ASR) {
1660 emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1661 src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1665 // Bitfield manipulation instructions. All require ARMv7.
1667 // Unsigned bit field extract.
1668 // Extracts #width adjacent bits from position #lsb in a register, and
1669 // writes them to the low bits of a destination register.
1670 // ubfx dst, src, #lsb, #width
1671 void Assembler::ubfx(Register dst,
1677 ASSERT(CpuFeatures::IsSupported(ARMv7));
1678 ASSERT(!dst.is(pc) && !src.is(pc));
1679 ASSERT((lsb >= 0) && (lsb <= 31));
1680 ASSERT((width >= 1) && (width <= (32 - lsb)));
1681 emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
1682 lsb*B7 | B6 | B4 | src.code());
1686 // Signed bit field extract.
1687 // Extracts #width adjacent bits from position #lsb in a register, and
1688 // writes them to the low bits of a destination register. The extracted
1689 // value is sign extended to fill the destination register.
1690 // sbfx dst, src, #lsb, #width
1691 void Assembler::sbfx(Register dst,
1697 ASSERT(CpuFeatures::IsSupported(ARMv7));
1698 ASSERT(!dst.is(pc) && !src.is(pc));
1699 ASSERT((lsb >= 0) && (lsb <= 31));
1700 ASSERT((width >= 1) && (width <= (32 - lsb)));
1701 emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
1702 lsb*B7 | B6 | B4 | src.code());
// Bit field clear.
1707 // Sets #width adjacent bits at position #lsb in the destination register
1708 // to zero, preserving the value of the other bits.
1709 // bfc dst, #lsb, #width
1710 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1712 ASSERT(CpuFeatures::IsSupported(ARMv7));
1713 ASSERT(!dst.is(pc));
1714 ASSERT((lsb >= 0) && (lsb <= 31));
1715 ASSERT((width >= 1) && (width <= (32 - lsb)));
// The encoding carries the most-significant bit position, not the width.
1716 int msb = lsb + width - 1;
1717 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1721 // Bit field insert.
1722 // Inserts #width adjacent bits from the low bits of the source register
1723 // into position #lsb of the destination register.
1724 // bfi dst, src, #lsb, #width
1725 void Assembler::bfi(Register dst,
1731 ASSERT(CpuFeatures::IsSupported(ARMv7));
1732 ASSERT(!dst.is(pc) && !src.is(pc));
1733 ASSERT((lsb >= 0) && (lsb <= 31));
1734 ASSERT((width >= 1) && (width <= (32 - lsb)));
1735 int msb = lsb + width - 1;
1736 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
// pkhbt: packs the bottom halfword of src1 with the top halfword of the
// LSL-shifted src2 into dst.
1741 void Assembler::pkhbt(Register dst,
1743 const Operand& src2,
1745 // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1746 // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1747 // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
1748 ASSERT(!dst.is(pc));
1749 ASSERT(!src1.is(pc));
1750 ASSERT(!src2.rm().is(pc));
1751 ASSERT(!src2.rm().is(no_reg));
1752 ASSERT(src2.rs().is(no_reg));
1753 ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
1754 ASSERT(src2.shift_op() == LSL);
1755 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1756 src2.shift_imm_*B7 | B4 | src2.rm().code());
// pkhtb: packs the top halfword of src1 with the bottom halfword of the
// ASR-shifted src2 into dst.
1760 void Assembler::pkhtb(Register dst,
1762 const Operand& src2,
1764 // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1765 // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1766 // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
1767 ASSERT(!dst.is(pc));
1768 ASSERT(!src1.is(pc));
1769 ASSERT(!src2.rm().is(pc));
1770 ASSERT(!src2.rm().is(no_reg));
1771 ASSERT(src2.rs().is(no_reg));
1772 ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
1773 ASSERT(src2.shift_op() == ASR);
// ASR #32 is encoded as an imm5 of 0.
1774 int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
1775 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1776 asr*B7 | B6 | B4 | src2.rm().code());
// uxtb: zero-extends the low byte of the (byte-rotated) source into dst.
// The rotation must be 0, 8, 16 or 24 bits, encoded in the rotate field.
1780 void Assembler::uxtb(Register dst,
1783 // Instruction details available in ARM DDI 0406C.b, A8.8.274.
1784 // cond(31-28) | 01101110(27-20) | 1111(19-16) |
1785 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1786 ASSERT(!dst.is(pc));
1787 ASSERT(!src.rm().is(pc));
1788 ASSERT(!src.rm().is(no_reg));
1789 ASSERT(src.rs().is(no_reg));
1790 ASSERT((src.shift_imm_ == 0) ||
1791 (src.shift_imm_ == 8) ||
1792 (src.shift_imm_ == 16) ||
1793 (src.shift_imm_ == 24));
1794 // Operand maps ROR #0 to LSL #0.
1795 ASSERT((src.shift_op() == ROR) ||
1796 ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
// (shift_imm_ >> 1) & 0xC turns 0/8/16/24 into the 2-bit rotate field.
1797 emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
1798 ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
// uxtab: dst = src1 + zero-extended low byte of the (rotated) src2.
1802 void Assembler::uxtab(Register dst,
1804 const Operand& src2,
1806 // Instruction details available in ARM DDI 0406C.b, A8.8.271.
1807 // cond(31-28) | 01101110(27-20) | Rn(19-16) |
1808 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1809 ASSERT(!dst.is(pc));
1810 ASSERT(!src1.is(pc));
1811 ASSERT(!src2.rm().is(pc));
1812 ASSERT(!src2.rm().is(no_reg));
1813 ASSERT(src2.rs().is(no_reg));
1814 ASSERT((src2.shift_imm_ == 0) ||
1815 (src2.shift_imm_ == 8) ||
1816 (src2.shift_imm_ == 16) ||
1817 (src2.shift_imm_ == 24));
1818 // Operand maps ROR #0 to LSL #0.
1819 ASSERT((src2.shift_op() == ROR) ||
1820 ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
1821 emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
1822 ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
// uxtb16: zero-extends bytes 0 and 2 of the (rotated) source into the two
// halfwords of dst.
1826 void Assembler::uxtb16(Register dst,
1829 // Instruction details available in ARM DDI 0406C.b, A8.8.275.
1830 // cond(31-28) | 01101100(27-20) | 1111(19-16) |
1831 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1832 ASSERT(!dst.is(pc));
1833 ASSERT(!src.rm().is(pc));
1834 ASSERT(!src.rm().is(no_reg));
1835 ASSERT(src.rs().is(no_reg));
1836 ASSERT((src.shift_imm_ == 0) ||
1837 (src.shift_imm_ == 8) ||
1838 (src.shift_imm_ == 16) ||
1839 (src.shift_imm_ == 24));
1840 // Operand maps ROR #0 to LSL #0.
1841 ASSERT((src.shift_op() == ROR) ||
1842 ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
1843 emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
1844 ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
1848 // Status register access instructions.
// mrs: reads the CPSR or SPSR (selected by |s|) into |dst|.
1849 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1850 ASSERT(!dst.is(pc));
1851 emit(cond | B24 | s | 15*B16 | dst.code()*B12);
// msr: writes |src| into the status-register fields selected by |fields|.
// Immediates that cannot be encoded as a rotated imm8 are loaded into ip
// first, then the register form is emitted recursively.
1855 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1857 ASSERT(fields >= B16 && fields < B20); // at least one field set
1859 if (!src.rm_.is_valid()) {
1861 uint32_t rotate_imm;
1863 if (src.must_output_reloc_info(isolate(), this) ||
1864 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
1865 // Immediate operand cannot be encoded, load it first to register ip.
1866 move_32_bit_immediate(ip, src);
1867 msr(fields, Operand(ip), cond);
1870 instr = I | rotate_imm*B8 | immed_8;
1872 ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
1873 instr = src.rm_.code();
1875 emit(cond | instr | B24 | B21 | fields | 15*B12);
1879 // Load/Store instructions. Word/byte forms go through addrmod2; halfword and
// signed forms go through addrmod3. The B bit selects byte access, L selects
// load vs. store, S6/H select signedness and halfword width.
1880 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1882 positions_recorder()->WriteRecordedPositions();
1884 addrmod2(cond | B26 | L, dst, src);
1888 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1889 addrmod2(cond | B26, src, dst);
1893 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1894 addrmod2(cond | B26 | B | L, dst, src);
1898 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1899 addrmod2(cond | B26 | B, src, dst);
1903 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1904 addrmod3(cond | L | B7 | H | B4, dst, src);
1908 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1909 addrmod3(cond | B7 | H | B4, src, dst);
1913 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1914 addrmod3(cond | L | B7 | S6 | B4, dst, src);
1918 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1919 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
// ldrd: loads a doubleword into the even/odd register pair dst1/dst2.
// ARMv7 only; the pair must be consecutive with an even-numbered first half.
1923 void Assembler::ldrd(Register dst1, Register dst2,
1924 const MemOperand& src, Condition cond) {
1925 ASSERT(IsEnabled(ARMv7));
1926 ASSERT(src.rm().is(no_reg));
1927 ASSERT(!dst1.is(lr)); // r14.
1928 ASSERT_EQ(0, dst1.code() % 2);
1929 ASSERT_EQ(dst1.code() + 1, dst2.code());
1930 addrmod3(cond | B7 | B6 | B4, dst1, src);
// strd: stores the even/odd register pair src1/src2 as a doubleword.
1934 void Assembler::strd(Register src1, Register src2,
1935 const MemOperand& dst, Condition cond) {
1936 ASSERT(dst.rm().is(no_reg));
1937 ASSERT(!src1.is(lr)); // r14.
1938 ASSERT_EQ(0, src1.code() % 2);
1939 ASSERT_EQ(src1.code() + 1, src2.code());
1940 ASSERT(IsEnabled(ARMv7));
1941 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
1945 // Preload instructions.
// pld: hints that the given address will be read soon. Only immediate-offset
// Offset addressing is supported; the offset must fit in 12 bits.
1946 void Assembler::pld(const MemOperand& address) {
1947 // Instruction details available in ARM DDI 0406C.b, A8.8.128.
1948 // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
1949 // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
1950 ASSERT(address.rm().is(no_reg));
1951 ASSERT(address.am() == Offset);
1953 int offset = address.offset();
// NOTE(review): the negative-offset handling (clearing U, negating) is elided
// in this excerpt — confirm against the full source before editing.
1958 ASSERT(offset < 4096);
1959 emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
1964 // Load/Store multiple instructions.
// ldm: loads the registers in mask |dst| from memory at |base| per |am|.
1965 void Assembler::ldm(BlockAddrMode am,
1969 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
1970 ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
1972 addrmod4(cond | B27 | am | L, base, dst);
1974 // Emit the constant pool after a function return implemented by ldm ..{..pc}.
1975 if (cond == al && (dst & pc.bit()) != 0) {
1976 // There is a slight chance that the ldm instruction was actually a call,
1977 // in which case it would be wrong to return into the constant pool; we
1978 // recognize this case by checking if the emission of the pool was blocked
1979 // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
1980 // the case, we emit a jump over the pool.
1981 CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
// stm: stores the registers in mask |src| to memory at |base| per |am|.
1986 void Assembler::stm(BlockAddrMode am,
1990 addrmod4(cond | B27 | am, base, src);
1994 // Exception-generating instructions and debugging support.
1995 // Stops with a non-negative code less than kNumOfWatchedStops support
1996 // enabling/disabling and a counter feature. See simulator-arm.h .
// On the simulator an svc with an encoded stop code is emitted, followed by
// the message pointer embedded in the instruction stream; on real hardware
// (the #else branch) a breakpoint guarded by the negated condition is used.
1997 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
1999 ASSERT(code >= kDefaultStopCode);
2001 // The Simulator will handle the stop instruction and get the message
2002 // address. It expects to find the address just after the svc instruction.
2003 BlockConstPoolScope block_const_pool(this);
2005 svc(kStopCode + code, cond);
// Codes below kDefaultStopCode fall back to the maximum watched stop code.
2007 svc(kStopCode + kMaxStopCode, cond);
2009 emit(reinterpret_cast<Instr>(msg));
2011 #else // def __arm__
2014 b(&skip, NegateCondition(cond));
2020 #endif // def __arm__
// bkpt: software breakpoint; the 16-bit immediate is split across the
// imm12/imm4 fields of the encoding. Always unconditional (al).
2024 void Assembler::bkpt(uint32_t imm16) { // v5 and above
2025 ASSERT(is_uint16(imm16));
2026 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
// svc: supervisor (software-interrupt) call with a 24-bit immediate.
2030 void Assembler::svc(uint32_t imm24, Condition cond) {
2031 ASSERT(is_uint24(imm24));
2032 emit(cond | 15*B24 | imm24);
2036 // Coprocessor instructions. The *2 variants emit the unconditional
// (kSpecialCondition) ARMv5+ encodings by delegating to the base form.
// cdp: coprocessor data processing.
2037 void Assembler::cdp(Coprocessor coproc,
2044 ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
2045 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
2046 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
2050 void Assembler::cdp2(Coprocessor coproc,
2055 int opcode_2) { // v5 and above
2056 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
// mcr: move from ARM register |rd| to a coprocessor register.
2060 void Assembler::mcr(Coprocessor coproc,
2067 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
2068 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
2069 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2073 void Assembler::mcr2(Coprocessor coproc,
2078 int opcode_2) { // v5 and above
2079 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
// mrc: move from a coprocessor register to ARM register |rd| (L bit set).
2083 void Assembler::mrc(Coprocessor coproc,
2090 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
2091 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2092 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2096 void Assembler::mrc2(Coprocessor coproc,
2101 int opcode_2) { // v5 and above
2102 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
// ldc (indexed form): coprocessor load through addrmod5.
2106 void Assembler::ldc(Coprocessor coproc,
2108 const MemOperand& src,
2111 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
// ldc (unindexed form): base register plus an 8-bit option field.
2115 void Assembler::ldc(Coprocessor coproc,
2121 // Unindexed addressing.
2122 ASSERT(is_uint8(option));
2123 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2124 coproc*B8 | (option & 255));
2128 void Assembler::ldc2(Coprocessor coproc,
2130 const MemOperand& src,
2131 LFlag l) { // v5 and above
2132 ldc(coproc, crd, src, l, kSpecialCondition);
2136 void Assembler::ldc2(Coprocessor coproc,
2140 LFlag l) { // v5 and above
2141 ldc(coproc, crd, rn, option, l, kSpecialCondition);
// vldr (double): loads a D register from base+offset. Offsets must be
// word-multiples below 1024 to be encoded directly; anything larger is
// computed into ip first (add/sub chosen by the elided sign test).
2147 void Assembler::vldr(const DwVfpRegister dst,
2148 const Register base,
2150 const Condition cond) {
2151 // Ddst = MEM(Rbase + offset).
2152 // Instruction details available in ARM DDI 0406C.b, A8-924.
2153 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
2154 // Vd(15-12) | 1011(11-8) | offset
2161 dst.split_code(&vd, &d);
2163 ASSERT(offset >= 0);
2164 if ((offset % 4) == 0 && (offset / 4) < 256) {
2165 emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
2166 0xB*B8 | ((offset / 4) & 255));
2168 // Larger offsets must be handled by computing the correct address
2169 // in the ip register.
2170 ASSERT(!base.is(ip));
2172 add(ip, base, Operand(offset));
2174 sub(ip, base, Operand(offset));
2176 emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
// MemOperand convenience overload: immediate Offset addressing only.
2181 void Assembler::vldr(const DwVfpRegister dst,
2182 const MemOperand& operand,
2183 const Condition cond) {
2184 ASSERT(!operand.rm().is_valid());
2185 ASSERT(operand.am_ == Offset);
2186 vldr(dst, operand.rn(), operand.offset(), cond);
// vldr (single): same scheme for an S register.
2190 void Assembler::vldr(const SwVfpRegister dst,
2191 const Register base,
2193 const Condition cond) {
2194 // Sdst = MEM(Rbase + offset).
2195 // Instruction details available in ARM DDI 0406A, A8-628.
2196 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
2197 // Vdst(15-12) | 1010(11-8) | offset
2204 dst.split_code(&sd, &d);
2205 ASSERT(offset >= 0);
2207 if ((offset % 4) == 0 && (offset / 4) < 256) {
2208 emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
2209 0xA*B8 | ((offset / 4) & 255));
2211 // Larger offsets must be handled by computing the correct address
2212 // in the ip register.
2213 ASSERT(!base.is(ip));
2215 add(ip, base, Operand(offset));
2217 sub(ip, base, Operand(offset));
2219 emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
// MemOperand convenience overload: immediate Offset addressing only.
2224 void Assembler::vldr(const SwVfpRegister dst,
2225 const MemOperand& operand,
2226 const Condition cond) {
2227 ASSERT(!operand.rm().is_valid());
2228 ASSERT(operand.am_ == Offset);
2229 vldr(dst, operand.rn(), operand.offset(), cond);
// vstr (double): stores a D register to base+offset; mirrors vldr — large
// offsets are computed into ip first.
2233 void Assembler::vstr(const DwVfpRegister src,
2234 const Register base,
2236 const Condition cond) {
2237 // MEM(Rbase + offset) = Dsrc.
2238 // Instruction details available in ARM DDI 0406C.b, A8-1082.
2239 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
2240 // Vd(15-12) | 1011(11-8) | (offset/4)
2246 ASSERT(offset >= 0);
2248 src.split_code(&vd, &d);
2250 if ((offset % 4) == 0 && (offset / 4) < 256) {
2251 emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2252 ((offset / 4) & 255));
2254 // Larger offsets must be handled by computing the correct address
2255 // in the ip register.
2256 ASSERT(!base.is(ip));
2258 add(ip, base, Operand(offset));
2260 sub(ip, base, Operand(offset));
2262 emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
// MemOperand convenience overload: immediate Offset addressing only.
2267 void Assembler::vstr(const DwVfpRegister src,
2268 const MemOperand& operand,
2269 const Condition cond) {
2270 ASSERT(!operand.rm().is_valid());
2271 ASSERT(operand.am_ == Offset);
2272 vstr(src, operand.rn(), operand.offset(), cond);
// vstr (single): same scheme for an S register.
2276 void Assembler::vstr(const SwVfpRegister src,
2277 const Register base,
2279 const Condition cond) {
2280 // MEM(Rbase + offset) = SSrc.
2281 // Instruction details available in ARM DDI 0406A, A8-786.
2282 // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
2283 // Vdst(15-12) | 1010(11-8) | (offset/4)
2290 src.split_code(&sd, &d);
2291 ASSERT(offset >= 0);
2292 if ((offset % 4) == 0 && (offset / 4) < 256) {
2293 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2294 0xA*B8 | ((offset / 4) & 255));
2296 // Larger offsets must be handled by computing the correct address
2297 // in the ip register.
2298 ASSERT(!base.is(ip));
2300 add(ip, base, Operand(offset));
2302 sub(ip, base, Operand(offset));
2304 emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
// MemOperand convenience overload: immediate Offset addressing only.
2309 void Assembler::vstr(const SwVfpRegister src,
2310 const MemOperand& operand,
2311 const Condition cond) {
2312 ASSERT(!operand.rm().is_valid());
2313 ASSERT(operand.am_ == Offset);
2314 vstr(src, operand.rn(), operand.offset(), cond);
2318 void Assembler::vldm(BlockAddrMode am,
2320 DwVfpRegister first,
2323 // Instruction details available in ARM DDI 0406C.b, A8-922.
2324 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2325 // first(15-12) | 1011(11-8) | (count * 2)
2326 ASSERT_LE(first.code(), last.code());
2327 ASSERT(am == ia || am == ia_w || am == db_w);
2328 ASSERT(!base.is(pc));
2331 first.split_code(&sd, &d);
2332 int count = last.code() - first.code() + 1;
2333 ASSERT(count <= 16);
2334 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
// Stores a contiguous block of D-registers [first..last] to memory (vstm);
// mirror of the vldm overload above (L-bit clear).
// NOTE(review): parameter lines, sd/d declarations and the emit() tail are
// elided in this numbered listing.
2339 void Assembler::vstm(BlockAddrMode am,
2341 DwVfpRegister first,
2344 // Instruction details available in ARM DDI 0406C.b, A8-1080.
2345 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2346 // first(15-12) | 1011(11-8) | (count * 2)
2347 ASSERT_LE(first.code(), last.code());
2348 ASSERT(am == ia || am == ia_w || am == db_w);
2349 ASSERT(!base.is(pc));
2352 first.split_code(&sd, &d);
2353 int count = last.code() - first.code() + 1;
2354 ASSERT(count <= 16);
2355 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
// Loads a contiguous block of S-registers [first..last] from memory (vldm,
// single-precision variant). Note: unlike the D-register variant there is no
// visible count<=16 assert here; the count fits in the 8-bit field directly.
// NOTE(review): parameter lines, sd/d declarations and the emit() tail are
// elided in this numbered listing.
2359 void Assembler::vldm(BlockAddrMode am,
2361 SwVfpRegister first,
2364 // Instruction details available in ARM DDI 0406A, A8-626.
2365 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2366 // first(15-12) | 1010(11-8) | (count/2)
2367 ASSERT_LE(first.code(), last.code());
2368 ASSERT(am == ia || am == ia_w || am == db_w);
2369 ASSERT(!base.is(pc));
2372 first.split_code(&sd, &d);
2373 int count = last.code() - first.code() + 1;
2374 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
// Stores a contiguous block of S-registers [first..last] to memory (vstm,
// single-precision variant); mirror of the S-register vldm above.
// NOTE(review): parameter lines, sd/d declarations and the emit() tail are
// elided in this numbered listing.
2379 void Assembler::vstm(BlockAddrMode am,
2381 SwVfpRegister first,
2384 // Instruction details available in ARM DDI 0406A, A8-784.
2385 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2386 // first(15-12) | 1011(11-8) | (count/2)
2387 ASSERT_LE(first.code(), last.code());
2388 ASSERT(am == ia || am == ia_w || am == db_w);
2389 ASSERT(!base.is(pc));
2392 first.split_code(&sd, &d);
2393 int count = last.code() - first.code() + 1;
2394 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
// Reinterprets the raw bits of |d| as two 32-bit words via a byte copy
// (OS::MemCopy), avoiding type-punning UB. |lo| receives the low word.
// NOTE(review): the declaration of the 64-bit temporary |i| and the
// assignment to *hi are elided in this numbered listing.
2399 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2401 OS::MemCopy(&i, &d, 8);
2403 *lo = i & 0xffffffff;
2408 // Only works for little endian floating point formats.
2409 // We don't support VFP on the mixed endian floating point platform.
// Returns whether |d| can be encoded as a VFPv3 vmov.f64 8-bit immediate,
// and if so writes the encoded [abcd:efgh] fields into |encoding|.
// NOTE(review): the early `return false;` lines inside each rejection branch
// and the final `return true;` are elided in this numbered listing.
2410 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
2411 ASSERT(CpuFeatures::IsSupported(VFP3));
2413 // VMOV can accept an immediate of the form:
2415 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2417 // The immediate is encoded using an 8-bit quantity, comprised of two
2418 // 4-bit fields. For an 8-bit immediate of the form:
2422 // where a is the MSB and h is the LSB, an immediate 64-bit double can be
2423 // created of the form:
2425 // [aBbbbbbb,bbcdefgh,00000000,00000000,
2426 // 00000000,00000000,00000000,00000000]
2432 DoubleAsTwoUInt32(d, &lo, &hi);
2434 // The most obvious constraint is the long block of zeroes.
2435 if ((lo != 0) || ((hi & 0xffff) != 0)) {
2439 // Bits 62:55 must be all clear or all set.
2440 if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2444 // Bit 63 must be NOT bit 62.
2445 if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2449 // Create the encoded immediate in the form:
2450 // [00000000,0000abcd,00000000,0000efgh]
2451 *encoding = (hi >> 16) & 0xf; // Low nybble.
2452 *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
2453 *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
// Loads the double constant |imm| into D-register |dst|, choosing the
// cheapest available strategy:
//   1. a single vmov.f64 with an encoded immediate (VFP3 only),
//   2. a vldr from the constant pool (behind FLAG_enable_vldr_imm),
//   3. otherwise synthesize the two 32-bit halves via integer moves, using
//      |scratch| if provided, else ip (twice) with per-half vmov transfers.
// NOTE(review): the |imm| parameter line, several local declarations
// (enc, lo, hi) and else/closing-brace lines are elided in this listing.
2459 void Assembler::vmov(const DwVfpRegister dst,
2461 const Register scratch) {
2463 if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
2464 // The double can be encoded in the instruction.
2467 // Instruction details available in ARM DDI 0406C.b, A8-936.
2468 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
2469 // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
2471 dst.split_code(&vd, &d);
2472 emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
2473 } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
2474 // TODO(jfb) Temporarily turned off until we have constant blinding or
2475 // some equivalent mitigation: an attacker can otherwise control
2476 // generated data which also happens to be executable, a Very Bad
2478 // Blinding gets tricky because we don't have xor, we probably
2479 // need to add/subtract without losing precision, which requires a
2480 // cookie value that Lithium is probably better positioned to
2482 // We could also add a few peepholes here like detecting 0.0 and
2483 // -0.0 and doing a vmov from the sequestered d14, forcing denorms
2484 // to zero (we set flush-to-zero), and normalizing NaN values.
2485 // We could also detect redundant values.
2486 // The code could also randomize the order of values, though
2487 // that's tricky because vldr has a limited reach. Furthermore
2488 // it breaks load locality.
2489 RelocInfo rinfo(pc_, imm);
2490 ConstantPoolAddEntry(rinfo);
2491 vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
2493 // Synthesise the double from ARM immediates.
2495 DoubleAsTwoUInt32(imm, &lo, &hi);
2497 if (scratch.is(no_reg)) {
2498 if (dst.code() < 16) {
2499 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
2500 // Move the low part of the double into the lower of the corresponsing S
2501 // registers of D register dst.
2502 mov(ip, Operand(lo));
2503 vmov(loc.low(), ip);
2505 // Move the high part of the double into the higher of the
2506 // corresponsing S registers of D register dst.
2507 mov(ip, Operand(hi));
2508 vmov(loc.high(), ip);
2510 // D16-D31 does not have S registers, so move the low and high parts
2511 // directly to the D register using vmov.32.
2512 // Note: This may be slower, so we only do this when we have to.
2513 mov(ip, Operand(lo));
2514 vmov(dst, VmovIndexLo, ip);
2515 mov(ip, Operand(hi));
2516 vmov(dst, VmovIndexHi, ip);
2519 // Move the low and high parts of the double to a D register in one
2521 mov(ip, Operand(lo));
2522 mov(scratch, Operand(hi));
2523 vmov(dst, ip, scratch);
// Register-to-register move between two S-registers (vmov.f32 Sd, Sm).
// NOTE(review): the int sd/d/sm/m declarations are elided in this listing.
2529 void Assembler::vmov(const SwVfpRegister dst,
2530 const SwVfpRegister src,
2531 const Condition cond) {
2533 // Instruction details available in ARM DDI 0406B, A8-642.
2535 dst.split_code(&sd, &d);
2536 src.split_code(&sm, &m);
2537 emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
// Register-to-register move between two D-registers (vmov.f64 Dd, Dm).
// NOTE(review): the int vd/d/vm/m declarations and the emit() tail are
// elided in this listing.
2541 void Assembler::vmov(const DwVfpRegister dst,
2542 const DwVfpRegister src,
2543 const Condition cond) {
2545 // Instruction details available in ARM DDI 0406C.b, A8-938.
2546 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2547 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2549 dst.split_code(&vd, &d);
2551 src.split_code(&vm, &m);
2552 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
// Moves a core register into one 32-bit half of a D-register
// (vmov.32 Dd[index], Rt); index must be 0 (low) or 1 (high).
// NOTE(review): the Register src parameter line, the vd/d declarations and
// the emit() tail are elided in this listing.
2557 void Assembler::vmov(const DwVfpRegister dst,
2558 const VmovIndex index,
2560 const Condition cond) {
2562 // Instruction details available in ARM DDI 0406C.b, A8-940.
2563 // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
2564 // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2565 ASSERT(index.index == 0 || index.index == 1);
2567 dst.split_code(&vd, &d);
2568 emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
// Moves one 32-bit half of a D-register into a core register
// (vmov.32 Rt, Dn[index]); index must be 0 (low) or 1 (high).
// NOTE(review): the vn/n declarations are elided in this listing.
2573 void Assembler::vmov(const Register dst,
2574 const VmovIndex index,
2575 const DwVfpRegister src,
2576 const Condition cond) {
2578 // Instruction details available in ARM DDI 0406C.b, A8.8.342.
2579 // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
2580 // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2581 ASSERT(index.index == 0 || index.index == 1);
2583 src.split_code(&vn, &n);
2584 emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
2585 0xB*B8 | n*B7 | B4);
// Moves two core registers into a D-register in one instruction
// (vmov Dm, Rt, Rt2); neither source may be pc.
// NOTE(review): the vm/m declarations are elided in this listing.
2589 void Assembler::vmov(const DwVfpRegister dst,
2590 const Register src1,
2591 const Register src2,
2592 const Condition cond) {
2594 // Instruction details available in ARM DDI 0406C.b, A8-948.
2595 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2596 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2597 ASSERT(!src1.is(pc) && !src2.is(pc));
2599 dst.split_code(&vm, &m);
2600 emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
2601 src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
// Moves a D-register into two core registers in one instruction
// (vmov Rt, Rt2, Dm); neither destination may be pc.
// NOTE(review): the vm/m declarations are elided in this listing.
2605 void Assembler::vmov(const Register dst1,
2606 const Register dst2,
2607 const DwVfpRegister src,
2608 const Condition cond) {
2610 // Instruction details available in ARM DDI 0406C.b, A8-948.
2611 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2612 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2613 ASSERT(!dst1.is(pc) && !dst2.is(pc));
2615 src.split_code(&vm, &m);
2616 emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2617 dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
// Moves a core register into an S-register (vmov Sn, Rt); src must not be pc.
// NOTE(review): the Register src parameter line and the sn/n declarations
// are elided in this listing.
2621 void Assembler::vmov(const SwVfpRegister dst,
2623 const Condition cond) {
2625 // Instruction details available in ARM DDI 0406A, A8-642.
2626 // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2627 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2628 ASSERT(!src.is(pc));
2630 dst.split_code(&sn, &n);
2631 emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
// Moves an S-register into a core register (vmov Rt, Sn); dst must not be pc.
// NOTE(review): the sn/n declarations are elided in this listing.
2635 void Assembler::vmov(const Register dst,
2636 const SwVfpRegister src,
2637 const Condition cond) {
2639 // Instruction details available in ARM DDI 0406A, A8-642.
2640 // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2641 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2642 ASSERT(!dst.is(pc));
2644 src.split_code(&sn, &n);
2645 emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
2649 // Type of data to read from or write to VFP register.
2650 // Used as specifier in generic vcvt instruction.
// S32/U32: signed/unsigned 32-bit integer; F32/F64: single/double float.
2651 enum VFPType { S32, U32, F32, F64 };
// Predicates classifying a VFPType for the vcvt encoder below.
// NOTE(review): the bodies of all three helpers (presumably switch/return
// statements over S32/U32/F32/F64) are entirely elided in this listing —
// consult the full source before relying on their semantics.
2654 static bool IsSignedVFPType(VFPType type) {
2667 static bool IsIntegerVFPType(VFPType type) {
2682 static bool IsDoubleVFPType(VFPType type) {
2695 // Split five bit reg_code based on size of reg_type.
2696 // 32-bit register codes are Vm:M
2697 // 64-bit register codes are M:Vm
2698 // where Vm is four bits, and M is a single bit.
// Splits |reg_code| into the 4-bit field |vm| and the 1-bit field |m| used by
// VFP encodings; the bit order depends on whether the type is 32- or 64-bit.
// NOTE(review): the reg_code/vm/m parameter lines and the else framing are
// elided in this listing.
2699 static void SplitRegCode(VFPType reg_type,
2703 ASSERT((reg_code >= 0) && (reg_code <= 31));
2704 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2706 *m = reg_code & 0x1;
2707 *vm = reg_code >> 1;
2710 *m = (reg_code & 0x10) >> 4;
2711 *vm = reg_code & 0x0F;
2716 // Encode vcvt.src_type.dst_type instruction.
// Builds the full 32-bit vcvt instruction word for either int<->float
// conversion (first branch) or f32<->f64 conversion (second branch).
// Exactly one of dst/src may be an integer type.
// NOTE(review): the dst_code/src_code parameter lines, the Vd/D/Vm/M
// declarations and some opc2/op/sz assignments are elided in this listing.
2717 static Instr EncodeVCVT(const VFPType dst_type,
2719 const VFPType src_type,
2721 VFPConversionMode mode,
2722 const Condition cond) {
2723 ASSERT(src_type != dst_type);
2725 SplitRegCode(src_type, src_code, &Vm, &M);
2726 SplitRegCode(dst_type, dst_code, &Vd, &D);
2728 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2729 // Conversion between IEEE floating point and 32-bit integer.
2730 // Instruction details available in ARM DDI 0406B, A8.6.295.
2731 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2732 // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2733 ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
2737 if (IsIntegerVFPType(dst_type)) {
2738 opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2739 sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2742 ASSERT(IsIntegerVFPType(src_type));
2744 sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2745 op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
2748 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2749 Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2751 // Conversion between IEEE double and single precision.
2752 // Instruction details available in ARM DDI 0406B, A8.6.298.
2753 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2754 // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2755 int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2756 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2757 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
// Converts a signed 32-bit integer (in S-reg |src|) to f64 in |dst|.
2762 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2763 const SwVfpRegister src,
2764 VFPConversionMode mode,
2765 const Condition cond) {
2766 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
// Converts a signed 32-bit integer (in S-reg |src|) to f32 in |dst|.
2770 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2771 const SwVfpRegister src,
2772 VFPConversionMode mode,
2773 const Condition cond) {
2774 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
// Converts an unsigned 32-bit integer (in S-reg |src|) to f64 in |dst|.
2778 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2779 const SwVfpRegister src,
2780 VFPConversionMode mode,
2781 const Condition cond) {
2782 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
// Converts an f64 in |src| to a signed 32-bit integer in S-reg |dst|.
2786 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2787 const DwVfpRegister src,
2788 VFPConversionMode mode,
2789 const Condition cond) {
2790 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
// Converts an f64 in |src| to an unsigned 32-bit integer in S-reg |dst|.
2794 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2795 const DwVfpRegister src,
2796 VFPConversionMode mode,
2797 const Condition cond) {
2798 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
// Widens an f32 in |src| to an f64 in |dst|.
2802 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2803 const SwVfpRegister src,
2804 VFPConversionMode mode,
2805 const Condition cond) {
2806 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
// Narrows an f64 in |src| to an f32 in |dst|.
2810 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2811 const DwVfpRegister src,
2812 VFPConversionMode mode,
2813 const Condition cond) {
2814 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
// Fixed-point variant: converts the signed fixed-point value already in |dst|
// (with |fraction_bits| fractional bits) to f64, in place. Requires VFP3.
// NOTE(review): the fraction_bits parameter line, the vd/d declarations and
// the derivation of |i| from imm5 are elided in this listing.
2818 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2820 const Condition cond) {
2821 // Instruction details available in ARM DDI 0406C.b, A8-874.
2822 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
2823 // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
2824 ASSERT(fraction_bits > 0 && fraction_bits <= 32);
2825 ASSERT(CpuFeatures::IsSupported(VFP3));
2827 dst.split_code(&vd, &d);
2828 int imm5 = 32 - fraction_bits;
2830 int imm4 = (imm5 >> 1) & 0xf;
2831 emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
2832 vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
// Double-precision negation: Dd = -Dm.
// NOTE(review): vd/d/vm/m declarations and the emit() tail are elided.
2836 void Assembler::vneg(const DwVfpRegister dst,
2837 const DwVfpRegister src,
2838 const Condition cond) {
2839 // Instruction details available in ARM DDI 0406C.b, A8-968.
2840 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
2841 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2843 dst.split_code(&vd, &d);
2845 src.split_code(&vm, &m);
2847 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
// Double-precision absolute value: Dd = |Dm|.
// NOTE(review): vd/d/vm/m declarations and the emit() tail are elided.
2852 void Assembler::vabs(const DwVfpRegister dst,
2853 const DwVfpRegister src,
2854 const Condition cond) {
2855 // Instruction details available in ARM DDI 0406C.b, A8-524.
2856 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2857 // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2859 dst.split_code(&vd, &d);
2861 src.split_code(&vm, &m);
2862 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
// Double-precision addition: Dd = Dn + Dm.
// NOTE(review): vd/d/vn/n/vm/m declarations and the emit() tail are elided.
2867 void Assembler::vadd(const DwVfpRegister dst,
2868 const DwVfpRegister src1,
2869 const DwVfpRegister src2,
2870 const Condition cond) {
2871 // Dd = vadd(Dn, Dm) double precision floating point addition.
2872 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2873 // Instruction details available in ARM DDI 0406C.b, A8-830.
2874 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2875 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2877 dst.split_code(&vd, &d);
2879 src1.split_code(&vn, &n);
2881 src2.split_code(&vm, &m);
2882 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
// Double-precision subtraction: Dd = Dn - Dm.
// NOTE(review): vd/d/vn/n/vm/m declarations are elided in this listing.
2887 void Assembler::vsub(const DwVfpRegister dst,
2888 const DwVfpRegister src1,
2889 const DwVfpRegister src2,
2890 const Condition cond) {
2891 // Dd = vsub(Dn, Dm) double precision floating point subtraction.
2892 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2893 // Instruction details available in ARM DDI 0406C.b, A8-1086.
2894 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2895 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2897 dst.split_code(&vd, &d);
2899 src1.split_code(&vn, &n);
2901 src2.split_code(&vm, &m);
2902 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2903 n*B7 | B6 | m*B5 | vm);
// Double-precision multiplication: Dd = Dn * Dm.
// NOTE(review): vd/d/vn/n/vm/m declarations and the emit() tail are elided.
2907 void Assembler::vmul(const DwVfpRegister dst,
2908 const DwVfpRegister src1,
2909 const DwVfpRegister src2,
2910 const Condition cond) {
2911 // Dd = vmul(Dn, Dm) double precision floating point multiplication.
2912 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2913 // Instruction details available in ARM DDI 0406C.b, A8-960.
2914 // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
2915 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2917 dst.split_code(&vd, &d);
2919 src1.split_code(&vn, &n);
2921 src2.split_code(&vm, &m);
2922 emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
// Double-precision multiply-accumulate: Dd += Dn * Dm.
// NOTE(review): vd/d/vn/n/vm/m declarations and the emit() tail are elided.
2927 void Assembler::vmla(const DwVfpRegister dst,
2928 const DwVfpRegister src1,
2929 const DwVfpRegister src2,
2930 const Condition cond) {
2931 // Instruction details available in ARM DDI 0406C.b, A8-932.
2932 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
2933 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
2935 dst.split_code(&vd, &d);
2937 src1.split_code(&vn, &n);
2939 src2.split_code(&vm, &m);
2940 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
// Double-precision multiply-subtract: Dd -= Dn * Dm (op bit 6 set).
// NOTE(review): vd/d/vn/n/vm/m declarations and the emit() tail are elided.
2945 void Assembler::vmls(const DwVfpRegister dst,
2946 const DwVfpRegister src1,
2947 const DwVfpRegister src2,
2948 const Condition cond) {
2949 // Instruction details available in ARM DDI 0406C.b, A8-932.
2950 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
2951 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
2953 dst.split_code(&vd, &d);
2955 src1.split_code(&vn, &n);
2957 src2.split_code(&vm, &m);
2958 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
// Double-precision division: Dd = Dn / Dm.
// NOTE(review): vd/d/vn/n/vm/m declarations and the emit() tail are elided.
2963 void Assembler::vdiv(const DwVfpRegister dst,
2964 const DwVfpRegister src1,
2965 const DwVfpRegister src2,
2966 const Condition cond) {
2967 // Dd = vdiv(Dn, Dm) double precision floating point division.
2968 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2969 // Instruction details available in ARM DDI 0406C.b, A8-882.
2970 // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
2971 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2973 dst.split_code(&vd, &d);
2975 src1.split_code(&vn, &n);
2977 src2.split_code(&vm, &m);
2978 emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
// Double-precision compare of two D-registers; result goes to the FPSCR
// flags (read back via vmrs).
// NOTE(review): vd/d/vm/m declarations and the emit() tail are elided.
2983 void Assembler::vcmp(const DwVfpRegister src1,
2984 const DwVfpRegister src2,
2985 const Condition cond) {
2986 // vcmp(Dd, Dm) double precision floating point comparison.
2987 // Instruction details available in ARM DDI 0406C.b, A8-864.
2988 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
2989 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2991 src1.split_code(&vd, &d);
2993 src2.split_code(&vm, &m);
2994 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
// Double-precision compare against literal zero; only src2 == 0.0 is
// encodable (asserted), using the dedicated compare-with-zero form.
// NOTE(review): the double src2 parameter line and the vd/d declarations
// are elided in this listing.
2999 void Assembler::vcmp(const DwVfpRegister src1,
3001 const Condition cond) {
3002 // vcmp(Dd, #0.0) double precision floating point comparison.
3003 // Instruction details available in ARM DDI 0406C.b, A8-864.
3004 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
3005 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
3006 ASSERT(src2 == 0.0);
3008 src1.split_code(&vd, &d);
3009 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
// Writes core register |dst| into the FPSCR (vmsr FPSCR, Rt).
3013 void Assembler::vmsr(Register dst, Condition cond) {
3014 // Instruction details available in ARM DDI 0406A, A8-652.
3015 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
3016 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
3017 emit(cond | 0xE*B24 | 0xE*B20 | B16 |
3018 dst.code()*B12 | 0xA*B8 | B4);
// Reads the FPSCR into core register |dst| (vmrs Rt, FPSCR).
3022 void Assembler::vmrs(Register dst, Condition cond) {
3023 // Instruction details available in ARM DDI 0406A, A8-652.
3024 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
3025 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
3026 emit(cond | 0xE*B24 | 0xF*B20 | B16 |
3027 dst.code()*B12 | 0xA*B8 | B4);
// Double-precision square root: Dd = sqrt(Dm).
// NOTE(review): vd/d/vm/m declarations and the emit() tail are elided.
3031 void Assembler::vsqrt(const DwVfpRegister dst,
3032 const DwVfpRegister src,
3033 const Condition cond) {
3034 // Instruction details available in ARM DDI 0406C.b, A8-1058.
3035 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
3036 // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
3038 dst.split_code(&vd, &d);
3040 src.split_code(&vm, &m);
3041 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
3046 // Support for NEON.
// NEON vld1: loads multiple single elements into the register list |dst|
// from the address in |src|. Requires NEON support; unconditional (0xF cond).
// NOTE(review): the vd/d declarations are elided in this listing.
3048 void Assembler::vld1(NeonSize size,
3049 const NeonListOperand& dst,
3050 const NeonMemOperand& src) {
3051 // Instruction details available in ARM DDI 0406C.b, A8.8.320.
3052 // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
3053 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3054 ASSERT(CpuFeatures::IsSupported(NEON));
3056 dst.base().split_code(&vd, &d);
3057 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
3058 dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
// NEON vst1: stores multiple single elements from the register list |src|
// to the address in |dst|. Requires NEON support; unconditional (0xF cond).
// NOTE(review): the vd/d declarations are elided in this listing.
3062 void Assembler::vst1(NeonSize size,
3063 const NeonListOperand& src,
3064 const NeonMemOperand& dst) {
3065 // Instruction details available in ARM DDI 0406C.b, A8.8.404.
3066 // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
3067 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3068 ASSERT(CpuFeatures::IsSupported(NEON));
3070 src.base().split_code(&vd, &d);
3071 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
3072 size*B6 | dst.align()*B4 | dst.rm().code());
// NEON vmovl: widens each element of D-register |src| into Q-register |dst|,
// sign- or zero-extending according to |dt|. Requires NEON support.
// NOTE(review): the vd/d/vm/m declarations are elided in this listing.
3076 void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
3077 // Instruction details available in ARM DDI 0406C.b, A8.8.346.
3078 // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
3079 // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
3080 ASSERT(CpuFeatures::IsSupported(NEON));
3082 dst.split_code(&vd, &d);
3084 src.split_code(&vm, &m);
3085 emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
3086 (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
3090 // Pseudo instructions.
// Emits `mov rX, rX` as a nop; |type| (0..14) selects which register, so
// different nop "types" are distinguishable when scanning code.
3091 void Assembler::nop(int type) {
3092 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
3093 // some of the CPU's pipeline and has to issue. Older ARM chips simply used
3094 // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
3095 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
3097 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3098 emit(al | 13*B21 | type*B12 | type);
// Returns true if |instr| is a movt, ignoring condition, destination
// register and immediate fields.
3102 bool Assembler::IsMovT(Instr instr) {
3103 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3104 ((kNumRegisters-1)*B12) | // mask out register
3105 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3106 return instr == 0x34*B20;
// Returns true if |instr| is a movw, ignoring condition, destination
// register and immediate fields.
3110 bool Assembler::IsMovW(Instr instr) {
3111 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3112 ((kNumRegisters-1)*B12) | // mask out destination
3113 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3114 return instr == 0x30*B20;
// Returns true if |instr| is the `mov rX, rX` nop of the given |type|
// (exactly as emitted by Assembler::nop above).
3118 bool Assembler::IsNop(Instr instr, int type) {
3119 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3120 // Check for mov rx, rx where x = type.
3121 return instr == (al | 13*B21 | type*B12 | type);
// Returns whether |imm32| can be encoded as a data-processing (addressing
// mode 1) rotated 8-bit immediate, via fits_shifter.
// NOTE(review): the dummy1/dummy2 declarations are elided in this listing.
3125 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
3128 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
// Returns whether |imm32| fits the 12-bit unsigned offset of a load/store
// (addressing mode 2) instruction; the sign is carried by the U bit.
3132 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
3133 return is_uint12(abs(imm32));
// Records a JS_RETURN reloc entry at the current pc (used by the debugger to
// patch return sites), flushing any buffered source positions first.
3138 void Assembler::RecordJSReturn() {
3139 positions_recorder()->WriteRecordedPositions();
3141 RecordRelocInfo(RelocInfo::JS_RETURN);
// Records a DEBUG_BREAK_SLOT reloc entry at the current pc, flushing any
// buffered source positions first.
3145 void Assembler::RecordDebugBreakSlot() {
3146 positions_recorder()->WriteRecordedPositions();
3148 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
// Attaches a code comment at the current pc via a COMMENT reloc entry;
// no-op unless --code-comments is enabled. |msg| must outlive the reloc info.
3152 void Assembler::RecordComment(const char* msg) {
3153 if (FLAG_code_comments) {
3155 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
// Records a CONST_POOL reloc entry carrying the pool's byte |size| so the
// debugger/disassembler can step over emitted constant pools.
3160 void Assembler::RecordConstPool(int size) {
3161 // We only need this for debugger support, to correctly compute offsets in the
3163 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
// Grows the code buffer: 4KB minimum, doubling up to 1MB, then +1MB steps.
// Copies instructions (from the front) and reloc info (from the back),
// rebases the writer and all pending constant-pool reloc entries by the
// buffer displacement. Fatal if the buffer was externally provided.
// NOTE(review): several lines (overflow CHECK on desc.buffer, pc_ += pc_delta,
// closing braces) are elided in this numbered listing.
3167 void Assembler::GrowBuffer() {
3168 if (!own_buffer_) FATAL("external code buffer is too small");
3170 // Compute new buffer size.
3171 CodeDesc desc; // the new buffer
3172 if (buffer_size_ < 4*KB) {
3173 desc.buffer_size = 4*KB;
3174 } else if (buffer_size_ < 1*MB) {
3175 desc.buffer_size = 2*buffer_size_;
3177 desc.buffer_size = buffer_size_ + 1*MB;
3179 CHECK_GT(desc.buffer_size, 0); // no overflow
3181 // Set up new buffer.
3182 desc.buffer = NewArray<byte>(desc.buffer_size);
3184 desc.instr_size = pc_offset();
3185 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
3188 int pc_delta = desc.buffer - buffer_;
3189 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
3190 OS::MemMove(desc.buffer, buffer_, desc.instr_size);
3191 OS::MemMove(reloc_info_writer.pos() + rc_delta,
3192 reloc_info_writer.pos(), desc.reloc_size);
3195 DeleteArray(buffer_);
3196 buffer_ = desc.buffer;
3197 buffer_size_ = desc.buffer_size;
3199 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
3200 reloc_info_writer.last_pc() + pc_delta);
3202 // None of our relocation types are pc relative pointing outside the code
3203 // buffer nor pc absolute pointing inside the code buffer, so there is no need
3204 // to relocate any emitted relocation entries.
3206 // Relocate pending relocation entries.
3207 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
3208 RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
3209 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
3210 rinfo.rmode() != RelocInfo::POSITION);
3211 if (rinfo.rmode() != RelocInfo::JS_RETURN) {
3212 rinfo.set_pc(rinfo.pc() + pc_delta);
3215 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
3216 RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
3217 ASSERT(rinfo.rmode() == RelocInfo::NONE64);
3218 rinfo.set_pc(rinfo.pc() + pc_delta);
3220 constant_pool_builder_.Relocate(pc_delta);
// Emits one raw byte of data at the current pc.
3224 void Assembler::db(uint8_t data) {
3225 // No relocation info should be pending while using db. db is used
3226 // to write pure data with no pointers and the constant pool should
3227 // be emitted before using db.
3228 ASSERT(num_pending_32_bit_reloc_info_ == 0);
3229 ASSERT(num_pending_64_bit_reloc_info_ == 0);
3231 *reinterpret_cast<uint8_t*>(pc_) = data;
3232 pc_ += sizeof(uint8_t);
// Emits one raw 32-bit word of data at the current pc.
3236 void Assembler::dd(uint32_t data) {
3237 // No relocation info should be pending while using dd. dd is used
3238 // to write pure data with no pointers and the constant pool should
3239 // be emitted before using dd.
3240 ASSERT(num_pending_32_bit_reloc_info_ == 0);
3241 ASSERT(num_pending_64_bit_reloc_info_ == 0);
3243 *reinterpret_cast<uint32_t*>(pc_) = data;
3244 pc_ += sizeof(uint32_t);
// Emits the raw address of |stub|'s first instruction as a 32-bit word.
// NOTE(review): a buffer-space check line appears to be elided here.
3248 void Assembler::emit_code_stub_address(Code* stub) {
3250 *reinterpret_cast<uint32_t*>(pc_) =
3251 reinterpret_cast<uint32_t>(stub->instruction_start());
3252 pc_ += sizeof(uint32_t);
// Convenience overload: builds a RelocInfo at the current pc and forwards
// to the RelocInfo& overload below.
3256 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
3257 RelocInfo rinfo(pc_, rmode, data, NULL);
3258 RecordRelocInfo(rinfo);
// Writes |rinfo| into the reloc-info stream. External references are skipped
// unless serializing or emitting debug code; CODE_TARGET_WITH_ID entries are
// rewritten to carry (and clear) the recorded AST id.
// NOTE(review): early-return/closing lines and part of the
// reloc_info_with_ast_id constructor arguments are elided in this listing.
3262 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
3263 if (!RelocInfo::IsNone(rinfo.rmode())) {
3264 // Don't record external references unless the heap will be serialized.
3265 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
3266 if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
3270 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
3271 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
3272 RelocInfo reloc_info_with_ast_id(rinfo.pc(),
3274 RecordedAstId().ToInt(),
3276 ClearRecordedAstId();
3277 reloc_info_writer.Write(&reloc_info_with_ast_id);
3279 reloc_info_writer.Write(&rinfo);
// Queues a constant-pool entry. With the out-of-line pool flag it goes to the
// ConstantPoolBuilder; otherwise it is appended to the pending 64-bit
// (NONE64) or 32-bit list, recording the first use offset for range checks,
// and the pool is blocked for one instruction so it cannot be emitted in
// place of the instruction that references it.
3285 void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
3286 if (FLAG_enable_ool_constant_pool) {
3287 constant_pool_builder_.AddEntry(this, rinfo);
3289 if (rinfo.rmode() == RelocInfo::NONE64) {
3290 ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
3291 if (num_pending_64_bit_reloc_info_ == 0) {
3292 first_const_pool_64_use_ = pc_offset();
3294 pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
3296 ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
3297 if (num_pending_32_bit_reloc_info_ == 0) {
3298 first_const_pool_32_use_ = pc_offset();
3300 pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
3302 // Make sure the constant pool is not emitted in place of the next
3303 // instruction for which we just recorded relocation info.
3304 BlockConstPoolFor(1);
// Prevents constant-pool emission for the next |instructions| instructions
// by advancing no_const_pool_before_ (and the next buffer check), asserting
// that the delay cannot push any pending entry out of vldr/ldr range.
// No-op when the out-of-line constant pool is in use.
3309 void Assembler::BlockConstPoolFor(int instructions) {
3310 if (FLAG_enable_ool_constant_pool) {
3311 // Should be a no-op if using an out-of-line constant pool.
3312 ASSERT(num_pending_32_bit_reloc_info_ == 0);
3313 ASSERT(num_pending_64_bit_reloc_info_ == 0);
3317 int pc_limit = pc_offset() + instructions * kInstrSize;
3318 if (no_const_pool_before_ < pc_limit) {
3319 // Max pool start (if we need a jump and an alignment).
3321 int start = pc_limit + kInstrSize + 2 * kPointerSize;
3322 ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
3323 (start - first_const_pool_32_use_ +
3324 num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
3325 ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
3326 (start - first_const_pool_64_use_ < kMaxDistToFPPool));
3328 no_const_pool_before_ = pc_limit;
3331 if (next_buffer_check_ < no_const_pool_before_) {
3332 next_buffer_check_ = no_const_pool_before_;
3337 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
3338 if (FLAG_enable_ool_constant_pool) {
3339 // Should be a no-op if using an out-of-line constant pool.
3340 ASSERT(num_pending_32_bit_reloc_info_ == 0);
3341 ASSERT(num_pending_64_bit_reloc_info_ == 0);
3345 // Some short sequence of instruction mustn't be broken up by constant pool
3346 // emission, such sequences are protected by calls to BlockConstPoolFor and
3347 // BlockConstPoolScope.
3348 if (is_const_pool_blocked()) {
3349 // Something is wrong if emission is forced and blocked at the same time.
3350 ASSERT(!force_emit);
3354 // There is nothing to do if there are no pending constant pool entries.
3355 if ((num_pending_32_bit_reloc_info_ == 0) &&
3356 (num_pending_64_bit_reloc_info_ == 0)) {
3357 // Calculate the offset of the next check.
3358 next_buffer_check_ = pc_offset() + kCheckPoolInterval;
3362 // Check that the code buffer is large enough before emitting the constant
3363 // pool (include the jump over the pool and the constant pool marker and
3364 // the gap to the relocation information).
3365 int jump_instr = require_jump ? kInstrSize : 0;
3366 int size_up_to_marker = jump_instr + kInstrSize;
3367 int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
3368 bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
3369 bool require_64_bit_align = false;
3370 if (has_fp_values) {
3371 require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
3372 if (require_64_bit_align) {
3373 size_after_marker += kInstrSize;
3375 size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
3378 int size = size_up_to_marker + size_after_marker;
3380 // We emit a constant pool when:
3381 // * requested to do so by parameter force_emit (e.g. after each function).
3382 // * the distance from the first instruction accessing the constant pool to
3383 // any of the constant pool entries will exceed its limit the next
3384 // time the pool is checked. This is overly restrictive, but we don't emit
3385 // constant pool entries in-order so it's conservatively correct.
3386 // * the instruction doesn't require a jump after itself to jump over the
3387 // constant pool, and we're getting close to running out of range.
3389 ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
3390 bool need_emit = false;
3391 if (has_fp_values) {
3392 int dist64 = pc_offset() +
3394 num_pending_32_bit_reloc_info_ * kPointerSize -
3395 first_const_pool_64_use_;
3396 if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
3397 (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
3402 pc_offset() + size - first_const_pool_32_use_;
3403 if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
3404 (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
3407 if (!need_emit) return;
3410 int needed_space = size + kGap;
3411 while (buffer_space() <= needed_space) GrowBuffer();
3414 // Block recursive calls to CheckConstPool.
3415 BlockConstPoolScope block_const_pool(this);
3416 RecordComment("[ Constant Pool");
3417 RecordConstPool(size);
3419 // Emit jump over constant pool if necessary.
3425 // Put down constant pool marker "Undefined instruction".
3426 // The data size helps disassembly know what to print.
3427 emit(kConstantPoolMarker |
3428 EncodeConstantPoolLength(size_after_marker / kPointerSize));
3430 if (require_64_bit_align) {
3431 emit(kConstantPoolMarker);
3434 // Emit 64-bit constant pool entries first: their range is smaller than
3436 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
3437 RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
3439 ASSERT(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
3441 Instr instr = instr_at(rinfo.pc());
3442 // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
3443 ASSERT((IsVldrDPcImmediateOffset(instr) &&
3444 GetVldrDRegisterImmediateOffset(instr) == 0));
3446 int delta = pc_ - rinfo.pc() - kPcLoadDelta;
3447 ASSERT(is_uint10(delta));
3450 uint64_t value = rinfo.raw_data64();
3451 for (int j = 0; j < i; j++) {
3452 RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
3453 if (value == rinfo2.raw_data64()) {
3455 ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
3456 Instr instr2 = instr_at(rinfo2.pc());
3457 ASSERT(IsVldrDPcImmediateOffset(instr2));
3458 delta = GetVldrDRegisterImmediateOffset(instr2);
3459 delta += rinfo2.pc() - rinfo.pc();
3464 instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
3467 uint64_t uint_data = rinfo.raw_data64();
3468 emit(uint_data & 0xFFFFFFFF);
3469 emit(uint_data >> 32);
3473 // Emit 32-bit constant pool entries.
3474 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
3475 RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
3476 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
3477 rinfo.rmode() != RelocInfo::POSITION &&
3478 rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
3479 rinfo.rmode() != RelocInfo::CONST_POOL &&
3480 rinfo.rmode() != RelocInfo::NONE64);
3482 Instr instr = instr_at(rinfo.pc());
3484 // 64-bit loads shouldn't get here.
3485 ASSERT(!IsVldrDPcImmediateOffset(instr));
3487 if (IsLdrPcImmediateOffset(instr) &&
3488 GetLdrRegisterImmediateOffset(instr) == 0) {
3489 int delta = pc_ - rinfo.pc() - kPcLoadDelta;
3490 ASSERT(is_uint12(delta));
3491 // 0 is the smallest delta:
3493 // constant pool marker
3497 if (!Serializer::enabled(isolate()) &&
3498 (rinfo.rmode() >= RelocInfo::CELL)) {
3499 for (int j = 0; j < i; j++) {
3500 RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
3502 if ((rinfo2.data() == rinfo.data()) &&
3503 (rinfo2.rmode() == rinfo.rmode())) {
3504 Instr instr2 = instr_at(rinfo2.pc());
3505 if (IsLdrPcImmediateOffset(instr2)) {
3506 delta = GetLdrRegisterImmediateOffset(instr2);
3507 delta += rinfo2.pc() - rinfo.pc();
3515 instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
3521 ASSERT(IsMovW(instr));
3525 num_pending_32_bit_reloc_info_ = 0;
3526 num_pending_64_bit_reloc_info_ = 0;
3527 first_const_pool_32_use_ = -1;
3528 first_const_pool_64_use_ = -1;
3532 if (after_pool.is_linked()) {
3537 // Since a constant pool was just emitted, move the check offset forward by
3538 // the standard interval.
3539 next_buffer_check_ = pc_offset() + kCheckPoolInterval;
3543 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
3544 if (!FLAG_enable_ool_constant_pool) {
3545 return isolate->factory()->empty_constant_pool_array();
3547 return constant_pool_builder_.New(isolate);
3551 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
3552 constant_pool_builder_.Populate(this, constant_pool);
3556 ConstantPoolBuilder::ConstantPoolBuilder()
3560 count_of_code_ptr_(0),
3561 count_of_heap_ptr_(0),
3562 count_of_32bit_(0) { }
3565 bool ConstantPoolBuilder::IsEmpty() {
3566 return entries_.size() == 0;
3570 bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
3571 return rmode == RelocInfo::NONE64;
3575 bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
3576 return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
3580 bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
3581 return RelocInfo::IsCodeTarget(rmode);
3585 bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
3586 return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
3590 void ConstantPoolBuilder::AddEntry(Assembler* assm,
3591 const RelocInfo& rinfo) {
3592 RelocInfo::Mode rmode = rinfo.rmode();
3593 ASSERT(rmode != RelocInfo::COMMENT &&
3594 rmode != RelocInfo::POSITION &&
3595 rmode != RelocInfo::STATEMENT_POSITION &&
3596 rmode != RelocInfo::CONST_POOL);
3599 // Try to merge entries which won't be patched.
3600 int merged_index = -1;
3601 if (RelocInfo::IsNone(rmode) ||
3602 (!Serializer::enabled(assm->isolate()) && (rmode >= RelocInfo::CELL))) {
3604 std::vector<RelocInfo>::const_iterator it;
3605 for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
3606 if (RelocInfo::IsEqual(rinfo, *it)) {
3613 entries_.push_back(rinfo);
3614 merged_indexes_.push_back(merged_index);
3616 if (merged_index == -1) {
3617 // Not merged, so update the appropriate count.
3618 if (Is64BitEntry(rmode)) {
3620 } else if (Is32BitEntry(rmode)) {
3622 } else if (IsCodePtrEntry(rmode)) {
3623 count_of_code_ptr_++;
3625 ASSERT(IsHeapPtrEntry(rmode));
3626 count_of_heap_ptr_++;
3630 // Check if we still have room for another entry given Arm's ldr and vldr
3631 // immediate offset range.
3632 if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_,
3635 count_of_32bit_))) &&
3636 is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0))) {
3637 assm->set_constant_pool_full();
3642 void ConstantPoolBuilder::Relocate(int pc_delta) {
3643 for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
3644 rinfo != entries_.end(); rinfo++) {
3645 ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
3646 rinfo->set_pc(rinfo->pc() + pc_delta);
3651 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
3653 return isolate->factory()->empty_constant_pool_array();
3655 return isolate->factory()->NewConstantPoolArray(count_of_64bit_,
3663 void ConstantPoolBuilder::Populate(Assembler* assm,
3664 ConstantPoolArray* constant_pool) {
3665 ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_);
3666 ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_);
3667 ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_);
3668 ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_);
3669 ASSERT(entries_.size() == merged_indexes_.size());
3671 int index_64bit = 0;
3672 int index_code_ptr = count_of_64bit_;
3673 int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
3674 int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
3677 std::vector<RelocInfo>::const_iterator rinfo;
3678 for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
3679 RelocInfo::Mode rmode = rinfo->rmode();
3681 // Update constant pool if necessary and get the entry's offset.
3683 if (merged_indexes_[i] == -1) {
3684 if (Is64BitEntry(rmode)) {
3685 offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
3686 constant_pool->set(index_64bit++, rinfo->data64());
3687 } else if (Is32BitEntry(rmode)) {
3688 offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
3689 constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
3690 } else if (IsCodePtrEntry(rmode)) {
3691 offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
3693 constant_pool->set(index_code_ptr++,
3694 reinterpret_cast<Object *>(rinfo->data()));
3696 ASSERT(IsHeapPtrEntry(rmode));
3697 offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
3699 constant_pool->set(index_heap_ptr++,
3700 reinterpret_cast<Object *>(rinfo->data()));
3702 merged_indexes_[i] = offset; // Stash offset for merged entries.
3704 size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
3705 ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
3706 offset = merged_indexes_[merged_index];
3709 // Patch vldr/ldr instruction with correct offset.
3710 Instr instr = assm->instr_at(rinfo->pc());
3711 if (Is64BitEntry(rmode)) {
3712 // Instruction to patch must be 'vldr rd, [pp, #0]'.
3713 ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
3714 Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
3715 ASSERT(is_uint10(offset));
3716 assm->instr_at_put(rinfo->pc(),
3717 Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
3719 // Instruction to patch must be 'ldr rd, [pp, #0]'.
3720 ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
3721 Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
3722 ASSERT(is_uint12(offset));
3723 assm->instr_at_put(rinfo->pc(),
3724 Assembler::SetLdrRegisterImmediateOffset(instr, offset));
3728 ASSERT((index_64bit == count_of_64bit_) &&
3729 (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
3730 (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
3731 (index_32bit == (index_heap_ptr + count_of_32bit_)));
3735 } } // namespace v8::internal
3737 #endif // V8_TARGET_ARCH_ARM