1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
39 #if V8_TARGET_ARCH_ARM
41 #include "src/arm/assembler-arm-inl.h"
42 #include "src/base/bits.h"
43 #include "src/base/cpu.h"
44 #include "src/macro-assembler.h"
49 // Get the CPU features enabled by the build. For cross compilation the
50 // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
51 // can be defined to enable ARMv7 and VFPv3 instructions when building the
53 static unsigned CpuFeaturesImpliedByCompiler() {
55 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
56 if (FLAG_enable_armv7) answer |= 1u << ARMv7;
57 #endif // CAN_USE_ARMV7_INSTRUCTIONS
58 #ifdef CAN_USE_VFP3_INSTRUCTIONS
59 if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
60 #endif // CAN_USE_VFP3_INSTRUCTIONS
61 #ifdef CAN_USE_VFP32DREGS
62 if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
63 #endif // CAN_USE_VFP32DREGS
65 if (FLAG_enable_neon) answer |= 1u << NEON;
66 #endif // CAN_USE_VFP32DREGS
67 if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
68 answer |= 1u << UNALIGNED_ACCESSES;
75 void CpuFeatures::ProbeImpl(bool cross_compile) {
76 supported_ |= CpuFeaturesImpliedByCompiler();
77 cache_line_size_ = 64;
79 // Only use statically determined features for cross compile (snapshot).
80 if (cross_compile) return;
83 // For the simulator build, use whatever the flags specify.
84 if (FLAG_enable_armv7) {
85 supported_ |= 1u << ARMv7;
86 if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
87 if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
88 if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
89 if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
90 if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
92 if (FLAG_enable_mls) supported_ |= 1u << MLS;
93 if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
96 // Probe for additional features at runtime.
98 if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
99 // This implementation also sets the VFP flags if runtime
100 // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
102 supported_ |= 1u << VFP3 | 1u << ARMv7;
105 if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
106 if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
107 if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;
109 if (cpu.architecture() >= 7) {
110 if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
111 if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
112 supported_ |= 1u << ARMv8;
114 if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
115 // Use movw/movt for QUALCOMM ARMv7 cores.
116 if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
117 supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
121 // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
122 if (cpu.implementer() == base::CPU::ARM &&
123 (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
124 cpu.part() == base::CPU::ARM_CORTEX_A9)) {
125 cache_line_size_ = 32;
128 if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
130 if (cpu.implementer() == base::CPU::NVIDIA &&
131 cpu.variant() == base::CPU::NVIDIA_DENVER) {
132 supported_ |= 1u << COHERENT_CACHE;
136 DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
140 void CpuFeatures::PrintTarget() {
141 const char* arm_arch = NULL;
142 const char* arm_target_type = "";
143 const char* arm_no_probe = "";
144 const char* arm_fpu = "";
145 const char* arm_thumb = "";
146 const char* arm_float_abi = NULL;
149 arm_target_type = " simulator";
152 #if defined ARM_TEST_NO_FEATURE_PROBE
153 arm_no_probe = " noprobe";
156 #if defined CAN_USE_ARMV7_INSTRUCTIONS
162 #if defined CAN_USE_NEON
164 #elif defined CAN_USE_VFP3_INSTRUCTIONS
165 # if defined CAN_USE_VFP32DREGS
168 arm_fpu = " vfp3-d16";
175 arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
176 #elif USE_EABI_HARDFLOAT
177 arm_float_abi = "hard";
179 arm_float_abi = "softfp";
182 #if defined __arm__ && (defined __thumb__) || (defined __thumb2__)
183 arm_thumb = " thumb";
186 printf("target%s%s %s%s%s %s\n",
187 arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
192 void CpuFeatures::PrintFeatures() {
194 "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
195 "MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
196 CpuFeatures::IsSupported(ARMv7),
197 CpuFeatures::IsSupported(VFP3),
198 CpuFeatures::IsSupported(VFP32DREGS),
199 CpuFeatures::IsSupported(NEON),
200 CpuFeatures::IsSupported(SUDIV),
201 CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
202 CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
203 CpuFeatures::IsSupported(COHERENT_CACHE));
205 bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
206 #elif USE_EABI_HARDFLOAT
207 bool eabi_hardfloat = true;
209 bool eabi_hardfloat = false;
211 printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
215 // -----------------------------------------------------------------------------
216 // Implementation of DwVfpRegister
218 const char* DwVfpRegister::AllocationIndexToString(int index) {
219 DCHECK(index >= 0 && index < NumAllocatableRegisters());
220 DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
221 kNumReservedRegisters - 1);
222 if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
223 return VFPRegisters::Name(index, true);
227 // -----------------------------------------------------------------------------
228 // Implementation of RelocInfo
231 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
234 bool RelocInfo::IsCodedSpecially() {
235 // The deserializer needs to know whether a pointer is specially coded. Â Being
236 // specially coded on ARM means that it is a movw/movt instruction, or is an
237 // out of line constant pool entry. Â These only occur if
238 // FLAG_enable_ool_constant_pool is true.
239 return FLAG_enable_ool_constant_pool;
243 bool RelocInfo::IsInConstantPool() {
244 return Assembler::is_constant_pool_load(pc_);
248 // -----------------------------------------------------------------------------
249 // Implementation of Operand and MemOperand
250 // See assembler-arm-inl.h for inlined constructors
252 Operand::Operand(Handle<Object> handle) {
253 AllowDeferredHandleDereference using_raw_address;
255 // Verify all Objects referred by code are NOT in new space.
256 Object* obj = *handle;
257 if (obj->IsHeapObject()) {
258 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
259 imm32_ = reinterpret_cast<intptr_t>(handle.location());
260 rmode_ = RelocInfo::EMBEDDED_OBJECT;
262 // no relocation needed
263 imm32_ = reinterpret_cast<intptr_t>(obj);
264 rmode_ = RelocInfo::NONE32;
269 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
270 DCHECK(is_uint5(shift_imm));
274 shift_op_ = shift_op;
275 shift_imm_ = shift_imm & 31;
277 if ((shift_op == ROR) && (shift_imm == 0)) {
278 // ROR #0 is functionally equivalent to LSL #0 and this allow us to encode
279 // RRX as ROR #0 (See below).
281 } else if (shift_op == RRX) {
282 // encoded as ROR with shift_imm == 0
283 DCHECK(shift_imm == 0);
290 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
291 DCHECK(shift_op != RRX);
294 shift_op_ = shift_op;
299 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
307 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
316 MemOperand::MemOperand(Register rn, Register rm,
317 ShiftOp shift_op, int shift_imm, AddrMode am) {
318 DCHECK(is_uint5(shift_imm));
321 shift_op_ = shift_op;
322 shift_imm_ = shift_imm & 31;
327 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
328 DCHECK((am == Offset) || (am == PostIndex));
330 rm_ = (am == Offset) ? pc : sp;
335 NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
342 void NeonMemOperand::SetAlignment(int align) {
364 NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
366 switch (registers_count) {
387 // -----------------------------------------------------------------------------
388 // Specific instructions, constants, and masks.
390 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
391 // register r is not encoded.
392 const Instr kPushRegPattern =
393 al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
394 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
395 // register r is not encoded.
396 const Instr kPopRegPattern =
397 al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
398 // ldr rd, [pc, #offset]
399 const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
400 const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
401 // ldr rd, [pp, #offset]
402 const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
403 const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
405 const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
406 const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
407 // vldr dd, [pc, #offset]
408 const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
409 const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
410 // vldr dd, [pp, #offset]
411 const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
412 const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
414 const Instr kBlxRegMask =
415 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
416 const Instr kBlxRegPattern =
417 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
418 const Instr kBlxIp = al | kBlxRegPattern | ip.code();
419 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
420 const Instr kMovMvnPattern = 0xd * B21;
421 const Instr kMovMvnFlip = B22;
422 const Instr kMovLeaveCCMask = 0xdff * B16;
423 const Instr kMovLeaveCCPattern = 0x1a0 * B16;
424 const Instr kMovwPattern = 0x30 * B20;
425 const Instr kMovtPattern = 0x34 * B20;
426 const Instr kMovwLeaveCCFlip = 0x5 * B21;
427 const Instr kMovImmedMask = 0x7f * B21;
428 const Instr kMovImmedPattern = 0x1d * B21;
429 const Instr kOrrImmedMask = 0x7f * B21;
430 const Instr kOrrImmedPattern = 0x1c * B21;
431 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
432 const Instr kCmpCmnPattern = 0x15 * B20;
433 const Instr kCmpCmnFlip = B21;
434 const Instr kAddSubFlip = 0x6 * B21;
435 const Instr kAndBicFlip = 0xe * B21;
437 // A mask for the Rd register for push, pop, ldr, str instructions.
438 const Instr kLdrRegFpOffsetPattern =
439 al | B26 | L | Offset | kRegister_fp_Code * B16;
440 const Instr kStrRegFpOffsetPattern =
441 al | B26 | Offset | kRegister_fp_Code * B16;
442 const Instr kLdrRegFpNegOffsetPattern =
443 al | B26 | L | NegOffset | kRegister_fp_Code * B16;
444 const Instr kStrRegFpNegOffsetPattern =
445 al | B26 | NegOffset | kRegister_fp_Code * B16;
446 const Instr kLdrStrInstrTypeMask = 0xffff0000;
449 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
450 : AssemblerBase(isolate, buffer, buffer_size),
451 recorded_ast_id_(TypeFeedbackId::None()),
452 constant_pool_builder_(),
453 positions_recorder_(this) {
454 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
455 num_pending_32_bit_reloc_info_ = 0;
456 num_pending_64_bit_reloc_info_ = 0;
457 next_buffer_check_ = 0;
458 const_pool_blocked_nesting_ = 0;
459 no_const_pool_before_ = 0;
460 first_const_pool_32_use_ = -1;
461 first_const_pool_64_use_ = -1;
463 ClearRecordedAstId();
467 Assembler::~Assembler() {
468 DCHECK(const_pool_blocked_nesting_ == 0);
472 void Assembler::GetCode(CodeDesc* desc) {
473 reloc_info_writer.Finish();
474 if (!FLAG_enable_ool_constant_pool) {
475 // Emit constant pool if necessary.
476 CheckConstPool(true, false);
477 DCHECK(num_pending_32_bit_reloc_info_ == 0);
478 DCHECK(num_pending_64_bit_reloc_info_ == 0);
480 // Set up code descriptor.
481 desc->buffer = buffer_;
482 desc->buffer_size = buffer_size_;
483 desc->instr_size = pc_offset();
484 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
489 void Assembler::Align(int m) {
490 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
491 while ((pc_offset() & (m - 1)) != 0) {
497 void Assembler::CodeTargetAlign() {
498 // Preferred alignment of jump targets on some ARM chips.
503 Condition Assembler::GetCondition(Instr instr) {
504 return Instruction::ConditionField(instr);
508 bool Assembler::IsBranch(Instr instr) {
509 return (instr & (B27 | B25)) == (B27 | B25);
513 int Assembler::GetBranchOffset(Instr instr) {
514 DCHECK(IsBranch(instr));
515 // Take the jump offset in the lower 24 bits, sign extend it and multiply it
516 // with 4 to get the offset in bytes.
517 return ((instr & kImm24Mask) << 8) >> 6;
521 bool Assembler::IsLdrRegisterImmediate(Instr instr) {
522 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
526 bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
527 return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
531 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
532 DCHECK(IsLdrRegisterImmediate(instr));
533 bool positive = (instr & B23) == B23;
534 int offset = instr & kOff12Mask; // Zero extended offset.
535 return positive ? offset : -offset;
539 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
540 DCHECK(IsVldrDRegisterImmediate(instr));
541 bool positive = (instr & B23) == B23;
542 int offset = instr & kOff8Mask; // Zero extended offset.
544 return positive ? offset : -offset;
548 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
549 DCHECK(IsLdrRegisterImmediate(instr));
550 bool positive = offset >= 0;
551 if (!positive) offset = -offset;
552 DCHECK(is_uint12(offset));
553 // Set bit indicating whether the offset should be added.
554 instr = (instr & ~B23) | (positive ? B23 : 0);
555 // Set the actual offset.
556 return (instr & ~kOff12Mask) | offset;
560 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
561 DCHECK(IsVldrDRegisterImmediate(instr));
562 DCHECK((offset & ~3) == offset); // Must be 64-bit aligned.
563 bool positive = offset >= 0;
564 if (!positive) offset = -offset;
565 DCHECK(is_uint10(offset));
566 // Set bit indicating whether the offset should be added.
567 instr = (instr & ~B23) | (positive ? B23 : 0);
568 // Set the actual offset. Its bottom 2 bits are zero.
569 return (instr & ~kOff8Mask) | (offset >> 2);
573 bool Assembler::IsStrRegisterImmediate(Instr instr) {
574 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
578 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
579 DCHECK(IsStrRegisterImmediate(instr));
580 bool positive = offset >= 0;
581 if (!positive) offset = -offset;
582 DCHECK(is_uint12(offset));
583 // Set bit indicating whether the offset should be added.
584 instr = (instr & ~B23) | (positive ? B23 : 0);
585 // Set the actual offset.
586 return (instr & ~kOff12Mask) | offset;
590 bool Assembler::IsAddRegisterImmediate(Instr instr) {
591 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
595 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
596 DCHECK(IsAddRegisterImmediate(instr));
598 DCHECK(is_uint12(offset));
600 return (instr & ~kOff12Mask) | offset;
604 Register Assembler::GetRd(Instr instr) {
606 reg.code_ = Instruction::RdValue(instr);
611 Register Assembler::GetRn(Instr instr) {
613 reg.code_ = Instruction::RnValue(instr);
618 Register Assembler::GetRm(Instr instr) {
620 reg.code_ = Instruction::RmValue(instr);
625 Instr Assembler::GetConsantPoolLoadPattern() {
626 if (FLAG_enable_ool_constant_pool) {
627 return kLdrPpImmedPattern;
629 return kLdrPCImmedPattern;
634 Instr Assembler::GetConsantPoolLoadMask() {
635 if (FLAG_enable_ool_constant_pool) {
636 return kLdrPpImmedMask;
638 return kLdrPCImmedMask;
643 bool Assembler::IsPush(Instr instr) {
644 return ((instr & ~kRdMask) == kPushRegPattern);
648 bool Assembler::IsPop(Instr instr) {
649 return ((instr & ~kRdMask) == kPopRegPattern);
653 bool Assembler::IsStrRegFpOffset(Instr instr) {
654 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
658 bool Assembler::IsLdrRegFpOffset(Instr instr) {
659 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
663 bool Assembler::IsStrRegFpNegOffset(Instr instr) {
664 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
668 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
669 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
673 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
674 // Check the instruction is indeed a
675 // ldr<cond> <Rd>, [pc +/- offset_12].
676 return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
680 bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
681 // Check the instruction is indeed a
682 // ldr<cond> <Rd>, [pp +/- offset_12].
683 return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
687 bool Assembler::IsLdrPpRegOffset(Instr instr) {
688 // Check the instruction is indeed a
689 // ldr<cond> <Rd>, [pp, +/- <Rm>].
690 return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
694 Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
697 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
698 // Check the instruction is indeed a
699 // vldr<cond> <Dd>, [pc +/- offset_10].
700 return (instr & kVldrDPCMask) == kVldrDPCPattern;
704 bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
705 // Check the instruction is indeed a
706 // vldr<cond> <Dd>, [pp +/- offset_10].
707 return (instr & kVldrDPpMask) == kVldrDPpPattern;
711 bool Assembler::IsBlxReg(Instr instr) {
712 // Check the instruction is indeed a
714 return (instr & kBlxRegMask) == kBlxRegPattern;
718 bool Assembler::IsBlxIp(Instr instr) {
719 // Check the instruction is indeed a
721 return instr == kBlxIp;
725 bool Assembler::IsTstImmediate(Instr instr) {
726 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
731 bool Assembler::IsCmpRegister(Instr instr) {
732 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
737 bool Assembler::IsCmpImmediate(Instr instr) {
738 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
743 Register Assembler::GetCmpImmediateRegister(Instr instr) {
744 DCHECK(IsCmpImmediate(instr));
749 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
750 DCHECK(IsCmpImmediate(instr));
751 return instr & kOff12Mask;
755 // Labels refer to positions in the (to be) generated code.
756 // There are bound, linked, and unused labels.
758 // Bound labels refer to known positions in the already
759 // generated code. pos() is the position the label refers to.
761 // Linked labels refer to unknown positions in the code
762 // to be generated; pos() is the position of the last
763 // instruction using the label.
765 // The linked labels form a link chain by making the branch offset
766 // in the instruction steam to point to the previous branch
767 // instruction using the same label.
769 // The link chain is terminated by a branch offset pointing to the
773 int Assembler::target_at(int pos) {
774 Instr instr = instr_at(pos);
775 if (is_uint24(instr)) {
776 // Emitted link to a label, not part of a branch.
779 if ((instr & 7 * B25) == 5 * B25) {
780 int imm26 = ((instr & kImm24Mask) << 8) >> 6;
781 // b, bl, or blx imm24
782 if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
783 ((instr & B24) != 0)) {
784 // blx uses bit 24 to encode bit 2 of imm26
787 return pos + kPcLoadDelta + imm26;
789 // Internal reference to the label.
790 DCHECK_EQ(7 * B25 | 1 * B0, instr & (7 * B25 | 1 * B0));
791 int imm26 = (((instr >> 1) & kImm24Mask) << 8) >> 6;
796 void Assembler::target_at_put(int pos, int target_pos) {
797 Instr instr = instr_at(pos);
798 if (is_uint24(instr)) {
799 DCHECK(target_pos == pos || target_pos >= 0);
800 // Emitted link to a label, not part of a branch.
801 // Load the position of the label relative to the generated code object
802 // pointer in a register.
804 // Here are the instructions we need to emit:
805 // For ARMv7: target24 => target16_1:target16_0
806 // movw dst, #target16_0
807 // movt dst, #target16_1
808 // For ARMv6: target24 => target8_2:target8_1:target8_0
809 // mov dst, #target8_0
810 // orr dst, dst, #target8_1 << 8
811 // orr dst, dst, #target8_2 << 16
813 // We extract the destination register from the emitted nop instruction.
814 Register dst = Register::from_code(
815 Instruction::RmValue(instr_at(pos + kInstrSize)));
816 DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
817 uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
818 DCHECK(is_uint24(target24));
819 if (is_uint8(target24)) {
820 // If the target fits in a byte then only patch with a mov
822 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
824 CodePatcher::DONT_FLUSH);
825 patcher.masm()->mov(dst, Operand(target24));
827 uint16_t target16_0 = target24 & kImm16Mask;
828 uint16_t target16_1 = target24 >> 16;
829 if (CpuFeatures::IsSupported(ARMv7)) {
830 // Patch with movw/movt.
831 if (target16_1 == 0) {
832 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
834 CodePatcher::DONT_FLUSH);
835 patcher.masm()->movw(dst, target16_0);
837 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
839 CodePatcher::DONT_FLUSH);
840 patcher.masm()->movw(dst, target16_0);
841 patcher.masm()->movt(dst, target16_1);
844 // Patch with a sequence of mov/orr/orr instructions.
845 uint8_t target8_0 = target16_0 & kImm8Mask;
846 uint8_t target8_1 = target16_0 >> 8;
847 uint8_t target8_2 = target16_1 & kImm8Mask;
848 if (target8_2 == 0) {
849 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
851 CodePatcher::DONT_FLUSH);
852 patcher.masm()->mov(dst, Operand(target8_0));
853 patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
855 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
857 CodePatcher::DONT_FLUSH);
858 patcher.masm()->mov(dst, Operand(target8_0));
859 patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
860 patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
866 if ((instr & 7 * B25) == 5 * B25) {
867 // b, bl, or blx imm24
868 int imm26 = target_pos - (pos + kPcLoadDelta);
869 if (Instruction::ConditionField(instr) == kSpecialCondition) {
870 // blx uses bit 24 to encode bit 2 of imm26
871 DCHECK((imm26 & 1) == 0);
872 instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1) * B24;
874 DCHECK((imm26 & 3) == 0);
875 instr &= ~kImm24Mask;
877 int imm24 = imm26 >> 2;
878 DCHECK(is_int24(imm24));
879 instr_at_put(pos, instr | (imm24 & kImm24Mask));
882 // Patch internal reference to label.
883 DCHECK_EQ(7 * B25 | 1 * B0, instr & (7 * B25 | 1 * B0));
884 instr_at_put(pos, reinterpret_cast<Instr>(buffer_ + target_pos));
888 void Assembler::print(Label* L) {
889 if (L->is_unused()) {
890 PrintF("unused label\n");
891 } else if (L->is_bound()) {
892 PrintF("bound label to %d\n", L->pos());
893 } else if (L->is_linked()) {
895 PrintF("unbound label");
896 while (l.is_linked()) {
897 PrintF("@ %d ", l.pos());
898 Instr instr = instr_at(l.pos());
899 if ((instr & ~kImm24Mask) == 0) {
902 DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx
903 Condition cond = Instruction::ConditionField(instr);
906 if (cond == kSpecialCondition) {
910 if ((instr & B24) != 0)
916 case eq: c = "eq"; break;
917 case ne: c = "ne"; break;
918 case hs: c = "hs"; break;
919 case lo: c = "lo"; break;
920 case mi: c = "mi"; break;
921 case pl: c = "pl"; break;
922 case vs: c = "vs"; break;
923 case vc: c = "vc"; break;
924 case hi: c = "hi"; break;
925 case ls: c = "ls"; break;
926 case ge: c = "ge"; break;
927 case lt: c = "lt"; break;
928 case gt: c = "gt"; break;
929 case le: c = "le"; break;
930 case al: c = ""; break;
936 PrintF("%s%s\n", b, c);
941 PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
946 void Assembler::bind_to(Label* L, int pos) {
947 DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
948 while (L->is_linked()) {
949 int fixup_pos = L->pos();
950 next(L); // call next before overwriting link with target at fixup_pos
951 target_at_put(fixup_pos, pos);
955 // Keep track of the last bound label so we don't eliminate any instructions
956 // before a bound label.
957 if (pos > last_bound_pos_)
958 last_bound_pos_ = pos;
962 void Assembler::bind(Label* L) {
963 DCHECK(!L->is_bound()); // label can only be bound once
964 bind_to(L, pc_offset());
968 void Assembler::next(Label* L) {
969 DCHECK(L->is_linked());
970 int link = target_at(L->pos());
971 if (link == L->pos()) {
972 // Branch target points to the same instuction. This is the end of the link
982 // Low-level code emission routines depending on the addressing mode.
983 // If this returns true then you have to use the rotate_imm and immed_8
984 // that it returns, because it may have already changed the instruction
986 static bool fits_shifter(uint32_t imm32,
987 uint32_t* rotate_imm,
990 // imm32 must be unsigned.
991 for (int rot = 0; rot < 16; rot++) {
992 uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
993 if ((imm8 <= 0xff)) {
999 // If the opcode is one with a complementary version and the complementary
1000 // immediate fits, change the opcode.
1001 if (instr != NULL) {
1002 if ((*instr & kMovMvnMask) == kMovMvnPattern) {
1003 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
1004 *instr ^= kMovMvnFlip;
1006 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
1007 if (CpuFeatures::IsSupported(ARMv7)) {
1008 if (imm32 < 0x10000) {
1009 *instr ^= kMovwLeaveCCFlip;
1010 *instr |= Assembler::EncodeMovwImmediate(imm32);
1011 *rotate_imm = *immed_8 = 0; // Not used for movw.
1016 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
1017 if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
1018 *instr ^= kCmpCmnFlip;
1022 Instr alu_insn = (*instr & kALUMask);
1023 if (alu_insn == ADD ||
1025 if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
1026 *instr ^= kAddSubFlip;
1029 } else if (alu_insn == AND ||
1031 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
1032 *instr ^= kAndBicFlip;
1042 // We have to use the temporary register for things that can be relocated even
1043 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
1044 // space. There is no guarantee that the relocated location can be similarly
1046 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
1047 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
1048 if (assembler != NULL && assembler->predictable_code_size()) return true;
1049 return assembler->serializer_enabled();
1050 } else if (RelocInfo::IsNone(rmode_)) {
1057 static bool use_mov_immediate_load(const Operand& x,
1058 const Assembler* assembler) {
1059 if (FLAG_enable_ool_constant_pool && assembler != NULL &&
1060 !assembler->is_ool_constant_pool_available()) {
1062 } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
1063 (assembler == NULL || !assembler->predictable_code_size())) {
1064 // Prefer movw / movt to constant pool if it is more efficient on the CPU.
1066 } else if (x.must_output_reloc_info(assembler)) {
1067 // Prefer constant pool if data is likely to be patched.
1070 // Otherwise, use immediate load if movw / movt is available.
1071 return CpuFeatures::IsSupported(ARMv7);
1076 int Operand::instructions_required(const Assembler* assembler,
1077 Instr instr) const {
1078 if (rm_.is_valid()) return 1;
1079 uint32_t dummy1, dummy2;
1080 if (must_output_reloc_info(assembler) ||
1081 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
1082 // The immediate operand cannot be encoded as a shifter operand, or use of
1083 // constant pool is required. First account for the instructions required
1084 // for the constant pool or immediate load
1086 if (use_mov_immediate_load(*this, assembler)) {
1087 // A movw / movt or mov / orr immediate load.
1088 instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
1089 } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
1090 // An extended constant pool load.
1091 instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
1093 // A small constant pool load.
1097 if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set
1098 // For a mov or mvn instruction which doesn't set the condition
1099 // code, the constant pool or immediate load is enough, otherwise we need
1100 // to account for the actual instruction being requested.
1103 return instructions;
1105 // No use of constant pool and the immediate operand can be encoded as a
1112 void Assembler::move_32_bit_immediate(Register rd,
1115 RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
1116 uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
1117 if (x.must_output_reloc_info(this)) {
1118 RecordRelocInfo(rinfo);
1121 if (use_mov_immediate_load(x, this)) {
1122 Register target = rd.code() == pc.code() ? ip : rd;
1123 if (CpuFeatures::IsSupported(ARMv7)) {
1124 if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
1125 // Make sure the movw/movt doesn't get separated.
1126 BlockConstPoolFor(2);
1128 movw(target, imm32 & 0xffff, cond);
1129 movt(target, imm32 >> 16, cond);
1131 DCHECK(FLAG_enable_ool_constant_pool);
1132 mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
1133 orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
1134 orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
1135 orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
1137 if (target.code() != rd.code()) {
1138 mov(rd, target, LeaveCC, cond);
1141 DCHECK(!FLAG_enable_ool_constant_pool || is_ool_constant_pool_available());
1142 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
1143 if (section == ConstantPoolArray::EXTENDED_SECTION) {
1144 DCHECK(FLAG_enable_ool_constant_pool);
1145 Register target = rd.code() == pc.code() ? ip : rd;
1146 // Emit instructions to load constant pool offset.
1147 if (CpuFeatures::IsSupported(ARMv7)) {
1148 movw(target, 0, cond);
1149 movt(target, 0, cond);
1151 mov(target, Operand(0), LeaveCC, cond);
1152 orr(target, target, Operand(0), LeaveCC, cond);
1153 orr(target, target, Operand(0), LeaveCC, cond);
1154 orr(target, target, Operand(0), LeaveCC, cond);
1156 // Load from constant pool at offset.
1157 ldr(rd, MemOperand(pp, target), cond);
1159 DCHECK(section == ConstantPoolArray::SMALL_SECTION);
1160 ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
1166 void Assembler::addrmod1(Instr instr,
1171 DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
1172 if (!x.rm_.is_valid()) {
1174 uint32_t rotate_imm;
1176 if (x.must_output_reloc_info(this) ||
1177 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
1178 // The immediate operand cannot be encoded as a shifter operand, so load
1179 // it first to register ip and change the original instruction to use ip.
1180 // However, if the original instruction is a 'mov rd, x' (not setting the
1181 // condition code), then replace it with a 'ldr rd, [pc]'.
1182 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
1183 Condition cond = Instruction::ConditionField(instr);
1184 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
1185 move_32_bit_immediate(rd, x, cond);
1187 mov(ip, x, LeaveCC, cond);
1188 addrmod1(instr, rn, rd, Operand(ip));
1192 instr |= I | rotate_imm*B8 | immed_8;
1193 } else if (!x.rs_.is_valid()) {
1195 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
1198 DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
1199 instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
1201 emit(instr | rn.code()*B16 | rd.code()*B12);
1202 if (rn.is(pc) || x.rm_.is(pc)) {
1203 // Block constant pool emission for one instruction after reading pc.
1204 BlockConstPoolFor(1);
1209 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
1210 DCHECK((instr & ~(kCondMask | B | L)) == B26);
1212 if (!x.rm_.is_valid()) {
1213 // Immediate offset.
1214 int offset_12 = x.offset_;
1215 if (offset_12 < 0) {
1216 offset_12 = -offset_12;
1219 if (!is_uint12(offset_12)) {
1220 // Immediate offset cannot be encoded, load it first to register ip
1221 // rn (and rd in a load) should never be ip, or will be trashed.
1222 DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
1223 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
1224 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
1227 DCHECK(offset_12 >= 0); // no masking needed
1230 // Register offset (shift_imm_ and shift_op_ are 0) or scaled
1231 // register offset the constructors make sure than both shift_imm_
1232 // and shift_op_ are initialized.
1233 DCHECK(!x.rm_.is(pc));
1234 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
1236 DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
1237 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
1241 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
1242 DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
1243 DCHECK(x.rn_.is_valid());
1245 if (!x.rm_.is_valid()) {
1246 // Immediate offset.
1247 int offset_8 = x.offset_;
1249 offset_8 = -offset_8;
1252 if (!is_uint8(offset_8)) {
1253 // Immediate offset cannot be encoded, load it first to register ip
1254 // rn (and rd in a load) should never be ip, or will be trashed.
1255 DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
1256 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
1257 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
1260 DCHECK(offset_8 >= 0); // no masking needed
1261 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
1262 } else if (x.shift_imm_ != 0) {
1263 // Scaled register offset not supported, load index first
1264 // rn (and rd in a load) should never be ip, or will be trashed.
1265 DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
1266 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
1267 Instruction::ConditionField(instr));
1268 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
1272 DCHECK((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
1273 instr |= x.rm_.code();
1275 DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
1276 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
1280 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
1281 DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
1284 emit(instr | rn.code()*B16 | rl);
1288 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
1289 // Unindexed addressing is not encoded by this function.
1290 DCHECK_EQ((B27 | B26),
1291 (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
1292 DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
1294 int offset_8 = x.offset_;
1295 DCHECK((offset_8 & 3) == 0); // offset must be an aligned word offset
1298 offset_8 = -offset_8;
1301 DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte
1302 DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
1304 // Post-indexed addressing requires W == 1; different than in addrmod2/3.
1308 DCHECK(offset_8 >= 0); // no masking needed
1309 emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
1313 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
1315 if (L->is_bound()) {
1316 target_pos = L->pos();
1318 if (L->is_linked()) {
1319 // Point to previous instruction that uses the link.
1320 target_pos = L->pos();
1322 // First entry of the link chain points to itself.
1323 target_pos = pc_offset();
1325 L->link_to(pc_offset());
1328 // Block the emission of the constant pool, since the branch instruction must
1329 // be emitted at the pc offset recorded by the label.
1330 BlockConstPoolFor(1);
1331 return target_pos - (pc_offset() + kPcLoadDelta);
1335 // Branch instructions.
1336 void Assembler::b(int branch_offset, Condition cond) {
1337 DCHECK((branch_offset & 3) == 0);
1338 int imm24 = branch_offset >> 2;
1339 CHECK(is_int24(imm24));
1340 emit(cond | B27 | B25 | (imm24 & kImm24Mask));
1343 // Dead code is a good location to emit the constant pool.
1344 CheckConstPool(false, false);
1349 void Assembler::bl(int branch_offset, Condition cond) {
1350 positions_recorder()->WriteRecordedPositions();
1351 DCHECK((branch_offset & 3) == 0);
1352 int imm24 = branch_offset >> 2;
1353 CHECK(is_int24(imm24));
1354 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
1358 void Assembler::blx(int branch_offset) { // v5 and above
1359 positions_recorder()->WriteRecordedPositions();
1360 DCHECK((branch_offset & 1) == 0);
1361 int h = ((branch_offset & 2) >> 1)*B24;
1362 int imm24 = branch_offset >> 2;
1363 CHECK(is_int24(imm24));
1364 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
1368 void Assembler::blx(Register target, Condition cond) { // v5 and above
1369 positions_recorder()->WriteRecordedPositions();
1370 DCHECK(!target.is(pc));
1371 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
1375 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
1376 positions_recorder()->WriteRecordedPositions();
1377 DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
1378 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
1382 // Data-processing instructions.
1384 void Assembler::and_(Register dst, Register src1, const Operand& src2,
1385 SBit s, Condition cond) {
1386 addrmod1(cond | AND | s, src1, dst, src2);
1390 void Assembler::eor(Register dst, Register src1, const Operand& src2,
1391 SBit s, Condition cond) {
1392 addrmod1(cond | EOR | s, src1, dst, src2);
1396 void Assembler::sub(Register dst, Register src1, const Operand& src2,
1397 SBit s, Condition cond) {
1398 addrmod1(cond | SUB | s, src1, dst, src2);
1402 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1403 SBit s, Condition cond) {
1404 addrmod1(cond | RSB | s, src1, dst, src2);
1408 void Assembler::add(Register dst, Register src1, const Operand& src2,
1409 SBit s, Condition cond) {
1410 addrmod1(cond | ADD | s, src1, dst, src2);
1414 void Assembler::adc(Register dst, Register src1, const Operand& src2,
1415 SBit s, Condition cond) {
1416 addrmod1(cond | ADC | s, src1, dst, src2);
1420 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1421 SBit s, Condition cond) {
1422 addrmod1(cond | SBC | s, src1, dst, src2);
1426 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1427 SBit s, Condition cond) {
1428 addrmod1(cond | RSC | s, src1, dst, src2);
1432 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
1433 addrmod1(cond | TST | S, src1, r0, src2);
1437 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
1438 addrmod1(cond | TEQ | S, src1, r0, src2);
1442 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
1443 addrmod1(cond | CMP | S, src1, r0, src2);
1447 void Assembler::cmp_raw_immediate(
1448 Register src, int raw_immediate, Condition cond) {
1449 DCHECK(is_uint12(raw_immediate));
1450 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
1454 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
1455 addrmod1(cond | CMN | S, src1, r0, src2);
1459 void Assembler::orr(Register dst, Register src1, const Operand& src2,
1460 SBit s, Condition cond) {
1461 addrmod1(cond | ORR | s, src1, dst, src2);
1465 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
1467 positions_recorder()->WriteRecordedPositions();
1469 // Don't allow nop instructions in the form mov rn, rn to be generated using
1470 // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
1471 // or MarkCode(int/NopMarkerTypes) pseudo instructions.
1472 DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
1473 addrmod1(cond | MOV | s, r0, dst, src);
1477 void Assembler::mov_label_offset(Register dst, Label* label) {
1478 if (label->is_bound()) {
1479 mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
1481 // Emit the link to the label in the code stream followed by extra nop
1483 // If the label is not linked, then start a new link chain by linking it to
1484 // itself, emitting pc_offset().
1485 int link = label->is_linked() ? label->pos() : pc_offset();
1486 label->link_to(pc_offset());
1488 // When the label is bound, these instructions will be patched with a
1489 // sequence of movw/movt or mov/orr/orr instructions. They will load the
1490 // destination register with the position of the label from the beginning
1493 // The link will be extracted from the first instruction and the destination
1494 // register from the second.
1503 // When the label gets bound: target_at extracts the link and target_at_put
1504 // patches the instructions.
1505 CHECK(is_uint24(link));
1506 BlockConstPoolScope block_const_pool(this);
1509 if (!CpuFeatures::IsSupported(ARMv7)) {
1516 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
1517 DCHECK(CpuFeatures::IsSupported(ARMv7));
1518 emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1522 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
1523 DCHECK(CpuFeatures::IsSupported(ARMv7));
1524 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1528 void Assembler::bic(Register dst, Register src1, const Operand& src2,
1529 SBit s, Condition cond) {
1530 addrmod1(cond | BIC | s, src1, dst, src2);
1534 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
1535 addrmod1(cond | MVN | s, r0, dst, src);
1539 // Multiply instructions.
1540 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1541 SBit s, Condition cond) {
1542 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1543 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1544 src2.code()*B8 | B7 | B4 | src1.code());
1548 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
1550 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1551 DCHECK(IsEnabled(MLS));
1552 emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
1553 src2.code()*B8 | B7 | B4 | src1.code());
1557 void Assembler::sdiv(Register dst, Register src1, Register src2,
1559 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1560 DCHECK(IsEnabled(SUDIV));
1561 emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
1562 src2.code()*B8 | B4 | src1.code());
1566 void Assembler::udiv(Register dst, Register src1, Register src2,
1568 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1569 DCHECK(IsEnabled(SUDIV));
1570 emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
1571 src2.code() * B8 | B4 | src1.code());
1575 void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
1577 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1578 // dst goes in bits 16-19 for this instruction!
1579 emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
1583 void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
1585 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1586 emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
1587 srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
1591 void Assembler::smmul(Register dst, Register src1, Register src2,
1593 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1594 emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
1595 src2.code() * B8 | B4 | src1.code());
1599 void Assembler::smlal(Register dstL,
1605 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1606 DCHECK(!dstL.is(dstH));
1607 emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1608 src2.code()*B8 | B7 | B4 | src1.code());
1612 void Assembler::smull(Register dstL,
1618 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1619 DCHECK(!dstL.is(dstH));
1620 emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1621 src2.code()*B8 | B7 | B4 | src1.code());
1625 void Assembler::umlal(Register dstL,
1631 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1632 DCHECK(!dstL.is(dstH));
1633 emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1634 src2.code()*B8 | B7 | B4 | src1.code());
1638 void Assembler::umull(Register dstL,
1644 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1645 DCHECK(!dstL.is(dstH));
1646 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1647 src2.code()*B8 | B7 | B4 | src1.code());
1651 // Miscellaneous arithmetic instructions.
1652 void Assembler::clz(Register dst, Register src, Condition cond) {
1654 DCHECK(!dst.is(pc) && !src.is(pc));
1655 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1656 15*B8 | CLZ | src.code());
1660 // Saturating instructions.
1662 // Unsigned saturate.
1663 void Assembler::usat(Register dst,
1668 DCHECK(CpuFeatures::IsSupported(ARMv7));
1669 DCHECK(!dst.is(pc) && !src.rm_.is(pc));
1670 DCHECK((satpos >= 0) && (satpos <= 31));
1671 DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1672 DCHECK(src.rs_.is(no_reg));
1675 if (src.shift_op_ == ASR) {
1679 emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1680 src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1684 // Bitfield manipulation instructions.
1686 // Unsigned bit field extract.
1687 // Extracts #width adjacent bits from position #lsb in a register, and
1688 // writes them to the low bits of a destination register.
1689 // ubfx dst, src, #lsb, #width
1690 void Assembler::ubfx(Register dst,
1696 DCHECK(CpuFeatures::IsSupported(ARMv7));
1697 DCHECK(!dst.is(pc) && !src.is(pc));
1698 DCHECK((lsb >= 0) && (lsb <= 31));
1699 DCHECK((width >= 1) && (width <= (32 - lsb)));
1700 emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
1701 lsb*B7 | B6 | B4 | src.code());
1705 // Signed bit field extract.
1706 // Extracts #width adjacent bits from position #lsb in a register, and
1707 // writes them to the low bits of a destination register. The extracted
1708 // value is sign extended to fill the destination register.
1709 // sbfx dst, src, #lsb, #width
1710 void Assembler::sbfx(Register dst,
1716 DCHECK(CpuFeatures::IsSupported(ARMv7));
1717 DCHECK(!dst.is(pc) && !src.is(pc));
1718 DCHECK((lsb >= 0) && (lsb <= 31));
1719 DCHECK((width >= 1) && (width <= (32 - lsb)));
1720 emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
1721 lsb*B7 | B6 | B4 | src.code());
1726 // Sets #width adjacent bits at position #lsb in the destination register
1727 // to zero, preserving the value of the other bits.
1728 // bfc dst, #lsb, #width
1729 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1731 DCHECK(CpuFeatures::IsSupported(ARMv7));
1732 DCHECK(!dst.is(pc));
1733 DCHECK((lsb >= 0) && (lsb <= 31));
1734 DCHECK((width >= 1) && (width <= (32 - lsb)));
1735 int msb = lsb + width - 1;
1736 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1740 // Bit field insert.
1741 // Inserts #width adjacent bits from the low bits of the source register
1742 // into position #lsb of the destination register.
1743 // bfi dst, src, #lsb, #width
1744 void Assembler::bfi(Register dst,
1750 DCHECK(CpuFeatures::IsSupported(ARMv7));
1751 DCHECK(!dst.is(pc) && !src.is(pc));
1752 DCHECK((lsb >= 0) && (lsb <= 31));
1753 DCHECK((width >= 1) && (width <= (32 - lsb)));
1754 int msb = lsb + width - 1;
1755 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
1760 void Assembler::pkhbt(Register dst,
1762 const Operand& src2,
1764 // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1765 // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1766 // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
1767 DCHECK(!dst.is(pc));
1768 DCHECK(!src1.is(pc));
1769 DCHECK(!src2.rm().is(pc));
1770 DCHECK(!src2.rm().is(no_reg));
1771 DCHECK(src2.rs().is(no_reg));
1772 DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
1773 DCHECK(src2.shift_op() == LSL);
1774 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1775 src2.shift_imm_*B7 | B4 | src2.rm().code());
1779 void Assembler::pkhtb(Register dst,
1781 const Operand& src2,
1783 // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1784 // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1785 // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
1786 DCHECK(!dst.is(pc));
1787 DCHECK(!src1.is(pc));
1788 DCHECK(!src2.rm().is(pc));
1789 DCHECK(!src2.rm().is(no_reg));
1790 DCHECK(src2.rs().is(no_reg));
1791 DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
1792 DCHECK(src2.shift_op() == ASR);
1793 int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
1794 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1795 asr*B7 | B6 | B4 | src2.rm().code());
1799 void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
1800 // Instruction details available in ARM DDI 0406C.b, A8.8.233.
1801 // cond(31-28) | 01101010(27-20) | 1111(19-16) |
1802 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1803 DCHECK(!dst.is(pc));
1804 DCHECK(!src.is(pc));
1805 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1806 emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
1807 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1811 void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
1813 // Instruction details available in ARM DDI 0406C.b, A8.8.233.
1814 // cond(31-28) | 01101010(27-20) | Rn(19-16) |
1815 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1816 DCHECK(!dst.is(pc));
1817 DCHECK(!src1.is(pc));
1818 DCHECK(!src2.is(pc));
1819 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1820 emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
1821 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
1825 void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
1826 // Instruction details available in ARM DDI 0406C.b, A8.8.235.
1827 // cond(31-28) | 01101011(27-20) | 1111(19-16) |
1828 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1829 DCHECK(!dst.is(pc));
1830 DCHECK(!src.is(pc));
1831 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1832 emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
1833 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1837 void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
1839 // Instruction details available in ARM DDI 0406C.b, A8.8.235.
1840 // cond(31-28) | 01101011(27-20) | Rn(19-16) |
1841 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1842 DCHECK(!dst.is(pc));
1843 DCHECK(!src1.is(pc));
1844 DCHECK(!src2.is(pc));
1845 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1846 emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
1847 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
1851 void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
1852 // Instruction details available in ARM DDI 0406C.b, A8.8.274.
1853 // cond(31-28) | 01101110(27-20) | 1111(19-16) |
1854 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1855 DCHECK(!dst.is(pc));
1856 DCHECK(!src.is(pc));
1857 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1858 emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
1859 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1863 void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
1865 // Instruction details available in ARM DDI 0406C.b, A8.8.271.
1866 // cond(31-28) | 01101110(27-20) | Rn(19-16) |
1867 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1868 DCHECK(!dst.is(pc));
1869 DCHECK(!src1.is(pc));
1870 DCHECK(!src2.is(pc));
1871 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1872 emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
1873 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
1877 void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
1878 // Instruction details available in ARM DDI 0406C.b, A8.8.275.
1879 // cond(31-28) | 01101100(27-20) | 1111(19-16) |
1880 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1881 DCHECK(!dst.is(pc));
1882 DCHECK(!src.is(pc));
1883 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1884 emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
1885 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1889 void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
1890 // Instruction details available in ARM DDI 0406C.b, A8.8.276.
1891 // cond(31-28) | 01101111(27-20) | 1111(19-16) |
1892 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1893 DCHECK(!dst.is(pc));
1894 DCHECK(!src.is(pc));
1895 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1896 emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
1897 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1901 void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
1903 // Instruction details available in ARM DDI 0406C.b, A8.8.273.
1904 // cond(31-28) | 01101111(27-20) | Rn(19-16) |
1905 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1906 DCHECK(!dst.is(pc));
1907 DCHECK(!src1.is(pc));
1908 DCHECK(!src2.is(pc));
1909 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1910 emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
1911 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
1915 // Status register access instructions.
1916 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1917 DCHECK(!dst.is(pc));
1918 emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1922 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1924 DCHECK(fields >= B16 && fields < B20); // at least one field set
1926 if (!src.rm_.is_valid()) {
1928 uint32_t rotate_imm;
1930 if (src.must_output_reloc_info(this) ||
1931 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
1932 // Immediate operand cannot be encoded, load it first to register ip.
1933 move_32_bit_immediate(ip, src);
1934 msr(fields, Operand(ip), cond);
1937 instr = I | rotate_imm*B8 | immed_8;
1939 DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
1940 instr = src.rm_.code();
1942 emit(cond | instr | B24 | B21 | fields | 15*B12);
1946 // Load/Store instructions.
1947 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1949 positions_recorder()->WriteRecordedPositions();
1951 addrmod2(cond | B26 | L, dst, src);
1955 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1956 addrmod2(cond | B26, src, dst);
1960 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1961 addrmod2(cond | B26 | B | L, dst, src);
1965 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1966 addrmod2(cond | B26 | B, src, dst);
1970 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1971 addrmod3(cond | L | B7 | H | B4, dst, src);
1975 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1976 addrmod3(cond | B7 | H | B4, src, dst);
1980 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1981 addrmod3(cond | L | B7 | S6 | B4, dst, src);
1985 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1986 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1990 void Assembler::ldrd(Register dst1, Register dst2,
1991 const MemOperand& src, Condition cond) {
1992 DCHECK(IsEnabled(ARMv7));
1993 DCHECK(src.rm().is(no_reg));
1994 DCHECK(!dst1.is(lr)); // r14.
1995 DCHECK_EQ(0, dst1.code() % 2);
1996 DCHECK_EQ(dst1.code() + 1, dst2.code());
1997 addrmod3(cond | B7 | B6 | B4, dst1, src);
2001 void Assembler::strd(Register src1, Register src2,
2002 const MemOperand& dst, Condition cond) {
2003 DCHECK(dst.rm().is(no_reg));
2004 DCHECK(!src1.is(lr)); // r14.
2005 DCHECK_EQ(0, src1.code() % 2);
2006 DCHECK_EQ(src1.code() + 1, src2.code());
2007 DCHECK(IsEnabled(ARMv7));
2008 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
2012 // Preload instructions.
2013 void Assembler::pld(const MemOperand& address) {
2014 // Instruction details available in ARM DDI 0406C.b, A8.8.128.
2015 // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
2016 // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
2017 DCHECK(address.rm().is(no_reg));
2018 DCHECK(address.am() == Offset);
2020 int offset = address.offset();
2025 DCHECK(offset < 4096);
2026 emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
2031 // Load/Store multiple instructions.
2032 void Assembler::ldm(BlockAddrMode am,
2036 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
2037 DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
2039 addrmod4(cond | B27 | am | L, base, dst);
2041 // Emit the constant pool after a function return implemented by ldm ..{..pc}.
2042 if (cond == al && (dst & pc.bit()) != 0) {
2043 // There is a slight chance that the ldm instruction was actually a call,
2044 // in which case it would be wrong to return into the constant pool; we
2045 // recognize this case by checking if the emission of the pool was blocked
2046 // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
2047 // the case, we emit a jump over the pool.
2048 CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
2053 void Assembler::stm(BlockAddrMode am,
2057 addrmod4(cond | B27 | am, base, src);
2061 // Exception-generating instructions and debugging support.
2062 // Stops with a non-negative code less than kNumOfWatchedStops support
2063 // enabling/disabling and a counter feature. See simulator-arm.h .
2064 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
2066 DCHECK(code >= kDefaultStopCode);
2068 // The Simulator will handle the stop instruction and get the message
2069 // address. It expects to find the address just after the svc instruction.
2070 BlockConstPoolScope block_const_pool(this);
2072 svc(kStopCode + code, cond);
2074 svc(kStopCode + kMaxStopCode, cond);
2076 emit(reinterpret_cast<Instr>(msg));
2078 #else // def __arm__
2081 b(&skip, NegateCondition(cond));
2087 #endif // def __arm__
2091 void Assembler::bkpt(uint32_t imm16) { // v5 and above
2092 DCHECK(is_uint16(imm16));
2093 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
2097 void Assembler::svc(uint32_t imm24, Condition cond) {
2098 DCHECK(is_uint24(imm24));
2099 emit(cond | 15*B24 | imm24);
2103 // Coprocessor instructions.
2104 void Assembler::cdp(Coprocessor coproc,
2111 DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
2112 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
2113 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
2117 void Assembler::cdp2(Coprocessor coproc,
2122 int opcode_2) { // v5 and above
2123 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
2127 void Assembler::mcr(Coprocessor coproc,
2134 DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
2135 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
2136 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2140 void Assembler::mcr2(Coprocessor coproc,
2145 int opcode_2) { // v5 and above
2146 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2150 void Assembler::mrc(Coprocessor coproc,
2157 DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
2158 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2159 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2163 void Assembler::mrc2(Coprocessor coproc,
2168 int opcode_2) { // v5 and above
2169 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2173 void Assembler::ldc(Coprocessor coproc,
2175 const MemOperand& src,
2178 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
2182 void Assembler::ldc(Coprocessor coproc,
2188 // Unindexed addressing.
2189 DCHECK(is_uint8(option));
2190 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2191 coproc*B8 | (option & 255));
2195 void Assembler::ldc2(Coprocessor coproc,
2197 const MemOperand& src,
2198 LFlag l) { // v5 and above
2199 ldc(coproc, crd, src, l, kSpecialCondition);
2203 void Assembler::ldc2(Coprocessor coproc,
2207 LFlag l) { // v5 and above
2208 ldc(coproc, crd, rn, option, l, kSpecialCondition);
2214 void Assembler::vldr(const DwVfpRegister dst,
2215 const Register base,
2217 const Condition cond) {
2218 // Ddst = MEM(Rbase + offset).
2219 // Instruction details available in ARM DDI 0406C.b, A8-924.
2220 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
2221 // Vd(15-12) | 1011(11-8) | offset
2228 dst.split_code(&vd, &d);
2230 DCHECK(offset >= 0);
2231 if ((offset % 4) == 0 && (offset / 4) < 256) {
2232 emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
2233 0xB*B8 | ((offset / 4) & 255));
2235 // Larger offsets must be handled by computing the correct address
2236 // in the ip register.
2237 DCHECK(!base.is(ip));
2239 add(ip, base, Operand(offset));
2241 sub(ip, base, Operand(offset));
2243 emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
2248 void Assembler::vldr(const DwVfpRegister dst,
2249 const MemOperand& operand,
2250 const Condition cond) {
2251 DCHECK(operand.am_ == Offset);
2252 if (operand.rm().is_valid()) {
2253 add(ip, operand.rn(),
2254 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2255 vldr(dst, ip, 0, cond);
2257 vldr(dst, operand.rn(), operand.offset(), cond);
2262 void Assembler::vldr(const SwVfpRegister dst,
2263 const Register base,
2265 const Condition cond) {
2266 // Sdst = MEM(Rbase + offset).
2267 // Instruction details available in ARM DDI 0406A, A8-628.
2268 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
2269 // Vdst(15-12) | 1010(11-8) | offset
2276 dst.split_code(&sd, &d);
2277 DCHECK(offset >= 0);
2279 if ((offset % 4) == 0 && (offset / 4) < 256) {
2280 emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
2281 0xA*B8 | ((offset / 4) & 255));
2283 // Larger offsets must be handled by computing the correct address
2284 // in the ip register.
2285 DCHECK(!base.is(ip));
2287 add(ip, base, Operand(offset));
2289 sub(ip, base, Operand(offset));
2291 emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2296 void Assembler::vldr(const SwVfpRegister dst,
2297 const MemOperand& operand,
2298 const Condition cond) {
2299 DCHECK(operand.am_ == Offset);
2300 if (operand.rm().is_valid()) {
2301 add(ip, operand.rn(),
2302 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2303 vldr(dst, ip, 0, cond);
2305 vldr(dst, operand.rn(), operand.offset(), cond);
2310 void Assembler::vstr(const DwVfpRegister src,
2311 const Register base,
2313 const Condition cond) {
2314 // MEM(Rbase + offset) = Dsrc.
2315 // Instruction details available in ARM DDI 0406C.b, A8-1082.
2316 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
2317 // Vd(15-12) | 1011(11-8) | (offset/4)
2323 DCHECK(offset >= 0);
2325 src.split_code(&vd, &d);
2327 if ((offset % 4) == 0 && (offset / 4) < 256) {
2328 emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2329 ((offset / 4) & 255));
2331 // Larger offsets must be handled by computing the correct address
2332 // in the ip register.
2333 DCHECK(!base.is(ip));
2335 add(ip, base, Operand(offset));
2337 sub(ip, base, Operand(offset));
2339 emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
2344 void Assembler::vstr(const DwVfpRegister src,
2345 const MemOperand& operand,
2346 const Condition cond) {
2347 DCHECK(operand.am_ == Offset);
2348 if (operand.rm().is_valid()) {
2349 add(ip, operand.rn(),
2350 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2351 vstr(src, ip, 0, cond);
2353 vstr(src, operand.rn(), operand.offset(), cond);
2358 void Assembler::vstr(const SwVfpRegister src,
2359 const Register base,
2361 const Condition cond) {
2362 // MEM(Rbase + offset) = SSrc.
2363 // Instruction details available in ARM DDI 0406A, A8-786.
2364 // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
2365 // Vdst(15-12) | 1010(11-8) | (offset/4)
2372 src.split_code(&sd, &d);
2373 DCHECK(offset >= 0);
2374 if ((offset % 4) == 0 && (offset / 4) < 256) {
2375 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2376 0xA*B8 | ((offset / 4) & 255));
2378 // Larger offsets must be handled by computing the correct address
2379 // in the ip register.
2380 DCHECK(!base.is(ip));
2382 add(ip, base, Operand(offset));
2384 sub(ip, base, Operand(offset));
2386 emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2391 void Assembler::vstr(const SwVfpRegister src,
2392 const MemOperand& operand,
2393 const Condition cond) {
2394 DCHECK(operand.am_ == Offset);
2395 if (operand.rm().is_valid()) {
2396 add(ip, operand.rn(),
2397 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2398 vstr(src, ip, 0, cond);
2400 vstr(src, operand.rn(), operand.offset(), cond);
2405 void Assembler::vldm(BlockAddrMode am,
2407 DwVfpRegister first,
2410 // Instruction details available in ARM DDI 0406C.b, A8-922.
2411 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2412 // first(15-12) | 1011(11-8) | (count * 2)
2413 DCHECK_LE(first.code(), last.code());
2414 DCHECK(am == ia || am == ia_w || am == db_w);
2415 DCHECK(!base.is(pc));
2418 first.split_code(&sd, &d);
2419 int count = last.code() - first.code() + 1;
2420 DCHECK(count <= 16);
2421 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2426 void Assembler::vstm(BlockAddrMode am,
2428 DwVfpRegister first,
2431 // Instruction details available in ARM DDI 0406C.b, A8-1080.
2432 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2433 // first(15-12) | 1011(11-8) | (count * 2)
2434 DCHECK_LE(first.code(), last.code());
2435 DCHECK(am == ia || am == ia_w || am == db_w);
2436 DCHECK(!base.is(pc));
2439 first.split_code(&sd, &d);
2440 int count = last.code() - first.code() + 1;
2441 DCHECK(count <= 16);
2442 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2446 void Assembler::vldm(BlockAddrMode am,
2448 SwVfpRegister first,
2451 // Instruction details available in ARM DDI 0406A, A8-626.
2452 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2453 // first(15-12) | 1010(11-8) | (count/2)
2454 DCHECK_LE(first.code(), last.code());
2455 DCHECK(am == ia || am == ia_w || am == db_w);
2456 DCHECK(!base.is(pc));
2459 first.split_code(&sd, &d);
2460 int count = last.code() - first.code() + 1;
2461 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2466 void Assembler::vstm(BlockAddrMode am,
2468 SwVfpRegister first,
2471 // Instruction details available in ARM DDI 0406A, A8-784.
2472 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2473 // first(15-12) | 1011(11-8) | (count/2)
2474 DCHECK_LE(first.code(), last.code());
2475 DCHECK(am == ia || am == ia_w || am == db_w);
2476 DCHECK(!base.is(pc));
2479 first.split_code(&sd, &d);
2480 int count = last.code() - first.code() + 1;
2481 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2486 void Assembler::vmov(const SwVfpRegister dst, float imm) {
2487 mov(ip, Operand(bit_cast<int32_t>(imm)));
2492 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2496 *lo = i & 0xffffffff;
2501 // Only works for little endian floating point formats.
2502 // We don't support VFP on the mixed endian floating point platform.
2503 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
2504 DCHECK(CpuFeatures::IsSupported(VFP3));
2506 // VMOV can accept an immediate of the form:
2508 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2510 // The immediate is encoded using an 8-bit quantity, comprised of two
2511 // 4-bit fields. For an 8-bit immediate of the form:
2515 // where a is the MSB and h is the LSB, an immediate 64-bit double can be
2516 // created of the form:
2518 // [aBbbbbbb,bbcdefgh,00000000,00000000,
2519 // 00000000,00000000,00000000,00000000]
2525 DoubleAsTwoUInt32(d, &lo, &hi);
2527 // The most obvious constraint is the long block of zeroes.
2528 if ((lo != 0) || ((hi & 0xffff) != 0)) {
2532 // Bits 62:55 must be all clear or all set.
2533 if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2537 // Bit 63 must be NOT bit 62.
2538 if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2542 // Create the encoded immediate in the form:
2543 // [00000000,0000abcd,00000000,0000efgh]
2544 *encoding = (hi >> 16) & 0xf; // Low nybble.
2545 *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
2546 *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
2552 void Assembler::vmov(const DwVfpRegister dst,
2554 const Register scratch) {
2556 if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
2557 // The double can be encoded in the instruction.
2560 // Instruction details available in ARM DDI 0406C.b, A8-936.
2561 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
2562 // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
2564 dst.split_code(&vd, &d);
2565 emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
2566 } else if (FLAG_enable_vldr_imm && is_ool_constant_pool_available()) {
2567 // TODO(jfb) Temporarily turned off until we have constant blinding or
2568 // some equivalent mitigation: an attacker can otherwise control
2569 // generated data which also happens to be executable, a Very Bad
2571 // Blinding gets tricky because we don't have xor, we probably
2572 // need to add/subtract without losing precision, which requires a
2573 // cookie value that Lithium is probably better positioned to
2575 // We could also add a few peepholes here like detecting 0.0 and
2576 // -0.0 and doing a vmov from the sequestered d14, forcing denorms
2577 // to zero (we set flush-to-zero), and normalizing NaN values.
2578 // We could also detect redundant values.
2579 // The code could also randomize the order of values, though
2580 // that's tricky because vldr has a limited reach. Furthermore
2581 // it breaks load locality.
2582 RelocInfo rinfo(pc_, imm);
2583 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
2584 if (section == ConstantPoolArray::EXTENDED_SECTION) {
2585 DCHECK(FLAG_enable_ool_constant_pool);
2586 // Emit instructions to load constant pool offset.
2589 // Load from constant pool at offset.
2590 vldr(dst, MemOperand(pp, ip));
2592 DCHECK(section == ConstantPoolArray::SMALL_SECTION);
2593 vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
2596 // Synthesise the double from ARM immediates.
2598 DoubleAsTwoUInt32(imm, &lo, &hi);
2601 // Move the low and high parts of the double to a D register in one
2603 mov(ip, Operand(lo));
2605 } else if (scratch.is(no_reg)) {
2606 mov(ip, Operand(lo));
2607 vmov(dst, VmovIndexLo, ip);
2608 if ((lo & 0xffff) == (hi & 0xffff)) {
2611 mov(ip, Operand(hi));
2613 vmov(dst, VmovIndexHi, ip);
2615 // Move the low and high parts of the double to a D register in one
2617 mov(ip, Operand(lo));
2618 mov(scratch, Operand(hi));
2619 vmov(dst, ip, scratch);
2625 void Assembler::vmov(const SwVfpRegister dst,
2626 const SwVfpRegister src,
2627 const Condition cond) {
2629 // Instruction details available in ARM DDI 0406B, A8-642.
2631 dst.split_code(&sd, &d);
2632 src.split_code(&sm, &m);
2633 emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
2637 void Assembler::vmov(const DwVfpRegister dst,
2638 const DwVfpRegister src,
2639 const Condition cond) {
2641 // Instruction details available in ARM DDI 0406C.b, A8-938.
2642 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2643 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2645 dst.split_code(&vd, &d);
2647 src.split_code(&vm, &m);
2648 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
2653 void Assembler::vmov(const DwVfpRegister dst,
2654 const VmovIndex index,
2656 const Condition cond) {
2658 // Instruction details available in ARM DDI 0406C.b, A8-940.
2659 // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
2660 // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2661 DCHECK(index.index == 0 || index.index == 1);
2663 dst.split_code(&vd, &d);
2664 emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
2669 void Assembler::vmov(const Register dst,
2670 const VmovIndex index,
2671 const DwVfpRegister src,
2672 const Condition cond) {
2674 // Instruction details available in ARM DDI 0406C.b, A8.8.342.
2675 // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
2676 // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2677 DCHECK(index.index == 0 || index.index == 1);
2679 src.split_code(&vn, &n);
2680 emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
2681 0xB*B8 | n*B7 | B4);
2685 void Assembler::vmov(const DwVfpRegister dst,
2686 const Register src1,
2687 const Register src2,
2688 const Condition cond) {
2690 // Instruction details available in ARM DDI 0406C.b, A8-948.
2691 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2692 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2693 DCHECK(!src1.is(pc) && !src2.is(pc));
2695 dst.split_code(&vm, &m);
2696 emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
2697 src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
2701 void Assembler::vmov(const Register dst1,
2702 const Register dst2,
2703 const DwVfpRegister src,
2704 const Condition cond) {
2706 // Instruction details available in ARM DDI 0406C.b, A8-948.
2707 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2708 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2709 DCHECK(!dst1.is(pc) && !dst2.is(pc));
2711 src.split_code(&vm, &m);
2712 emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2713 dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
2717 void Assembler::vmov(const SwVfpRegister dst,
2719 const Condition cond) {
2721 // Instruction details available in ARM DDI 0406A, A8-642.
2722 // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2723 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2724 DCHECK(!src.is(pc));
2726 dst.split_code(&sn, &n);
2727 emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
2731 void Assembler::vmov(const Register dst,
2732 const SwVfpRegister src,
2733 const Condition cond) {
2735 // Instruction details available in ARM DDI 0406A, A8-642.
2736 // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2737 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2738 DCHECK(!dst.is(pc));
2740 src.split_code(&sn, &n);
2741 emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
2745 // Type of data to read from or write to VFP register.
2746 // Used as specifier in generic vcvt instruction.
2747 enum VFPType { S32, U32, F32, F64 };
2750 static bool IsSignedVFPType(VFPType type) {
2763 static bool IsIntegerVFPType(VFPType type) {
2778 static bool IsDoubleVFPType(VFPType type) {
2791 // Split five bit reg_code based on size of reg_type.
2792 // 32-bit register codes are Vm:M
2793 // 64-bit register codes are M:Vm
2794 // where Vm is four bits, and M is a single bit.
2795 static void SplitRegCode(VFPType reg_type,
2799 DCHECK((reg_code >= 0) && (reg_code <= 31));
2800 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2802 *m = reg_code & 0x1;
2803 *vm = reg_code >> 1;
2806 *m = (reg_code & 0x10) >> 4;
2807 *vm = reg_code & 0x0F;
2812 // Encode vcvt.src_type.dst_type instruction.
2813 static Instr EncodeVCVT(const VFPType dst_type,
2815 const VFPType src_type,
2817 VFPConversionMode mode,
2818 const Condition cond) {
2819 DCHECK(src_type != dst_type);
2821 SplitRegCode(src_type, src_code, &Vm, &M);
2822 SplitRegCode(dst_type, dst_code, &Vd, &D);
2824 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2825 // Conversion between IEEE floating point and 32-bit integer.
2826 // Instruction details available in ARM DDI 0406B, A8.6.295.
2827 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2828 // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2829 DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
2833 if (IsIntegerVFPType(dst_type)) {
2834 opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2835 sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2838 DCHECK(IsIntegerVFPType(src_type));
2840 sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2841 op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
2844 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2845 Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2847 // Conversion between IEEE double and single precision.
2848 // Instruction details available in ARM DDI 0406B, A8.6.298.
2849 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2850 // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2851 int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2852 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2853 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2858 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2859 const SwVfpRegister src,
2860 VFPConversionMode mode,
2861 const Condition cond) {
2862 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
2866 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2867 const SwVfpRegister src,
2868 VFPConversionMode mode,
2869 const Condition cond) {
2870 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
2874 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2875 const SwVfpRegister src,
2876 VFPConversionMode mode,
2877 const Condition cond) {
2878 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
2882 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2883 const DwVfpRegister src,
2884 VFPConversionMode mode,
2885 const Condition cond) {
2886 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
2890 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2891 const DwVfpRegister src,
2892 VFPConversionMode mode,
2893 const Condition cond) {
2894 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
2898 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2899 const SwVfpRegister src,
2900 VFPConversionMode mode,
2901 const Condition cond) {
2902 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
2906 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2907 const DwVfpRegister src,
2908 VFPConversionMode mode,
2909 const Condition cond) {
2910 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
2914 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2916 const Condition cond) {
2917 // Instruction details available in ARM DDI 0406C.b, A8-874.
2918 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
2919 // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
2920 DCHECK(fraction_bits > 0 && fraction_bits <= 32);
2921 DCHECK(CpuFeatures::IsSupported(VFP3));
2923 dst.split_code(&vd, &d);
2924 int imm5 = 32 - fraction_bits;
2926 int imm4 = (imm5 >> 1) & 0xf;
2927 emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
2928 vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
2932 void Assembler::vneg(const DwVfpRegister dst,
2933 const DwVfpRegister src,
2934 const Condition cond) {
2935 // Instruction details available in ARM DDI 0406C.b, A8-968.
2936 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
2937 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2939 dst.split_code(&vd, &d);
2941 src.split_code(&vm, &m);
2943 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
2948 void Assembler::vabs(const DwVfpRegister dst,
2949 const DwVfpRegister src,
2950 const Condition cond) {
2951 // Instruction details available in ARM DDI 0406C.b, A8-524.
2952 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2953 // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2955 dst.split_code(&vd, &d);
2957 src.split_code(&vm, &m);
2958 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
2963 void Assembler::vadd(const DwVfpRegister dst,
2964 const DwVfpRegister src1,
2965 const DwVfpRegister src2,
2966 const Condition cond) {
2967 // Dd = vadd(Dn, Dm) double precision floating point addition.
2968 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2969 // Instruction details available in ARM DDI 0406C.b, A8-830.
2970 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2971 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2973 dst.split_code(&vd, &d);
2975 src1.split_code(&vn, &n);
2977 src2.split_code(&vm, &m);
2978 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2983 void Assembler::vsub(const DwVfpRegister dst,
2984 const DwVfpRegister src1,
2985 const DwVfpRegister src2,
2986 const Condition cond) {
2987 // Dd = vsub(Dn, Dm) double precision floating point subtraction.
2988 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2989 // Instruction details available in ARM DDI 0406C.b, A8-1086.
2990 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2991 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2993 dst.split_code(&vd, &d);
2995 src1.split_code(&vn, &n);
2997 src2.split_code(&vm, &m);
2998 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2999 n*B7 | B6 | m*B5 | vm);
3003 void Assembler::vmul(const DwVfpRegister dst,
3004 const DwVfpRegister src1,
3005 const DwVfpRegister src2,
3006 const Condition cond) {
3007 // Dd = vmul(Dn, Dm) double precision floating point multiplication.
3008 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
3009 // Instruction details available in ARM DDI 0406C.b, A8-960.
3010 // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
3011 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3013 dst.split_code(&vd, &d);
3015 src1.split_code(&vn, &n);
3017 src2.split_code(&vm, &m);
3018 emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
3023 void Assembler::vmla(const DwVfpRegister dst,
3024 const DwVfpRegister src1,
3025 const DwVfpRegister src2,
3026 const Condition cond) {
3027 // Instruction details available in ARM DDI 0406C.b, A8-932.
3028 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
3029 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
3031 dst.split_code(&vd, &d);
3033 src1.split_code(&vn, &n);
3035 src2.split_code(&vm, &m);
3036 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
3041 void Assembler::vmls(const DwVfpRegister dst,
3042 const DwVfpRegister src1,
3043 const DwVfpRegister src2,
3044 const Condition cond) {
3045 // Instruction details available in ARM DDI 0406C.b, A8-932.
3046 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
3047 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
3049 dst.split_code(&vd, &d);
3051 src1.split_code(&vn, &n);
3053 src2.split_code(&vm, &m);
3054 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
3059 void Assembler::vdiv(const DwVfpRegister dst,
3060 const DwVfpRegister src1,
3061 const DwVfpRegister src2,
3062 const Condition cond) {
3063 // Dd = vdiv(Dn, Dm) double precision floating point division.
3064 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
3065 // Instruction details available in ARM DDI 0406C.b, A8-882.
3066 // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
3067 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3069 dst.split_code(&vd, &d);
3071 src1.split_code(&vn, &n);
3073 src2.split_code(&vm, &m);
3074 emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
3079 void Assembler::vcmp(const DwVfpRegister src1,
3080 const DwVfpRegister src2,
3081 const Condition cond) {
3082 // vcmp(Dd, Dm) double precision floating point comparison.
3083 // Instruction details available in ARM DDI 0406C.b, A8-864.
3084 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
3085 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3087 src1.split_code(&vd, &d);
3089 src2.split_code(&vm, &m);
3090 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
3095 void Assembler::vcmp(const DwVfpRegister src1,
3097 const Condition cond) {
3098 // vcmp(Dd, #0.0) double precision floating point comparison.
3099 // Instruction details available in ARM DDI 0406C.b, A8-864.
3100 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
3101 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
3102 DCHECK(src2 == 0.0);
3104 src1.split_code(&vd, &d);
3105 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
3109 void Assembler::vmsr(Register dst, Condition cond) {
3110 // Instruction details available in ARM DDI 0406A, A8-652.
3111 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
3112 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
3113 emit(cond | 0xE*B24 | 0xE*B20 | B16 |
3114 dst.code()*B12 | 0xA*B8 | B4);
3118 void Assembler::vmrs(Register dst, Condition cond) {
3119 // Instruction details available in ARM DDI 0406A, A8-652.
3120 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
3121 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
3122 emit(cond | 0xE*B24 | 0xF*B20 | B16 |
3123 dst.code()*B12 | 0xA*B8 | B4);
3127 void Assembler::vsqrt(const DwVfpRegister dst,
3128 const DwVfpRegister src,
3129 const Condition cond) {
3130 // Instruction details available in ARM DDI 0406C.b, A8-1058.
3131 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
3132 // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
3134 dst.split_code(&vd, &d);
3136 src.split_code(&vm, &m);
3137 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
3142 void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
3143 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3144 // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3145 // M(5) | 0(4) | Vm(3-0)
3146 DCHECK(CpuFeatures::IsSupported(ARMv8));
3148 dst.split_code(&vd, &d);
3150 src.split_code(&vm, &m);
3151 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
3152 0x5 * B9 | B8 | B6 | m * B5 | vm);
3156 void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
3157 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3158 // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3159 // M(5) | 0(4) | Vm(3-0)
3160 DCHECK(CpuFeatures::IsSupported(ARMv8));
3162 dst.split_code(&vd, &d);
3164 src.split_code(&vm, &m);
3165 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
3166 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3170 void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
3171 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3172 // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3173 // M(5) | 0(4) | Vm(3-0)
3174 DCHECK(CpuFeatures::IsSupported(ARMv8));
3176 dst.split_code(&vd, &d);
3178 src.split_code(&vm, &m);
3179 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
3180 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3184 void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
3185 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3186 // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3187 // M(5) | 0(4) | Vm(3-0)
3188 DCHECK(CpuFeatures::IsSupported(ARMv8));
3190 dst.split_code(&vd, &d);
3192 src.split_code(&vm, &m);
3193 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
3194 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3198 void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
3199 const Condition cond) {
3200 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
3201 // Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3202 DCHECK(CpuFeatures::IsSupported(ARMv8));
3204 dst.split_code(&vd, &d);
3206 src.split_code(&vm, &m);
3207 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
3208 0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
3212 // Support for NEON.
3214 void Assembler::vld1(NeonSize size,
3215 const NeonListOperand& dst,
3216 const NeonMemOperand& src) {
3217 // Instruction details available in ARM DDI 0406C.b, A8.8.320.
3218 // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
3219 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3220 DCHECK(CpuFeatures::IsSupported(NEON));
3222 dst.base().split_code(&vd, &d);
3223 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
3224 dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
3228 void Assembler::vst1(NeonSize size,
3229 const NeonListOperand& src,
3230 const NeonMemOperand& dst) {
3231 // Instruction details available in ARM DDI 0406C.b, A8.8.404.
3232 // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
3233 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3234 DCHECK(CpuFeatures::IsSupported(NEON));
3236 src.base().split_code(&vd, &d);
3237 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
3238 size*B6 | dst.align()*B4 | dst.rm().code());
3242 void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
3243 // Instruction details available in ARM DDI 0406C.b, A8.8.346.
3244 // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
3245 // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
3246 DCHECK(CpuFeatures::IsSupported(NEON));
3248 dst.split_code(&vd, &d);
3250 src.split_code(&vm, &m);
3251 emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
3252 (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
3256 // Pseudo instructions.
3257 void Assembler::nop(int type) {
3258 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
3259 // some of the CPU's pipeline and has to issue. Older ARM chips simply used
3260 // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
3261 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
3263 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3264 emit(al | 13*B21 | type*B12 | type);
3268 bool Assembler::IsMovT(Instr instr) {
3269 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3270 ((kNumRegisters-1)*B12) | // mask out register
3271 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3272 return instr == kMovtPattern;
3276 bool Assembler::IsMovW(Instr instr) {
3277 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3278 ((kNumRegisters-1)*B12) | // mask out destination
3279 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3280 return instr == kMovwPattern;
3284 Instr Assembler::GetMovTPattern() { return kMovtPattern; }
3287 Instr Assembler::GetMovWPattern() { return kMovwPattern; }
3290 Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
3291 DCHECK(immediate < 0x10000);
3292 return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
3296 Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
3297 instruction &= ~EncodeMovwImmediate(0xffff);
3298 return instruction | EncodeMovwImmediate(immediate);
3302 int Assembler::DecodeShiftImm(Instr instr) {
3303 int rotate = Instruction::RotateValue(instr) * 2;
3304 int immed8 = Instruction::Immed8Value(instr);
3305 return base::bits::RotateRight32(immed8, rotate);
3309 Instr Assembler::PatchShiftImm(Instr instr, int immed) {
3310 uint32_t rotate_imm = 0;
3311 uint32_t immed_8 = 0;
3312 bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
3315 return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
3319 bool Assembler::IsNop(Instr instr, int type) {
3320 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3321 // Check for mov rx, rx where x = type.
3322 return instr == (al | 13*B21 | type*B12 | type);
3326 bool Assembler::IsMovImmed(Instr instr) {
3327 return (instr & kMovImmedMask) == kMovImmedPattern;
3331 bool Assembler::IsOrrImmed(Instr instr) {
3332 return (instr & kOrrImmedMask) == kOrrImmedPattern;
3337 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
3340 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
3344 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
3345 return is_uint12(abs(imm32));
3350 void Assembler::RecordConstPool(int size) {
3351 // We only need this for debugger support, to correctly compute offsets in the
3353 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
3357 void Assembler::GrowBuffer() {
3358 if (!own_buffer_) FATAL("external code buffer is too small");
3360 // Compute new buffer size.
3361 CodeDesc desc; // the new buffer
3362 if (buffer_size_ < 1 * MB) {
3363 desc.buffer_size = 2*buffer_size_;
3365 desc.buffer_size = buffer_size_ + 1*MB;
3367 CHECK_GT(desc.buffer_size, 0); // no overflow
3369 // Set up new buffer.
3370 desc.buffer = NewArray<byte>(desc.buffer_size);
3372 desc.instr_size = pc_offset();
3373 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
3376 int pc_delta = desc.buffer - buffer_;
3377 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
3378 MemMove(desc.buffer, buffer_, desc.instr_size);
3379 MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
3383 DeleteArray(buffer_);
3384 buffer_ = desc.buffer;
3385 buffer_size_ = desc.buffer_size;
3387 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
3388 reloc_info_writer.last_pc() + pc_delta);
3390 // Relocate internal references.
3391 for (RelocIterator it(desc); !it.done(); it.next()) {
3392 if (it.rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
3393 // Don't patch unbound internal references (bit 0 set); those are still
3394 // hooked up in the Label chain and will be automatically patched once
3395 // the label is bound.
3396 int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
3397 if ((*p & 1 * B0) == 0) *p += pc_delta;
3401 // Relocate pending relocation entries.
3402 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
3403 RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
3404 DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
3405 rinfo.rmode() != RelocInfo::POSITION);
3406 if (rinfo.rmode() != RelocInfo::JS_RETURN) {
3407 rinfo.set_pc(rinfo.pc() + pc_delta);
3410 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
3411 RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
3412 DCHECK(rinfo.rmode() == RelocInfo::NONE64);
3413 rinfo.set_pc(rinfo.pc() + pc_delta);
3415 constant_pool_builder_.Relocate(pc_delta);
3419 void Assembler::db(uint8_t data) {
3420 // No relocation info should be pending while using db. db is used
3421 // to write pure data with no pointers and the constant pool should
3422 // be emitted before using db.
3423 DCHECK(num_pending_32_bit_reloc_info_ == 0);
3424 DCHECK(num_pending_64_bit_reloc_info_ == 0);
3426 *reinterpret_cast<uint8_t*>(pc_) = data;
3427 pc_ += sizeof(uint8_t);
3431 void Assembler::dd(uint32_t data) {
3432 // No relocation info should be pending while using dd. dd is used
3433 // to write pure data with no pointers and the constant pool should
3434 // be emitted before using dd.
3435 DCHECK(num_pending_32_bit_reloc_info_ == 0);
3436 DCHECK(num_pending_64_bit_reloc_info_ == 0);
3438 *reinterpret_cast<uint32_t*>(pc_) = data;
3439 pc_ += sizeof(uint32_t);
3443 void Assembler::dd(Label* label) {
3445 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
3446 if (label->is_bound()) {
3447 uint32_t data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
3448 DCHECK_EQ(0u, data & 1 * B0);
3449 *reinterpret_cast<uint32_t*>(pc_) = data;
3450 pc_ += sizeof(uint32_t);
3453 if (label->is_linked()) {
3454 // Point to previous instruction that uses the link.
3455 target_pos = label->pos();
3457 // First entry of the link chain points to itself.
3458 target_pos = pc_offset();
3460 label->link_to(pc_offset());
3461 // Encode internal reference to unbound label. We set the least significant
3462 // bit to distinguish unbound internal references in GrowBuffer() below.
3463 int imm26 = target_pos - pc_offset();
3464 DCHECK_EQ(0, imm26 & 3);
3465 int imm24 = imm26 >> 2;
3466 DCHECK(is_int24(imm24));
3467 // We use bit pattern 0000111<imm24>1 because that doesn't match any branch
3468 // or load that would also appear on the label chain.
3469 emit(7 * B25 | ((imm24 & kImm24Mask) << 1) | 1 * B0);
3474 void Assembler::emit_code_stub_address(Code* stub) {
3476 *reinterpret_cast<uint32_t*>(pc_) =
3477 reinterpret_cast<uint32_t>(stub->instruction_start());
3478 pc_ += sizeof(uint32_t);
3482 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
3483 RelocInfo rinfo(pc_, rmode, data, NULL);
3484 RecordRelocInfo(rinfo);
3488 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
3489 if (!RelocInfo::IsNone(rinfo.rmode())) {
3490 // Don't record external references unless the heap will be serialized.
3491 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
3492 !serializer_enabled() && !emit_debug_code()) {
3495 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
3496 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
3497 RelocInfo reloc_info_with_ast_id(rinfo.pc(),
3499 RecordedAstId().ToInt(),
3501 ClearRecordedAstId();
3502 reloc_info_writer.Write(&reloc_info_with_ast_id);
3504 reloc_info_writer.Write(&rinfo);
3510 ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
3511 const RelocInfo& rinfo) {
3512 if (FLAG_enable_ool_constant_pool) {
3513 return constant_pool_builder_.AddEntry(this, rinfo);
3515 if (rinfo.rmode() == RelocInfo::NONE64) {
3516 DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
3517 if (num_pending_64_bit_reloc_info_ == 0) {
3518 first_const_pool_64_use_ = pc_offset();
3520 pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
3522 DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
3523 if (num_pending_32_bit_reloc_info_ == 0) {
3524 first_const_pool_32_use_ = pc_offset();
3526 pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
3528 // Make sure the constant pool is not emitted in place of the next
3529 // instruction for which we just recorded relocation info.
3530 BlockConstPoolFor(1);
3531 return ConstantPoolArray::SMALL_SECTION;
3536 void Assembler::BlockConstPoolFor(int instructions) {
3537 if (FLAG_enable_ool_constant_pool) {
3538 // Should be a no-op if using an out-of-line constant pool.
3539 DCHECK(num_pending_32_bit_reloc_info_ == 0);
3540 DCHECK(num_pending_64_bit_reloc_info_ == 0);
3544 int pc_limit = pc_offset() + instructions * kInstrSize;
3545 if (no_const_pool_before_ < pc_limit) {
3546 // Max pool start (if we need a jump and an alignment).
3548 int start = pc_limit + kInstrSize + 2 * kPointerSize;
3549 DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
3550 (start - first_const_pool_32_use_ +
3551 num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
3552 DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
3553 (start - first_const_pool_64_use_ < kMaxDistToFPPool));
3555 no_const_pool_before_ = pc_limit;
3558 if (next_buffer_check_ < no_const_pool_before_) {
3559 next_buffer_check_ = no_const_pool_before_;
3564 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
3565 if (FLAG_enable_ool_constant_pool) {
3566 // Should be a no-op if using an out-of-line constant pool.
3567 DCHECK(num_pending_32_bit_reloc_info_ == 0);
3568 DCHECK(num_pending_64_bit_reloc_info_ == 0);
3572 // Some short sequence of instruction mustn't be broken up by constant pool
3573 // emission, such sequences are protected by calls to BlockConstPoolFor and
3574 // BlockConstPoolScope.
3575 if (is_const_pool_blocked()) {
3576 // Something is wrong if emission is forced and blocked at the same time.
3577 DCHECK(!force_emit);
3581 // There is nothing to do if there are no pending constant pool entries.
3582 if ((num_pending_32_bit_reloc_info_ == 0) &&
3583 (num_pending_64_bit_reloc_info_ == 0)) {
3584 // Calculate the offset of the next check.
3585 next_buffer_check_ = pc_offset() + kCheckPoolInterval;
3589 // Check that the code buffer is large enough before emitting the constant
3590 // pool (include the jump over the pool and the constant pool marker and
3591 // the gap to the relocation information).
3592 int jump_instr = require_jump ? kInstrSize : 0;
3593 int size_up_to_marker = jump_instr + kInstrSize;
3594 int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
3595 bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
3596 bool require_64_bit_align = false;
3597 if (has_fp_values) {
3598 require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
3599 if (require_64_bit_align) {
3600 size_after_marker += kInstrSize;
3602 size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
3605 int size = size_up_to_marker + size_after_marker;
3607 // We emit a constant pool when:
3608 // * requested to do so by parameter force_emit (e.g. after each function).
3609 // * the distance from the first instruction accessing the constant pool to
3610 // any of the constant pool entries will exceed its limit the next
3611 // time the pool is checked. This is overly restrictive, but we don't emit
3612 // constant pool entries in-order so it's conservatively correct.
3613 // * the instruction doesn't require a jump after itself to jump over the
3614 // constant pool, and we're getting close to running out of range.
3616 DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
3617 bool need_emit = false;
3618 if (has_fp_values) {
3619 int dist64 = pc_offset() +
3621 num_pending_32_bit_reloc_info_ * kPointerSize -
3622 first_const_pool_64_use_;
3623 if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
3624 (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
3629 pc_offset() + size - first_const_pool_32_use_;
3630 if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
3631 (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
3634 if (!need_emit) return;
3637 int needed_space = size + kGap;
3638 while (buffer_space() <= needed_space) GrowBuffer();
3641 // Block recursive calls to CheckConstPool.
3642 BlockConstPoolScope block_const_pool(this);
3643 RecordComment("[ Constant Pool");
3644 RecordConstPool(size);
3646 // Emit jump over constant pool if necessary.
3652 // Put down constant pool marker "Undefined instruction".
3653 // The data size helps disassembly know what to print.
3654 emit(kConstantPoolMarker |
3655 EncodeConstantPoolLength(size_after_marker / kPointerSize));
3657 if (require_64_bit_align) {
3658 emit(kConstantPoolMarker);
3661 // Emit 64-bit constant pool entries first: their range is smaller than
3663 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
3664 RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
3666 DCHECK(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
3668 Instr instr = instr_at(rinfo.pc());
3669 // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
3670 DCHECK((IsVldrDPcImmediateOffset(instr) &&
3671 GetVldrDRegisterImmediateOffset(instr) == 0));
3673 int delta = pc_ - rinfo.pc() - kPcLoadDelta;
3674 DCHECK(is_uint10(delta));
3677 uint64_t value = rinfo.raw_data64();
3678 for (int j = 0; j < i; j++) {
3679 RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
3680 if (value == rinfo2.raw_data64()) {
3682 DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
3683 Instr instr2 = instr_at(rinfo2.pc());
3684 DCHECK(IsVldrDPcImmediateOffset(instr2));
3685 delta = GetVldrDRegisterImmediateOffset(instr2);
3686 delta += rinfo2.pc() - rinfo.pc();
3691 instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
3694 uint64_t uint_data = rinfo.raw_data64();
3695 emit(uint_data & 0xFFFFFFFF);
3696 emit(uint_data >> 32);
3700 // Emit 32-bit constant pool entries.
3701 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
3702 RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
3703 DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
3704 rinfo.rmode() != RelocInfo::POSITION &&
3705 rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
3706 rinfo.rmode() != RelocInfo::CONST_POOL &&
3707 rinfo.rmode() != RelocInfo::NONE64);
3709 Instr instr = instr_at(rinfo.pc());
3711 // 64-bit loads shouldn't get here.
3712 DCHECK(!IsVldrDPcImmediateOffset(instr));
3714 if (IsLdrPcImmediateOffset(instr) &&
3715 GetLdrRegisterImmediateOffset(instr) == 0) {
3716 int delta = pc_ - rinfo.pc() - kPcLoadDelta;
3717 DCHECK(is_uint12(delta));
3718 // 0 is the smallest delta:
3720 // constant pool marker
3724 if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
3725 for (int j = 0; j < i; j++) {
3726 RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
3728 if ((rinfo2.data() == rinfo.data()) &&
3729 (rinfo2.rmode() == rinfo.rmode())) {
3730 Instr instr2 = instr_at(rinfo2.pc());
3731 if (IsLdrPcImmediateOffset(instr2)) {
3732 delta = GetLdrRegisterImmediateOffset(instr2);
3733 delta += rinfo2.pc() - rinfo.pc();
3741 instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
3747 DCHECK(IsMovW(instr));
3751 num_pending_32_bit_reloc_info_ = 0;
3752 num_pending_64_bit_reloc_info_ = 0;
3753 first_const_pool_32_use_ = -1;
3754 first_const_pool_64_use_ = -1;
3758 if (after_pool.is_linked()) {
3763 // Since a constant pool was just emitted, move the check offset forward by
3764 // the standard interval.
3765 next_buffer_check_ = pc_offset() + kCheckPoolInterval;
3769 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
3770 if (!FLAG_enable_ool_constant_pool) {
3771 return isolate->factory()->empty_constant_pool_array();
3773 return constant_pool_builder_.New(isolate);
3777 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
3778 constant_pool_builder_.Populate(this, constant_pool);
3782 ConstantPoolBuilder::ConstantPoolBuilder()
3783 : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
3786 bool ConstantPoolBuilder::IsEmpty() {
3787 return entries_.size() == 0;
3791 ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
3792 RelocInfo::Mode rmode) {
3793 if (rmode == RelocInfo::NONE64) {
3794 return ConstantPoolArray::INT64;
3795 } else if (!RelocInfo::IsGCRelocMode(rmode)) {
3796 return ConstantPoolArray::INT32;
3797 } else if (RelocInfo::IsCodeTarget(rmode)) {
3798 return ConstantPoolArray::CODE_PTR;
3800 DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
3801 return ConstantPoolArray::HEAP_PTR;
3806 ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
3807 Assembler* assm, const RelocInfo& rinfo) {
3808 RelocInfo::Mode rmode = rinfo.rmode();
3809 DCHECK(rmode != RelocInfo::COMMENT &&
3810 rmode != RelocInfo::POSITION &&
3811 rmode != RelocInfo::STATEMENT_POSITION &&
3812 rmode != RelocInfo::CONST_POOL);
3814 // Try to merge entries which won't be patched.
3815 int merged_index = -1;
3816 ConstantPoolArray::LayoutSection entry_section = current_section_;
3817 if (RelocInfo::IsNone(rmode) ||
3818 (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
3820 std::vector<ConstantPoolEntry>::const_iterator it;
3821 for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
3822 if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
3823 // Merge with found entry.
3825 entry_section = entries_[i].section_;
3830 DCHECK(entry_section <= current_section_);
3831 entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
3833 if (merged_index == -1) {
3834 // Not merged, so update the appropriate count.
3835 number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
3838 // Check if we still have room for another entry in the small section
3839 // given Arm's ldr and vldr immediate offset range.
3840 if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
3841 !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
3842 is_uint10(ConstantPoolArray::MaxInt64Offset(
3843 small_entries()->count_of(ConstantPoolArray::INT64))))) {
3844 current_section_ = ConstantPoolArray::EXTENDED_SECTION;
3846 return entry_section;
3850 void ConstantPoolBuilder::Relocate(int pc_delta) {
3851 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
3852 entry != entries_.end(); entry++) {
3853 DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
3854 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
3859 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
3861 return isolate->factory()->empty_constant_pool_array();
3862 } else if (extended_entries()->is_empty()) {
3863 return isolate->factory()->NewConstantPoolArray(*small_entries());
3865 DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
3866 return isolate->factory()->NewExtendedConstantPoolArray(
3867 *small_entries(), *extended_entries());
3872 void ConstantPoolBuilder::Populate(Assembler* assm,
3873 ConstantPoolArray* constant_pool) {
3874 DCHECK_EQ(extended_entries()->is_empty(),
3875 !constant_pool->is_extended_layout());
3876 DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
3877 constant_pool, ConstantPoolArray::SMALL_SECTION)));
3878 if (constant_pool->is_extended_layout()) {
3879 DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
3880 constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
3883 // Set up initial offsets.
3884 int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
3885 [ConstantPoolArray::NUMBER_OF_TYPES];
3886 for (int section = 0; section <= constant_pool->final_section(); section++) {
3887 int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
3888 ? small_entries()->total_count()
3890 for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
3891 ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
3892 if (number_of_entries_[section].count_of(type) != 0) {
3893 offsets[section][type] = constant_pool->OffsetOfElementAt(
3894 number_of_entries_[section].base_of(type) + section_start);
3899 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
3900 entry != entries_.end(); entry++) {
3901 RelocInfo rinfo = entry->rinfo_;
3902 RelocInfo::Mode rmode = entry->rinfo_.rmode();
3903 ConstantPoolArray::Type type = GetConstantPoolType(rmode);
3905 // Update constant pool if necessary and get the entry's offset.
3907 if (entry->merged_index_ == -1) {
3908 offset = offsets[entry->section_][type];
3909 offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
3910 if (type == ConstantPoolArray::INT64) {
3911 constant_pool->set_at_offset(offset, rinfo.data64());
3912 } else if (type == ConstantPoolArray::INT32) {
3913 constant_pool->set_at_offset(offset,
3914 static_cast<int32_t>(rinfo.data()));
3915 } else if (type == ConstantPoolArray::CODE_PTR) {
3916 constant_pool->set_at_offset(offset,
3917 reinterpret_cast<Address>(rinfo.data()));
3919 DCHECK(type == ConstantPoolArray::HEAP_PTR);
3920 constant_pool->set_at_offset(offset,
3921 reinterpret_cast<Object*>(rinfo.data()));
3923 offset -= kHeapObjectTag;
3924 entry->merged_index_ = offset; // Stash offset for merged entries.
3926 DCHECK(entry->merged_index_ < (entry - entries_.begin()));
3927 offset = entries_[entry->merged_index_].merged_index_;
3930 // Patch vldr/ldr instruction with correct offset.
3931 Instr instr = assm->instr_at(rinfo.pc());
3932 if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
3933 if (CpuFeatures::IsSupported(ARMv7)) {
3934 // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
3935 Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
3936 DCHECK((Assembler::IsMovW(instr) &&
3937 Instruction::ImmedMovwMovtValue(instr) == 0));
3938 DCHECK((Assembler::IsMovT(next_instr) &&
3939 Instruction::ImmedMovwMovtValue(next_instr) == 0));
3941 rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
3943 rinfo.pc() + Assembler::kInstrSize,
3944 Assembler::PatchMovwImmediate(next_instr, offset >> 16));
3946 // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
3947 Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
3948 Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
3949 Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
3950 DCHECK((Assembler::IsMovImmed(instr) &&
3951 Instruction::Immed8Value(instr) == 0));
3952 DCHECK((Assembler::IsOrrImmed(instr_2) &&
3953 Instruction::Immed8Value(instr_2) == 0) &&
3954 Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
3955 DCHECK((Assembler::IsOrrImmed(instr_3) &&
3956 Instruction::Immed8Value(instr_3) == 0) &&
3957 Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
3958 DCHECK((Assembler::IsOrrImmed(instr_4) &&
3959 Instruction::Immed8Value(instr_4) == 0) &&
3960 Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
3962 rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
3964 rinfo.pc() + Assembler::kInstrSize,
3965 Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
3967 rinfo.pc() + 2 * Assembler::kInstrSize,
3968 Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
3970 rinfo.pc() + 3 * Assembler::kInstrSize,
3971 Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
3973 } else if (type == ConstantPoolArray::INT64) {
3974 // Instruction to patch must be 'vldr rd, [pp, #0]'.
3975 DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
3976 Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
3977 DCHECK(is_uint10(offset));
3978 assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
3981 // Instruction to patch must be 'ldr rd, [pp, #0]'.
3982 DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
3983 Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
3984 DCHECK(is_uint12(offset));
3986 rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
3992 } } // namespace v8::internal
3994 #endif // V8_TARGET_ARCH_ARM