// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_REG_STATICS

#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// CpuFeatures implementation.

void CpuFeatures::ProbeImpl(bool cross_compile) {
  // AArch64 has no configuration options, no further probing is required.
  supported_ = 0;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // Probe for runtime features.
  base::CPU cpu;
  if (cpu.implementer() == base::CPU::NVIDIA &&
      cpu.variant() == base::CPU::NVIDIA_DENVER) {
    supported_ |= 1u << COHERENT_CACHE;
  }
}
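
// Illustrative usage (a sketch, not part of the original file): generated code
// can key decisions off the probed feature bit, e.g.
//   if (CpuFeatures::IsSupported(COHERENT_CACHE)) {
//     // Cache maintenance can be relaxed on cores with coherent caches.
//   }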


void CpuFeatures::PrintTarget() { }


void CpuFeatures::PrintFeatures() {
  printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
}


// -----------------------------------------------------------------------------
// CPURegList utilities.

CPURegister CPURegList::PopLowestIndex() {
  DCHECK(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  DCHECK((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  DCHECK(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  DCHECK((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}
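
// Illustrative usage (sketch): a list is typically drained from one end, e.g.
//   CPURegList list = CPURegList::GetCalleeSaved();
//   while (!list.IsEmpty()) {
//     CPURegister reg = list.PopLowestIndex();  // x19, x20, ..., x29.
//     // ... process reg ...
//   }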


void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kFPRegister) {
    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
  } else {
    DCHECK(type() == CPURegister::kNoRegister);
    DCHECK(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}


CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
  return list;
}
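
// Illustrative usage (sketch, assuming the MacroAssembler push/pop helpers):
// these lists are usually consumed wholesale, e.g. preserving the caller-saved
// set around a runtime call:
//   __ PushCPURegList(CPURegList::GetCallerSaved());
//   // ... call ...
//   __ PopCPURegList(CPURegList::GetCallerSaved());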


// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the stack.
// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
// code to index in the safepoint register slots. Any change here can affect
// this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
  CPURegList list = CPURegList::GetCalleeSaved();
  list.Combine(
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));

  // Note that unfortunately we can't use symbolic names for registers and have
  // to directly use register codes. This is because this function is used to
  // initialize some static variables and we can't rely on register variables
  // to be initialized due to static initialization order issues in C++.

  // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
  // preserved outside of the macro assembler.
  list.Remove(16);
  list.Remove(17);

  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
  // is a caller-saved register according to the procedure call standard.
  list.Combine(18);

  // Drop jssp as the stack pointer doesn't need to be included.
  list.Remove(28);

  // Add the link register (x30) to the safepoint list.
  list.Combine(30);

  return list;
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on ARM64 means that it is a movz/movk sequence. We don't
  // generate those for relocatable pointers.
  return false;
}


bool RelocInfo::IsInConstantPool() {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX();
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  UNIMPLEMENTED();
}


Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                              Register reg3, Register reg4) {
  CPURegList regs(reg1, reg2, reg3, reg4);
  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs.IncludesAliasOf(candidate)) continue;
    return candidate;
  }
  UNREACHABLE();
  return NoReg;
}


bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < arraysize(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      DCHECK(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
      CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
      CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  DCHECK(number_of_valid_regs >= number_of_unique_regs);
  DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
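
// Illustrative usage (sketch): these helpers mostly back DCHECKs in the
// assembler, e.g.
//   DCHECK(!AreAliased(rd, rn, rm));      // no two valid registers alias
//   DCHECK(AreSameSizeAndType(rt, rt2));  // invalid (NoReg) slots are ignored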


bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  DCHECK(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}


void Immediate::InitializeHandle(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;

  // Verify all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    value_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
    value_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


bool Operand::NeedsRelocation(const Assembler* assembler) const {
  RelocInfo::Mode rmode = immediate_.rmode();

  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    return assembler->serializer_enabled();
  }

  return !RelocInfo::IsNone(rmode);
}


// Constant Pool.
void ConstPool::RecordEntry(intptr_t data,
                            RelocInfo::Mode mode) {
  DCHECK(mode != RelocInfo::COMMENT &&
         mode != RelocInfo::POSITION &&
         mode != RelocInfo::STATEMENT_POSITION &&
         mode != RelocInfo::CONST_POOL &&
         mode != RelocInfo::VENEER_POOL &&
         mode != RelocInfo::CODE_AGE_SEQUENCE &&
         mode != RelocInfo::DEOPT_REASON);
  uint64_t raw_data = static_cast<uint64_t>(data);
  int offset = assm_->pc_offset();
  if (IsEmpty()) {
    first_use_ = offset;
  }

  std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
  if (CanBeShared(mode)) {
    shared_entries_.insert(entry);
    if (shared_entries_.count(entry.first) == 1) {
      shared_entries_count++;
    }
  } else {
    unique_entries_.push_back(entry);
  }

  if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
    // Request constant pool emission after the next instruction.
    assm_->SetNextConstPoolCheckIn(1);
  }
}
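
// Sharing note (sketch): a raw 64-bit constant (RelocInfo::NONE64) is always
// shareable, so repeated loads of the same value patch to a single pool slot,
// while an entry recorded with the serializer enabled is kept unique so the
// snapshot sees one relocation per use.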


int ConstPool::DistanceToFirstUse() {
  DCHECK(first_use_ >= 0);
  return assm_->pc_offset() - first_use_;
}


int ConstPool::MaxPcOffset() {
  // There are no pending entries in the pool so we can never get out of
  // range.
  if (IsEmpty()) return kMaxInt;

  // Entries are not necessarily emitted in the order they are added so in the
  // worst case the first constant pool use will be accessing the last entry.
  return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
}


int ConstPool::WorstCaseSize() {
  if (IsEmpty()) return 0;

  // Max size prologue:
  //   b   over
  //   ldr xzr, #pool_size
  //   blr xzr
  //   nop
  // All entries are 64-bit for now.
  return 4 * kInstructionSize + EntryCount() * kPointerSize;
}


int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
  if (IsEmpty()) return 0;

  // Prologue is:
  //   b   over  ;; if require_jump
  //   ldr xzr, #pool_size
  //   blr xzr
  //   nop       ;; if not 64-bit aligned
  int prologue_size = require_jump ? kInstructionSize : 0;
  prologue_size += 2 * kInstructionSize;
  prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
                   0 : kInstructionSize;

  // All entries are 64-bit for now.
  return prologue_size + EntryCount() * kPointerSize;
}
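
// Worked example (sketch): with three pending 64-bit entries and require_jump
// set, the prologue is b + ldr + blr = 3 * kInstructionSize = 12 bytes, plus a
// nop iff (pc_offset() + 12) is not 8-byte aligned, so the pool occupies
// 12 (or 16) + 3 * kPointerSize = 36 (or 40) bytes.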


void ConstPool::Emit(bool require_jump) {
  DCHECK(!assm_->is_const_pool_blocked());
  // Prevent recursive pool emission and protect from veneer pools.
  Assembler::BlockPoolsScope block_pools(assm_);

  int size = SizeIfEmittedAtCurrentPc(require_jump);
  Label size_check;
  assm_->bind(&size_check);

  assm_->RecordConstPool(size);
  // Emit the constant pool. It is preceded by an optional branch if
  // require_jump and a header which will:
  //  1) Encode the size of the constant pool, for use by the disassembler.
  //  2) Terminate the program, to try to prevent execution from accidentally
  //     flowing into the constant pool.
  //  3) Align the pool entries to 64 bits.
  // The header is therefore made of up to three arm64 instructions:
  //   ldr xzr, #<size of the constant pool in 32-bit words>
  //   blr xzr
  //   nop
  //
  // If executed, the header will likely segfault and lr will point to the
  // instruction following the offending blr.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.

  // Emit branch if required.
  Label after_pool;
  if (require_jump) {
    assm_->b(&after_pool);
  }

  // Emit the header.
  assm_->RecordComment("[ Constant Pool");
  EmitMarker();
  EmitGuard();
  assm_->Align(8);

  // Emit constant pool entries.
  // TODO(all): currently each relocated constant is 64 bits, consider adding
  // support for 32-bit entries.
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) {
    assm_->bind(&after_pool);
  }

  DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
         static_cast<unsigned>(size));
}


void ConstPool::Clear() {
  shared_entries_.clear();
  shared_entries_count = 0;
  unique_entries_.clear();
  first_use_ = -1;
}


bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
  // Constant pool currently does not support 32-bit entries.
  DCHECK(mode != RelocInfo::NONE32);

  return RelocInfo::IsNone(mode) ||
         (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
}


void ConstPool::EmitMarker() {
  // A constant pool size is expressed in number of 32-bit words.
  // Currently all entries are 64-bit.
  // + 1 is for the crash guard.
  // + 0/1 for alignment.
  int word_count = EntryCount() * 2 + 1 +
                   (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
  assm_->Emit(LDR_x_lit |
              Assembler::ImmLLiteral(word_count) |
              Assembler::Rt(xzr));
}
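
// Worked example (sketch): three 64-bit entries at an 8-byte-aligned pc give
// word_count = 3 * 2 + 1 (guard) + 0 (alignment) = 7, encoded as the literal
// offset of the 'ldr xzr' marker.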


MemOperand::PairResult MemOperand::AreConsistentForPair(
    const MemOperand& operandA,
    const MemOperand& operandB,
    int access_size_log2) {
  DCHECK(access_size_log2 >= 0);
  DCHECK(access_size_log2 <= 3);
  // Step one: check that they share the same base, that the mode is Offset
  // and that the offset is a multiple of access size.
  if (!operandA.base().Is(operandB.base()) ||
      (operandA.addrmode() != Offset) ||
      (operandB.addrmode() != Offset) ||
      ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
    return kNotPair;
  }
  // Step two: check that the offsets are contiguous and that the range
  // is OK for ldp/stp.
  if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
      is_int7(operandA.offset() >> access_size_log2)) {
    return kPairAB;
  }
  if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
      is_int7(operandB.offset() >> access_size_log2)) {
    return kPairBA;
  }
  return kNotPair;
}
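
// Illustrative (sketch), for 64-bit accesses (access_size_log2 == 3):
//   MemOperand(x2, 8), MemOperand(x2, 16) -> kPairAB (foldable into ldp/stp)
//   MemOperand(x2, 16), MemOperand(x2, 8) -> kPairBA
//   MemOperand(x2, 8), MemOperand(x3, 16) -> kNotPair (different base regs)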


void ConstPool::EmitGuard() {
#ifdef DEBUG
  Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
  DCHECK(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());
#endif
  assm_->EmitPoolGuard();
}


void ConstPool::EmitEntries() {
  DCHECK(IsAligned(assm_->pc_offset(), 8));

  typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
  SharedEntriesIterator value_it;
  // Iterate through the keys (constant pool values).
  for (value_it = shared_entries_.begin();
       value_it != shared_entries_.end();
       value_it = shared_entries_.upper_bound(value_it->first)) {
    std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
    uint64_t data = value_it->first;
    range = shared_entries_.equal_range(data);
    SharedEntriesIterator offset_it;
    // Iterate through the offsets of a given key.
    for (offset_it = range.first; offset_it != range.second; offset_it++) {
      Instruction* instr = assm_->InstructionAt(offset_it->second);

      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
      DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
      instr->SetImmPCOffsetTarget(assm_->pc());
    }
    assm_->dc64(data);
  }
  shared_entries_.clear();
  shared_entries_count = 0;

  // Emit unique entries.
  std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
  for (unique_it = unique_entries_.begin();
       unique_it != unique_entries_.end();
       unique_it++) {
    Instruction* instr = assm_->InstructionAt(unique_it->second);

    // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
    DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
    instr->SetImmPCOffsetTarget(assm_->pc());
    assm_->dc64(unique_it->first);
  }
  unique_entries_.clear();
  first_use_ = -1;
}


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      constpool_(this),
      recorded_ast_id_(TypeFeedbackId::None()),
      unresolved_branches_(),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  veneer_pool_blocked_nesting_ = 0;
  Reset();
}


Assembler::~Assembler() {
  DCHECK(constpool_.IsEmpty());
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
}


void Assembler::Reset() {
#ifdef DEBUG
  DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
  DCHECK(unresolved_branches_.empty());
  memset(buffer_, 0, pc_ - buffer_);
#endif
  pc_ = buffer_;
  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                               reinterpret_cast<byte*>(pc_));
  constpool_.Clear();
  next_constant_pool_check_ = 0;
  next_veneer_pool_check_ = kMaxInt;
  no_const_pool_before_ = 0;
  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  reloc_info_writer.Finish();
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  DCHECK(constpool_.IsEmpty());

  // Set up code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                       reloc_info_writer.pos();
    desc->origin = this;
  }
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
  if (label->is_linked()) {
    static const int kMaxLinksToCheck = 64;  // Avoid O(n^2) behaviour.
    int links_checked = 0;
    int linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      if (++links_checked > kMaxLinksToCheck) break;
      Instruction * link = InstructionAt(linkoffset);
      int linkpcoffset = link->ImmPCOffset();
      int prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
#endif
}


void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  DCHECK(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  DCHECK(branch == link);
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it is the only branch
      // currently referring to this label.
      label->Unuse();
    } else {
      label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
    }

  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the chain.
    prev_link->SetImmPCOffsetTarget(prev_link);

  } else {
    // The branch is in the middle of the chain.
    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(next_link);
    } else if (label_veneer != NULL) {
      // Use the veneer for all previous links in the chain.
      prev_link->SetImmPCOffsetTarget(prev_link);

      end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(label_veneer);
        link = next_link;
      }
    } else {
      // The assert below will fire.
      // Some other work could be attempted to fix up the chain, but it would be
      // rather complicated. If we crash here, we may want to consider using an
      // other mechanism than a chain of branches.
      //
      // Note that this situation currently should not happen, as we always call
      // this function with a veneer to the target label.
      // However this could happen with a MacroAssembler in the following state:
      //   [previous code]
      //   B(label);
      //   [20KB code]
      //   Tbz(label);   // First tbz. Pointing to unconditional branch.
      //   [20KB code]
      //   Tbz(label);   // Second tbz. Pointing to the first tbz.
      //   [more code]
      // and this function is called to remove the first tbz from the label link
      // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
      // the unconditional branch.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
      UNREACHABLE();
    }
  }

  CheckLabelLinkChain(label);
}


void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.

  DCHECK(!label->is_near_linked());
  DCHECK(!label->is_bound());

  DeleteUnresolvedBranchInfoForLabel(label);

  // If the label is linked, the link chain looks something like this:
  //
  //   |--I----I-------I-------L
  //   |---------------------->| pc_offset
  //   |-------------->|         linkoffset = label->pos()
  //           |<------|         link->ImmPCOffset()
  //           |------>|         prevlinkoffset = linkoffset + link->ImmPCOffset()
  //
  // On each iteration, the last link is updated and then removed from the
  // chain until only one remains. At that point, the label is bound.
  //
  // If the label is not linked, no preparation is required before binding.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + link->ImmPCOffset();

    CheckLabelLinkChain(label);

    DCHECK(linkoffset >= 0);
    DCHECK(linkoffset < pc_offset());
    DCHECK((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    DCHECK(prevlinkoffset >= 0);

    // Update the link to point to the label.
    link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  DCHECK(label->is_bound());
  DCHECK(!label->is_linked());
}
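
// Illustrative label lifecycle (sketch):
//   Label done;
//   cbz(x0, &done);           // Forward use: emits cbz linked into the chain.
//   sub(x0, x0, Operand(1));
//   bind(&done);              // Patches every linked branch to target here.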


int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  DCHECK(sizeof(*pc_) == 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated. Referring
    // instructions must link directly to the label as they will not be
    // updated.
    //
    // In this case, label->pos() returns the offset of the label from the
    // start of the buffer.
    //
    // Note that offset can be zero for self-referential instructions. (This
    // could be useful for ADR, for example.)
    offset = label->pos() - pc_offset();
    DCHECK(offset <= 0);
  } else {
    if (label->is_linked()) {
      // The label is linked, so the referring instruction should be added onto
      // the end of the label's link chain.
      //
      // In this case, label->pos() returns the offset of the last linked
      // instruction from the start of the buffer.
      offset = label->pos() - pc_offset();
      DCHECK(offset != kStartOfLabelLinkChain);
      // Note that the offset here needs to be PC-relative only so that the
      // first instruction in a buffer can link to an unbound label. Otherwise,
      // the offset would be 0 for this case, and 0 is reserved for
      // kStartOfLabelLinkChain.
    } else {
      // The label is unused, so it now becomes linked and the referring
      // instruction is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}


void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
  DCHECK(label->is_linked());
  CheckLabelLinkChain(label);

  int link_offset = label->pos();
  int link_pcoffset;
  bool end_of_chain = false;

  while (!end_of_chain) {
    Instruction * link = InstructionAt(link_offset);
    link_pcoffset = link->ImmPCOffset();

    // ADR instructions are not handled by veneers.
    if (link->IsImmBranch()) {
      int max_reachable_pc = InstructionOffset(link) +
          Instruction::ImmBranchRange(link->BranchType());
      typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
      std::pair<unresolved_info_it, unresolved_info_it> range;
      range = unresolved_branches_.equal_range(max_reachable_pc);
      unresolved_info_it it;
      for (it = range.first; it != range.second; ++it) {
        if (it->second.pc_offset_ == link_offset) {
          unresolved_branches_.erase(it);
          break;
        }
      }
    }

    end_of_chain = (link_pcoffset == 0);
    link_offset = link_offset + link_pcoffset;
  }
}


void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  if (label->is_linked()) {
    // Branches to this label will be resolved when the label is bound, normally
    // just after all the associated info has been deleted.
    DeleteUnresolvedBranchInfoForLabelTraverse(label);
  }
  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
      unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks happening by setting the next check to
    // the biggest possible offset.
    next_constant_pool_check_ = kMaxInt;
  }
}


void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    DCHECK(pc_offset() < constpool_.MaxPcOffset());
    // Two cases:
    //  * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
    //    still blocked
    //  * no_const_pool_before_ < next_constant_pool_check_ and the next emit
    //    will trigger a check.
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}


bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is made of two instructions. These instructions
  // will never be emitted by the JIT, so checking for the first one is enough:
  // 0: ldr xzr, #<size of pool>
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());

  // It is still worth asserting the marker is complete.
  // 4: blr xzr
  DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == xzr.code()));

  return result;
}


int Assembler::ConstantPoolSizeAt(Instruction* instr) {
#ifdef USE_SIMULATOR
  // Assembler::debug() embeds constants directly into the instruction stream.
  // Although this is not a genuine constant pool, treat it like one to avoid
  // disassembling the constants.
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsDebug)) {
    const char* message =
        reinterpret_cast<const char*>(
            instr->InstructionAtOffset(kDebugMessageOffset));
    int size = kDebugMessageOffset + strlen(message) + 1;
    return RoundUp(size, kInstructionSize) / kInstructionSize;
  }
  // Same for printf support, see MacroAssembler::CallPrintf().
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsPrintf)) {
    return kPrintfLength / kInstructionSize;
  }
#endif
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return 0;
  }
}


void Assembler::EmitPoolGuard() {
  // We must generate only one instruction as this is used in scopes that
  // control the size of the code generated.
  Emit(BLR | Rn(xzr));
}


void Assembler::StartBlockVeneerPool() {
  ++veneer_pool_blocked_nesting_;
}


void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
    // Check the veneer pool hasn't been blocked for too long.
    DCHECK(unresolved_branches_.empty() ||
           (pc_offset() < unresolved_branches_first_limit()));
  }
}


void Assembler::br(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  DCHECK(!xn.Is(xzr));
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}


void Assembler::b(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label), cond);
}


void Assembler::bl(int imm26) {
  positions_recorder()->WriteRecordedPositions();
  Emit(BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  bl(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbz(const Register& rt,
                    int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbnz(const Register& rt,
                     int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    int imm14) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::adr(const Register& rd, int imm21) {
  DCHECK(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}


// Logical instructions.
void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}


void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}


void Assembler::tst(const Register& rn,
                    const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}


void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}


// Bitfield operations.
void Assembler::bfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  DCHECK(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::extr(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     unsigned lsb) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) |
       ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


void Assembler::cset(const Register &rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, NegateCondition(cond));
}


void Assembler::csetm(const Register &rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, NegateCondition(cond));
}


void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinc(rd, rn, rn, NegateCondition(cond));
}


void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinv(rd, rn, rn, NegateCondition(cond));
}


void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csneg(rd, rn, rn, NegateCondition(cond));
}
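
// Worked example (sketch): cset(x0, eq) expands to csinc(x0, xzr, xzr, ne),
// i.e. x0 = (ne) ? xzr : xzr + 1, which yields 1 when eq holds and 0 otherwise.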


void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}


void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MADD);
}


void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MSUB);
}


void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::smaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}


void Assembler::smsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}


void Assembler::umaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}


void Assembler::umsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}


void Assembler::smull(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  DCHECK(rd.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}


void Assembler::smulh(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}


void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd,
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd,
                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& rd,
                      const Register& rn) {
  DCHECK(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV);
}


void Assembler::rev(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}


void Assembler::cls(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}


void Assembler::ldp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}


void Assembler::stp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}


void Assembler::ldpsw(const Register& rt,
                      const Register& rt2,
                      const MemOperand& src) {
  DCHECK(rt.Is64Bits());
  LoadStorePair(rt, rt2, src, LDPSW_x);
}


void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,
                              const MemOperand& addr,
                              LoadStorePairOp op) {
  // 'rt' and 'rt2' can only be aliased for stores.
  DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
  DCHECK(AreSameSizeAndType(rt, rt2));

  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                ImmLSPair(addr.offset(), CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    // Pre-index and post-index modes.
    DCHECK(!rt.Is(addr.base()));
    DCHECK(!rt2.Is(addr.base()));
    DCHECK(addr.offset() != 0);
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      DCHECK(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}


void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src,
                           LoadPairNonTemporalOpFor(rt, rt2));
}


void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst,
                           StorePairNonTemporalOpFor(rt, rt2));
}


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  DCHECK(!rt.Is(rt2));
  DCHECK(AreSameSizeAndType(rt, rt2));
  DCHECK(addr.IsImmediateOffset());

  LSDataSize size = CalcLSPairDataSize(
      static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
       ImmLSPair(addr.offset(), size));
}


// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRB_w);
}


void Assembler::strb(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRB_w);
}


void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
}


void Assembler::ldrh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRH_w);
}


void Assembler::strh(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRH_w);
}


void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
}


void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, LoadOpFor(rt));
}


void Assembler::str(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, StoreOpFor(rt));
}


void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
  DCHECK(rt.Is64Bits());
  LoadStore(rt, src, LDRSW_x);
}


void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
  // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
  // constant pool. It should not be emitted.
  DCHECK(!rt.IsZero());
  Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
}


void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
  // Currently we only support 64-bit literals.
  DCHECK(rt.Is64Bits());

  RecordRelocInfo(imm.rmode(), imm.value());
  BlockConstPoolFor(1);
  // The load will be patched when the constpool is emitted; the patching code
  // expects a load literal with offset 0.
  ldr_pcrel(rt, 0);
}


void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as add immediate with
  // second operand of zero. Otherwise, orr with first operand zr is
  // used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}


void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}


void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  DCHECK(rt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}


void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  DCHECK(rt.Is64Bits());
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}


void Assembler::hint(SystemHint code) {
  Emit(HINT | ImmHint(code) | Rt(xzr));
}


void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}


void Assembler::fmov(FPRegister fd, double imm) {
  DCHECK(fd.Is64Bits());
  DCHECK(IsImmFP64(imm));
  Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
}


void Assembler::fmov(FPRegister fd, float imm) {
  DCHECK(fd.Is32Bits());
  DCHECK(IsImmFP32(imm));
  Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
}


void Assembler::fmov(Register rd, FPRegister fn) {
  DCHECK(rd.SizeInBits() == fn.SizeInBits());
  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
  Emit(op | Rd(rd) | Rn(fn));
}


void Assembler::fmov(FPRegister fd, Register rn) {
  DCHECK(fd.SizeInBits() == rn.SizeInBits());
  FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
  Emit(op | Rd(fd) | Rn(rn));
}


void Assembler::fmov(FPRegister fd, FPRegister fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
}


void Assembler::fadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FADD);
}


void Assembler::fsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FSUB);
}


void Assembler::fmul(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMUL);
}


void Assembler::fmadd(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}


void Assembler::fmsub(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}


void Assembler::fnmadd(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}


void Assembler::fnmsub(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}


void Assembler::fdiv(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FDIV);
}


void Assembler::fmax(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAX);
}


void Assembler::fmaxnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
}


void Assembler::fmin(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMIN);
}


void Assembler::fminnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMINNM);
}


void Assembler::fabs(const FPRegister& fd,
                     const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FABS);
}


void Assembler::fneg(const FPRegister& fd,
                     const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FNEG);
}


void Assembler::fsqrt(const FPRegister& fd,
                      const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FSQRT);
}


void Assembler::frinta(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTA);
}


void Assembler::frintm(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTM);
}


void Assembler::frintn(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTN);
}


void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTP);
}


void Assembler::frintz(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTZ);
}


void Assembler::fcmp(const FPRegister& fn,
                     const FPRegister& fm) {
  DCHECK(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}


void Assembler::fcmp(const FPRegister& fn,
                     double value) {
  USE(value);
  // Although the fcmp instruction can strictly only take an immediate value of
  // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
  // affect the result of the comparison.
  DCHECK(value == 0.0);
  Emit(FPType(fn) | FCMP_zero | Rn(fn));
}


void Assembler::fccmp(const FPRegister& fn,
                      const FPRegister& fm,
                      StatusFlags nzcv,
                      Condition cond) {
  DCHECK(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}


void Assembler::fcsel(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      Condition cond) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  DCHECK(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}


void Assembler::FPConvertToInt(const Register& rd,
                               const FPRegister& fn,
                               FPIntegerConvertOp op) {
  Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
}


void Assembler::fcvt(const FPRegister& fd,
                     const FPRegister& fn) {
  if (fd.Is64Bits()) {
    // Convert float to double.
    DCHECK(fn.Is32Bits());
    FPDataProcessing1Source(fd, fn, FCVT_ds);
  } else {
    // Convert double to float.
    DCHECK(fn.Is64Bits());
    FPDataProcessing1Source(fd, fn, FCVT_sd);
  }
}


void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAU);
}


void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAS);
}


void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMU);
}


void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMS);
}


void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNU);
}


void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNS);
}


void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZU);
}


void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZS);
}


void Assembler::scvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}


void Assembler::ucvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}


// Note:
// Below, a difference in case for the same letter indicates a
// negated bit.
// If b is 1, then B is 0.
Instr Assembler::ImmFP32(float imm) {
  DCHECK(IsImmFP32(imm));
  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}
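
// Worked example (sketch): for imm = 1.0f the raw bits are 0x3f800000, so
// bit7 = 0, bit6 = 0x40 and bit5_to_0 = 0x30, giving the 8-bit field 0x70,
// which matches the architectural FMOV encoding of +1.0.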


Instr Assembler::ImmFP64(double imm) {
  DCHECK(IsImmFP64(imm));
  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //       0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}


// Code generation helpers.
void Assembler::MoveWide(const Register& rd,
                         uint64_t imm,
                         int shift,
                         MoveWideImmediateOp mov_op) {
  // Ignore the top 32 bits of an immediate if we're moving to a W register.
  if (rd.Is32Bits()) {
    // Check that the top 32 bits are zero (a positive 32-bit number) or top
    // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
    DCHECK(((imm >> kWRegSizeInBits) == 0) ||
           ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
    imm &= kWRegMask;
  }

  if (shift >= 0) {
    // Explicit shift specified.
    DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
    DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // Calculate a new immediate and shift combination to encode the immediate
    // argument.
    shift = 0;
    if ((imm & ~0xffffUL) == 0) {
      // Nothing to do.
    } else if ((imm & ~(0xffffUL << 16)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & ~(0xffffUL << 32)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & ~(0xffffUL << 48)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  DCHECK(is_uint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
       Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
}
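
// Worked example (sketch): MoveWide(x0, 0xabcd0000, -1, MOVZ) detects that
// only halfword 1 is populated, so it emits 'movz x0, #0xabcd, lsl #16'
// (imm16 = 0xabcd, shift = 1).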


void Assembler::AddSub(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    DCHECK(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(immediate) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    DCHECK(operand.shift() != ROR);

    // For instructions of the form:
    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
    // or their 64-bit register equivalents, convert the operand from shifted to
    // extended register mode, and emit an add/sub extended instruction.
    if (rn.IsSP() || rd.IsSP()) {
      DCHECK(!(rd.IsSP() && (S == SetFlags)));
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    DCHECK(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}
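
// For instance (illustrative): add(x0, csp, Operand(x1)) enters the
// shifted-register path, but because rn is the stack pointer the operand is
// converted with ToExtendedRegister() and emitted in the extended form
// (ADD x0, csp, x1, UXTX #0); the shifted-register encoding cannot address SP.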


void Assembler::AddSubWithCarry(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == operand.reg().SizeInBits());
  DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  DCHECK(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::hlt(int code) {
  DCHECK(is_uint16(code));
  Emit(HLT | ImmException(code));
}


void Assembler::brk(int code) {
  DCHECK(is_uint16(code));
  Emit(BRK | ImmException(code));
}


void Assembler::EmitStringData(const char* string) {
  size_t len = strlen(string) + 1;
  DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
  EmitData(string, len);
  // Pad with NULL characters until pc_ is aligned.
  const char pad[] = {'\0', '\0', '\0', '\0'};
  STATIC_ASSERT(sizeof(pad) == kInstructionSize);
  EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
}
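
// For example (illustrative): a five-character string occupies six bytes
// including the terminating NULL, so if pc_ was aligned before the call it is
// now two bytes past a four-byte boundary, and two padding bytes are emitted
// to realign it.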


void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
  // Don't generate simulator specific code if we are building a snapshot, which
  // might be run on real hardware.
  if (!serializer_enabled()) {
    // The arguments to the debug marker need to be contiguous in memory, so
    // make sure we don't try to emit pools.
    BlockPoolsScope scope(this);

    Label start;
    bind(&start);

    // Refer to instructions-arm64.h for a description of the marker and its
    // arguments.
    hlt(kImmExceptionIsDebug);
    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
    dc32(code);
    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
    dc32(params);
    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
    EmitStringData(message);
    hlt(kImmExceptionIsUnreachable);

    return;
  }
  // Fall through if Serializer is enabled.
#endif

  if (params & BREAK) {
    hlt(kImmExceptionIsDebug);
  }
}


void Assembler::Logical(const Register& rd,
                        const Register& rn,
                        const Operand& operand,
                        LogicalOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    DCHECK(immediate != 0);
    DCHECK(immediate != -1);
    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else {
    DCHECK(operand.IsShiftedRegister());
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}


void Assembler::LogicalImmediate(const Register& rd,
                                 const Register& rn,
                                 unsigned n,
                                 unsigned imm_s,
                                 unsigned imm_r,
                                 LogicalOp op) {
  unsigned reg_size = rd.SizeInBits();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
       Rn(rn));
}


void Assembler::ConditionalCompare(const Register& rn,
                                   const Operand& operand,
                                   StatusFlags nzcv,
                                   Condition cond,
                                   ConditionalCompareOp op) {
  Instr ccmpop;
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    DCHECK(IsImmConditionalCompare(immediate));
    ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
  } else {
    DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
  }
  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}


void Assembler::DataProcessing1Source(const Register& rd,
                                      const Register& rn,
                                      DataProcessing1SourceOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}


void Assembler::FPDataProcessing1Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        FPDataProcessing1SourceOp op) {
  Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing2Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        FPDataProcessing2SourceOp op) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  DCHECK(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing3Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        const FPRegister& fa,
                                        FPDataProcessing3SourceOp op) {
  DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}


void Assembler::EmitShift(const Register& rd,
                          const Register& rn,
                          Shift shift,
                          unsigned shift_amount) {
  switch (shift) {
    case LSL:
      lsl(rd, rn, shift_amount);
      break;
    case LSR:
      lsr(rd, rn, shift_amount);
      break;
    case ASR:
      asr(rd, rn, shift_amount);
      break;
    case ROR:
      ror(rd, rn, shift_amount);
      break;
    default:
      UNREACHABLE();
  }
}


void Assembler::EmitExtendShift(const Register& rd,
                                const Register& rn,
                                Extend extend,
                                unsigned left_shift) {
  DCHECK(rd.SizeInBits() >= rn.SizeInBits());
  unsigned reg_size = rd.SizeInBits();
  // Use the correct size of register.
  Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
  // Bits extracted are high_bit:0.
  unsigned high_bit = (8 << (extend & 0x3)) - 1;
  // Number of bits left in the result that are not introduced by the shift.
  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
    switch (extend) {
      case UXTB:
      case UXTH:
      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
      case SXTB:
      case SXTH:
      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
      case UXTX:
      case SXTX: {
        DCHECK(rn.SizeInBits() == kXRegSizeInBits);
        // Nothing to extend. Just shift.
        lsl(rd, rn_, left_shift);
        break;
      }
      default: UNREACHABLE();
    }
  } else {
    // No need to extend as the extended bits would be shifted away.
    lsl(rd, rn_, left_shift);
  }
}


void Assembler::DataProcShiftedRegister(const Register& rd,
                                        const Register& rn,
                                        const Operand& operand,
                                        FlagsUpdate S,
                                        Instr op) {
  DCHECK(operand.IsShiftedRegister());
  DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  DCHECK(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) |
       ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
       Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::DataProcExtendedRegister(const Register& rd,
                                         const Register& rn,
                                         const Operand& operand,
                                         FlagsUpdate S,
                                         Instr op) {
  DCHECK(!operand.NeedsRelocation(this));
  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
       dest_reg | RnSP(rn));
}


bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}
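
// Illustrative examples: 0xfff is encodable directly (it fits in 12 bits);
// 0x123000 is encodable as 0x123 with the optional 12-bit left shift; but
// 0x1001 is not encodable, since it needs more than 12 bits and its low
// 12 bits are non-zero.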


void Assembler::LoadStore(const CPURegister& rt,
                          const MemOperand& addr,
                          LoadStoreOp op) {
  Instr memop = op | Rt(rt) | RnSP(addr.base());
  int64_t offset = addr.offset();

  if (addr.IsImmediateOffset()) {
    LSDataSize size = CalcLSDataSize(op);
    if (IsImmLSScaled(offset, size)) {
      // Use the scaled addressing mode.
      Emit(LoadStoreUnsignedOffsetFixed | memop |
           ImmLSUnsigned(offset >> size));
    } else if (IsImmLSUnscaled(offset)) {
      // Use the unscaled addressing mode.
      Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    // LSL is encoded in the option field as UXTX.
    if (shift == LSL) {
      ext = UXTX;
    }

    // Shifts are encoded in one bit, indicating a left shift by the memory
    // access size.
    DCHECK((shift_amount == 0) ||
           (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
    Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
         ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
  } else {
    // Pre-index and post-index modes.
    DCHECK(!rt.Is(addr.base()));
    if (IsImmLSUnscaled(offset)) {
      if (addr.IsPreIndex()) {
        Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
      } else {
        DCHECK(addr.IsPostIndex());
        Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
      }
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  }
}


bool Assembler::IsImmLSUnscaled(int64_t offset) {
  return is_int9(offset);
}


bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_uint12(offset >> size);
}


bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_int7(offset >> size);
}
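
// Worked example (for illustration): for a 64-bit load, size == 3. An offset
// of 1016 is a multiple of 8 with 1016 >> 3 == 127, which fits in the
// unsigned 12-bit field, so LoadStore can use the scaled (LDR) form. An
// offset of 4 is not a multiple of 8, but fits in the signed 9-bit field, so
// LoadStore falls back to the unscaled (LDUR) form.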


// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
// imm_s and imm_r are updated with immediates encoded in the format required
// by the corresponding fields in the logical instruction.
// If it cannot be encoded, the function returns false, and the values pointed
// to by n, imm_s and imm_r are undefined.
bool Assembler::IsImmLogical(uint64_t value,
                             unsigned width,
                             unsigned* n,
                             unsigned* imm_s,
                             unsigned* imm_r) {
  DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
  DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));

  bool negate = false;

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //   (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1 bits
  // are set. The pattern is rotated right by R, and repeated across a 32 or
  // 64-bit value, depending on destination register width.
  //
  // Put another way: the basic format of a logical immediate is a single
  // contiguous stretch of 1 bits, repeated across the whole word at intervals
  // given by a power of 2. To identify them quickly, we first locate the
  // lowest stretch of 1 bits, then the next 1 bit above that; that combination
  // is different for every logical immediate, so it gives us all the
  // information we need to identify the only logical immediate that our input
  // could be, and then we simply check if that's the value we actually have.
  //
  // (The rotation parameter does give the possibility of the stretch of 1 bits
  // going 'round the end' of the word. To deal with that, we observe that in
  // any situation where that happens the bitwise NOT of the value is also a
  // valid logical immediate. So we simply invert the input whenever its low bit
  // is set, and then we know that the rotated case can't arise.)

  // If the low bit is 1, negate the value, and set a flag to remember that we
  // did (so that we can adjust the return values appropriately).
  if (value & 1) {
    negate = true;
    value = ~value;
  }

  if (width == kWRegSizeInBits) {
    // To handle 32-bit logical immediates, the very easiest thing is to repeat
    // the input value twice to make a 64-bit word. The correct encoding of that
    // as a logical immediate will also be the correct encoding of the 32-bit
    // value.

    // The most-significant 32 bits may not be zero (i.e. negate is true) so
    // shift the value left before duplicating it.
    value <<= kWRegSizeInBits;
    value |= value >> kWRegSizeInBits;
  }

  // The basic analysis idea: imagine our input word looks like this.
  //
  //    0011111000111110001111100011111000111110001111100011111000111110
  //                                                          c  b    a
  //           |<--d-->|
  //
  // We find the lowest set bit (as an actual power-of-2 value, not its index)
  // and call it a. Then we add a to our original number, which wipes out the
  // bottommost stretch of set bits and replaces it with a 1 carried into the
  // next zero bit. Then we look for the new lowest set bit, which is in
  // position b, and subtract it, so now our number is just like the original
  // but with the lowest stretch of set bits completely gone. Now we find the
  // lowest set bit again, which is position c in the diagram above. Then we'll
  // measure the distance d between bit positions a and c (using CLZ), and that
  // tells us that the only valid logical immediate that could possibly be equal
  // to this number is the one in which a stretch of bits running from a to just
  // below b is replicated every d bits.
  uint64_t a = LargestPowerOf2Divisor(value);
  uint64_t value_plus_a = value + a;
  uint64_t b = LargestPowerOf2Divisor(value_plus_a);
  uint64_t value_plus_a_minus_b = value_plus_a - b;
  uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);
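
  // Worked example (for illustration): for value == 0x0f0f0f0f0f0f0f0f,
  // a == 1, value_plus_a == 0x0f0f0f0f0f0f0f10, b == 0x10, and
  // value_plus_a_minus_b == 0x0f0f0f0f0f0f0f00, so c == 0x100. The repeat
  // distance computed below is then d == 63 - 55 == 8.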

  int d, clz_a, out_n;
  uint64_t mask;

  if (c != 0) {
    // The general case, in which there is more than one stretch of set bits.
    // Compute the repeat distance d, and set up a bitmask covering the basic
    // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
    // of these cases the N bit of the output will be zero.
    clz_a = CountLeadingZeros(a, kXRegSizeInBits);
    int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
    d = clz_a - clz_c;
    mask = ((V8_UINT64_C(1) << d) - 1);
    out_n = 0;
  } else {
    // Handle degenerate cases.
    //
    // If any of those 'find lowest set bit' operations didn't find a set bit at
    // all, then the word will have been zero thereafter, so in particular the
    // last lowest_set_bit operation will have returned zero. So we can test for
    // all the special case conditions in one go by seeing if c is zero.
    if (a == 0) {
      // The input was zero (or all 1 bits, which will come to here too after we
      // inverted it at the start of the function), for which we just return
      // false.
      return false;
    } else {
      // Otherwise, if c was zero but a was not, then there's just one stretch
      // of set bits in our word, meaning that we have the trivial case of
      // d == 64 and only one 'repetition'. Set up all the same variables as in
      // the general case above, and set the N bit in the output.
      clz_a = CountLeadingZeros(a, kXRegSizeInBits);
      d = 64;
      mask = ~V8_UINT64_C(0);
      out_n = 1;
    }
  }

  // If the repeat period d is not a power of two, it can't be encoded.
  if (!IS_POWER_OF_TWO(d)) {
    return false;
  }

  if (((b - a) & ~mask) != 0) {
    // If the bit stretch (b - a) does not fit within the mask derived from the
    // repeat period, then fail.
    return false;
  }

  // The only possible option is b - a repeated every d bits. Now we're going to
  // actually construct the valid logical immediate derived from that
  // specification, and see if it equals our original input.
  //
  // To repeat a value every d bits, we multiply it by a number of the form
  // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
  // be derived using a table lookup on CLZ(d).
  static const uint64_t multipliers[] = {
    0x0000000000000001UL,
    0x0000000100000001UL,
    0x0001000100010001UL,
    0x0101010101010101UL,
    0x1111111111111111UL,
    0x5555555555555555UL,
  };
  int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
  // Ensure that the index to the multipliers array is within bounds.
  DCHECK((multiplier_idx >= 0) &&
         (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
  uint64_t multiplier = multipliers[multiplier_idx];
  uint64_t candidate = (b - a) * multiplier;

  if (value != candidate) {
    // The candidate pattern doesn't match our input value, so fail.
    return false;
  }

  // We have a match! This is a valid logical immediate, so now we have to
  // construct the bits and pieces of the instruction encoding that generates
  // it.

  // Count the set bits in our basic stretch. The special case of clz(0) == -1
  // makes the answer come out right for stretches that reach the very top of
  // the word (e.g. numbers like 0xffffc00000000000).
  int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
  int s = clz_a - clz_b;

  // Decide how many bits to rotate right by, to put the low bit of that basic
  // stretch in position a.
  int r;
  if (negate) {
    // If we inverted the input right at the start of this function, here's
    // where we compensate: the number of set bits becomes the number of clear
    // bits, and the rotation count is based on position b rather than position
    // a (since b is the location of the 'lowest' 1 bit after inversion).
    s = d - s;
    r = (clz_b + 1) & (d - 1);
  } else {
    r = (clz_a + 1) & (d - 1);
  }

  // Now we're done, except for having to encode the S output in such a way that
  // it gives both the number of set bits and the length of the repeated
  // segment. The s field is encoded like this:
  //
  //     imms    size        S
  //     ssssss    64    UInt(ssssss)
  //     0sssss    32    UInt(sssss)
  //     10ssss    16    UInt(ssss)
  //     110sss     8    UInt(sss)
  //     1110ss     4    UInt(ss)
  //     11110s     2    UInt(s)
  //
  // So we 'or' (-d << 1) with our computed s to form imms.
  *n = out_n;
  *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
  *imm_r = r;

  return true;
}
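
// Continuing the worked example above (illustrative): for
// 0x0f0f0f0f0f0f0f0f the candidate is (b - a) * multipliers[3], i.e.
// 0xf * 0x0101010101010101 == 0x0f0f0f0f0f0f0f0f, which matches the input.
// With clz_b == 59 we get s == 4 and r == (63 + 1) & 7 == 0, so the outputs
// are n == 0, imm_s == ((-8 << 1) | 3) & 0x3f == 0x33 (the '110sss' row for
// size 8), and imm_r == 0.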


bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return is_uint5(immediate);
}


bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form:
  // aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bits[19..0] are cleared.
  if ((bits & 0x7ffff) != 0) {
    return false;
  }

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {
    return false;
  }

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }

  return true;
}
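
// Illustrative examples: 1.5f (0x3fc00000) passes all three checks and is
// encodable (its FMOV immediate is 0x78), while 0.1f (0x3dcccccd) is rejected
// by the first check because its low 19 bits are non-zero.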


bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  // 0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bits[47..0] are cleared.
  if ((bits & 0xffffffffffffL) != 0) {
    return false;
  }

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0) {
    return false;
  }

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
    return false;
  }

  return true;
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  byte* buffer = reinterpret_cast<byte*>(buffer_);

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer + buffer_size_);
  memmove(desc.buffer, buffer, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Pending relocation entries are also relative, no need to relocate.
}
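
// Illustrative growth sequence: a 256KB buffer grows to 512KB, then 1MB;
// from 1MB onwards growth is linear, to 2MB, 3MB, and so on. Doubling keeps
// reallocation cost amortized for small buffers, while the linear step
// presumably avoids overshooting for already-large code objects.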


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL) ||
      (rmode == RelocInfo::VENEER_POOL) ||
      (rmode == RelocInfo::DEOPT_REASON)) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsDeoptReason(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode)
           || RelocInfo::IsVeneerPool(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    constpool_.RecordEntry(data, rmode);
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
  }

  if (!RelocInfo::IsNone(rmode)) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(
          reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstructionSize;
  if (no_const_pool_before_ < pc_limit) {
    no_const_pool_before_ = pc_limit;
    // Make sure the pool won't be blocked for too long.
    DCHECK(pc_limit < constpool_.MaxPcOffset());
  }

  if (next_constant_pool_check_ < no_const_pool_before_) {
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions mustn't be broken up by constant pool
  // emission; such sequences are protected by calls to BlockConstPoolFor and
  // BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (constpool_.IsEmpty()) {
    // Calculate the offset of the next check.
    SetNextConstPoolCheckIn(kCheckConstPoolInterval);
    return;
  }

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance to the first instruction accessing the constant pool is
  //    kApproxMaxDistToConstPool or more.
  //  * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
  int dist = constpool_.DistanceToFirstUse();
  int count = constpool_.EntryCount();
  if (!force_emit &&
      (dist < kApproxMaxDistToConstPool) &&
      (count < kApproxMaxPoolEntryCount)) {
    return;
  }

  // Emit veneers for branches that would go out of range during emission of the
  // constant pool.
  int worst_case_size = constpool_.WorstCaseSize();
  CheckVeneerPool(false, require_jump,
                  kVeneerDistanceMargin + worst_case_size);

  // Check that the code buffer is large enough before emitting the constant
  // pool (this includes the gap to the relocation information).
  int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
  while (buffer_space() <= needed_space) {
    GrowBuffer();
  }

  Label size_check;
  bind(&size_check);
  constpool_.Emit(require_jump);
  DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
         static_cast<unsigned>(worst_case_size));

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}


bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
  // Account for the branch around the veneers and the guard.
  int protection_offset = 2 * kInstructionSize;
  return pc_offset() > max_reachable_pc - margin - protection_offset -
         static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
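
// For example (illustrative): with two unresolved branches pending, the
// threshold moves 2 * kMaxVeneerCodeSize bytes earlier, since each pending
// branch may itself emit up to kMaxVeneerCodeSize bytes of veneer code ahead
// of the veneer that must stay within the branch's range.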


void Assembler::RecordVeneerPool(int location_offset, int size) {
  RelocInfo rinfo(buffer_ + location_offset,
                  RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
                  NULL);
  reloc_info_writer.Write(&rinfo);
}


void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
  BlockPoolsScope scope(this);
  RecordComment("[ Veneers");

  // The exact size of the veneer pool must be recorded (see the comment at the
  // declaration site of RecordConstPool()), but computing the number of
  // veneers that will be generated is not obvious. So instead we remember the
  // current position and will record the size after the pool has been
  // generated.
  Label size_check;
  bind(&size_check);
  int veneer_pool_relocinfo_loc = pc_offset();

  Label end;
  if (need_protection) {
    b(&end);
  }

  EmitVeneersGuard();

  Label veneer_size_check;

  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;

  it = unresolved_branches_.begin();
  while (it != unresolved_branches_.end()) {
    if (force_emit || ShouldEmitVeneer(it->first, margin)) {
      Instruction* branch = InstructionAt(it->second.pc_offset_);
      Label* label = it->second.label_;

#ifdef DEBUG
      bind(&veneer_size_check);
#endif
      // Patch the branch to point to the current position, and emit a branch
      // to the label.
      Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
      RemoveBranchFromLabelLinkChain(branch, label, veneer);
      branch->SetImmPCOffsetTarget(veneer);
      b(label);
#ifdef DEBUG
      DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
             static_cast<uint64_t>(kMaxVeneerCodeSize));
      veneer_size_check.Unuse();
#endif

      it_to_delete = it++;
      unresolved_branches_.erase(it_to_delete);
    } else {
      it++;
    }
  }

  // Record the veneer pool size.
  int pool_size = SizeOfCodeGeneratedSince(&size_check);
  RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);

  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }

  bind(&end);

  RecordComment("]");
}


void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
                                int margin) {
  // There is nothing to do if there are no pending veneer pool entries.
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  DCHECK(pc_offset() < unresolved_branches_first_limit());

  // Some short sequences of instructions mustn't be broken up by veneer pool
  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
  // BlockVeneerPoolScope.
  if (is_veneer_pool_blocked()) {
    DCHECK(!force_emit);
    return;
  }

  if (!require_jump) {
    // Prefer emitting veneers protected by an existing instruction.
    margin *= kVeneerNoProtectionFactor;
  }
  if (force_emit || ShouldEmitVeneers(margin)) {
    EmitVeneers(force_emit, require_jump, margin);
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


int Assembler::buffer_space() const {
  return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in the
  // code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}


Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
}


void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
  // The code at the current instruction should be:
  //   adr  rd, 0
  //   nop  (adr_far)
  //   nop  (adr_far)
  //   movz scratch, 0

  // Verify the expected code.
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
  int rd_code = expected_adr->Rd();
  for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
    CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
  }
  Instruction* expected_movz =
      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
  CHECK(expected_movz->IsMovz() &&
        (expected_movz->ImmMoveWide() == 0) &&
        (expected_movz->ShiftMoveWide() == 0));
  int scratch_code = expected_movz->Rd();

  // Patch to load the correct address.
  Register rd = Register::XRegFromCode(rd_code);
  Register scratch = Register::XRegFromCode(scratch_code);
  // Addresses are only 48 bits.
  adr(rd, target_offset & 0xFFFF);
  movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
  movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
  DCHECK((target_offset >> 48) == 0);
  add(rd, rd, scratch);
}
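
// Worked example (for illustration): for target_offset == 0x123456789abc the
// patched sequence is adr rd, #0x9abc; movz scratch, #0x5678, lsl #16;
// movk scratch, #0x1234, lsl #32; add rd, rd, scratch, which reassembles the
// full 48-bit offset relative to the adr's pc.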


} }  // namespace v8::internal


#endif  // V8_TARGET_ARCH_ARM64