// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_

#include "arm64/assembler-arm64.h"

namespace v8 {
namespace internal {


void RelocInfo::apply(intptr_t delta) {
  UNREACHABLE();
}


void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  Assembler::set_target_address_at(pc_, host_, target);
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


inline unsigned CPURegister::code() const {
  ASSERT(IsValid());
  return reg_code;
}


inline CPURegister::RegisterType CPURegister::type() const {
  ASSERT(IsValidOrNone());
  return reg_type;
}


inline RegList CPURegister::Bit() const {
  ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
  return IsValid() ? 1UL << reg_code : 0;
}


inline unsigned CPURegister::SizeInBits() const {
  ASSERT(IsValid());
  return reg_size;
}


inline int CPURegister::SizeInBytes() const {
  ASSERT(IsValid());
  ASSERT(SizeInBits() % 8 == 0);
  return reg_size / 8;
}


inline bool CPURegister::Is32Bits() const {
  ASSERT(IsValid());
  return reg_size == 32;
}


inline bool CPURegister::Is64Bits() const {
  ASSERT(IsValid());
  return reg_size == 64;
}


inline bool CPURegister::IsValid() const {
  if (IsValidRegister() || IsValidFPRegister()) {
    ASSERT(!IsNone());
    return true;
  } else {
    ASSERT(IsNone());
    return false;
  }
}


inline bool CPURegister::IsValidRegister() const {
  return IsRegister() &&
         ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
         ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}


inline bool CPURegister::IsValidFPRegister() const {
  return IsFPRegister() &&
         ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
         (reg_code < kNumberOfFPRegisters);
}


inline bool CPURegister::IsNone() const {
  // kNoRegister types should always have size 0 and code 0.
  ASSERT((reg_type != kNoRegister) || (reg_code == 0));
  ASSERT((reg_type != kNoRegister) || (reg_size == 0));

  return reg_type == kNoRegister;
}


inline bool CPURegister::Is(const CPURegister& other) const {
  ASSERT(IsValidOrNone() && other.IsValidOrNone());
  return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
         (reg_type == other.reg_type);
}


inline bool CPURegister::IsRegister() const {
  return reg_type == kRegister;
}


inline bool CPURegister::IsFPRegister() const {
  return reg_type == kFPRegister;
}


inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
  return (reg_size == other.reg_size) && (reg_type == other.reg_type);
}


inline bool CPURegister::IsValidOrNone() const {
  return IsValid() || IsNone();
}


inline bool CPURegister::IsZero() const {
  ASSERT(IsValid());
  return IsRegister() && (reg_code == kZeroRegCode);
}


inline bool CPURegister::IsSP() const {
  ASSERT(IsValid());
  return IsRegister() && (reg_code == kSPRegInternalCode);
}
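

// Note: in this scheme, register code 31 names the zero register (xzr/wzr,
// kZeroRegCode), while the stack pointer is told apart by the out-of-band
// code kSPRegInternalCode. An illustrative (hypothetical) use:
//
//   Register r = Register::Create(kZeroRegCode, kXRegSizeInBits);
//   r.IsZero();  // true: this is xzr.
//   r.IsSP();    // false: the stack pointer has a different internal code.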


inline void CPURegList::Combine(const CPURegList& other) {
  ASSERT(IsValid());
  ASSERT(other.type() == type_);
  ASSERT(other.RegisterSizeInBits() == size_);
  list_ |= other.list();
}


inline void CPURegList::Remove(const CPURegList& other) {
  ASSERT(IsValid());
  if (other.type() == type_) {
    list_ &= ~other.list();
  }
}


inline void CPURegList::Combine(const CPURegister& other) {
  ASSERT(other.type() == type_);
  ASSERT(other.SizeInBits() == size_);
  Combine(other.code());
}


inline void CPURegList::Remove(const CPURegister& other1,
                               const CPURegister& other2,
                               const CPURegister& other3,
                               const CPURegister& other4) {
  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}


inline void CPURegList::Combine(int code) {
  ASSERT(IsValid());
  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
  list_ |= (1UL << code);
}


inline void CPURegList::Remove(int code) {
  ASSERT(IsValid());
  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
  list_ &= ~(1UL << code);
}
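

// For example, the list operations above reduce to single-bit updates of the
// underlying RegList bit vector (a sketch; list construction elided):
//
//   list.Combine(0);   // list_ |= (1UL << 0)
//   list.Combine(30);  // list_ |= (1UL << 30)
//   list.Remove(30);   // list_ &= ~(1UL << 30)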


inline Register Register::XRegFromCode(unsigned code) {
  // This function returns the zero register when code == 31. The stack
  // pointer cannot be returned.
  ASSERT(code < kNumberOfRegisters);
  return Register::Create(code, kXRegSizeInBits);
}


inline Register Register::WRegFromCode(unsigned code) {
  ASSERT(code < kNumberOfRegisters);
  return Register::Create(code, kWRegSizeInBits);
}


inline FPRegister FPRegister::SRegFromCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return FPRegister::Create(code, kSRegSizeInBits);
}


inline FPRegister FPRegister::DRegFromCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return FPRegister::Create(code, kDRegSizeInBits);
}


inline Register CPURegister::W() const {
  ASSERT(IsValidRegister());
  return Register::WRegFromCode(reg_code);
}


inline Register CPURegister::X() const {
  ASSERT(IsValidRegister());
  return Register::XRegFromCode(reg_code);
}


inline FPRegister CPURegister::S() const {
  ASSERT(IsValidFPRegister());
  return FPRegister::SRegFromCode(reg_code);
}


inline FPRegister CPURegister::D() const {
  ASSERT(IsValidFPRegister());
  return FPRegister::DRegFromCode(reg_code);
}


template<typename T>
Operand::Operand(Handle<T> value) : reg_(NoReg) {
  initialize_handle(value);
}


// Default initializer is for int types.
template<typename int_t>
struct OperandInitializer {
  static const bool kIsIntType = true;
  static inline RelocInfo::Mode rmode_for(int_t) {
    return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
  }
  static inline int64_t immediate_for(int_t t) {
    STATIC_ASSERT(sizeof(int_t) <= 8);
    return t;
  }
};


template<>
struct OperandInitializer<Smi*> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(Smi* t) {
    return RelocInfo::NONE64;
  }
  static inline int64_t immediate_for(Smi* t) {
    return reinterpret_cast<int64_t>(t);
  }
};


template<>
struct OperandInitializer<ExternalReference> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
    return RelocInfo::EXTERNAL_REFERENCE;
  }
  static inline int64_t immediate_for(ExternalReference t) {
    return reinterpret_cast<int64_t>(t.address());
  }
};


template<typename T>
Operand::Operand(T t)
    : immediate_(OperandInitializer<T>::immediate_for(t)),
      reg_(NoReg),
      rmode_(OperandInitializer<T>::rmode_for(t)) {}
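

// Illustrative sketch of how the initializer machinery above picks the
// relocation mode from the operand's static type:
//
//   Operand(42);                             // 4-byte int -> NONE32.
//   Operand(static_cast<int64_t>(1) << 40);  // 8-byte int -> NONE64.
//   Operand(Smi::FromInt(7));                // Smi* -> NONE64; the immediate
//                                            // is the tagged Smi value.
//   // An ExternalReference operand records EXTERNAL_REFERENCE and uses the
//   // referenced address as the immediate.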


template<typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
    : immediate_(OperandInitializer<T>::immediate_for(t)),
      reg_(NoReg),
      rmode_(rmode) {
  STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
}


Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount),
      rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
  ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
  ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
  ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount),
      rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
  ASSERT(reg.IsValid());
  ASSERT(shift_amount <= 4);
  ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const {
  return reg_.Is(NoReg);
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return immediate() == 0;
  } else {
    return reg().IsZero();
  }
}


Operand Operand::ToExtendedRegister() const {
  ASSERT(IsShiftedRegister());
  ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
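

// For example, Operand(x0, LSL, 2).ToExtendedRegister() yields
// Operand(x0, UXTX, 2): a left shift of up to four can be re-expressed as a
// (no-op) zero-extend plus shift. This extended form matters because, on
// ARM64, arithmetic instructions taking an extended second operand can use
// the stack pointer, while the shifted-register form cannot.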


int64_t Operand::immediate() const {
  ASSERT(IsImmediate());
  return immediate_;
}


Register Operand::reg() const {
  ASSERT(IsShiftedRegister() || IsExtendedRegister());
  return reg_;
}


Shift Operand::shift() const {
  ASSERT(IsShiftedRegister());
  return shift_;
}


Extend Operand::extend() const {
  ASSERT(IsExtendedRegister());
  return extend_;
}


unsigned Operand::shift_amount() const {
  ASSERT(IsShiftedRegister() || IsExtendedRegister());
  return shift_amount_;
}


Operand Operand::UntagSmi(Register smi) {
  ASSERT(smi.Is64Bits());
  return Operand(smi, ASR, kSmiShift);
}


Operand Operand::UntagSmiAndScale(Register smi, int scale) {
  ASSERT(smi.Is64Bits());
  ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
  if (scale > kSmiShift) {
    return Operand(smi, LSL, scale - kSmiShift);
  } else if (scale < kSmiShift) {
    return Operand(smi, ASR, kSmiShift - scale);
  }
  return Operand(smi);
}
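

// Worked example, assuming the 64-bit Smi layout where kSmiShift == 32 (the
// untagged value lives in the upper 32 bits of the register):
//
//   UntagSmiAndScale(smi, 3);  // scale (3) < kSmiShift (32)
//     // -> Operand(smi, ASR, 29).
//     // A register holding (value << 32), arithmetically shifted right by
//     // 29, yields (value << 3): the untagged value scaled by 8.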


MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
  : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
  ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
  ASSERT(base.Is64Bits() && !base.IsZero());
  ASSERT(!regoffset.IsSP());
  ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
  ASSERT(base.Is64Bits() && !base.IsZero());
  ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
  : base_(base), addrmode_(addrmode) {
  ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.immediate();

    regoffset_ = NoReg;
  } else if (offset.IsShiftedRegister()) {
    ASSERT(addrmode == Offset);

    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    ASSERT(shift_ == LSL);
  } else {
    ASSERT(offset.IsExtendedRegister());
    ASSERT(addrmode == Offset);

    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    ASSERT(!regoffset_.IsSP());
    ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    ASSERT(regoffset_.Is64Bits() || (extend_ != SXTX));
  }
}
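

// The constructors above cover the ARM64 addressing modes; for example:
//
//   MemOperand(x0, 8);              // [x0, #8]          immediate offset
//   MemOperand(x0, x1, LSL, 3);     // [x0, x1, lsl #3]  register offset
//   MemOperand(x0, w1, SXTW, 2);    // [x0, w1, sxtw #2] extended register
//   MemOperand(x0, 16, PreIndex);   // [x0, #16]!        pre-indexed
//   MemOperand(x0, 16, PostIndex);  // [x0], #16         post-indexed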


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}


bool MemOperand::IsPreIndex() const {
  return addrmode_ == PreIndex;
}


bool MemOperand::IsPostIndex() const {
  return addrmode_ == PostIndex;
}


Operand MemOperand::OffsetAsOperand() const {
  if (IsImmediateOffset()) {
    return offset();
  } else {
    ASSERT(IsRegisterOffset());
    if (extend() == NO_EXTEND) {
      return Operand(regoffset(), shift(), shift_amount());
    } else {
      return Operand(regoffset(), extend(), shift_amount());
    }
  }
}


void Assembler::Unreachable() {
#ifdef USE_SIMULATOR
  debug("UNREACHABLE", __LINE__, BREAK);
#else
  // Crash by branching to 0. lr now points near the fault.
  Emit(BLR | Rn(xzr));
#endif
}


Address Assembler::target_pointer_address_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  ASSERT(instr->IsLdrLiteralX());
  return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}


// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return Memory::Address_at(target_pointer_address_at(pc));
}


Address Assembler::target_address_at(Address pc, Code* code) {
  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
  return target_address_at(pc, constant_pool);
}


Address Assembler::target_address_from_return_address(Address pc) {
  // Returns the address of the call target from the return address that will
  // be returned to after a call.
  // Call sequence on ARM64 is:
  //  ldr ip0, #... @ load from literal pool
  //  blr ip0
  Address candidate = pc - 2 * kInstructionSize;
  Instruction* instr = reinterpret_cast<Instruction*>(candidate);
  USE(instr);
  ASSERT(instr->IsLdrLiteralX());
  return candidate;
}


Address Assembler::return_address_from_call_start(Address pc) {
  // The call, generated by MacroAssembler::Call, is one of two possible
  // sequences:
  //
  // Without relocation:
  //  movz  temp, #(target & 0x000000000000ffff)
  //  movk  temp, #(target & 0x00000000ffff0000)
  //  movk  temp, #(target & 0x0000ffff00000000)
  //  blr   temp
  //
  // With relocation:
  //  ldr   temp, =target
  //  blr   temp
  //
  // The return address is immediately after the blr instruction in both cases,
  // so it can be found by adding the call size to the address at the start of
  // the call sequence.
  STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize);
  STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);

  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsMovz()) {
    // Verify the instruction sequence.
    ASSERT(instr->following(1)->IsMovk());
    ASSERT(instr->following(2)->IsMovk());
    ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithoutRelocation;
  } else {
    // Verify the instruction sequence.
    ASSERT(instr->IsLdrLiteralX());
    ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithRelocation;
  }
}


void Assembler::deserialization_set_special_target_at(
    Address constant_pool_entry, Code* code, Address target) {
  Memory::Address_at(constant_pool_entry) = target;
}


void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target) {
  Memory::Address_at(target_pointer_address_at(pc)) = target;
  // Intuitively, we would think it is necessary to always flush the
  // instruction cache after patching a target address in the code as follows:
  //   CPU::FlushICache(pc, sizeof(target));
  // However, no instruction is actually patched in the case of embedded
  // constants of the form:
  //   ldr ip, [pc, #...]
  // Since the instruction accessing this address in the constant pool remains
  // unchanged, a flush is not required.
}


void Assembler::set_target_address_at(Address pc,
                                      Code* code,
                                      Address target) {
  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
  set_target_address_at(pc, constant_pool, target);
}


int RelocInfo::target_address_size() {
  return kPointerSize;
}


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
         rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_pointer_address_at(pc_);
}


Address RelocInfo::constant_pool_entry_address() {
  ASSERT(IsInConstantPool());
  return Assembler::target_pointer_address_at(pc_);
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_address_at(pc_, host_)));
}


void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Assembler::set_target_address_at(pc_, host_,
                                   reinterpret_cast<Address>(target));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Address RelocInfo::target_reference() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return target_address();
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) set_target_address(target, mode);
}


Handle<Cell> RelocInfo::target_cell_handle() {
  UNIMPLEMENTED();
  Cell* null_cell = NULL;
  return Handle<Cell>(null_cell);
}


Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
  UNIMPLEMENTED();
}


static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;


Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  UNREACHABLE();  // This should never be reached on ARM64.
  return Handle<Object>();
}


Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(!Code::IsYoungSequence(pc_));
  // Read the stub entry point from the code age sequence.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
}


void RelocInfo::set_code_age_stub(Code* stub) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(!Code::IsYoungSequence(pc_));
  // Overwrite the stub entry point in the code age sequence. This is loaded as
  // a literal so there is no need to call FlushICache here.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  Memory::Address_at(stub_entry_address) = stub->instruction_start();
}


Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  // For the above sequences the RelocInfo points to the load literal loading
  // the call address.
  return Assembler::target_address_at(pc_, host_);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


void RelocInfo::WipeOut() {
  ASSERT(IsEmbeddedObject(rmode_) ||
         IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) ||
         IsExternalReference(rmode_));
  Assembler::set_target_address_at(pc_, host_, NULL);
}


bool RelocInfo::IsPatchedReturnSequence() {
  // The sequence must be:
  //   ldr ip0, [pc, #offset]
  //   blr ip0
  // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
  Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
  Instruction* i2 = i1->following();
  return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
         i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
}


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
  return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
}


void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
  } else if (((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d : LDR_s;
  }
}


LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDP_x : LDP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDP_d : LDP_s;
  }
}


LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STR_d : STR_s;
  }
}


LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STP_d : STP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDNP_x : LDNP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDNP_d : LDNP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STNP_x : STNP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STNP_d : STNP_s;
  }
}


int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
  ASSERT(kStartOfLabelLinkChain == 0);
  int offset = LinkAndGetByteOffsetTo(label);
  ASSERT(IsAligned(offset, kInstructionSize));
  return offset >> kInstructionSizeLog2;
}


Instr Assembler::Flags(FlagsUpdate S) {
  if (S == SetFlags) {
    return 1 << FlagsUpdate_offset;
  } else if (S == LeaveFlags) {
    return 0 << FlagsUpdate_offset;
  }
  UNREACHABLE();
  return 0;
}


Instr Assembler::Cond(Condition cond) {
  return cond << Condition_offset;
}


Instr Assembler::ImmPCRelAddress(int imm21) {
  CHECK(is_int21(imm21));
  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}
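

// The PC-relative addressing forms (ADR and ADRP) split their 21-bit
// immediate across two fields; for example, imm21 == 5 (0b101) contributes
// 0b01 to the two-bit low field (immlo) and 0b1 to the nineteen-bit high
// field (immhi), each masked into its own position in the instruction.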


Instr Assembler::ImmUncondBranch(int imm26) {
  CHECK(is_int26(imm26));
  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}


Instr Assembler::ImmCondBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCondBranch_offset;
}


Instr Assembler::ImmCmpBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}


Instr Assembler::ImmTestBranch(int imm14) {
  CHECK(is_int14(imm14));
  return truncate_to_int14(imm14) << ImmTestBranch_offset;
}


Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  ASSERT(is_uint6(bit_pos));
  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}
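

// Worked example: TBZ and TBNZ split the tested bit position across two
// fields, b5 (bit 5 of the position) and b40 (bits 4:0). For bit_pos == 37
// (0b100101), b5 receives 1 and b40 receives 0b00101 (5).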


Instr Assembler::SF(Register rd) {
  return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}


Instr Assembler::ImmAddSub(int64_t imm) {
  ASSERT(IsImmAddSub(imm));
  if (is_uint12(imm)) {  // No shift required.
    return imm << ImmAddSub_offset;
  } else {
    return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
}
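

// For example, ImmAddSub(1) fits in twelve bits and is encoded directly,
// while ImmAddSub(0x1000) does not fit: it is encoded as immediate 1 with
// the "shifted left by 12" bit set.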


Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
  USE(reg_size);
  return imms << ImmS_offset;
}


Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  ASSERT(is_uint6(immr));
  return immr << ImmR_offset;
}


Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT(is_uint6(imms));
  ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
  USE(reg_size);
  return imms << ImmSetBits_offset;
}


Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmRotate_offset;
}


Instr Assembler::ImmLLiteral(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmLLiteral_offset;
}


Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
  USE(reg_size);
  return bitn << BitN_offset;
}


Instr Assembler::ShiftDP(Shift shift) {
  ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
  return shift << ShiftDP_offset;
}


Instr Assembler::ImmDPShift(unsigned amount) {
  ASSERT(is_uint6(amount));
  return amount << ImmDPShift_offset;
}


Instr Assembler::ExtendMode(Extend extend) {
  return extend << ExtendMode_offset;
}


Instr Assembler::ImmExtendShift(unsigned left_shift) {
  ASSERT(left_shift <= 4);
  return left_shift << ImmExtendShift_offset;
}


Instr Assembler::ImmCondCmp(unsigned imm) {
  ASSERT(is_uint5(imm));
  return imm << ImmCondCmp_offset;
}


Instr Assembler::Nzcv(StatusFlags nzcv) {
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}


Instr Assembler::ImmLSUnsigned(int imm12) {
  ASSERT(is_uint12(imm12));
  return imm12 << ImmLSUnsigned_offset;
}


Instr Assembler::ImmLS(int imm9) {
  ASSERT(is_int9(imm9));
  return truncate_to_int9(imm9) << ImmLS_offset;
}


Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
  ASSERT(((imm7 >> size) << size) == imm7);
  int scaled_imm7 = imm7 >> size;
  ASSERT(is_int7(scaled_imm7));
  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
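

// Worked example: the pair-access immediate is stored scaled by the access
// size. For a pair of X registers (LSDataSize == 3, eight-byte accesses), a
// byte offset of 16 is encoded as scaled_imm7 == 2.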


Instr Assembler::ImmShiftLS(unsigned shift_amount) {
  ASSERT(is_uint1(shift_amount));
  return shift_amount << ImmShiftLS_offset;
}


Instr Assembler::ImmException(int imm16) {
  ASSERT(is_uint16(imm16));
  return imm16 << ImmException_offset;
}


Instr Assembler::ImmSystemRegister(int imm15) {
  ASSERT(is_uint15(imm15));
  return imm15 << ImmSystemRegister_offset;
}


Instr Assembler::ImmHint(int imm7) {
  ASSERT(is_uint7(imm7));
  return imm7 << ImmHint_offset;
}


Instr Assembler::ImmBarrierDomain(int imm2) {
  ASSERT(is_uint2(imm2));
  return imm2 << ImmBarrierDomain_offset;
}


Instr Assembler::ImmBarrierType(int imm2) {
  ASSERT(is_uint2(imm2));
  return imm2 << ImmBarrierType_offset;
}


LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
  ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
  return static_cast<LSDataSize>(op >> SizeLS_offset);
}


Instr Assembler::ImmMoveWide(uint64_t imm) {
  ASSERT(is_uint16(imm));
  return imm << ImmMoveWide_offset;
}


Instr Assembler::ShiftMoveWide(int64_t shift) {
  ASSERT(is_uint2(shift));
  return shift << ShiftMoveWide_offset;
}


Instr Assembler::FPType(FPRegister fd) {
  return fd.Is64Bits() ? FP64 : FP32;
}


Instr Assembler::FPScale(unsigned scale) {
  ASSERT(is_uint6(scale));
  return scale << FPScale_offset;
}


const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
  return reg.Is64Bits() ? xzr : wzr;
}


void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
  LoadRelocatedValue(rt, operand, LDR_x_lit);
}


inline void Assembler::CheckBufferSpace() {
  ASSERT(pc_ < (buffer_ + buffer_size_));
  if (buffer_space() < kGap) {
    GrowBuffer();
  }
}


inline void Assembler::CheckBuffer() {
  CheckBufferSpace();
  if (pc_offset() >= next_veneer_pool_check_) {
    CheckVeneerPool(false, true);
  }
  if (pc_offset() >= next_constant_pool_check_) {
    CheckConstPool(false, true);
  }
}


TypeFeedbackId Assembler::RecordedAstId() {
  ASSERT(!recorded_ast_id_.IsNone());
  return recorded_ast_id_;
}


void Assembler::ClearRecordedAstId() {
  recorded_ast_id_ = TypeFeedbackId::None();
}


} }  // namespace v8::internal

#endif  // V8_ARM64_ASSEMBLER_ARM64_INL_H_