// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_
12 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
15 class StringHelper : public AllStatic {
17 // Compares two flat one-byte strings and returns result in x0.
18 static void GenerateCompareFlatOneByteStrings(
19 MacroAssembler* masm, Register left, Register right, Register scratch1,
20 Register scratch2, Register scratch3, Register scratch4);
22 // Compare two flat one-byte strings for equality and returns result in x0.
23 static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
24 Register left, Register right,
30 static void GenerateOneByteCharsCompareLoop(
31 MacroAssembler* masm, Register left, Register right, Register length,
32 Register scratch1, Register scratch2, Label* chars_not_equal);
34 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
38 class StoreRegistersStateStub: public PlatformCodeStub {
40 explicit StoreRegistersStateStub(Isolate* isolate)
41 : PlatformCodeStub(isolate) {}
43 static Register to_be_pushed_lr() { return ip0; }
45 static void GenerateAheadOfTime(Isolate* isolate);
48 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
49 DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
53 class RestoreRegistersStateStub: public PlatformCodeStub {
55 explicit RestoreRegistersStateStub(Isolate* isolate)
56 : PlatformCodeStub(isolate) {}
58 static void GenerateAheadOfTime(Isolate* isolate);
61 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
62 DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
66 class RecordWriteStub: public PlatformCodeStub {
68 // Stub to record the write of 'value' at 'address' in 'object'.
69 // Typically 'address' = 'object' + <some offset>.
70 // See MacroAssembler::RecordWriteField() for example.
71 RecordWriteStub(Isolate* isolate,
75 RememberedSetAction remembered_set_action,
76 SaveFPRegsMode fp_mode)
77 : PlatformCodeStub(isolate),
78 regs_(object, // An input reg.
79 address, // An input reg.
80 value) { // One scratch reg.
81 DCHECK(object.Is64Bits());
82 DCHECK(value.Is64Bits());
83 DCHECK(address.Is64Bits());
84 minor_key_ = ObjectBits::encode(object.code()) |
85 ValueBits::encode(value.code()) |
86 AddressBits::encode(address.code()) |
87 RememberedSetActionBits::encode(remembered_set_action) |
88 SaveFPRegsModeBits::encode(fp_mode);
91 RecordWriteStub(uint32_t key, Isolate* isolate)
92 : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
97 INCREMENTAL_COMPACTION
100 virtual bool SometimesSetsUpAFrame() { return false; }
102 static Mode GetMode(Code* stub) {
103 // Find the mode depending on the first two instructions.
104 Instruction* instr1 =
105 reinterpret_cast<Instruction*>(stub->instruction_start());
106 Instruction* instr2 = instr1->following();
108 if (instr1->IsUncondBranchImm()) {
109 DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
113 DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
115 if (instr2->IsUncondBranchImm()) {
116 return INCREMENTAL_COMPACTION;
119 DCHECK(instr2->IsPCRelAddressing());
121 return STORE_BUFFER_ONLY;
124 // We patch the two first instructions of the stub back and forth between an
125 // adr and branch when we start and stop incremental heap marking.
130 // so effectively a nop.
131 static void Patch(Code* stub, Mode mode) {
132 // We are going to patch the two first instructions of the stub.
133 PatchingAssembler patcher(
134 reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
135 Instruction* instr1 = patcher.InstructionAt(0);
136 Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
137 // Instructions must be either 'adr' or 'b'.
138 DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
139 DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
140 // Retrieve the offsets to the labels.
141 int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
142 int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
145 case STORE_BUFFER_ONLY:
146 DCHECK(GetMode(stub) == INCREMENTAL ||
147 GetMode(stub) == INCREMENTAL_COMPACTION);
148 patcher.adr(xzr, offset_to_incremental_noncompacting);
149 patcher.adr(xzr, offset_to_incremental_compacting);
152 DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
153 patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
154 patcher.adr(xzr, offset_to_incremental_compacting);
156 case INCREMENTAL_COMPACTION:
157 DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
158 patcher.adr(xzr, offset_to_incremental_noncompacting);
159 patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
162 DCHECK(GetMode(stub) == mode);
165 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
168 // This is a helper class to manage the registers associated with the stub.
169 // The 'object' and 'address' registers must be preserved.
170 class RegisterAllocation {
172 RegisterAllocation(Register object,
178 saved_regs_(kCallerSaved),
179 saved_fp_regs_(kCallerSavedFP) {
180 DCHECK(!AreAliased(scratch, object, address));
182 // The SaveCallerSaveRegisters method needs to save caller-saved
183 // registers, but we don't bother saving MacroAssembler scratch registers.
184 saved_regs_.Remove(MacroAssembler::DefaultTmpList());
185 saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
187 // We would like to require more scratch registers for this stub,
188 // but the number of registers comes down to the ones used in
189 // FullCodeGen::SetVar(), which is architecture independent.
190 // We allocate 2 extra scratch registers that we'll save on the stack.
191 CPURegList pool_available = GetValidRegistersForAllocation();
192 CPURegList used_regs(object, address, scratch);
193 pool_available.Remove(used_regs);
194 scratch1_ = Register(pool_available.PopLowestIndex());
195 scratch2_ = Register(pool_available.PopLowestIndex());
197 // The scratch registers will be restored by other means so we don't need
198 // to save them with the other caller saved registers.
199 saved_regs_.Remove(scratch0_);
200 saved_regs_.Remove(scratch1_);
201 saved_regs_.Remove(scratch2_);
204 void Save(MacroAssembler* masm) {
205 // We don't have to save scratch0_ because it was given to us as
206 // a scratch register.
207 masm->Push(scratch1_, scratch2_);
210 void Restore(MacroAssembler* masm) {
211 masm->Pop(scratch2_, scratch1_);
214 // If we have to call into C then we need to save and restore all caller-
215 // saved registers that were not already preserved.
216 void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
217 // TODO(all): This can be very expensive, and it is likely that not every
218 // register will need to be preserved. Can we improve this?
219 masm->PushCPURegList(saved_regs_);
220 if (mode == kSaveFPRegs) {
221 masm->PushCPURegList(saved_fp_regs_);
225 void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
226 // TODO(all): This can be very expensive, and it is likely that not every
227 // register will need to be preserved. Can we improve this?
228 if (mode == kSaveFPRegs) {
229 masm->PopCPURegList(saved_fp_regs_);
231 masm->PopCPURegList(saved_regs_);
234 Register object() { return object_; }
235 Register address() { return address_; }
236 Register scratch0() { return scratch0_; }
237 Register scratch1() { return scratch1_; }
238 Register scratch2() { return scratch2_; }
246 CPURegList saved_regs_;
247 CPURegList saved_fp_regs_;
249 // TODO(all): We should consider moving this somewhere else.
250 static CPURegList GetValidRegistersForAllocation() {
251 // The list of valid registers for allocation is defined as all the
252 // registers without those with a special meaning.
254 // The default list excludes registers x26 to x31 because they are
255 // reserved for the following purpose:
256 // - x26 root register
257 // - x27 context pointer register
259 // - x29 frame pointer
260 // - x30 link register(lr)
261 // - x31 xzr/stack pointer
262 CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
264 // We also remove MacroAssembler's scratch registers.
265 list.Remove(MacroAssembler::DefaultTmpList());
270 friend class RecordWriteStub;
273 enum OnNoNeedToInformIncrementalMarker {
274 kReturnOnNoNeedToInformIncrementalMarker,
275 kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
278 virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
280 virtual void Generate(MacroAssembler* masm) OVERRIDE;
281 void GenerateIncremental(MacroAssembler* masm, Mode mode);
282 void CheckNeedsToInformIncrementalMarker(
283 MacroAssembler* masm,
284 OnNoNeedToInformIncrementalMarker on_no_need,
286 void InformIncrementalMarker(MacroAssembler* masm);
288 void Activate(Code* code) {
289 code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
292 Register object() const {
293 return Register::from_code(ObjectBits::decode(minor_key_));
296 Register value() const {
297 return Register::from_code(ValueBits::decode(minor_key_));
300 Register address() const {
301 return Register::from_code(AddressBits::decode(minor_key_));
304 RememberedSetAction remembered_set_action() const {
305 return RememberedSetActionBits::decode(minor_key_);
308 SaveFPRegsMode save_fp_regs_mode() const {
309 return SaveFPRegsModeBits::decode(minor_key_);
312 class ObjectBits: public BitField<int, 0, 5> {};
313 class ValueBits: public BitField<int, 5, 5> {};
314 class AddressBits: public BitField<int, 10, 5> {};
315 class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
316 class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
319 RegisterAllocation regs_;
323 // Helper to call C++ functions from generated code. The caller must prepare
324 // the exit frame before doing the call with GenerateCall.
325 class DirectCEntryStub: public PlatformCodeStub {
327 explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
328 void GenerateCall(MacroAssembler* masm, Register target);
331 bool NeedsImmovableCode() { return true; }
333 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
334 DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
338 class NameDictionaryLookupStub: public PlatformCodeStub {
340 enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
342 NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
343 : PlatformCodeStub(isolate) {
344 minor_key_ = LookupModeBits::encode(mode);
347 static void GenerateNegativeLookup(MacroAssembler* masm,
355 static void GeneratePositiveLookup(MacroAssembler* masm,
363 virtual bool SometimesSetsUpAFrame() { return false; }
366 static const int kInlinedProbes = 4;
367 static const int kTotalProbes = 20;
369 static const int kCapacityOffset =
370 NameDictionary::kHeaderSize +
371 NameDictionary::kCapacityIndex * kPointerSize;
373 static const int kElementsStartOffset =
374 NameDictionary::kHeaderSize +
375 NameDictionary::kElementsStartIndex * kPointerSize;
377 LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
379 class LookupModeBits: public BitField<LookupMode, 0, 1> {};
381 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
382 DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
385 } } // namespace v8::internal
387 #endif // V8_ARM64_CODE_STUBS_ARM64_H_