1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM64_CODE_STUBS_ARM64_H_
6 #define V8_ARM64_CODE_STUBS_ARM64_H_
// Emits the fast-path native code for the Array constructor; jumps to
// call_generic_code when the fast path cannot handle the call.
// NOTE(review): behavior inferred from the parameter names — confirm at the
// definition in code-stubs-arm64.cc.
12 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
// All-static helper that emits ARM64 code sequences for comparing flat
// one-byte (Latin-1) strings. Each Generate* method appends code to 'masm';
// the generated code leaves its result in x0 (see per-method comments).
15 class StringHelper : public AllStatic {
17   // Compares two flat one-byte strings and returns result in x0.
18   static void GenerateCompareFlatOneByteStrings(
19       MacroAssembler* masm, Register left, Register right, Register scratch1,
20       Register scratch2, Register scratch3, Register scratch4);
22   // Compare two flat one-byte strings for equality and returns result in x0.
23   static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
24                                               Register left, Register right,
// Emits the character-by-character comparison loop shared by the routines
// above; branches to 'chars_not_equal' on the first mismatching character.
30   static void GenerateOneByteCharsCompareLoop(
31       MacroAssembler* masm, Register left, Register right, Register length,
32       Register scratch1, Register scratch2, Label* chars_not_equal);
// Pure static utility class: no instances may be created.
34   DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
// Stub that saves register state (paired with RestoreRegistersStateStub
// below). Generated ahead of time so it is available without on-demand
// compilation.
38 class StoreRegistersStateStub: public PlatformCodeStub {
40   explicit StoreRegistersStateStub(Isolate* isolate)
41       : PlatformCodeStub(isolate) {}
// Register used to carry lr into the stub: callers must stash lr in ip0
// before invoking, since calling the stub itself clobbers lr.
43   static Register to_be_pushed_lr() { return ip0; }
45   static void GenerateAheadOfTime(Isolate* isolate);
// No JS-visible call interface: the stub is invoked directly from
// generated code, not through a call descriptor.
48   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
49   DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
// Counterpart of StoreRegistersStateStub: restores the register state that
// the store stub saved. Also generated ahead of time.
53 class RestoreRegistersStateStub: public PlatformCodeStub {
55   explicit RestoreRegistersStateStub(Isolate* isolate)
56       : PlatformCodeStub(isolate) {}
58   static void GenerateAheadOfTime(Isolate* isolate);
61   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
62   DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
// Write-barrier stub: records a pointer store so the GC's remembered set
// and incremental marker stay consistent. The stub's first two instructions
// encode its current Mode (see GetMode/Patch below) and are patched in
// place when incremental marking starts or stops.
66 class RecordWriteStub: public PlatformCodeStub {
68   // Stub to record the write of 'value' at 'address' in 'object'.
69   // Typically 'address' = 'object' + <some offset>.
70   // See MacroAssembler::RecordWriteField() for example.
71   RecordWriteStub(Isolate* isolate,
75                   RememberedSetAction remembered_set_action,
76                   SaveFPRegsMode fp_mode)
77       : PlatformCodeStub(isolate),
78         regs_(object,   // An input reg.
79               address,  // An input reg.
80               value) {  // One scratch reg.
// All three registers must be full 64-bit X registers (pointers).
81     DCHECK(object.Is64Bits());
82     DCHECK(value.Is64Bits());
83     DCHECK(address.Is64Bits());
// Pack the register codes and flags into minor_key_ so the stub can be
// re-created from its key alone (see the key-based constructor below).
84     minor_key_ = ObjectBits::encode(object.code()) |
85                  ValueBits::encode(value.code()) |
86                  AddressBits::encode(address.code()) |
87                  RememberedSetActionBits::encode(remembered_set_action) |
88                  SaveFPRegsModeBits::encode(fp_mode);
// Reconstructs a stub from a serialized key; registers are decoded from
// minor_key_ via the object()/address()/value() accessors.
91   RecordWriteStub(uint32_t key, Isolate* isolate)
92       : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
97     INCREMENTAL_COMPACTION
100   bool SometimesSetsUpAFrame() override { return false; }
// Decodes the stub's current Mode from its first two instructions.
// An 'adr xzr, #offset' writes to the zero register, i.e. is a nop that
// serves purely as a mode marker; a 'b' in either slot selects one of the
// incremental paths.
102   static Mode GetMode(Code* stub) {
103     // Find the mode depending on the first two instructions.
104     Instruction* instr1 =
105       reinterpret_cast<Instruction*>(stub->instruction_start());
106     Instruction* instr2 = instr1->following();
108     if (instr1->IsUncondBranchImm()) {
109       DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
113     DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
115     if (instr2->IsUncondBranchImm()) {
116       return INCREMENTAL_COMPACTION;
119     DCHECK(instr2->IsPCRelAddressing());
121     return STORE_BUFFER_ONLY;
124   // We patch the two first instructions of the stub back and forth between an
125   // adr and branch when we start and stop incremental heap marking.
130   // so effectively a nop.
131   static void Patch(Code* stub, Mode mode) {
132     // We are going to patch the two first instructions of the stub.
133     PatchingAssembler patcher(
134         reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
135     Instruction* instr1 = patcher.InstructionAt(0);
136     Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
137     // Instructions must be either 'adr' or 'b'.
138     DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
139     DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
140     // Retrieve the offsets to the labels.
// Both encodings (adr immediate and branch immediate) preserve the same
// PC-relative offset, so it can be read back regardless of current mode.
141     auto offset_to_incremental_noncompacting =
142         static_cast<int32_t>(instr1->ImmPCOffset());
143     auto offset_to_incremental_compacting =
144         static_cast<int32_t>(instr2->ImmPCOffset());
// STORE_BUFFER_ONLY: both slots become nop markers (adr xzr).
147       case STORE_BUFFER_ONLY:
148         DCHECK(GetMode(stub) == INCREMENTAL ||
149                GetMode(stub) == INCREMENTAL_COMPACTION);
150         patcher.adr(xzr, offset_to_incremental_noncompacting);
151         patcher.adr(xzr, offset_to_incremental_compacting);
// INCREMENTAL: first slot branches to the non-compacting path. Branch
// immediates are in instruction units, hence the shift.
154         DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
155         patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
156         patcher.adr(xzr, offset_to_incremental_compacting);
158       case INCREMENTAL_COMPACTION:
159         DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
160         patcher.adr(xzr, offset_to_incremental_noncompacting);
161         patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
// Verify the patch round-trips: re-decoding must yield the requested mode.
164     DCHECK(GetMode(stub) == mode);
167   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
170   // This is a helper class to manage the registers associated with the stub.
171   // The 'object' and 'address' registers must be preserved.
172   class RegisterAllocation {
174     RegisterAllocation(Register object,
180           saved_regs_(kCallerSaved),
181           saved_fp_regs_(kCallerSavedFP) {
// The three fixed registers must be distinct or saves/restores would clash.
182       DCHECK(!AreAliased(scratch, object, address));
184       // The SaveCallerSaveRegisters method needs to save caller-saved
185       // registers, but we don't bother saving MacroAssembler scratch registers.
186       saved_regs_.Remove(MacroAssembler::DefaultTmpList());
187       saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
189       // We would like to require more scratch registers for this stub,
190       // but the number of registers comes down to the ones used in
191       // FullCodeGen::SetVar(), which is architecture independent.
192       // We allocate 2 extra scratch registers that we'll save on the stack.
193       CPURegList pool_available = GetValidRegistersForAllocation();
194       CPURegList used_regs(object, address, scratch);
195       pool_available.Remove(used_regs);
196       scratch1_ = Register(pool_available.PopLowestIndex());
197       scratch2_ = Register(pool_available.PopLowestIndex());
199       // The scratch registers will be restored by other means so we don't need
200       // to save them with the other caller saved registers.
201       saved_regs_.Remove(scratch0_);
202       saved_regs_.Remove(scratch1_);
203       saved_regs_.Remove(scratch2_);
// Spills the two extra allocated scratch registers; pair with Restore().
206     void Save(MacroAssembler* masm) {
207       // We don't have to save scratch0_ because it was given to us as
208       // a scratch register.
209       masm->Push(scratch1_, scratch2_);
// Pops in reverse order of Save() to match the push layout.
212     void Restore(MacroAssembler* masm) {
213       masm->Pop(scratch2_, scratch1_);
216     // If we have to call into C then we need to save and restore all caller-
217     // saved registers that were not already preserved.
218     void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
219       // TODO(all): This can be very expensive, and it is likely that not every
220       // register will need to be preserved. Can we improve this?
221       masm->PushCPURegList(saved_regs_);
222       if (mode == kSaveFPRegs) {
223         masm->PushCPURegList(saved_fp_regs_);
// Mirror of SaveCallerSaveRegisters: FP registers (pushed last) are popped
// first.
227     void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
228       // TODO(all): This can be very expensive, and it is likely that not every
229       // register will need to be preserved. Can we improve this?
230       if (mode == kSaveFPRegs) {
231         masm->PopCPURegList(saved_fp_regs_);
233       masm->PopCPURegList(saved_regs_);
236     Register object() { return object_; }
237     Register address() { return address_; }
238     Register scratch0() { return scratch0_; }
239     Register scratch1() { return scratch1_; }
240     Register scratch2() { return scratch2_; }
248     CPURegList saved_regs_;
249     CPURegList saved_fp_regs_;
251     // TODO(all): We should consider moving this somewhere else.
252     static CPURegList GetValidRegistersForAllocation() {
253       // The list of valid registers for allocation is defined as all the
254       // registers without those with a special meaning.
256       // The default list excludes registers x26 to x31 because they are
257       // reserved for the following purpose:
258       //  - x26 root register
259       //  - x27 context pointer register
261       //  - x29 frame pointer
262       //  - x30 link register(lr)
263       //  - x31 xzr/stack pointer
264       CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
266       // We also remove MacroAssembler's scratch registers.
267       list.Remove(MacroAssembler::DefaultTmpList());
272     friend class RecordWriteStub;
// Controls what the generated code does when the incremental marker does
// not need to be informed about the store: either return immediately or
// still update the remembered set.
275   enum OnNoNeedToInformIncrementalMarker {
276     kReturnOnNoNeedToInformIncrementalMarker,
277     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
280   inline Major MajorKey() const final { return RecordWrite; }
282   void Generate(MacroAssembler* masm) override;
283   void GenerateIncremental(MacroAssembler* masm, Mode mode);
284   void CheckNeedsToInformIncrementalMarker(
285       MacroAssembler* masm,
286       OnNoNeedToInformIncrementalMarker on_no_need,
288   void InformIncrementalMarker(MacroAssembler* masm);
// Called when the stub's code object is (re)activated: registers it with
// the incremental marker so it can be patched when marking state changes.
290   void Activate(Code* code) override {
291     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
// Accessors decoding the constructor parameters back out of minor_key_.
294   Register object() const {
295     return Register::from_code(ObjectBits::decode(minor_key_));
298   Register value() const {
299     return Register::from_code(ValueBits::decode(minor_key_));
302   Register address() const {
303     return Register::from_code(AddressBits::decode(minor_key_));
306   RememberedSetAction remembered_set_action() const {
307     return RememberedSetActionBits::decode(minor_key_);
310   SaveFPRegsMode save_fp_regs_mode() const {
311     return SaveFPRegsModeBits::decode(minor_key_);
// minor_key_ bit layout: object reg code [0..4], value reg code [5..9],
// address reg code [10..14], remembered-set action [15], FP-save mode [16].
314   class ObjectBits: public BitField<int, 0, 5> {};
315   class ValueBits: public BitField<int, 5, 5> {};
316   class AddressBits: public BitField<int, 10, 5> {};
317   class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
318   class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
321   RegisterAllocation regs_;
325 // Helper to call C++ functions from generated code. The caller must prepare
326 // the exit frame before doing the call with GenerateCall.
327 class DirectCEntryStub: public PlatformCodeStub {
329   explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
// Emits a call through this stub to the C++ function in 'target'.
330   void GenerateCall(MacroAssembler* masm, Register target);
// The stub's code must never be moved by the GC — presumably because a raw
// pointer into it is live across the C++ call. NOTE(review): confirm the
// exact reason against the .cc implementation.
333   bool NeedsImmovableCode() override { return true; }
335   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
336   DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
// Stub performing a probe sequence over a NameDictionary, in either
// positive (key expected present) or negative (key expected absent) mode.
340 class NameDictionaryLookupStub: public PlatformCodeStub {
342   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
// The mode is stored in minor_key_ so it round-trips through serialization.
344   NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
345       : PlatformCodeStub(isolate) {
346     minor_key_ = LookupModeBits::encode(mode);
349   static void GenerateNegativeLookup(MacroAssembler* masm,
357   static void GeneratePositiveLookup(MacroAssembler* masm,
365   bool SometimesSetsUpAFrame() override { return false; }
// Probe counts: kInlinedProbes probes are emitted inline at the call site;
// the stub itself tries up to kTotalProbes before giving up. NOTE(review):
// division of labor inferred from the names — confirm in the .cc file.
368   static const int kInlinedProbes = 4;
369   static const int kTotalProbes = 20;
// Byte offsets into the dictionary's backing FixedArray, precomputed from
// the NameDictionary layout constants.
371   static const int kCapacityOffset =
372       NameDictionary::kHeaderSize +
373       NameDictionary::kCapacityIndex * kPointerSize;
375   static const int kElementsStartOffset =
376       NameDictionary::kHeaderSize +
377       NameDictionary::kElementsStartIndex * kPointerSize;
379   LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
// minor_key_ bit layout: lookup mode in bit 0.
381   class LookupModeBits: public BitField<LookupMode, 0, 1> {};
383   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
384   DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
387 } // namespace internal
390 #endif // V8_ARM64_CODE_STUBS_ARM64_H_