// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/bailout-reason.h"
#include "src/globals.h"

#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"

// Simulator specific helpers.
#ifdef USE_SIMULATOR
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) \
  __ Debug(message, __LINE__, NO_PARAM)
#define ASM_UNIMPLEMENTED_BREAK(message) \
  __ Debug(message, __LINE__,            \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#define ASM_LOCATION(message) \
  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_UNIMPLEMENTED(message)
#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#endif


namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those, the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq, integer_ne = ne, integer_hs = hs, integer_lo = lo,
  integer_mi = mi, integer_pl = pl, integer_vs = vs, integer_vc = vc,
  integer_hi = hi, integer_ls = ls, integer_ge = ge, integer_lt = lt,
  integer_gt = gt, integer_le = le, integer_al = al, integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
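
// An illustrative note (not from the original source): architectural
// conditions invert via NegateCondition, while the pseudo branch types rely
// on adjacent enum values differing only in bit 0 (see the STATIC_ASSERT in
// MacroAssembler), for example:
//
//   InvertBranchType(eq)       == ne             // condition path
//   InvertBranchType(reg_zero) == reg_not_zero   // type ^ 1 path
//   InvertBranchType(always)   == never          // type ^ 1 path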

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size);

  inline Handle<Object> CodeObject();

  // Instruction set functions ------------------------------------------------
  inline void And(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  inline void Add(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd,
                  const Operand& operand);
  inline void Negs(const Register& rd,
                   const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd,
                  const Operand& operand);
  inline void Ngcs(const Register& rd,
                   const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true for success, and updates the contents of dst.
  // Returns false otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
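
  // A sketch of how this pairs with a subsequent instruction (illustrative
  // only; 'temp' and 'imm' are hypothetical):
  //
  //   Operand shifted = MoveImmediateForShiftedOp(temp, imm);
  //   Add(rd, rn, shifted);  // Net effect: rd = rn + imm.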

  // Conditional macros.
  inline void Ccmp(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  inline void Ccmn(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);

  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);
  inline void Bfi(const Register& rd,
                  const Register& rn,
                  unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  inline void Brk(int code);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Csinc(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
  inline void Extr(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn,
                    const FPRegister& fm,
                    StatusFlags nzcv,
                    Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmax(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template<typename T>
  void Fmov(FPRegister fd, T imm) {
    DCHECK(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, FPRegister fn);
  inline void Fmsub(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmul(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fneg(const FPRegister& fd, const FPRegister& fn);
  inline void Fnmadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Fnmsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Frinta(const FPRegister& fd, const FPRegister& fn);
  inline void Frintm(const FPRegister& fd, const FPRegister& fn);
  inline void Frintn(const FPRegister& fd, const FPRegister& fn);
  inline void Frintp(const FPRegister& fd, const FPRegister& fn);
  inline void Frintz(const FPRegister& fd, const FPRegister& fn);
  inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
  inline void Fsub(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Isb();
  inline void Ldnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& src);
  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Immediate& imm);
  // Helper function for double immediate.
  inline void Ldr(const CPURegister& rt, double imm);
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Madd(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Mov(const Register& rd, const Register& rm);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Msub(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  inline void Nop() { nop(); }
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Ret(const Register& xn = lr);
  inline void Rev(const Register& rd, const Register& rn);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Sbfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Sbfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Scvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Smaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smull(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Smulh(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Stnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  inline void Ubfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Ucvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Umaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Pseudo-instructions ------------------------------------------------------

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);
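
  // Usage sketch (illustrative only; assumes the usual '__' shorthand for a
  // MacroAssembler instance):
  //
  //   Label not_representable, done;
  //   __ Abs(x0, x1, &not_representable);
  //   __ B(&done);
  //   __ Bind(&not_representable);  // Reached only for x1 == INT64_MIN.
  //   ...
  //   __ Bind(&done);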

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is csp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (csp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  void Push(const Register& src0, const FPRegister& src1);
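
  // Usage sketch (illustrative only): argument order matches repeated single
  // pushes, so a matching Pop lists the registers in reverse.
  //
  //   __ Push(x0, x1);  // Equivalent to: Push(x0); Push(x1);
  //   __ Pop(x1, x0);   // Restores both registers.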

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  inline void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
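
  // Usage sketch (illustrative only): a RegList is a bit mask indexed by
  // register code, so x19 and x20 can be saved and restored as:
  //
  //   __ PushXRegList(x19.Bit() | x20.Bit());
  //   ...
  //   __ PopXRegList(x19.Bit() | x20.Bit());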

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);
  void PushMultipleTimes(CPURegister src, int count);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) {
    Push(src);
  }
  inline void pop(Register dst) {
    Pop(dst);
  }

  // Sometimes callers need to push or pop multiple registers in a way that is
  // difficult to structure efficiently for fixed Push or Pop calls. This scope
  // allows push requests to be queued up, then flushed at once. The
  // MacroAssembler will try to generate the most efficient sequence required.
  //
  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
  // register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }

    ~PushPopQueue() {
      DCHECK(queued_.empty());
    }

    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    enum PreambleDirective {
      WITH_PREAMBLE,
      SKIP_PREAMBLE
    };
    void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
    void PopQueued();

   private:
    MacroAssembler* masm_;
    int size_;
    std::vector<CPURegister> queued_;
  };
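
  // Usage sketch (illustrative only): queue a mixed set of registers, then
  // emit one combined sequence.
  //
  //   MacroAssembler::PushPopQueue queue(masm);
  //   queue.Queue(x0);
  //   queue.Queue(w1);
  //   queue.Queue(d0);
  //   queue.PushQueued();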

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
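
  // Usage sketch (illustrative only): Poke/Peek access already-claimed slots
  // without moving the stack pointer.
  //
  //   __ Claim(2);             // Reserve two X-sized slots.
  //   __ Poke(x0, 0);          // Store x0 at [StackPointer() + 0].
  //   __ Poke(x1, kXRegSize);  // Store x1 in the next slot.
  //   __ Peek(x2, 0);          // x2 = original x0.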

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count,
                    uint64_t unit_size = kXRegSize);
  inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count,
                   uint64_t unit_size = kXRegSize);

  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
  // register.
  inline void ClaimBySMI(const Register& count_smi,
                         uint64_t unit_size = kXRegSize);
  inline void DropBySMI(const Register& count_smi,
                        uint64_t unit_size = kXRegSize);
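
  // Usage sketch (illustrative only): with the default kXRegSize unit, a
  // register holding Smi(3) claims 3 * 8 = 24 bytes.
  //
  //   __ ClaimBySMI(x10);  // x10 holds a smi count.
  //   ...
  //   __ DropBySMI(x10);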

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs,
                               const Operand& rhs,
                               Condition cond,
                               Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern,
                                    Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);
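
  // Usage sketch (illustrative only): since V8 smis have a zero tag bit, a
  // set tag bit identifies a heap object pointer.
  //
  //   __ TestAndBranchIfAnySet(x0, kSmiTagMask, &is_not_smi);
  //   __ TestAndBranchIfAllClear(x0, kSmiTagMask, &is_smi);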

  // Insert one or more instructions into the instruction stream that encode
  // some caller-defined data. The instructions used will be executable with no
  // side effects.
  inline void InlineData(uint64_t data);

  // Insert an instrumentation enable marker into the instruction stream.
  inline void EnableInstrumentation();

  // Insert an instrumentation disable marker into the instruction stream.
  inline void DisableInstrumentation();

  // Insert an instrumentation event marker into the instruction stream. These
  // will be picked up by the instrumentation system to annotate an instruction
  // profile. The argument marker_name must be a printable two character string;
  // it will be encoded in the event marker.
  inline void AnnotateInstrumentation(const char* marker_name);

  // If emit_debug_code() is true, emit a run-time check to ensure that
  // StackPointer() does not point below the system stack pointer.
  //
  // Whilst it is architecturally legal for StackPointer() to point below csp,
  // it can be evidence of a potential bug because the ABI forbids accesses
  // below csp.
  //
  // If StackPointer() is the system stack pointer (csp), then csp will be
  // dereferenced to cause the processor (or simulator) to abort if it is not
  // properly aligned.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertStackConsistency();

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PopCalleeSavedRegisters();

  // Set the current stack pointer, but don't generate any code.
  inline void SetStackPointer(const Register& stack_pointer) {
    DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const {
    return sp_;
  }

  // Align csp for a frame, as per ActivationFrameAlignment, and make it the
  // current stack pointer.
  inline void AlignAndSetCSPForFrame() {
    int sp_alignment = ActivationFrameAlignment();
    // AAPCS64 mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
    Bic(csp, StackPointer(), sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Push the system stack pointer (csp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed. The amount pushed will be increased as necessary
  // to ensure csp remains aligned to 16 bytes.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void BumpSystemStackPointer(const Operand& space);

  // Re-synchronizes the system stack pointer (csp) with the current stack
  // pointer (according to StackPointer()).
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void SyncSystemStackPointer();

  // Helpers ------------------------------------------------------------------

  inline void InitializeRootRegister();

  void AssertFPCRState(Register fpcr = NoReg);
  void ConfigureFPCR();
  void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
  void CanonicalizeNaN(const FPRegister& reg) {
    CanonicalizeNaN(reg, reg);
  }

  // Load an object from the root table.
  void LoadRoot(CPURegister destination,
                Heap::RootListIndex index);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);

  // Load both TrueValue and FalseValue roots.
  void LoadTrueFalseRoots(Register true_root, Register false_root);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      DCHECK(object->IsSmi());
      Mov(result, Operand(object));
    }
  }

  static int SafepointRegisterStackIndex(int reg_code);

  // This is required for compatibility with architecture independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }

  void LoadInstanceDescriptors(Register map,
                               Register descriptors);
  void EnumLengthUntagged(Register dst, Register map);
  void EnumLengthSmi(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
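
  // Usage sketch (illustrative only), assuming a hypothetical field defined
  // as 'typedef BitField<int, 3, 4> MyField;' (shift 3, width 4):
  //
  //   __ DecodeField<MyField>(x0, x1);  // x0 = (x1 >> 3) & 0xf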

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);
  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register smi);
  inline void SmiUntagToDouble(FPRegister dst,
                               Register src,
                               UntagMode mode = kNotSpeculativeUntag);
  inline void SmiUntagToFloat(FPRegister dst,
                              Register src,
                              UntagMode mode = kNotSpeculativeUntag);

  // Tag and push in one step.
  inline void SmiTagAndPush(Register src);
  inline void SmiTagAndPush(Register src1, Register src2);
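
  // On arm64, a smi keeps its 32-bit payload in the upper word, so tagging is
  // a left shift by 32. An illustrative trace (not from the original source):
  //
  //   SmiTag:   x0 = x0 << 32   (7 becomes 0x0000000700000000)
  //   SmiUntag: x0 = x0 >> 32   (arithmetic shift; restores 7)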

  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1,
                            Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1,
                              Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1,
                                 Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1,
                               Register value2,
                               Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
  void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);

  inline void ObjectTag(Register tagged_obj, Register obj);
  inline void ObjectUntag(Register untagged_obj, Register obj);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  void JumpIfHeapNumber(Register object, Label* on_heap_number,
                        SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
  void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
                           SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);

  // Sets the vs flag if the input is -0.0.
  void TestForMinusZero(DoubleRegister input);

  // Jump to label if the input double register contains -0.0.
  void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);

  // Jump to label if the input integer register contains the double precision
  // floating point representation of -0.0.
  void JumpIfMinusZero(Register input, Label* on_negative_zero);

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
  // output.
  void ClampInt32ToUint8(Register in_out);
  void ClampInt32ToUint8(Register output, Register input);

  // Saturate a double in input to an unsigned 8-bit integer in output.
  void ClampDoubleToUint8(Register output,
                          DoubleRegister input,
                          DoubleRegister dbl_scratch);

  // Try to represent a double as a signed 32-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt32(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is32Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // Try to represent a double as a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt64(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is64Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }
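
  // Usage sketch (illustrative only): when no labels are supplied, callers
  // can test the Z flag directly.
  //
  //   __ TryRepresentDoubleAsInt32(w0, d0, d1);
  //   __ B(ne, &conversion_failed);  // Z clear => not representable.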

  // ---- Object Utilities ----

  // Copy fields from 'src' to 'dst', where both are tagged objects.
  // The 'temps' list is a list of X registers which can be used for scratch
  // values. The temps list must include at least one register.
  //
  // Currently, CopyFields cannot make use of more than three registers from
  // the 'temps' list.
  //
  // CopyFields expects to be able to take at least two registers from
  // MacroAssembler::TmpList().
  void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);

  // Starting at address in dst, initialize field_count 64-bit fields with
  // 64-bit value in register filler. Register dst is corrupted.
  void FillFields(Register dst,
                  Register field_count,
                  Register filler);

  // Copies a number of bytes from src to dst. All passed registers are
  // clobbered. On exit src and dst will point to the place just after where the
  // last byte was read or written and length will be zero. Hint may be used to
  // determine which is the most efficient algorithm to use for copying.
  void CopyBytes(Register dst,
                 Register src,
                 Register length,
                 Register scratch,
                 CopyHint hint = kCopyUnknown);

  // ---- String Utilities ----

  // Jump to label if either object is not a sequential one-byte string.
  // Optionally perform a smi check on the objects first.
  void JumpIfEitherIsNotSequentialOneByteStrings(
      Register first, Register second, Register scratch1, Register scratch2,
      Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);

  // ---- Calling / Jumping helpers ----

  // This is required for compatibility in architecture independent code.
  inline void jmp(Label* L) { B(L); }

  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
  void TailCallStub(CodeStub* stub);

  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int ActivationFrameAlignment();

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);
  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // set up the function in the function register.
  void GetBuiltinEntry(Register target,
                       Register function,
                       Builtins::JavaScript id);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode);
  void Jump(intptr_t target, RelocInfo::Mode rmode);

  void Call(Register target);
  void Call(Label* target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // For every Call variant, there is a matching CallSize function that returns
  // the size (in bytes) of the call sequence.
  static int CallSize(Register target);
  static int CallSize(Label* target);
  static int CallSize(Address target, RelocInfo::Mode rmode);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                      TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      InvokeFlag flag,
                      bool* definitely_mismatches,
                      const CallWrapper& call_wrapper);
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // ---- Floating point helpers ----

  // Perform a conversion from a double to a signed int64. If the input fits in
  // range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  //
  // Only public for the test code in test-code-stubs-arm64.cc.
  void TryConvertDoubleToInt64(Register result,
                               DoubleRegister input,
                               Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Label* not_int32);

  // ---- Code generation helpers ----

  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() const { return generating_stub_; }
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
  bool use_real_aborts() const { return use_real_aborts_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() const { return has_frame_; }
  bool AllowThisStubCall(CodeStub* stub);

  class NoUseRealAbortsScope {
   public:
    explicit NoUseRealAbortsScope(MacroAssembler* masm) :
        saved_(masm->use_real_aborts_), masm_(masm) {
      masm_->use_real_aborts_ = false;
    }
    ~NoUseRealAbortsScope() {
      masm_->use_real_aborts_ = saved_;
    }
   private:
    bool saved_;
    MacroAssembler* masm_;
  };

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. The allocated object is returned in result.
  //
  // If the new space is exhausted control continues at the gc_required label.
  // In this case, the result and scratch registers may still be clobbered.
  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
  void Allocate(Register object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed.
  // All registers are clobbered.
  // If no heap_number_map register is provided, the function will take care of
  // loading it.
  void AllocateHeapNumber(Register result,
                          Label* gc_required,
                          Register scratch1,
                          Register scratch2,
                          CPURegister value = NoFPReg,
                          CPURegister heap_number_map = NoReg,
                          MutableMode mode = IMMUTABLE);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Try to get the function prototype of a function and put the value in the
  // result register. Checks that the function really is a function and jumps
  // to the miss label if the fast checks fail. The function register will be
  // untouched; the other registers may be clobbered.
  enum BoundFunctionAction {
    kMissOnBoundFunction,
    kDontMissOnBoundFunction
  };

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               BoundFunctionAction action =
                                   kDontMissOnBoundFunction);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare object type for heap object, and branch if equal (or not.)
  // heap_object contains a non-Smi whose object type should be compared with
  // the given type. This both sets the flags and leaves the object type in
  // the type_reg register. It leaves the map in the map register (unless the
  // type_reg and map register are the same register). It leaves the heap
  // object in the heap_object register unless the heap_object register is the
  // same register as one of the other registers.
  void JumpIfObjectType(Register object,
                        Register map,
                        Register type_reg,
                        InstanceType type,
                        Label* if_cond_pass,
                        Condition cond = eq);
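
  // Usage sketch (illustrative only): branch if x0 holds a JSFunction; on the
  // fall-through path x1 holds the map and x2 the instance type.
  //
  //   __ JumpIfObjectType(x0, x1, x2, JS_FUNCTION_TYPE, &is_function);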

  void JumpIfNotObjectType(Register object,
                           Register map,
                           Register type_reg,
                           InstanceType type,
                           Label* if_not_object);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);

  // Compare an object's map with the specified map. Condition flags are set
  // with result of map compare.
  void CompareObjectMap(Register obj, Heap::RootListIndex index);

  // Compare an object's map with the specified map. Condition flags are set
  // with result of map compare.
  void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map,
                  Handle<Map> map);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // As above, but the map of the object is already loaded into obj_map, and is
  // preserved.
  void CheckMap(Register obj_map,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Compare the given value and the value of the weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Test the bitfield of the heap object map with mask and set the condition
  // flags. The object register is preserved.
  void TestMapBitfield(Register object, uint64_t mask);

  // Load the elements kind field from a map, and return it in the result
  // register.
  void LoadElementsKindFromMap(Register result, Register map);

  // Compare the object in a register to a value from the root list.
  void CompareRoot(const Register& obj, Heap::RootListIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(const Register& obj,
                  Heap::RootListIndex index,
                  Label* if_equal);

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(const Register& obj,
                     Heap::RootListIndex index,
                     Label* if_not_equal);

  // Load and check the instance type of an object for being a unique name.
  // Loads the type into the second argument register.
  // The object and type arguments can be the same register; in that case it
  // will be overwritten with the type.
  // Fall-through if the object was a string and jump on fail otherwise.
  inline void IsObjectNameType(Register object, Register type, Label* fail);

  inline void IsObjectJSObjectType(Register heap_object,
                                   Register map,
                                   Register scratch,
                                   Label* fail);

  // Check the instance type in the given map to see if it corresponds to a
  // JS object type. Jump to the fail label if this is not the case and fall
  // through otherwise. However if fail label is NULL, no branch will be
  // performed and the flag will be updated. You can test the flag for "le"
  // condition to test if it is a valid JS object type.
  inline void IsInstanceJSObjectType(Register map,
                                     Register scratch,
                                     Label* fail);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // The object and type arguments can be the same register; in that case it
  // will be overwritten with the type.
  // Jumps to not_string or string as appropriate. If the appropriate label is
  // NULL, fall through.
  inline void IsObjectJSStringType(Register object, Register type,
                                   Label* not_string, Label* string = NULL);

  // Compare the contents of a register with an operand, and branch to true,
  // false or fall through, depending on condition.
  void CompareAndSplit(const Register& lhs,
                       const Operand& rhs,
                       Condition cond,
                       Label* if_true,
                       Label* if_false,
                       Label* fall_through);

  // Test the bits of register defined by bit_pattern, and branch to
  // if_any_set, if_all_clear or fall_through accordingly.
  void TestAndSplit(const Register& reg,
                    uint64_t bit_pattern,
                    Label* if_all_clear,
                    Label* if_any_set,
                    Label* fall_through);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map, Register scratch, Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Register scratch, Label* fail);

  // Check to see if number can be stored as a double in FastDoubleElements.
  // If it can, store it at the index specified by key_reg in the array,
  // otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   FPRegister fpscratch1,
                                   Label* fail,
                                   int elements_offset = 0);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // ---------------------------------------------------------------------------
  // Inline caching support.

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 SeqStringSetCharCheckIndexType index_type,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch1,
                              Register scratch2,
                              Label* miss);

  // Hash the integer value in 'key' register.
  // It uses the same algorithm as ComputeIntegerHash in utils.h.
  void GetNumberHash(Register key, Register scratch);

  // Load value from the dictionary.
  //
  //   elements - holds the slow-case elements of the receiver on entry.
  //              Unchanged unless 'result' is the same register.
  //
  //   key      - holds the smi key on entry.
  //              Unchanged unless 'result' is the same register.
  //
  //   result   - holds the result on exit if the load succeeded.
  //              Allowed to be the same as 'key' or 'result'.
  //              Unchanged on bailout so 'key' or 'result' can be used
  //              in further computation.
  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register scratch0,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3);
  // ---------------------------------------------------------------------------
  // Frames.

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Returns map with validated enum cache in object register.
  void CheckEnumCache(Register object,
                      Register null_value,
                      Register scratch0,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver should point to the array object.
  // If allocation info is present, the Z flag is set (so that the eq
  // condition will pass).
  void TestJSArrayForAllocationMemento(Register receiver,
                                       Register scratch1,
                                       Register scratch2,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
                                    &no_memento_found);
    B(eq, memento_found);
    Bind(&no_memento_found);
  }

  // The stack pointer has to switch between csp and jssp when setting up and
  // destroying the exit frame. Hence preserving/restoring the registers is
  // slightly more complicated than simple push/pop operations.
  void ExitFramePreserveFPRegs();
  void ExitFrameRestoreFPRegs();

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Enter exit frame. Exit frames are used when calling C code from generated
  // (JavaScript) code.
  //
  // The stack pointer must be jssp on entry, and will be set to csp by this
  // function. The frame pointer is also configured, but the only other
  // registers modified by this function are the provided scratch register, and
  // jssp.
  //
  // The 'extra_space' argument can be used to allocate some space in the exit
  // frame that will be ignored by the GC. This space will be reserved in the
  // bottom of the frame immediately above the return address slot.
  //
  // Set up a stack frame and registers as follows:
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: SPOffset (new csp)
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp-size]: Saved doubles, if save_doubles is true.
  //         csp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //   csp -> csp[0]: Space reserved for the return address.
  //
  // This function also stores the new frame information in the top frame, so
  // that the new frame becomes the current frame.
  void EnterExitFrame(bool save_doubles,
                      const Register& scratch,
                      int extra_space = 0);

  // Leave the current exit frame, after a C function has returned to generated
  // (JavaScript) code.
  //
  // This effectively unwinds the operation of EnterExitFrame:
  //  * Preserved doubles are restored (if save_doubles is true).
  //  * The frame information is removed from the top frame.
  //  * The exit frame is dropped.
  //  * The stack pointer is reset to jssp.
  //
  // The stack pointer must be csp on entry.
  void LeaveExitFrame(bool save_doubles,
                      const Register& scratch,
                      bool restore_context);

  void LoadContext(Register dst, int context_chain_length);

  // Emit code for a truncating division by a constant. The dividend register
  // is unchanged. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

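  // Illustrative use (hypothetical register assignments): compute
  // w0 = w1 / 7, truncated towards zero. A constant-divisor sequence like
  // this is typically emitted as a multiply-by-reciprocal plus shifts rather
  // than a division instruction.
  //
  //   __ TruncatingDiv(w0, w1, 7);
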
  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value, Register scratch1,
                  Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Garbage collector support (GC).

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch1,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  void PushSafepointRegistersAndDoubles();
  void PopSafepointRegistersAndDoubles();

  // Store value in register src in the safepoint stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst) {
    Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
  }

  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src) {
    Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
  }

  void CheckPageFlagSet(const Register& object,
                        const Register& scratch,
                        int mask,
                        Label* if_any_set);

  void CheckPageFlagClear(const Register& object,
                          const Register& scratch,
                          int mask,
                          Label* if_all_clear);

  // Check if object is in new space and jump accordingly.
  // Register 'object' is preserved.
  void JumpIfNotInNewSpace(Register object,
                           Label* branch) {
    InNewSpace(object, ne, branch);
  }

  void JumpIfInNewSpace(Register object,
                        Label* branch) {
    InNewSpace(object, eq, branch);
  }

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

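  // A representative pairing with the store it guards (illustrative only; the
  // field offset constant and the register choices below are arbitrary
  // examples, not requirements):
  //
  //   __ Str(x1, FieldMemOperand(x0, JSObject::kPropertiesOffset));
  //   __ RecordWriteField(x0, JSObject::kPropertiesOffset, x1, x2,
  //                       kLRHasBeenSaved, kDontSaveFPRegs);
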
  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     lr_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Register scratch4,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);

  // Helper for finding the mark bits for an address.
  // Note that the behaviour slightly differs from other architectures.
  // On exit:
  //  - addr_reg is unchanged.
  //  - The bitmap register points at the word with the mark bits.
  //  - The shift register contains the index of the first color bit for this
  //    object in the bitmap.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register shift_reg);

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Get the location of a relocated constant (its address in the constant
  // pool) from its load site.
  void GetRelocatedValueLocation(Register ldr_location,
                                 Register result);

  // ---------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertRegisterIsClear(Register reg, BailoutReason reason);
  void AssertRegisterIsRoot(
      Register reg,
      Heap::RootListIndex index,
      BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
  void AssertFastElements(Register elements);

  // Abort if the specified register contains the invalid color bit pattern.
  // The pattern must be in bits [1:0] of 'reg' register.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertHasValidColor(const Register& reg);

  // Abort if 'object' register doesn't point to a string object.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertIsString(const Register& object);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);
  void CheckRegisterIsClear(Register reg, BailoutReason reason);

  // Print a message to stderr and abort execution.
  void Abort(BailoutReason reason);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch1,
      Register scratch2,
      Label* no_map_match);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers function and
  // map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // At the moment it is only possible to print the value of csp if it is the
  // current stack pointer. Otherwise, the MacroAssembler will automatically
  // update csp on every push (using BumpSystemStackPointer), so determining its
  // value is difficult.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char * format,
              CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg,
              CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);

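  // An illustrative call (the register assignments are hypothetical): print
  // an integer from a W register and a double from an FP register.
  //
  //   __ Printf("index: %d, value: %f\n", w1, d2);
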
  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char * format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);

  // Code ageing support functions.

  // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
  // function as old, it replaces some of the function prologue (generated by
  // FullCodeGenerator::Generate) with a call to a special stub (ultimately
  // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
  // function prologue to its initial young state (indicating that it has been
  // recently run) and continues. A young function is therefore one which has a
  // normal frame setup sequence, and an old function has a code age sequence
  // which calls a code ageing stub.

  // Set up a basic stack frame for young code (or code exempt from ageing)
  // with type FUNCTION. It may be patched later for code ageing support. This
  // is done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
  //
  // This function takes an Assembler so it can be called from either a
  // MacroAssembler or a PatchingAssembler context.
  static void EmitFrameSetupForCodeAgePatching(Assembler* assm);

  // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
  void EmitFrameSetupForCodeAgePatching();

  // Emit a code age sequence that calls the relevant code age stub. The code
  // generated by this sequence is expected to replace the code generated by
  // EmitFrameSetupForCodeAgePatching, and represents an old function.
  //
  // If stub is NULL, this function generates the code age sequence but omits
  // the stub address that is normally embedded in the instruction stream. This
  // can be used by debug code to verify code age sequences.
  static void EmitCodeAgeSequence(Assembler* assm, Code* stub);

  // Call EmitCodeAgeSequence from a MacroAssembler context.
  void EmitCodeAgeSequence(Code* stub);

  // Return true if the sequence is a young sequence generated by
  // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
  // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
  static bool IsYoungSequence(Isolate* isolate, byte* sequence);

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Perform necessary maintenance operations before a push or after a pop.
  //
  // Note that size is specified in bytes.
  void PushPreamble(Operand total_size);
  void PopPostamble(Operand total_size);

  void PushPreamble(int count, int size) { PushPreamble(count * size); }
  void PopPostamble(int count, int size) { PopPostamble(count * size); }

 private:
  // Helpers for CopyFields.
  // These each implement CopyFields in a different way.
  void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
                                 Register scratch1, Register scratch2,
                                 Register scratch3, Register scratch4,
                                 Register scratch5);
  void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
                                     Register scratch1, Register scratch2,
                                     Register scratch3, Register scratch4);
  void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
                                Register scratch1, Register scratch2,
                                Register scratch3);

  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size,
                  const CPURegister& src0, const CPURegister& src1,
                  const CPURegister& src2, const CPURegister& src3);
  void PopHelper(int count, int size,
                 const CPURegister& dst0, const CPURegister& dst1,
                 const CPURegister& dst2, const CPURegister& dst3);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack (csp) must be prepared by the caller as for a normal
  // AAPCS64 call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in
  // their proper PCS registers (and in calling order). The argument registers
  // can have mixed types. The format string (x0) should not be included.
  void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Try to represent a double as an int so that integer fast-paths may be
  // used. Not every valid integer value is guaranteed to be caught.
  // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
  // is a W or X register.
  //
  // This does not distinguish between +0 and -0, so if this distinction is
  // important it must be checked separately.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion = NULL,
                               Label* on_failed_conversion = NULL);

  bool generating_stub_;
#if DEBUG
  // Tell whether any of the macro instructions can be used. When false the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_;
#endif
  bool has_frame_;

  // The Abort method should call a V8 runtime function, but the CallRuntime
  // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
  // use a simpler abort mechanism that doesn't depend on CEntryStub.
  //
  // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
  // being generated.
  bool use_real_aborts_;

  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // The register to use as a stack pointer for stack operations.
  Register sp_;

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_;
  CPURegList fptmp_list_;

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

 public:
  // Far branches resolving.
  //
  // The various classes of branch instructions with immediate offsets have
  // different ranges. While the Assembler will fail to assemble a branch
  // exceeding its range, the MacroAssembler offers a mechanism to resolve
  // branches to too distant targets, either by tweaking the generated code to
  // use branch instructions with wider ranges or generating veneers.
  //
  // Currently branches to distant targets are resolved using unconditional
  // branch instructions with a range of +-128MB. If that becomes too little
  // (!), the mechanism can be extended to generate special veneers for really
  // far targets.

  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is
  // responsible for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label *label,
                                             ImmBranchType branch_type);
};

// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
 public:
  explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
      : masm_(masm)
#ifdef DEBUG
        ,
        size_(count * kInstructionSize)
#endif
  {
    // Before blocking the const pool, see if it needs to be emitted.
    masm_->CheckConstPool(false, true);
    masm_->CheckVeneerPool(false, true);

    masm_->StartBlockPools();
#ifdef DEBUG
    if (count != 0) {
      masm_->bind(&start_);
    }
    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
    masm_->set_allow_macro_instructions(false);
#endif
  }

  ~InstructionAccurateScope() {
    masm_->EndBlockPools();
#ifdef DEBUG
    if (start_.is_bound()) {
      DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
  }

 private:
  MacroAssembler* masm_;
#ifdef DEBUG
  size_t size_;
  Label start_;
  bool previous_allow_macro_instructions_;
#endif
};

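// A minimal usage sketch (illustrative; 'masm' is assumed to be a
// MacroAssembler*). Exactly two instructions are emitted while the literal
// and veneer pools are blocked:
//
//   {
//     InstructionAccurateScope scope(masm, 2);
//     masm->add(x0, x1, Operand(x2));  // Raw assembler, not macro, methods.
//     masm->ret();
//   }
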
// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : available_(masm->TmpList()),
        availablefp_(masm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK(available_->type() == CPURegister::kRegister);
    DCHECK(availablefp_->type() == CPURegister::kFPRegister);
  }

  ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }

  Register UnsafeAcquire(const Register& reg) {
    return Register(UnsafeAcquire(available_, reg));
  }

  Register AcquireSameSizeAs(const Register& reg);
  FPRegister AcquireSameSizeAs(const FPRegister& reg);

 private:
  static CPURegister AcquireNextAvailable(CPURegList* available);
  static CPURegister UnsafeAcquire(CPURegList* available,
                                   const CPURegister& reg);

  // Available scratch registers.
  CPURegList* available_;     // kRegister
  CPURegList* availablefp_;   // kFPRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;     // kRegister
  RegList old_availablefp_;   // kFPRegister
};

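// A minimal usage sketch (illustrative; 'masm' is assumed to be a
// MacroAssembler*):
//
//   {
//     UseScratchRegisterScope temps(masm);
//     Register scratch = temps.AcquireX();  // Taken from masm->TmpList().
//     // ... use 'scratch' ...
//   }  // 'scratch' is automatically returned to the pool here.
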
inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand GlobalObjectMemOperand() {
  return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}

// Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {
 public:
  explicit InlineSmiCheckInfo(Address info);

  bool HasSmiCheck() const {
    return smi_check_ != NULL;
  }

  const Register& SmiRegister() const {
    return reg_;
  }

  Instruction* SmiCheck() const {
    return smi_check_;
  }

  // Use MacroAssembler::InlineData to emit information about patchable inline
  // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
  // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
  //
  // The generated patch information can be read using the InlineSmiCheckInfo
  // class.
  static void Emit(MacroAssembler* masm, const Register& reg,
                   const Label* smi_check);

  // Emit information to indicate that there is no inline SMI check.
  static void EmitNotInlined(MacroAssembler* masm) {
    Label unbound;
    Emit(masm, NoReg, &unbound);
  }

 private:
  Register reg_;
  Instruction* smi_check_;

  // Fields in the data encoded by InlineData.
  //
  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
  // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
  // used in a patchable check. The Emit() method checks this.
  //
  // Note that the total size of the fields is restricted by the underlying
  // storage size handled by the BitField class, which is a uint32_t.
  class RegisterBits : public BitField<unsigned, 0, 5> {};
  class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
};

} }  // namespace v8::internal

#ifdef GENERATED_CODE_COVERAGE
#error "Unsupported option"
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

#endif  // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_