// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#include <vector>

#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"
// Simulator specific helpers.
#ifdef USE_SIMULATOR
  // TODO(all): If possible automatically prepend an indicator like
  // UNIMPLEMENTED or LOCATION.
  #define ASM_UNIMPLEMENTED(message)                                         \
  __ Debug(message, __LINE__, NO_PARAM)
  #define ASM_UNIMPLEMENTED_BREAK(message)                                   \
  __ Debug(message, __LINE__,                                                \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
  #define ASM_LOCATION(message)                                              \
  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
  #define ASM_UNIMPLEMENTED(message)
  #define ASM_UNIMPLEMENTED_BREAK(message)
  #define ASM_LOCATION(message)
#endif
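// Usage sketch (illustrative; assumes the conventional '__' shorthand for a
// MacroAssembler* used throughout V8 code generators):
//   ASM_LOCATION("Builtins::Generate_Adaptor");
//   ASM_UNIMPLEMENTED_BREAK("unimplemented slow path");
// Under the simulator these emit Debug() pseudo-instructions; in hardware
// builds they expand to nothing.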
// Give alias names to registers for calling conventions.
// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
#define kReturnRegister0 x0
#define kReturnRegister1 x1
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kInterpreterAccumulatorRegister x0
#define kInterpreterRegisterFileRegister x18
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0

#define LS_MACRO_LIST(V)                                      \
  V(Ldrb, Register&, rt, LDRB_w)                              \
  V(Strb, Register&, rt, STRB_w)                              \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w)  \
  V(Ldrh, Register&, rt, LDRH_w)                              \
  V(Strh, Register&, rt, STRH_w)                              \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w)  \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                     \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                    \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V)                              \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))   \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2))  \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
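
// Each V(...) entry above is expanded by an X-macro. For example, the
// DECLARE_FUNCTION expansion inside MacroAssembler (see below) turns the
// Ldrb entry into:
//   inline void Ldrb(const Register& rt, const MemOperand& addr);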

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);
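
// Usage sketch (illustrative): load an object's map through the usual
// tagged-pointer adjustment that FieldMemOperand applies:
//   __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));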

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those, the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_cs = cs,
  integer_lo = lo,
  integer_cc = cc,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
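
// For example, InvertBranchType(reg_zero) yields reg_not_zero: the enum
// places each register- and bit-based type one bit apart from its inverse
// (see the STATIC_ASSERT in MacroAssembler below), while condition-based
// types invert via NegateCondition, e.g. eq <-> ne.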

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size);

  inline Handle<Object> CodeObject();

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void And(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  inline void Add(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd,
                  const Operand& operand);
  inline void Negs(const Register& rd,
                   const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSub op);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd,
                  const Operand& operand);
  inline void Ngcs(const Register& rd,
                   const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true for success, and updates the contents of dst.
  // Returns false otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);

  // Conditional macros.
  inline void Ccmp(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  inline void Ccmn(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);

  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
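  // Usage sketch (illustrative):
  //   __ B(&done, reg_zero, x0);        // cbz: branch if x0 is zero.
  //   __ B(&done, reg_bit_set, x0, 3);  // tbnz: branch if bit 3 of x0 is set.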

  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);
  inline void Bfi(const Register& rd,
                  const Register& rn,
                  unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  inline void Brk(int code);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Csinc(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
  inline void Extr(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn,
                    const FPRegister& fm,
                    StatusFlags nzcv,
                    Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmax(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template<typename T>
  void Fmov(FPRegister fd, T imm) {
    DCHECK(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, FPRegister fn);
  inline void Fmsub(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmul(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fneg(const FPRegister& fd, const FPRegister& fn);
  inline void Fnmadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Fnmsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Frinta(const FPRegister& fd, const FPRegister& fn);
  inline void Frintm(const FPRegister& fd, const FPRegister& fn);
  inline void Frintn(const FPRegister& fd, const FPRegister& fn);
  inline void Frintp(const FPRegister& fd, const FPRegister& fn);
  inline void Frintz(const FPRegister& fd, const FPRegister& fn);
  inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
  inline void Fsub(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Ldnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& src);
  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Immediate& imm);
  // Helper function for double immediate.
  inline void Ldr(const CPURegister& rt, double imm);
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Madd(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Mov(const Register& rd, const Register& rm);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Msub(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  inline void Nop() { nop(); }
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Ret(const Register& xn = lr);
  inline void Rev(const Register& rd, const Register& rn);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Sbfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Sbfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Scvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Smaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smull(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Smulh(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Stnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  inline void Ubfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Ucvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Umaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Pseudo-instructions ------------------------------------------------------

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is csp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (csp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  void Push(const Register& src0, const FPRegister& src1);
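
  // Usage sketch (illustrative):
  //   __ Push(x0, x1, x2);  // Equivalent to Push(x0); Push(x1); Push(x2);
  //                         // x0 ends up at the highest address.
  //   __ Pop(x2, x1, x0);   // Restores all three registers.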

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  inline void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
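
  // Usage sketch (illustrative): spill three X registers in one go. A RegList
  // is a simple bit mask; CPURegister::Bit() produces the corresponding bit.
  //   __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit());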

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);
  void PushMultipleTimes(CPURegister src, int count);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) {
    Push(src);
  }
  inline void pop(Register dst) {
    Pop(dst);
  }

  // Sometimes callers need to push or pop multiple registers in a way that is
  // difficult to structure efficiently for fixed Push or Pop calls. This scope
  // allows push requests to be queued up, then flushed at once. The
  // MacroAssembler will try to generate the most efficient sequence required.
  //
  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
  // register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }

    ~PushPopQueue() {
      DCHECK(queued_.empty());
    }

    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    enum PreambleDirective {
      WITH_PREAMBLE,
      SKIP_PREAMBLE
    };
    void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
    void PopQueued();

   private:
    MacroAssembler* masm_;
    int size_;
    std::vector<CPURegister> queued_;
  };
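
  // Usage sketch (illustrative):
  //   MacroAssembler::PushPopQueue queue(masm);
  //   queue.Queue(x0);
  //   queue.Queue(d0);     // Mixed register sizes and types are fine.
  //   queue.PushQueued();  // Flushes the queue as one efficient sequence.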

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
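
  // Usage sketch (illustrative): store x0 at [sp] and x1 at [sp + 8]:
  //   __ PokePair(x0, x1, 0);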

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count,
                    uint64_t unit_size = kXRegSize);
  inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count,
                   uint64_t unit_size = kXRegSize);
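
  // Usage sketch (illustrative):
  //   __ Claim(2);  // Reserve 2 * kXRegSize = 16 bytes of stack.
  //   ...
  //   __ Drop(2);   // Give the space back.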

  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
  // register.
  inline void ClaimBySMI(const Register& count_smi,
                         uint64_t unit_size = kXRegSize);
  inline void DropBySMI(const Register& count_smi,
                        uint64_t unit_size = kXRegSize);

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs,
                               const Operand& rhs,
                               Condition cond,
                               Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern,
                                    Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (ie. not set.) May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);

  // Insert one or more instructions into the instruction stream that encode
  // some caller-defined data. The instructions used will be executable with no
  // side effects.
  inline void InlineData(uint64_t data);

  // Insert an instrumentation enable marker into the instruction stream.
  inline void EnableInstrumentation();

  // Insert an instrumentation disable marker into the instruction stream.
  inline void DisableInstrumentation();

  // Insert an instrumentation event marker into the instruction stream. These
  // will be picked up by the instrumentation system to annotate an instruction
  // profile. The argument marker_name must be a printable two character string;
  // it will be encoded in the event marker.
  inline void AnnotateInstrumentation(const char* marker_name);

  // If emit_debug_code() is true, emit a run-time check to ensure that
  // StackPointer() does not point below the system stack pointer.
  //
  // Whilst it is architecturally legal for StackPointer() to point below csp,
  // it can be evidence of a potential bug because the ABI forbids accesses
  // below csp.
  //
  // If StackPointer() is the system stack pointer (csp), then csp will be
  // dereferenced to cause the processor (or simulator) to abort if it is not
  // properly aligned.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertStackConsistency();

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PopCalleeSavedRegisters();

  // Set the current stack pointer, but don't generate any code.
  inline void SetStackPointer(const Register& stack_pointer) {
    DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const {
    return sp_;
  }

  // Align csp for a frame, as per ActivationFrameAlignment, and make it the
  // current stack pointer.
  inline void AlignAndSetCSPForFrame() {
    int sp_alignment = ActivationFrameAlignment();
    // AAPCS64 mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
    Bic(csp, StackPointer(), sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Push the system stack pointer (csp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed. The amount pushed will be increased as necessary
  // to ensure csp remains aligned to 16 bytes.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void BumpSystemStackPointer(const Operand& space);

  // Re-synchronizes the system stack pointer (csp) with the current stack
  // pointer (according to StackPointer()).
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void SyncSystemStackPointer();

  // Helpers ------------------------------------------------------------------
  // Root register.
  inline void InitializeRootRegister();

  void AssertFPCRState(Register fpcr = NoReg);
  void ConfigureFPCR();
  void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
  void CanonicalizeNaN(const FPRegister& reg) {
    CanonicalizeNaN(reg, reg);
  }

  // Load an object from the root table.
  void LoadRoot(CPURegister destination,
                Heap::RootListIndex index);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);

  // Load both TrueValue and FalseValue roots.
  void LoadTrueFalseRoots(Register true_root, Register false_root);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      DCHECK(object->IsSmi());
      Mov(result, Operand(object));
    }
  }

  static int SafepointRegisterStackIndex(int reg_code);

  // This is required for compatibility with architecture independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }

  void LoadInstanceDescriptors(Register map,
                               Register descriptors);
  void EnumLengthUntagged(Register dst, Register map);
  void EnumLengthSmi(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
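
  // Usage sketch (illustrative): extract a BitField-encoded value, e.g. the
  // elements kind bits of a map's bit field, in place:
  //   __ DecodeField<Map::ElementsKindBits>(x0);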

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);
  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register smi);
  inline void SmiUntagToDouble(FPRegister dst,
                               Register src,
                               UntagMode mode = kNotSpeculativeUntag);
  inline void SmiUntagToFloat(FPRegister dst,
                              Register src,
                              UntagMode mode = kNotSpeculativeUntag);

  // Tag and push in one step.
  inline void SmiTagAndPush(Register src);
  inline void SmiTagAndPush(Register src1, Register src2);
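
  // On arm64 a smi keeps its 32-bit payload in the upper word of the
  // register, so tagging and untagging are plain shifts by kSmiShift
  // (a simplified sketch of the behaviour):
  //   __ SmiTag(x0);    // x0 = x0 << kSmiShift
  //   __ SmiUntag(x0);  // x0 = x0 >> kSmiShift (arithmetic)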

  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1,
                            Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1,
                              Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1,
                                 Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1,
                               Register value2,
                               Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
  void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);

  inline void ObjectTag(Register tagged_obj, Register obj);
  inline void ObjectUntag(Register untagged_obj, Register obj);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  void JumpIfHeapNumber(Register object, Label* on_heap_number,
                        SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
  void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
                           SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);

  // Sets the vs flag if the input is -0.0.
  void TestForMinusZero(DoubleRegister input);

  // Jump to label if the input double register contains -0.0.
  void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);

  // Jump to label if the input integer register contains the double precision
  // floating point representation of -0.0.
  void JumpIfMinusZero(Register input, Label* on_negative_zero);

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
  // output.
  void ClampInt32ToUint8(Register in_out);
  void ClampInt32ToUint8(Register output, Register input);

  // Saturate a double in input to an unsigned 8-bit integer in output.
  void ClampDoubleToUint8(Register output,
                          DoubleRegister input,
                          DoubleRegister dbl_scratch);

  // Try to represent a double as a signed 32-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt32(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is32Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // Try to represent a double as a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt64(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is64Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // ---- Object Utilities ----

  // Copy fields from 'src' to 'dst', where both are tagged objects.
  // The 'temps' list is a list of X registers which can be used for scratch
  // values. The temps list must include at least one register.
  //
  // Currently, CopyFields cannot make use of more than three registers from
  // the 'temps' list.
  //
  // CopyFields expects to be able to take at least two registers from
  // MacroAssembler::TmpList().
  void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);

  // Starting at address in dst, initialize field_count 64-bit fields with
  // 64-bit value in register filler. Register dst is corrupted.
  void FillFields(Register dst,
                  Register field_count,
                  Register filler);

  // Copies a number of bytes from src to dst. All passed registers are
  // clobbered. On exit src and dst will point to the place just after where the
  // last byte was read or written and length will be zero. Hint may be used to
  // determine which is the most efficient algorithm to use for copying.
  void CopyBytes(Register dst,
                 Register src,
                 Register length,
                 Register scratch,
                 CopyHint hint = kCopyUnknown);

  // ---- String Utilities ----

  // Jump to label if either object is not a sequential one-byte string.
  // Optionally perform a smi check on the objects first.
  void JumpIfEitherIsNotSequentialOneByteStrings(
      Register first, Register second, Register scratch1, Register scratch2,
      Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);

  // ---- Calling / Jumping helpers ----

  // This is required for compatibility with architecture independent code.
  inline void jmp(Label* L) { B(L); }

  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
  void TailCallStub(CodeStub* stub);

  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }
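
  // Usage sketch (illustrative): call a zero-argument runtime function:
  //   __ CallRuntime(Runtime::kStackGuard, 0);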

  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int ActivationFrameAlignment();

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of arguments.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Invoke specified builtin JavaScript function.
  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // setup the function in the function register.
  void GetBuiltinEntry(Register target, Register function,
                       int native_context_index);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, int native_context_index);

  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode);
  void Jump(intptr_t target, RelocInfo::Mode rmode);

  void Call(Register target);
  void Call(Label* target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // For every Call variant, there is a matching CallSize function that returns
  // the size (in bytes) of the call sequence.
  static int CallSize(Register target);
  static int CallSize(Label* target);
  static int CallSize(Address target, RelocInfo::Mode rmode);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                      TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      InvokeFlag flag,
                      bool* definitely_mismatches,
                      const CallWrapper& call_wrapper);
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // ---- Floating point helpers ----

  // Perform a conversion from a double to a signed int64. If the input fits in
  // range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  //
  // Only public for the test code in test-code-stubs-arm64.cc.
  void TryConvertDoubleToInt64(Register result,
                               DoubleRegister input,
                               Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Label* not_int32);

  // ---- Code generation helpers ----

  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() const { return generating_stub_; }
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
  bool use_real_aborts() const { return use_real_aborts_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() const { return has_frame_; }
  bool AllowThisStubCall(CodeStub* stub);

  class NoUseRealAbortsScope {
   public:
    explicit NoUseRealAbortsScope(MacroAssembler* masm) :
        saved_(masm->use_real_aborts_), masm_(masm) {
      masm_->use_real_aborts_ = false;
    }
    ~NoUseRealAbortsScope() {
      masm_->use_real_aborts_ = saved_;
    }

   private:
    MacroAssembler* masm_;
    bool saved_;
  };

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. The allocated object is returned in result.
  //
  // If the new space is exhausted control continues at the gc_required label.
  // In this case, the result and scratch registers may still be clobbered.
  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
  void Allocate(Register object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed.
  // All registers are clobbered.
  // If no heap_number_map register is provided, the function will take care of
  // loading it.
  void AllocateHeapNumber(Register result,
                          Label* gc_required,
                          Register scratch1,
                          Register scratch2,
                          CPURegister value = NoFPReg,
                          CPURegister heap_number_map = NoReg,
                          MutableMode mode = IMMUTABLE);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare object type for heap object, and branch if equal (or not.)
  // heap_object contains a non-Smi whose object type should be compared with
  // the given type. This both sets the flags and leaves the object type in
  // the type_reg register. It leaves the map in the map register (unless the
  // type_reg and map register are the same register). It leaves the heap
  // object in the heap_object register unless the heap_object register is the
  // same register as one of the other registers.
  void JumpIfObjectType(Register object,
                        Register map,
                        Register type_reg,
                        InstanceType type,
                        Label* if_cond_pass,
                        Condition cond = eq);
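
  // Usage sketch (illustrative): branch to is_function if x0 holds a
  // JSFunction, clobbering x1 (map) and x2 (instance type):
  //   __ JumpIfObjectType(x0, x1, x2, JS_FUNCTION_TYPE, &is_function);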

  void JumpIfNotObjectType(Register object,
                           Register map,
                           Register type_reg,
                           InstanceType type,
                           Label* if_not_object);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);

  // Compare an object's map with the specified map. Condition flags are set
  // with result of map compare.
  void CompareObjectMap(Register obj, Heap::RootListIndex index);

  // Compare an object's map with the specified map. Condition flags are set
  // with result of map compare.
  void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map,
                  Handle<Map> map);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // As above, but the map of the object is already loaded into obj_map, and is
  // preserved.
  void CheckMap(Register obj_map,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Compare the given value and the value of weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Test the bitfield of the heap object map with mask and set the condition
  // flags. The object register is preserved.
  void TestMapBitfield(Register object, uint64_t mask);

  // Load the elements kind field from a map, and return it in the result
  // register.
  void LoadElementsKindFromMap(Register result, Register map);

  // Compare the object in a register to a value from the root list.
  void CompareRoot(const Register& obj, Heap::RootListIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(const Register& obj,
                  Heap::RootListIndex index,
                  Label* if_equal);

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(const Register& obj,
                     Heap::RootListIndex index,
                     Label* if_not_equal);

  // Load and check the instance type of an object for being a unique name.
  // Loads the type into the second argument register.
  // The object and type arguments can be the same register; in that case it
  // will be overwritten with the type.
  // Fall-through if the object was a string and jump on fail otherwise.
  inline void IsObjectNameType(Register object, Register type, Label* fail);

  inline void IsObjectJSObjectType(Register heap_object,
                                   Register map,
                                   Register scratch,
                                   Label* fail);

  // Check the instance type in the given map to see if it corresponds to a
  // JS object type. Jump to the fail label if this is not the case and fall
  // through otherwise. However if fail label is NULL, no branch will be
  // performed and the flag will be updated. You can test the flag for "le"
  // condition to test if it is a valid JS object type.
  inline void IsInstanceJSObjectType(Register map,
                                     Register scratch,
                                     Label* fail);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // The object and type arguments can be the same register; in that case it
  // will be overwritten with the type.
  // Jumps to not_string or string, as appropriate. If the appropriate label is
  // NULL, fall through.
  inline void IsObjectJSStringType(Register object, Register type,
                                   Label* not_string, Label* string = NULL);

  // Compare the contents of a register with an operand, and branch to true,
  // false or fall through, depending on condition.
  void CompareAndSplit(const Register& lhs,
                       const Operand& rhs,
                       Condition cond,
                       Label* if_true,
                       Label* if_false,
                       Label* fall_through);

  // Test the bits of register defined by bit_pattern, and branch to
  // if_any_set, if_all_clear or fall_through accordingly.
  void TestAndSplit(const Register& reg,
                    uint64_t bit_pattern,
                    Label* if_all_clear,
                    Label* if_any_set,
                    Label* fall_through);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map, Register scratch, Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Register scratch, Label* fail);

  // Check to see if number can be stored as a double in FastDoubleElements.
  // If it can, store it at the index specified by key_reg in the array,
  // otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   FPRegister fpscratch1,
                                   Label* fail,
                                   int elements_offset = 0);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash  - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // ---------------------------------------------------------------------------
  // Inline caching support.

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 SeqStringSetCharCheckIndexType index_type,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch1,
                              Register scratch2,
                              Label* miss);

  // Hash the integer value in 'key' register.
  // It uses the same algorithm as ComputeIntegerHash in utils.h.
  void GetNumberHash(Register key, Register scratch);

  // Load value from the dictionary.
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'result'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register scratch0,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3);
  // ---------------------------------------------------------------------------
  // Activation support.

  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Returns map with validated enum cache in object register.
  void CheckEnumCache(Register object,
                      Register null_value,
                      Register scratch0,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated AllocationMemento
  // object that can be checked for in order to pretransition to another type.
  // On entry, receiver should point to the array object.
  // If allocation info is present, the Z flag is set (so that the eq
  // condition will pass).
  void TestJSArrayForAllocationMemento(Register receiver,
                                       Register scratch1,
                                       Register scratch2,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
                                    &no_memento_found);
    B(eq, memento_found);
    Bind(&no_memento_found);
  }

  // The stack pointer has to switch between csp and jssp when setting up and
  // destroying the exit frame. Hence preserving/restoring the registers is
  // slightly more complicated than simple push/pop operations.
  void ExitFramePreserveFPRegs();
  void ExitFrameRestoreFPRegs();

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Enter exit frame. Exit frames are used when calling C code from generated
  // (JavaScript) code.
  //
  // The stack pointer must be jssp on entry, and will be set to csp by this
  // function. The frame pointer is also configured, but the only other
  // registers modified by this function are the provided scratch register, and
  // jssp.
  //
  // The 'extra_space' argument can be used to allocate some space in the exit
  // frame that will be ignored by the GC. This space will be reserved in the
  // bottom of the frame immediately above the return address slot.
  //
  // Set up a stack frame and registers as follows:
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: SPOffset (new csp)
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp_size]: Saved doubles, if save_doubles is true.
  //         csp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //  csp -> csp[0]: Space reserved for the return address.
  //
  // This function also stores the new frame information in the top frame, so
  // that the new frame becomes the current frame.
  void EnterExitFrame(bool save_doubles,
                      const Register& scratch,
                      int extra_space = 0);

  // Leave the current exit frame, after a C function has returned to generated
  // (JavaScript) code.
  //
  // This effectively unwinds the operation of EnterExitFrame:
  //  * Preserved doubles are restored (if save_doubles is true).
  //  * The frame information is removed from the top frame.
  //  * The exit frame is dropped.
  //  * The stack pointer is reset to jssp.
  //
  // The stack pointer must be csp on entry.
  void LeaveExitFrame(bool save_doubles,
                      const Register& scratch,
                      bool restore_context);

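  // An illustrative pairing (not from the original header; x10 is an
  // arbitrary choice of scratch register):
  //
  //   __ EnterExitFrame(true, x10, 2 * kPointerSize);
  //   // ... set up arguments and call the C function ...
  //   __ LeaveExitFrame(true, x10, true);
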
  void LoadContext(Register dst, int context_chain_length);

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

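  // For example (illustrative only), a truncating division of w1 by 7 that
  // leaves w1 unchanged:
  //
  //   __ TruncatingDiv(w0, w1, 7);  // w0 = w1 / 7, truncated towards zero.
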
  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value, Register scratch1,
                  Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Garbage collector support (GC).

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register address,
                           Register scratch1,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  void PushSafepointRegistersAndDoubles();
  void PopSafepointRegistersAndDoubles();

  // Store value in register src in the safepoint stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst) {
    Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
  }

  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src) {
    Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
  }

  void CheckPageFlagSet(const Register& object,
                        const Register& scratch,
                        int mask,
                        Label* if_any_set);

  void CheckPageFlagClear(const Register& object,
                          const Register& scratch,
                          int mask,
                          Label* if_all_clear);

  // Check if object is in new space and jump accordingly.
  // Register 'object' is preserved.
  void JumpIfNotInNewSpace(Register object,
                           Label* branch) {
    InNewSpace(object, ne, branch);
  }

  void JumpIfInNewSpace(Register object,
                        Label* branch) {
    InNewSpace(object, eq, branch);
  }

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

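  // A typical write-barrier pattern might look as follows (illustrative only;
  // the field offset and registers are placeholders):
  //
  //   __ Str(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
  //   __ RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
  //                       kLRHasNotBeenSaved, kDontSaveFPRegs);
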
  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     lr_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Register scratch4,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);

  // Helper for finding the mark bits for an address.
  // Note that the behaviour slightly differs from other architectures.
  // On exit:
  //  - addr_reg is unchanged.
  //  - The bitmap register points at the word with the mark bits.
  //  - The shift register contains the index of the first color bit for this
  //    object in the bitmap.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register shift_reg);

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // ---------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertRegisterIsClear(Register reg, BailoutReason reason);
  void AssertRegisterIsRoot(
      Register reg,
      Heap::RootListIndex index,
      BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
  void AssertFastElements(Register elements);

  // Abort if the specified register contains the invalid color bit pattern.
  // The pattern must be in bits [1:0] of the 'reg' register.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertHasValidColor(const Register& reg);

  // Abort if the 'object' register doesn't point to a string object.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertIsString(const Register& object);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);
  void CheckRegisterIsClear(Register reg, BailoutReason reason);

  // Print a message to stderr and abort execution.
  void Abort(BailoutReason reason);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch1,
      Register scratch2,
      Label* no_map_match);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers function and
  // map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // At the moment it is only possible to print the value of csp if it is the
  // current stack pointer. Otherwise, the MacroAssembler will automatically
  // update csp on every push (using BumpSystemStackPointer), so determining its
  // value is difficult.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char * format,
              CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg,
              CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);

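  // For example (illustrative only; the registers are placeholders):
  //
  //   __ Printf("count: %d, ratio: %f\n", w0, d1);
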
  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char * format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);

  // Code ageing support functions.
  //
  // Code ageing on ARM64 works much as it does on ARM. When V8 wants to mark a
  // function as old, it replaces some of the function prologue (generated by
  // FullCodeGenerator::Generate) with a call to a special stub (ultimately
  // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
  // function prologue to its initial young state (indicating that it has been
  // recently run) and continues. A young function is therefore one which has a
  // normal frame setup sequence, and an old function has a code age sequence
  // which calls a code ageing stub.

  // Set up a basic stack frame for young code (or code exempt from ageing) with
  // type FUNCTION. It may be patched later for code ageing support. This is
  // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
  //
  // This function takes an Assembler so it can be called from either a
  // MacroAssembler or a PatchingAssembler context.
  static void EmitFrameSetupForCodeAgePatching(Assembler* assm);

  // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
  void EmitFrameSetupForCodeAgePatching();

  // Emit a code age sequence that calls the relevant code age stub. The code
  // generated by this sequence is expected to replace the code generated by
  // EmitFrameSetupForCodeAgePatching, and represents an old function.
  //
  // If stub is NULL, this function generates the code age sequence but omits
  // the stub address that is normally embedded in the instruction stream. This
  // can be used by debug code to verify code age sequences.
  static void EmitCodeAgeSequence(Assembler* assm, Code* stub);

  // Call EmitCodeAgeSequence from a MacroAssembler context.
  void EmitCodeAgeSequence(Code* stub);

  // Return true if the sequence is a young sequence generated by
  // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
  // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
  static bool IsYoungSequence(Isolate* isolate, byte* sequence);

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Perform necessary maintenance operations before a push or after a pop.
  //
  // Note that size is specified in bytes.
  void PushPreamble(Operand total_size);
  void PopPostamble(Operand total_size);

  void PushPreamble(int count, int size) { PushPreamble(count * size); }
  void PopPostamble(int count, int size) { PopPostamble(count * size); }

 private:
  // Helpers for CopyFields.
  // These each implement CopyFields in a different way.
  void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
                                 Register scratch1, Register scratch2,
                                 Register scratch3, Register scratch4,
                                 Register scratch5);
  void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
                                     Register scratch1, Register scratch2,
                                     Register scratch3, Register scratch4);
  void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
                                Register scratch1, Register scratch2,
                                Register scratch3);

  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size,
                  const CPURegister& src0, const CPURegister& src1,
                  const CPURegister& src2, const CPURegister& src3);
  void PopHelper(int count, int size,
                 const CPURegister& dst0, const CPURegister& dst1,
                 const CPURegister& dst2, const CPURegister& dst3);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack (csp) must be prepared by the caller as for a normal
  // AAPCS64 call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in their
  // proper PCS registers (and in calling order). The argument registers can
  // have mixed types. The format string (x0) should not be included.
  void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Try to represent a double as an int so that integer fast-paths may be
  // used. Not every valid integer value is guaranteed to be caught.
  // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
  // is a W or X register.
  //
  // This does not distinguish between +0 and -0, so if this distinction is
  // important it must be checked separately.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion = NULL,
                               Label* on_failed_conversion = NULL);

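  // Illustrative check (not from the original header; the registers are
  // placeholders): w0 receives the integer if d0 is exactly representable,
  // and the Z flag reports success either way.
  //
  //   __ TryRepresentDoubleAsInt(w0, d0, d31);
  //   __ B(ne, &not_an_int);  // Z clear: the conversion failed.
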
  bool generating_stub_;

  // Tells whether any of the macro instructions can be used. When false, the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_;

  // The Abort method should call a V8 runtime function, but the CallRuntime
  // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
  // use a simpler abort mechanism that doesn't depend on CEntryStub.
  //
  // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
  // being generated.
  bool use_real_aborts_;

  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // The register to use as a stack pointer for stack operations.
  Register sp_;

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_;
  CPURegList fptmp_list_;

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Far branches resolving.
  //
  // The various classes of branch instructions with immediate offsets have
  // different ranges. While the Assembler will fail to assemble a branch
  // exceeding its range, the MacroAssembler offers a mechanism to resolve
  // branches to too distant targets, either by tweaking the generated code to
  // use branch instructions with wider ranges or generating veneers.
  //
  // Currently branches to distant targets are resolved using unconditional
  // branch instructions with a range of +-128MB. If that becomes too little
  // (!), the mechanism can be extended to generate special veneers for really
  // far targets.

  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is responsible
  // for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
                                             ImmBranchType branch_type);
};

// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts that the number of
// instructions emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
 public:
  explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
      : masm_(masm),
        size_(count * kInstructionSize) {
    // Before blocking the const pool, see if it needs to be emitted.
    masm_->CheckConstPool(false, true);
    masm_->CheckVeneerPool(false, true);

    masm_->StartBlockPools();
    if (count != 0) {
      masm_->bind(&start_);
    }
    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
    masm_->set_allow_macro_instructions(false);
  }

  ~InstructionAccurateScope() {
    masm_->EndBlockPools();
    if (start_.is_bound()) {
      DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
  }

 private:
  MacroAssembler* masm_;
  size_t size_;
  Label start_;
  bool previous_allow_macro_instructions_;
};

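// Illustrative usage (not from the original header): emit exactly two raw
// instructions with pool emission blocked.
//
//   {
//     InstructionAccurateScope scope(&masm, 2);
//     __ add(x0, x0, x1);
//     __ sub(x2, x2, x3);
//   }
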
// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : available_(masm->TmpList()),
        availablefp_(masm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK(available_->type() == CPURegister::kRegister);
    DCHECK(availablefp_->type() == CPURegister::kFPRegister);
  }

  ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }

  Register UnsafeAcquire(const Register& reg) {
    return Register(UnsafeAcquire(available_, reg));
  }

  Register AcquireSameSizeAs(const Register& reg);
  FPRegister AcquireSameSizeAs(const FPRegister& reg);

 private:
  static CPURegister AcquireNextAvailable(CPURegList* available);
  static CPURegister UnsafeAcquire(CPURegList* available,
                                   const CPURegister& reg);

  // Available scratch registers.
  CPURegList* available_;     // kRegister
  CPURegList* availablefp_;   // kFPRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;     // kRegister
  RegList old_availablefp_;   // kFPRegister
};

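// For example (illustrative only):
//
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();  // Returned when the scope ends.
//     __ Mov(scratch, 42);
//   }
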
inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand GlobalObjectMemOperand() {
  return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}

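// Illustrative use: load the global object from the current context.
//
//   __ Ldr(x0, GlobalObjectMemOperand());
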
// Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {
 public:
  explicit InlineSmiCheckInfo(Address info);

  bool HasSmiCheck() const {
    return smi_check_ != NULL;
  }

  const Register& SmiRegister() const {
    return reg_;
  }

  Instruction* SmiCheck() const {
    return smi_check_;
  }

  // Use MacroAssembler::InlineData to emit information about patchable inline
  // SMI checks. The caller may specify 'reg' as NoReg and an unbound
  // 'smi_check' label to indicate that there is no inline SMI check. Note that
  // 'reg' cannot be csp.
  //
  // The generated patch information can be read using the InlineSmiCheckInfo
  // class.
  static void Emit(MacroAssembler* masm, const Register& reg,
                   const Label* smi_check);

  // Emit information to indicate that there is no inline SMI check.
  static void EmitNotInlined(MacroAssembler* masm) {
    Label unbound;
    Emit(masm, NoReg, &unbound);
  }

 private:
  Register reg_;
  Instruction* smi_check_;

  // Fields in the data encoded by InlineData.
  //
  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
  // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
  // used in a patchable check. The Emit() method checks this.
  //
  // Note that the total size of the fields is restricted by the underlying
  // storage size handled by the BitField class, which is a uint32_t.
  class RegisterBits : public BitField<unsigned, 0, 5> {};
  class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
};

} }  // namespace v8::internal

#ifdef GENERATED_CODE_COVERAGE
#error "Unsupported option"
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

#endif  // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_