// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"

// Simulator specific helpers.
#if USE_SIMULATOR
  // TODO(all): If possible automatically prepend an indicator like
  // UNIMPLEMENTED or LOCATION.
  #define ASM_UNIMPLEMENTED(message) \
  __ Debug(message, __LINE__, NO_PARAM)
  #define ASM_UNIMPLEMENTED_BREAK(message) \
  __ Debug(message, __LINE__, \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
  #define ASM_LOCATION(message) \
  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
  #define ASM_UNIMPLEMENTED(message)
  #define ASM_UNIMPLEMENTED_BREAK(message)
  #define ASM_LOCATION(message)
#endif

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
#define kReturnRegister0 x0
#define kReturnRegister1 x1
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kInterpreterAccumulatorRegister x0
#define kInterpreterRegisterFileRegister x18
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0

#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
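
// Illustrative expansion (an editorial sketch, not part of the original
// header): each V(...) entry is instantiated below with a DECLARE_FUNCTION
// macro of the form
//   #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
//     inline void FN(const REGTYPE REG, const MemOperand& addr);
// so that, for example, LS_MACRO_LIST(DECLARE_FUNCTION) declares
//   inline void Ldrb(const Register& rt, const MemOperand& addr);
// with the final OP argument (e.g. LDRB_w) consumed by the definitions.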

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
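
// Illustrative behaviour (an editorial sketch, not from the original file):
// condition-based branch types invert through NegateCondition, so
//   InvertBranchType(eq) == ne
// while the register- and bit-based types rely on the xor-by-1 pairing
// asserted in MacroAssembler below, so
//   InvertBranchType(reg_zero) == reg_not_zero
//   InvertBranchType(reg_bit_set) == reg_bit_clear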

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size);

  inline Handle<Object> CodeObject();

  // Instruction set functions ------------------------------------------------

  inline void And(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  inline void Add(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd,
                  const Operand& operand);
  inline void Negs(const Register& rd,
                   const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd,
                  const Operand& operand);
  inline void Ngcs(const Register& rd,
                   const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true for success, and updates the contents of dst.
  // Returns false otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
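
  // Illustrative use (an editorial sketch; 'imm' is an arbitrary constant):
  //   if (!TryOneInstrMoveImmediate(x0, imm)) {
  //     // More than one instruction is needed; Mov() synthesizes the
  //     // immediate with a movz/movn plus movk sequence as required.
  //     Mov(x0, imm);
  //   }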

  // Conditional macros.
  inline void Ccmp(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  inline void Ccmn(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);
274 // Load/store macros.
275 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
276 inline void FN(const REGTYPE REG, const MemOperand& addr);
277 LS_MACRO_LIST(DECLARE_FUNCTION)
278 #undef DECLARE_FUNCTION
280 void LoadStoreMacro(const CPURegister& rt,
281 const MemOperand& addr,
284 #define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
285 inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
286 LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
287 #undef DECLARE_FUNCTION
289 void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
290 const MemOperand& addr, LoadStorePairOp op);

  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);
  inline void Bfi(const Register& rd,
                  const Register& rn,
                  unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  inline void Brk(int code);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Csinc(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
  inline void Extr(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn,
                    const FPRegister& fm,
                    StatusFlags nzcv,
                    Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmax(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template<typename T>
  void Fmov(FPRegister fd, T imm) {
    DCHECK(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, FPRegister fn);
  inline void Fmsub(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmul(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fneg(const FPRegister& fd, const FPRegister& fn);
  inline void Fnmadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Fnmsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Frinta(const FPRegister& fd, const FPRegister& fn);
  inline void Frintm(const FPRegister& fd, const FPRegister& fn);
  inline void Frintn(const FPRegister& fd, const FPRegister& fn);
  inline void Frintp(const FPRegister& fd, const FPRegister& fn);
  inline void Frintz(const FPRegister& fd, const FPRegister& fn);
  inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
  inline void Fsub(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Ldnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& src);
  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Immediate& imm);
  // Helper function for double immediate.
  inline void Ldr(const CPURegister& rt, double imm);
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Madd(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Mov(const Register& rd, const Register& rm);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Msub(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  inline void Nop() { nop(); }
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Ret(const Register& xn = lr);
  inline void Rev(const Register& rd, const Register& rn);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Sbfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Sbfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Scvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Smaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smull(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Smulh(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Stnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  inline void Ubfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Ucvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Umaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Pseudo-instructions ------------------------------------------------------

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);
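
  // Illustrative use (an editorial sketch, not from the original file):
  //   Label not_representable, done;
  //   __ Abs(x0, x1, &not_representable);  // x0 = |x1|.
  //   __ B(&done);
  //   __ Bind(&not_representable);         // x1 was the minimum int64 value.
  //   ...
  //   __ Bind(&done);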

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is csp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (csp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  void Push(const Register& src0, const FPRegister& src1);
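
  // Illustrative use (an editorial sketch):
  //   __ Push(x0, x1);  // Equivalent to Push(x0); Push(x1);
  //   __ Pop(x1, x0);   // Pops in the reverse order, restoring both values.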

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  inline void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);
  void PushMultipleTimes(CPURegister src, int count);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) {
    Push(src);
  }
  inline void pop(Register dst) {
    Pop(dst);
  }

  // Sometimes callers need to push or pop multiple registers in a way that is
  // difficult to structure efficiently for fixed Push or Pop calls. This scope
  // allows push requests to be queued up, then flushed at once. The
  // MacroAssembler will try to generate the most efficient sequence required.
  //
  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
  // register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }

    ~PushPopQueue() {
      DCHECK(queued_.empty());
    }

    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    enum PreambleDirective {
      WITH_PREAMBLE,
      SKIP_PREAMBLE
    };
    void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
    void PopQueued();

   private:
    MacroAssembler* masm_;
    int size_;
    std::vector<CPURegister> queued_;
  };
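
  // Illustrative use (an editorial sketch, assuming a MacroAssembler 'masm'):
  //   MacroAssembler::PushPopQueue queue(masm);
  //   queue.Queue(x0);
  //   queue.Queue(d0);      // Mixed register sizes and types are allowed.
  //   queue.PushQueued();   // Flushes the requests as one efficient sequence.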

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
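
  // Illustrative use (an editorial sketch): Poke/Peek access stack memory
  // without moving the stack pointer:
  //   __ Poke(x0, 8);   // Write x0 to [StackPointer() + 8].
  //   __ Peek(x1, 8);   // Read the same slot back into x1.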

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count,
                    uint64_t unit_size = kXRegSize);
  inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count,
                   uint64_t unit_size = kXRegSize);

  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
  // register.
  inline void ClaimBySMI(const Register& count_smi,
                         uint64_t unit_size = kXRegSize);
  inline void DropBySMI(const Register& count_smi,
                        uint64_t unit_size = kXRegSize);
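
  // Illustrative use (an editorial sketch):
  //   __ Claim(2);             // Reserve 2 * kXRegSize bytes of stack.
  //   __ Poke(x0, 0);
  //   __ Poke(x1, kXRegSize);
  //   ...
  //   __ Drop(2);              // Release the space again.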

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs,
                               const Operand& rhs,
                               Condition cond,
                               Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern,
                                    Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);
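
  // Illustrative use (an editorial sketch):
  //   __ TestAndBranchIfAnySet(x0, kSmiTagMask, &not_smi);
  // When the pattern has a single set bit, this can be emitted as one tbnz;
  // otherwise it falls back to a tst followed by a conditional branch.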

  // Insert one or more instructions into the instruction stream that encode
  // some caller-defined data. The instructions used will be executable with no
  // side effects.
  inline void InlineData(uint64_t data);

  // Insert an instrumentation enable marker into the instruction stream.
  inline void EnableInstrumentation();

  // Insert an instrumentation disable marker into the instruction stream.
  inline void DisableInstrumentation();

  // Insert an instrumentation event marker into the instruction stream. These
  // will be picked up by the instrumentation system to annotate an instruction
  // profile. The argument marker_name must be a printable two character string;
  // it will be encoded in the event marker.
  inline void AnnotateInstrumentation(const char* marker_name);

  // If emit_debug_code() is true, emit a run-time check to ensure that
  // StackPointer() does not point below the system stack pointer.
  //
  // Whilst it is architecturally legal for StackPointer() to point below csp,
  // it can be evidence of a potential bug because the ABI forbids accesses
  // below csp.
  //
  // If StackPointer() is the system stack pointer (csp), then csp will be
  // dereferenced to cause the processor (or simulator) to abort if it is not
  // properly aligned.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertStackConsistency();

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PopCalleeSavedRegisters();

  // Set the current stack pointer, but don't generate any code.
  inline void SetStackPointer(const Register& stack_pointer) {
    DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const {
    return sp_;
  }

  // Align csp for a frame, as per ActivationFrameAlignment, and make it the
  // current stack pointer.
  inline void AlignAndSetCSPForFrame() {
    int sp_alignment = ActivationFrameAlignment();
    // AAPCS64 mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
    Bic(csp, StackPointer(), sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Push the system stack pointer (csp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed. The amount pushed will be increased as necessary
  // to ensure csp remains aligned to 16 bytes.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void BumpSystemStackPointer(const Operand& space);

  // Re-synchronizes the system stack pointer (csp) with the current stack
  // pointer (according to StackPointer()).
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void SyncSystemStackPointer();

  // Helpers ------------------------------------------------------------------

  inline void InitializeRootRegister();

  void AssertFPCRState(Register fpcr = NoReg);
  void ConfigureFPCR();
  void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
  void CanonicalizeNaN(const FPRegister& reg) {
    CanonicalizeNaN(reg, reg);
  }

  // Load an object from the root table.
  void LoadRoot(CPURegister destination,
                Heap::RootListIndex index);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);

  // Load both TrueValue and FalseValue roots.
  void LoadTrueFalseRoots(Register true_root, Register false_root);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      DCHECK(object->IsSmi());
      Mov(result, Operand(object));
    }
  }

  static int SafepointRegisterStackIndex(int reg_code);

  // This is required for compatibility with architecture-independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }

  void LoadInstanceDescriptors(Register map,
                               Register descriptors);
  void EnumLengthUntagged(Register dst, Register map);
  void EnumLengthSmi(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
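
  // Illustrative use (an editorial sketch; Map::ElementsKindBits is one of
  // V8's BitField definitions):
  //   DecodeField<Map::ElementsKindBits>(x0, x1);
  // extracts the field from x1 into x0 with a single ubfx.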

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);
  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register smi);
  inline void SmiUntagToDouble(FPRegister dst,
                               Register src,
                               UntagMode mode = kNotSpeculativeUntag);
  inline void SmiUntagToFloat(FPRegister dst,
                              Register src,
                              UntagMode mode = kNotSpeculativeUntag);

  // Tag and push in one step.
  inline void SmiTagAndPush(Register src);
  inline void SmiTagAndPush(Register src1, Register src2);
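
  // Illustrative semantics (an editorial sketch): on arm64, V8 stores a smi's
  // 32-bit value in the upper word, so tagging is a left shift by kSmiShift
  // and untagging is the corresponding arithmetic shift right:
  //   SmiTag(x0);    // x0 = x0 << kSmiShift
  //   SmiUntag(x0);  // x0 = x0 >> kSmiShift (arithmetic)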

  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1,
                            Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1,
                              Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1,
                                 Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1,
                               Register value2,
                               Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
  void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);

  inline void ObjectTag(Register tagged_obj, Register obj);
  inline void ObjectUntag(Register untagged_obj, Register obj);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  void JumpIfHeapNumber(Register object, Label* on_heap_number,
                        SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
  void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
                           SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);

  // Sets the vs flag if the input is -0.0.
  void TestForMinusZero(DoubleRegister input);

  // Jump to label if the input double register contains -0.0.
  void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);

  // Jump to label if the input integer register contains the double precision
  // floating point representation of -0.0.
  void JumpIfMinusZero(Register input, Label* on_negative_zero);

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
  // output.
  void ClampInt32ToUint8(Register in_out);
  void ClampInt32ToUint8(Register output, Register input);

  // Saturate a double in input to an unsigned 8-bit integer in output.
  void ClampDoubleToUint8(Register output,
                          DoubleRegister input,
                          DoubleRegister dbl_scratch);

  // Try to represent a double as a signed 32-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt32(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is32Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // Try to represent a double as a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt64(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is64Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }
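
  // Illustrative use (an editorial sketch):
  //   Label failed;
  //   __ TryRepresentDoubleAsInt32(w0, d0, d1, NULL, &failed);
  //   // Falls through with w0 holding the exact integer value of d0;
  //   // control continues at 'failed' when the conversion is inexact.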

  // ---- Object Utilities ----

  // Copy fields from 'src' to 'dst', where both are tagged objects.
  // The 'temps' list is a list of X registers which can be used for scratch
  // values. The temps list must include at least one register.
  //
  // Currently, CopyFields cannot make use of more than three registers from
  // the 'temps' list.
  //
  // CopyFields expects to be able to take at least two registers from
  // MacroAssembler::TmpList().
  void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);

  // Starting at address in dst, initialize field_count 64-bit fields with
  // 64-bit value in register filler. Register dst is corrupted.
  void FillFields(Register dst,
                  Register field_count,
                  Register filler);

  // Copies a number of bytes from src to dst. All passed registers are
  // clobbered. On exit src and dst will point to the place just after where the
  // last byte was read or written and length will be zero. Hint may be used to
  // determine which is the most efficient algorithm to use for copying.
  void CopyBytes(Register dst,
                 Register src,
                 Register length,
                 Register scratch,
                 CopyHint hint = kCopyUnknown);

  // ---- String Utilities ----

  // Jump to label if either object is not a sequential one-byte string.
  // Optionally perform a smi check on the objects first.
  void JumpIfEitherIsNotSequentialOneByteStrings(
      Register first, Register second, Register scratch1, Register scratch2,
      Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);

  // ---- Calling / Jumping helpers ----

  // This is required for compatibility with architecture-independent code.
  inline void jmp(Label* L) { B(L); }

  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
  void TailCallStub(CodeStub* stub);

  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);
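
  // Illustrative use (an editorial sketch; the runtime function id is an
  // arbitrary example):
  //   __ CallRuntime(Runtime::kStackGuard, 0);
  // resolves the Runtime::Function for the id and forwards the argument
  // count to the overload above.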

  int ActivationFrameAlignment();

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);
  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of arguments.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Invoke specified builtin JavaScript function.
  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // setup the function in the function register.
  void GetBuiltinEntry(Register target, Register function,
                       int native_context_index);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, int native_context_index);

  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode);
  void Jump(intptr_t target, RelocInfo::Mode rmode);

  void Call(Register target);
  void Call(Label* target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // For every Call variant, there is a matching CallSize function that returns
  // the size (in bytes) of the call sequence.
  static int CallSize(Register target);
  static int CallSize(Label* target);
  static int CallSize(Address target, RelocInfo::Mode rmode);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                      TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      InvokeFlag flag,
                      bool* definitely_mismatches,
                      const CallWrapper& call_wrapper);
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // ---- Floating point helpers ----

  // Perform a conversion from a double to a signed int64. If the input fits in
  // the range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  //
  // Only public for the test code in test-code-stubs-arm64.cc.
  void TryConvertDoubleToInt64(Register result,
                               DoubleRegister input,
                               Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMA-262 9.5: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Label* on_not_heap_number);

  // ---- Code generation helpers ----

  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() const { return generating_stub_; }
#if DEBUG
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
  bool use_real_aborts() const { return use_real_aborts_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() const { return has_frame_; }
  bool AllowThisStubCall(CodeStub* stub);

  class NoUseRealAbortsScope {
   public:
    explicit NoUseRealAbortsScope(MacroAssembler* masm) :
        saved_(masm->use_real_aborts_), masm_(masm) {
      masm_->use_real_aborts_ = false;
    }
    ~NoUseRealAbortsScope() {
      masm_->use_real_aborts_ = saved_;
    }
   private:
    bool saved_;
    MacroAssembler* masm_;
  };

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. The allocated object is returned in result.
  //
  // If the new space is exhausted control continues at the gc_required label.
  // In this case, the result and scratch registers may still be clobbered.
  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
  void Allocate(Register object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed.
  // All registers are clobbered.
  // If no heap_number_map register is provided, the function will take care of
  // loading it.
  void AllocateHeapNumber(Register result,
                          Label* gc_required,
                          Register scratch1,
                          Register scratch2,
                          CPURegister value = NoFPReg,
                          CPURegister heap_number_map = NoReg,
                          MutableMode mode = IMMUTABLE);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare object type for heap object, and branch if equal (or not).
  // heap_object contains a non-Smi whose object type should be compared with
  // the given type. This both sets the flags and leaves the object type in
  // the type_reg register. It leaves the map in the map register (unless the
  // type_reg and map register are the same register). It leaves the heap
  // object in the heap_object register unless the heap_object register is the
  // same register as one of the other registers.
  void JumpIfObjectType(Register object,
                        Register map,
                        Register type_reg,
                        InstanceType type,
                        Label* if_cond_pass,
                        Condition cond = eq);
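
  // Illustrative use (an editorial sketch):
  //   __ JumpIfObjectType(x0, x1, x2, JS_FUNCTION_TYPE, &is_function);
  // loads x0's map into x1 and its instance type into x2, then branches
  // when the comparison satisfies the condition (eq by default).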

  void JumpIfNotObjectType(Register object,
                           Register map,
                           Register type_reg,
                           InstanceType type,
                           Label* if_not_object);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);

  // Compare an object's map with the specified map. Condition flags are set
  // with result of map compare.
  void CompareObjectMap(Register obj, Heap::RootListIndex index);

  // Compare an object's map with the specified map. Condition flags are set
  // with result of map compare.
  void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map,
                  Handle<Map> map);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // As above, but the map of the object is already loaded into obj_map, and is
  // preserved.
  void CheckMap(Register obj_map,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Compare the given value and the value of the weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Test the bitfield of the heap object map with mask and set the condition
  // flags. The object register is preserved.
  void TestMapBitfield(Register object, uint64_t mask);

  // Load the elements kind field from a map, and return it in the result
  // register.
  void LoadElementsKindFromMap(Register result, Register map);

  // Compare the object in a register to a value from the root list.
  void CompareRoot(const Register& obj, Heap::RootListIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(const Register& obj,
                  Heap::RootListIndex index,
                  Label* if_equal);

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(const Register& obj,
                     Heap::RootListIndex index,
                     Label* if_not_equal);

  // Load and check the instance type of an object for being a unique name.
  // Loads the type into the second argument register.
  // The object and type arguments can be the same register; in that case it
  // will be overwritten with the type.
  // Fall-through if the object was a unique name and jump on fail otherwise.
  inline void IsObjectNameType(Register object, Register type, Label* fail);

  inline void IsObjectJSObjectType(Register heap_object,
                                   Register map,
                                   Register scratch,
                                   Label* fail);

  // Check the instance type in the given map to see if it corresponds to a
  // JS object type. Jump to the fail label if this is not the case and fall
  // through otherwise. However if fail label is NULL, no branch will be
  // performed and the flag will be updated. You can test the flag for "le"
  // condition to test if it is a valid JS object type.
  inline void IsInstanceJSObjectType(Register map,
                                     Register scratch,
                                     Label* fail);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // The object and type arguments can be the same register; in that case it
  // will be overwritten with the type.
  // Jumps to not_string or string, as appropriate. If the appropriate label is
  // NULL, fall through.
  inline void IsObjectJSStringType(Register object, Register type,
                                   Label* not_string, Label* string = NULL);

  // Compare the contents of a register with an operand, and branch to true,
  // false or fall through, depending on condition.
  void CompareAndSplit(const Register& lhs,
                       const Operand& rhs,
                       Condition cond,
                       Label* if_true,
                       Label* if_false,
                       Label* fall_through);

  // Test the bits of register defined by bit_pattern, and branch to
  // if_any_set, if_all_clear or fall_through accordingly.
  void TestAndSplit(const Register& reg,
                    uint64_t bit_pattern,
                    Label* if_all_clear,
                    Label* if_any_set,
                    Label* fall_through);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map, Register scratch, Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Register scratch, Label* fail);

  // Check to see if number can be stored as a double in FastDoubleElements.
  // If it can, store it at the index specified by key_reg in the array,
  // otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   FPRegister fpscratch1,
                                   Label* fail,
                                   int elements_offset = 0);

  // Picks out an array index from the hash field.
  //
  // hash - holds the index's hash. Clobbered.
  // index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // ---------------------------------------------------------------------------
  // Inline caching support.

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 SeqStringSetCharCheckIndexType index_type,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch1,
                              Register scratch2,
                              Label* miss);

  // Hash the integer value in 'key' register.
  // It uses the same algorithm as ComputeIntegerHash in utils.h.
  void GetNumberHash(Register key, Register scratch);
  // Load value from the dictionary.
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'elements' or 'key'.
  //            Unchanged on bailout so 'elements' and 'key' can be used
  //            in further computation.
  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register scratch0,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3);
  // ---------------------------------------------------------------------------
  // Frames.

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);
  // Returns map with validated enum cache in object register.
  void CheckEnumCache(Register object,
                      Register null_value,
                      Register scratch0,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* call_runtime);
  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver should point to the array object.
  // If allocation info is present, the Z flag is set (so that the eq
  // condition will pass).
  void TestJSArrayForAllocationMemento(Register receiver,
                                       Register scratch1,
                                       Register scratch2,
                                       Label* no_memento_found);
  void JumpIfJSArrayHasAllocationMemento(Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
                                    &no_memento_found);
    B(eq, memento_found);
    Bind(&no_memento_found);
  }
  // The stack pointer has to switch between csp and jssp when setting up and
  // destroying the exit frame. Hence preserving/restoring the registers is
  // slightly more complicated than simple push/pop operations.
  void ExitFramePreserveFPRegs();
  void ExitFrameRestoreFPRegs();

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);
  // Enter exit frame. Exit frames are used when calling C code from generated
  // (JavaScript) code.
  //
  // The stack pointer must be jssp on entry, and will be set to csp by this
  // function. The frame pointer is also configured, but the only other
  // registers modified by this function are the provided scratch register, and
  // jssp.
  //
  // The 'extra_space' argument can be used to allocate some space in the exit
  // frame that will be ignored by the GC. This space will be reserved in the
  // bottom of the frame immediately above the return address slot.
  //
  // Set up a stack frame and registers as follows:
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: SPOffset (new csp)
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp_size]: Saved doubles, if save_doubles is true.
  //         csp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //   csp -> csp[0]: Space reserved for the return address.
  //
  // This function also stores the new frame information in the top frame, so
  // that the new frame becomes the current frame.
  void EnterExitFrame(bool save_doubles,
                      const Register& scratch,
                      int extra_space = 0);
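
  // A sketch of the typical pairing with LeaveExitFrame (arguments here are
  // illustrative):
  //
  //   __ EnterExitFrame(true, x10);
  //   // ... set up arguments and call the C function ...
  //   __ LeaveExitFrame(true, x10, true);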
  // Leave the current exit frame, after a C function has returned to generated
  // (JavaScript) code.
  //
  // This effectively unwinds the operation of EnterExitFrame:
  //  * Preserved doubles are restored (if save_doubles is true).
  //  * The frame information is removed from the top frame.
  //  * The exit frame is dropped.
  //  * The stack pointer is reset to jssp.
  //
  // The stack pointer must be csp on entry.
  void LeaveExitFrame(bool save_doubles,
                      const Register& scratch,
                      bool restore_context);
  void LoadContext(Register dst, int context_chain_length);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);
  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
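
  // The usual lowering (a sketch, not necessarily the exact code emitted) is
  // a multiply by a precomputed "magic" constant plus a correction, e.g. for
  // divisor 3:
  //
  //   result = (int32_t)(((int64_t)0x55555556 * dividend) >> 32);
  //   result = result - (dividend >> 31);  // round towards zero for n < 0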
  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value, Register scratch1,
                  Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  // ---------------------------------------------------------------------------
  // Garbage collector support (GC).

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch1,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);
  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  void PushSafepointRegistersAndDoubles();
  void PopSafepointRegistersAndDoubles();
  // Store value in register src in the safepoint stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst) {
    Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
  }

  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src) {
    Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
  }
  void CheckPageFlagSet(const Register& object,
                        const Register& scratch,
                        int mask,
                        Label* if_any_set);

  void CheckPageFlagClear(const Register& object,
                          const Register& scratch,
                          int mask,
                          Label* if_all_clear);
  // Check if object is in new space and jump accordingly.
  // Register 'object' is preserved.
  void JumpIfNotInNewSpace(Register object,
                           Label* branch) {
    InNewSpace(object, ne, branch);
  }

  void JumpIfInNewSpace(Register object,
                        Label* branch) {
    InNewSpace(object, eq, branch);
  }
  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     lr_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }
  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);
  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Register scratch4,
                      Label* object_is_white_and_not_data);
  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);
  // Helper for finding the mark bits for an address.
  // Note that the behaviour slightly differs from other architectures.
  // On exit:
  //  - addr_reg is unchanged.
  //  - The bitmap register points at the word with the mark bits.
  //  - The shift register contains the index of the first color bit for this
  //    object in the bitmap.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register shift_reg);
  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);
  // ---------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertRegisterIsClear(Register reg, BailoutReason reason);
  void AssertRegisterIsRoot(
      Register reg,
      Heap::RootListIndex index,
      BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
  void AssertFastElements(Register elements);
  // Abort if the specified register contains the invalid color bit pattern.
  // The pattern must be in bits [1:0] of 'reg' register.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertHasValidColor(const Register& reg);

  // Abort if 'object' register doesn't point to a string object.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertIsString(const Register& object);
  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);
  void CheckRegisterIsClear(Register reg, BailoutReason reason);

  // Print a message to stderr and abort execution.
  void Abort(BailoutReason reason);
  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch1,
      Register scratch2,
      Label* no_map_match);
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers function and
  // map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);
  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();
  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // At the moment it is only possible to print the value of csp if it is the
  // current stack pointer. Otherwise, the MacroAssembler will automatically
  // update csp on every push (using BumpSystemStackPointer), so determining its
  // value is difficult.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char * format,
              CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg,
              CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);
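
  // For example (illustrative; the format handling follows C printf, and
  // PRId64 is the standard <inttypes.h> macro):
  //
  //   __ Mov(x10, 42);
  //   __ Printf("x10 holds %" PRId64 ".\n", x10);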
  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char * format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);
  // Code ageing support functions.
  //
  // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
  // function as old, it replaces some of the function prologue (generated by
  // FullCodeGenerator::Generate) with a call to a special stub (ultimately
  // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
  // function prologue to its initial young state (indicating that it has been
  // recently run) and continues. A young function is therefore one which has a
  // normal frame setup sequence, and an old function has a code age sequence
  // which calls a code ageing stub.
  //
  // Set up a basic stack frame for young code (or code exempt from ageing) with
  // type FUNCTION. It may be patched later for code ageing support. This is
  // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
  //
  // This function takes an Assembler so it can be called from either a
  // MacroAssembler or a PatchingAssembler context.
  static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
  // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
  void EmitFrameSetupForCodeAgePatching();

  // Emit a code age sequence that calls the relevant code age stub. The code
  // generated by this sequence is expected to replace the code generated by
  // EmitFrameSetupForCodeAgePatching, and represents an old function.
  //
  // If stub is NULL, this function generates the code age sequence but omits
  // the stub address that is normally embedded in the instruction stream. This
  // can be used by debug code to verify code age sequences.
  static void EmitCodeAgeSequence(Assembler* assm, Code* stub);

  // Call EmitCodeAgeSequence from a MacroAssembler context.
  void EmitCodeAgeSequence(Code* stub);
  // Return true if the sequence is a young sequence generated by
  // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
  // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
  static bool IsYoungSequence(Isolate* isolate, byte* sequence);
  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);
  // Perform necessary maintenance operations before a push or after a pop.
  //
  // Note that size is specified in bytes.
  void PushPreamble(Operand total_size);
  void PopPostamble(Operand total_size);

  void PushPreamble(int count, int size) { PushPreamble(count * size); }
  void PopPostamble(int count, int size) { PopPostamble(count * size); }
 private:
  // Helpers for CopyFields.
  // These each implement CopyFields in a different way.
  void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
                                 Register scratch1, Register scratch2,
                                 Register scratch3, Register scratch4,
                                 Register scratch5);
  void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
                                     Register scratch1, Register scratch2,
                                     Register scratch3, Register scratch4);
  void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
                                Register scratch1, Register scratch2,
                                Register scratch3);
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size,
                  const CPURegister& src0, const CPURegister& src1,
                  const CPURegister& src2, const CPURegister& src3);
  void PopHelper(int count, int size,
                 const CPURegister& dst0, const CPURegister& dst1,
                 const CPURegister& dst2, const CPURegister& dst3);
  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack (csp) must be prepared by the caller as for a normal
  // AAPCS64 call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in their
  // proper PCS registers (and in calling order). The argument registers can
  // have mixed types. The format string (x0) should not be included.
  void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);
  // Try to represent a double as an int so that integer fast-paths may be
  // used. Not every valid integer value is guaranteed to be caught.
  // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
  // is a W or X register.
  //
  // This does not distinguish between +0 and -0, so if this distinction is
  // important it must be checked separately.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion = NULL,
                               Label* on_failed_conversion = NULL);
  bool generating_stub_;

  // Tell whether any of the macro instructions can be used. When false the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_;
  // The Abort method should call a V8 runtime function, but the CallRuntime
  // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
  // use a simpler abort mechanism that doesn't depend on CEntryStub.
  //
  // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
  // being generated.
  bool use_real_aborts_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // The register to use as a stack pointer for stack operations.
  Register sp_;

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_;
  CPURegList fptmp_list_;
  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);
  // Far branches resolving.
  //
  // The various classes of branch instructions with immediate offsets have
  // different ranges. While the Assembler will fail to assemble a branch
  // exceeding its range, the MacroAssembler offers a mechanism to resolve
  // branches to too distant targets, either by tweaking the generated code to
  // use branch instructions with wider ranges or generating veneers.
  //
  // Currently branches to distant targets are resolved using unconditional
  // branch instructions with a range of +-128MB. If that becomes too little
  // (!), the mechanism can be extended to generate special veneers for really
  // far targets.
  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is responsible
  // for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
                                             ImmBranchType branch_type);
};

// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
 public:
  explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
      : masm_(masm),
        size_(count * kInstructionSize) {
    // Before blocking the const pool, see if it needs to be emitted.
    masm_->CheckConstPool(false, true);
    masm_->CheckVeneerPool(false, true);

    masm_->StartBlockPools();

    if (count != 0) {
      masm_->bind(&start_);
    }
    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
    masm_->set_allow_macro_instructions(false);
  }
  ~InstructionAccurateScope() {
    masm_->EndBlockPools();
    if (start_.is_bound()) {
      DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
  }

 private:
  MacroAssembler* masm_;
  Label start_;
  size_t size_;
  bool previous_allow_macro_instructions_;
};
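
// A typical use (illustrative): emit exactly two raw instructions, with no
// pool emission and no macro instructions allowed in between.
//
//   {
//     InstructionAccurateScope scope(masm, 2);
//     masm->add(x0, x0, x1);
//     masm->sub(x2, x2, x3);
//   }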

// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : available_(masm->TmpList()),
        availablefp_(masm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK(available_->type() == CPURegister::kRegister);
    DCHECK(availablefp_->type() == CPURegister::kFPRegister);
  }
  ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }

  Register UnsafeAcquire(const Register& reg) {
    return Register(UnsafeAcquire(available_, reg));
  }

  Register AcquireSameSizeAs(const Register& reg);
  FPRegister AcquireSameSizeAs(const FPRegister& reg);
 private:
  static CPURegister AcquireNextAvailable(CPURegList* available);
  static CPURegister UnsafeAcquire(CPURegList* available,
                                   const CPURegister& reg);

  // Available scratch registers.
  CPURegList* available_;     // kRegister
  CPURegList* availablefp_;   // kFPRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;     // kRegister
  RegList old_availablefp_;   // kFPRegister
};
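
// For example (illustrative):
//
//   {
//     UseScratchRegisterScope temps(masm);
//     Register scratch = temps.AcquireX();
//     // ... use 'scratch' freely; it is handed back when 'temps' dies ...
//   }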

inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand GlobalObjectMemOperand() {
  return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
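
// For example, the global object can be loaded with (illustrative):
//
//   __ Ldr(x0, GlobalObjectMemOperand());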

// Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {
 public:
  explicit InlineSmiCheckInfo(Address info);

  bool HasSmiCheck() const {
    return smi_check_ != NULL;
  }

  const Register& SmiRegister() const {
    return reg_;
  }

  Instruction* SmiCheck() const {
    return smi_check_;
  }

  // Use MacroAssembler::InlineData to emit information about patchable inline
  // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
  // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
  //
  // The generated patch information can be read using the InlineSmiCheckInfo
  // class.
  static void Emit(MacroAssembler* masm, const Register& reg,
                   const Label* smi_check);
  // Emit information to indicate that there is no inline SMI check.
  static void EmitNotInlined(MacroAssembler* masm) {
    Label unbound;
    Emit(masm, NoReg, &unbound);
  }

 private:
  Register reg_;
  Instruction* smi_check_;
  // Fields in the data encoded by InlineData.
  //
  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
  // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
  // used in a patchable check. The Emit() method checks this.
  //
  // Note that the total size of the fields is restricted by the underlying
  // storage size handled by the BitField class, which is a uint32_t.
  class RegisterBits : public BitField<unsigned, 0, 5> {};
  class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
};
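
// The BitField helpers pack both fields into a single uint32_t, along the
// lines of (illustrative):
//
//   uint32_t payload = RegisterBits::encode(reg.code()) |
//                      DeltaBits::encode(delta);
//   unsigned reg_code = RegisterBits::decode(payload);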
} }  // namespace v8::internal
#ifdef GENERATED_CODE_COVERAGE
#error "Unsupported option"
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

#endif  // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_