1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
10 #include "src/bailout-reason.h"
11 #include "src/globals.h"
13 #include "src/arm64/assembler-arm64-inl.h"
14 #include "src/base/bits.h"
16 // Simulator specific helpers.
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#ifdef USE_SIMULATOR
#define ASM_UNIMPLEMENTED(message) \
  __ Debug(message, __LINE__, NO_PARAM)
#define ASM_UNIMPLEMENTED_BREAK(message) \
  __ Debug(message, __LINE__,            \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#define ASM_LOCATION(message) \
  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
// Without the simulator the helpers expand to nothing, so they cost nothing
// on real hardware.
#define ASM_UNIMPLEMENTED(message)
#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#endif
// X-macro list of the single-register load/store macro-assembler helpers.
// Each entry is: V(macro name, operand reference type, operand name,
// load/store opcode expression). The opcode expression may inspect the
// operand (e.g. rt.Is64Bits()) to select the W or X form.
#define LS_MACRO_LIST(V)                                      \
  V(Ldrb, Register&, rt, LDRB_w)                              \
  V(Strb, Register&, rt, STRB_w)                              \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w)  \
  V(Ldrh, Register&, rt, LDRH_w)                              \
  V(Strh, Register&, rt, STRH_w)                              \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w)  \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                     \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                    \
  V(Ldrsw, Register&, rt, LDRSW_x)
// X-macro list of the load/store-pair helpers. Each entry is:
// V(macro name, operand reference type, first operand, second operand,
// load/store-pair opcode expression).
#define LSPAIR_MACRO_LIST(V)                              \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))   \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2))  \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
// As above, for a field holding a SMI. NOTE(review): presumably addresses
// only the payload bits of the tagged value so the load reads the untagged
// SMI directly — confirm against the definition.
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);
65 // ----------------------------------------------------------------------------
69 // Copies of architectural conditions.
70 // The associated conditions can be used in place of those, the code will
71 // take care of reinterpreting them with the correct type.
89 // These two are *different* from the architectural codes al and nv.
90 // 'always' is used to generate unconditional branches.
91 // 'never' is used to not generate a branch (generally as the inverse
92 // branch type of 'always).
95 reg_zero, reg_not_zero,
97 reg_bit_clear, reg_bit_set,
100 kBranchTypeFirstCondition = eq,
101 kBranchTypeLastCondition = nv,
102 kBranchTypeFirstUsingReg = reg_zero,
103 kBranchTypeFirstUsingBit = reg_bit_clear
106 inline BranchType InvertBranchType(BranchType type) {
107 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
108 return static_cast<BranchType>(
109 NegateCondition(static_cast<Condition>(type)));
111 return static_cast<BranchType>(type ^ 1);
// Whether write-barrier-style code should emit the remembered-set update.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a helper should perform its own inline SMI check first.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Whether pointers stored *to* this location may be interesting to the GC.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Records whether the caller has already saved the link register (lr).
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Whether a call target address may be inlined in the instruction stream.
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
// Whether an untag may run speculatively, before the value is known to be
// a SMI.
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
// Whether a copied array can contain holes.
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
// Expected copy length, as a code-size/speed hint for copy routines.
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
// Whether a move between identical W registers may be discarded.
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
// Whether a SeqString index to be checked is a SMI or an untagged int32.
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
132 class MacroAssembler : public Assembler {
134 MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
136 inline Handle<Object> CodeObject();
138 // Instruction set functions ------------------------------------------------
140 inline void And(const Register& rd,
142 const Operand& operand);
143 inline void Ands(const Register& rd,
145 const Operand& operand);
146 inline void Bic(const Register& rd,
148 const Operand& operand);
149 inline void Bics(const Register& rd,
151 const Operand& operand);
152 inline void Orr(const Register& rd,
154 const Operand& operand);
155 inline void Orn(const Register& rd,
157 const Operand& operand);
158 inline void Eor(const Register& rd,
160 const Operand& operand);
161 inline void Eon(const Register& rd,
163 const Operand& operand);
164 inline void Tst(const Register& rn, const Operand& operand);
165 void LogicalMacro(const Register& rd,
167 const Operand& operand,
170 // Add and sub macros.
171 inline void Add(const Register& rd,
173 const Operand& operand);
174 inline void Adds(const Register& rd,
176 const Operand& operand);
177 inline void Sub(const Register& rd,
179 const Operand& operand);
180 inline void Subs(const Register& rd,
182 const Operand& operand);
183 inline void Cmn(const Register& rn, const Operand& operand);
184 inline void Cmp(const Register& rn, const Operand& operand);
185 inline void Neg(const Register& rd,
186 const Operand& operand);
187 inline void Negs(const Register& rd,
188 const Operand& operand);
190 void AddSubMacro(const Register& rd,
192 const Operand& operand,
196 // Add/sub with carry macros.
197 inline void Adc(const Register& rd,
199 const Operand& operand);
200 inline void Adcs(const Register& rd,
202 const Operand& operand);
203 inline void Sbc(const Register& rd,
205 const Operand& operand);
206 inline void Sbcs(const Register& rd,
208 const Operand& operand);
209 inline void Ngc(const Register& rd,
210 const Operand& operand);
211 inline void Ngcs(const Register& rd,
212 const Operand& operand);
213 void AddSubWithCarryMacro(const Register& rd,
215 const Operand& operand,
217 AddSubWithCarryOp op);
220 void Mov(const Register& rd,
221 const Operand& operand,
222 DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
223 void Mov(const Register& rd, uint64_t imm);
224 inline void Mvn(const Register& rd, uint64_t imm);
225 void Mvn(const Register& rd, const Operand& operand);
226 static bool IsImmMovn(uint64_t imm, unsigned reg_size);
227 static bool IsImmMovz(uint64_t imm, unsigned reg_size);
228 static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
230 // Try to move an immediate into the destination register in a single
231 // instruction. Returns true for success, and updates the contents of dst.
232 // Returns false, otherwise.
233 bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
235 // Move an immediate into register dst, and return an Operand object for use
236 // with a subsequent instruction that accepts a shift. The value moved into
237 // dst is not necessarily equal to imm; it may have had a shifting operation
238 // applied to it that will be subsequently undone by the shift applied in the
240 Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
242 // Conditional macros.
243 inline void Ccmp(const Register& rn,
244 const Operand& operand,
247 inline void Ccmn(const Register& rn,
248 const Operand& operand,
251 void ConditionalCompareMacro(const Register& rn,
252 const Operand& operand,
255 ConditionalCompareOp op);
256 void Csel(const Register& rd,
258 const Operand& operand,
261 // Load/store macros.
262 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
263 inline void FN(const REGTYPE REG, const MemOperand& addr);
264 LS_MACRO_LIST(DECLARE_FUNCTION)
265 #undef DECLARE_FUNCTION
267 void LoadStoreMacro(const CPURegister& rt,
268 const MemOperand& addr,
271 #define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
272 inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
273 LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
274 #undef DECLARE_FUNCTION
276 void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
277 const MemOperand& addr, LoadStorePairOp op);
279 // V8-specific load/store helpers.
280 void Load(const Register& rt, const MemOperand& addr, Representation r);
281 void Store(const Register& rt, const MemOperand& addr, Representation r);
284 // The target must be within the immediate range of adr.
286 // The target may be outside of the immediate range of adr. Additional
287 // instructions may be emitted.
290 void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);
292 // Remaining instructions are simple pass-through calls to the assembler.
293 inline void Asr(const Register& rd, const Register& rn, unsigned shift);
294 inline void Asr(const Register& rd, const Register& rn, const Register& rm);
296 // Branch type inversion relies on these relations.
297 STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
298 (reg_bit_clear == (reg_bit_set ^ 1)) &&
299 (always == (never ^ 1)));
301 void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
303 inline void B(Label* label);
304 inline void B(Condition cond, Label* label);
305 void B(Label* label, Condition cond);
306 inline void Bfi(const Register& rd,
310 inline void Bfxil(const Register& rd,
314 inline void Bind(Label* label);
315 inline void Bl(Label* label);
316 inline void Blr(const Register& xn);
317 inline void Br(const Register& xn);
318 inline void Brk(int code);
319 void Cbnz(const Register& rt, Label* label);
320 void Cbz(const Register& rt, Label* label);
321 inline void Cinc(const Register& rd, const Register& rn, Condition cond);
322 inline void Cinv(const Register& rd, const Register& rn, Condition cond);
323 inline void Cls(const Register& rd, const Register& rn);
324 inline void Clz(const Register& rd, const Register& rn);
325 inline void Cneg(const Register& rd, const Register& rn, Condition cond);
326 inline void CzeroX(const Register& rd, Condition cond);
327 inline void CmovX(const Register& rd, const Register& rn, Condition cond);
328 inline void Cset(const Register& rd, Condition cond);
329 inline void Csetm(const Register& rd, Condition cond);
330 inline void Csinc(const Register& rd,
334 inline void Csinv(const Register& rd,
338 inline void Csneg(const Register& rd,
342 inline void Dmb(BarrierDomain domain, BarrierType type);
343 inline void Dsb(BarrierDomain domain, BarrierType type);
344 inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
345 inline void Extr(const Register& rd,
349 inline void Fabs(const FPRegister& fd, const FPRegister& fn);
350 inline void Fadd(const FPRegister& fd,
351 const FPRegister& fn,
352 const FPRegister& fm);
353 inline void Fccmp(const FPRegister& fn,
354 const FPRegister& fm,
357 inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
358 inline void Fcmp(const FPRegister& fn, double value);
359 inline void Fcsel(const FPRegister& fd,
360 const FPRegister& fn,
361 const FPRegister& fm,
363 inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
364 inline void Fcvtas(const Register& rd, const FPRegister& fn);
365 inline void Fcvtau(const Register& rd, const FPRegister& fn);
366 inline void Fcvtms(const Register& rd, const FPRegister& fn);
367 inline void Fcvtmu(const Register& rd, const FPRegister& fn);
368 inline void Fcvtns(const Register& rd, const FPRegister& fn);
369 inline void Fcvtnu(const Register& rd, const FPRegister& fn);
370 inline void Fcvtzs(const Register& rd, const FPRegister& fn);
371 inline void Fcvtzu(const Register& rd, const FPRegister& fn);
372 inline void Fdiv(const FPRegister& fd,
373 const FPRegister& fn,
374 const FPRegister& fm);
375 inline void Fmadd(const FPRegister& fd,
376 const FPRegister& fn,
377 const FPRegister& fm,
378 const FPRegister& fa);
379 inline void Fmax(const FPRegister& fd,
380 const FPRegister& fn,
381 const FPRegister& fm);
382 inline void Fmaxnm(const FPRegister& fd,
383 const FPRegister& fn,
384 const FPRegister& fm);
385 inline void Fmin(const FPRegister& fd,
386 const FPRegister& fn,
387 const FPRegister& fm);
388 inline void Fminnm(const FPRegister& fd,
389 const FPRegister& fn,
390 const FPRegister& fm);
391 inline void Fmov(FPRegister fd, FPRegister fn);
392 inline void Fmov(FPRegister fd, Register rn);
393 // Provide explicit double and float interfaces for FP immediate moves, rather
394 // than relying on implicit C++ casts. This allows signalling NaNs to be
395 // preserved when the immediate matches the format of fd. Most systems convert
396 // signalling NaNs to quiet NaNs when converting between float and double.
397 inline void Fmov(FPRegister fd, double imm);
398 inline void Fmov(FPRegister fd, float imm);
399 // Provide a template to allow other types to be converted automatically.
401 void Fmov(FPRegister fd, T imm) {
402 DCHECK(allow_macro_instructions_);
403 Fmov(fd, static_cast<double>(imm));
405 inline void Fmov(Register rd, FPRegister fn);
406 inline void Fmsub(const FPRegister& fd,
407 const FPRegister& fn,
408 const FPRegister& fm,
409 const FPRegister& fa);
410 inline void Fmul(const FPRegister& fd,
411 const FPRegister& fn,
412 const FPRegister& fm);
413 inline void Fneg(const FPRegister& fd, const FPRegister& fn);
414 inline void Fnmadd(const FPRegister& fd,
415 const FPRegister& fn,
416 const FPRegister& fm,
417 const FPRegister& fa);
418 inline void Fnmsub(const FPRegister& fd,
419 const FPRegister& fn,
420 const FPRegister& fm,
421 const FPRegister& fa);
422 inline void Frinta(const FPRegister& fd, const FPRegister& fn);
423 inline void Frintm(const FPRegister& fd, const FPRegister& fn);
424 inline void Frintn(const FPRegister& fd, const FPRegister& fn);
425 inline void Frintp(const FPRegister& fd, const FPRegister& fn);
426 inline void Frintz(const FPRegister& fd, const FPRegister& fn);
427 inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
428 inline void Fsub(const FPRegister& fd,
429 const FPRegister& fn,
430 const FPRegister& fm);
431 inline void Hint(SystemHint code);
432 inline void Hlt(int code);
434 inline void Ldnp(const CPURegister& rt,
435 const CPURegister& rt2,
436 const MemOperand& src);
437 // Load a literal from the inline constant pool.
438 inline void Ldr(const CPURegister& rt, const Immediate& imm);
439 // Helper function for double immediate.
440 inline void Ldr(const CPURegister& rt, double imm);
441 inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
442 inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
443 inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
444 inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
445 inline void Madd(const Register& rd,
449 inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
450 inline void Mov(const Register& rd, const Register& rm);
451 inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
452 inline void Mrs(const Register& rt, SystemRegister sysreg);
453 inline void Msr(SystemRegister sysreg, const Register& rt);
454 inline void Msub(const Register& rd,
458 inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  // Emit an architectural no-op.
  inline void Nop() { nop(); }
460 inline void Rbit(const Register& rd, const Register& rn);
461 inline void Ret(const Register& xn = lr);
462 inline void Rev(const Register& rd, const Register& rn);
463 inline void Rev16(const Register& rd, const Register& rn);
464 inline void Rev32(const Register& rd, const Register& rn);
465 inline void Ror(const Register& rd, const Register& rs, unsigned shift);
466 inline void Ror(const Register& rd, const Register& rn, const Register& rm);
467 inline void Sbfiz(const Register& rd,
471 inline void Sbfx(const Register& rd,
475 inline void Scvtf(const FPRegister& fd,
478 inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
479 inline void Smaddl(const Register& rd,
483 inline void Smsubl(const Register& rd,
487 inline void Smull(const Register& rd,
490 inline void Smulh(const Register& rd,
493 inline void Umull(const Register& rd, const Register& rn, const Register& rm);
494 inline void Stnp(const CPURegister& rt,
495 const CPURegister& rt2,
496 const MemOperand& dst);
497 inline void Sxtb(const Register& rd, const Register& rn);
498 inline void Sxth(const Register& rd, const Register& rn);
499 inline void Sxtw(const Register& rd, const Register& rn);
500 void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
501 void Tbz(const Register& rt, unsigned bit_pos, Label* label);
502 inline void Ubfiz(const Register& rd,
506 inline void Ubfx(const Register& rd,
510 inline void Ucvtf(const FPRegister& fd,
513 inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
514 inline void Umaddl(const Register& rd,
518 inline void Umsubl(const Register& rd,
522 inline void Uxtb(const Register& rd, const Register& rn);
523 inline void Uxth(const Register& rd, const Register& rn);
524 inline void Uxtw(const Register& rd, const Register& rn);
526 // Pseudo-instructions ------------------------------------------------------
528 // Compute rd = abs(rm).
529 // This function clobbers the condition flags. On output the overflow flag is
530 // set iff the negation overflowed.
532 // If rm is the minimum representable value, the result is not representable.
533 // Handlers for each case can be specified using the relevant labels.
534 void Abs(const Register& rd, const Register& rm,
535 Label * is_not_representable = NULL,
536 Label * is_representable = NULL);
538 // Push or pop up to 4 registers of the same width to or from the stack,
539 // using the current stack pointer as set by SetStackPointer.
541 // If an argument register is 'NoReg', all further arguments are also assumed
542 // to be 'NoReg', and are thus not pushed or popped.
544 // Arguments are ordered such that "Push(a, b);" is functionally equivalent
545 // to "Push(a); Push(b);".
547 // It is valid to push the same register more than once, and there is no
548 // restriction on the order in which registers are specified.
550 // It is not valid to pop into the same register more than once in one
551 // operation, not even into the zero register.
553 // If the current stack pointer (as set by SetStackPointer) is csp, then it
554 // must be aligned to 16 bytes on entry and the total size of the specified
555 // registers must also be a multiple of 16 bytes.
557 // Even if the current stack pointer is not the system stack pointer (csp),
558 // Push (and derived methods) will still modify the system stack pointer in
559 // order to comply with ABI rules about accessing memory below the system
562 // Other than the registers passed into Pop, the stack pointer and (possibly)
563 // the system stack pointer, these methods do not modify any other registers.
564 void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
565 const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
566 void Push(const CPURegister& src0, const CPURegister& src1,
567 const CPURegister& src2, const CPURegister& src3,
568 const CPURegister& src4, const CPURegister& src5 = NoReg,
569 const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
570 void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
571 const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
572 void Push(const Register& src0, const FPRegister& src1);
574 // Alternative forms of Push and Pop, taking a RegList or CPURegList that
575 // specifies the registers that are to be pushed or popped. Higher-numbered
576 // registers are associated with higher memory addresses (as in the A32 push
577 // and pop instructions).
579 // (Push|Pop)SizeRegList allow you to specify the register size as a
580 // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
581 // kSRegSizeInBits are supported.
583 // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
584 void PushCPURegList(CPURegList registers);
585 void PopCPURegList(CPURegList registers);
587 inline void PushSizeRegList(RegList registers, unsigned reg_size,
588 CPURegister::RegisterType type = CPURegister::kRegister) {
589 PushCPURegList(CPURegList(type, reg_size, registers));
591 inline void PopSizeRegList(RegList registers, unsigned reg_size,
592 CPURegister::RegisterType type = CPURegister::kRegister) {
593 PopCPURegList(CPURegList(type, reg_size, registers));
595 inline void PushXRegList(RegList regs) {
596 PushSizeRegList(regs, kXRegSizeInBits);
598 inline void PopXRegList(RegList regs) {
599 PopSizeRegList(regs, kXRegSizeInBits);
601 inline void PushWRegList(RegList regs) {
602 PushSizeRegList(regs, kWRegSizeInBits);
604 inline void PopWRegList(RegList regs) {
605 PopSizeRegList(regs, kWRegSizeInBits);
607 inline void PushDRegList(RegList regs) {
608 PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
610 inline void PopDRegList(RegList regs) {
611 PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
613 inline void PushSRegList(RegList regs) {
614 PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
616 inline void PopSRegList(RegList regs) {
617 PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
620 // Push the specified register 'count' times.
621 void PushMultipleTimes(CPURegister src, Register count);
622 void PushMultipleTimes(CPURegister src, int count);
  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  // Convenience overload: wrap a raw Smi in a Handle and push it.
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
628 // Aliases of Push and Pop, required for V8 compatibility.
629 inline void push(Register src) {
632 inline void pop(Register dst) {
636 // Sometimes callers need to push or pop multiple registers in a way that is
637 // difficult to structure efficiently for fixed Push or Pop calls. This scope
638 // allows push requests to be queued up, then flushed at once. The
639 // MacroAssembler will try to generate the most efficient sequence required.
641 // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
642 // register sizes and types.
    // A queue starts empty, tied to the given MacroAssembler.
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }
648 DCHECK(queued_.empty());
651 void Queue(const CPURegister& rt) {
652 size_ += rt.SizeInBytes();
653 queued_.push_back(rt);
656 enum PreambleDirective {
660 void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
664 MacroAssembler* masm_;
666 std::vector<CPURegister> queued_;
669 // Poke 'src' onto the stack. The offset is in bytes.
671 // If the current stack pointer (according to StackPointer()) is csp, then
672 // csp must be aligned to 16 bytes.
673 void Poke(const CPURegister& src, const Operand& offset);
675 // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
677 // If the current stack pointer (according to StackPointer()) is csp, then
678 // csp must be aligned to 16 bytes.
679 void Peek(const CPURegister& dst, const Operand& offset);
681 // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
682 // with 'src2' at a higher address than 'src1'. The offset is in bytes.
684 // If the current stack pointer (according to StackPointer()) is csp, then
685 // csp must be aligned to 16 bytes.
686 void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
688 // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
689 // values peeked will be adjacent, with the value in 'dst2' being from a
690 // higher address than 'dst1'. The offset is in bytes.
692 // If the current stack pointer (according to StackPointer()) is csp, then
693 // csp must be aligned to 16 bytes.
694 void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
696 // Claim or drop stack space without actually accessing memory.
698 // In debug mode, both of these will write invalid data into the claimed or
701 // If the current stack pointer (according to StackPointer()) is csp, then it
702 // must be aligned to 16 bytes and the size claimed or dropped must be a
703 // multiple of 16 bytes.
705 // Note that unit_size must be specified in bytes. For variants which take a
706 // Register count, the unit size must be a power of two.
707 inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
708 inline void Claim(const Register& count,
709 uint64_t unit_size = kXRegSize);
710 inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
711 inline void Drop(const Register& count,
712 uint64_t unit_size = kXRegSize);
714 // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
716 inline void ClaimBySMI(const Register& count_smi,
717 uint64_t unit_size = kXRegSize);
718 inline void DropBySMI(const Register& count_smi,
719 uint64_t unit_size = kXRegSize);
721 // Compare a register with an operand, and branch to label depending on the
722 // condition. May corrupt the status flags.
723 inline void CompareAndBranch(const Register& lhs,
728 // Test the bits of register defined by bit_pattern, and branch if ANY of
729 // those bits are set. May corrupt the status flags.
730 inline void TestAndBranchIfAnySet(const Register& reg,
731 const uint64_t bit_pattern,
734 // Test the bits of register defined by bit_pattern, and branch if ALL of
735 // those bits are clear (ie. not set.) May corrupt the status flags.
736 inline void TestAndBranchIfAllClear(const Register& reg,
737 const uint64_t bit_pattern,
740 // Insert one or more instructions into the instruction stream that encode
741 // some caller-defined data. The instructions used will be executable with no
743 inline void InlineData(uint64_t data);
745 // Insert an instrumentation enable marker into the instruction stream.
746 inline void EnableInstrumentation();
748 // Insert an instrumentation disable marker into the instruction stream.
749 inline void DisableInstrumentation();
751 // Insert an instrumentation event marker into the instruction stream. These
752 // will be picked up by the instrumentation system to annotate an instruction
753 // profile. The argument marker_name must be a printable two character string;
754 // it will be encoded in the event marker.
755 inline void AnnotateInstrumentation(const char* marker_name);
757 // If emit_debug_code() is true, emit a run-time check to ensure that
758 // StackPointer() does not point below the system stack pointer.
760 // Whilst it is architecturally legal for StackPointer() to point below csp,
761 // it can be evidence of a potential bug because the ABI forbids accesses
764 // If StackPointer() is the system stack pointer (csp), then csp will be
765 // dereferenced to cause the processor (or simulator) to abort if it is not
768 // If emit_debug_code() is false, this emits no code.
769 void AssertStackConsistency();
771 // Preserve the callee-saved registers (as defined by AAPCS64).
773 // Higher-numbered registers are pushed before lower-numbered registers, and
774 // thus get higher addresses.
775 // Floating-point registers are pushed before general-purpose registers, and
776 // thus get higher addresses.
778 // Note that registers are not checked for invalid values. Use this method
779 // only if you know that the GC won't try to examine the values on the stack.
781 // This method must not be called unless the current stack pointer (as set by
782 // SetStackPointer) is the system stack pointer (csp), and is aligned to
783 // ActivationFrameAlignment().
784 void PushCalleeSavedRegisters();
786 // Restore the callee-saved registers (as defined by AAPCS64).
788 // Higher-numbered registers are popped after lower-numbered registers, and
789 // thus come from higher addresses.
790 // Floating-point registers are popped after general-purpose registers, and
791 // thus come from higher addresses.
793 // This method must not be called unless the current stack pointer (as set by
794 // SetStackPointer) is the system stack pointer (csp), and is aligned to
795 // ActivationFrameAlignment().
796 void PopCalleeSavedRegisters();
798 // Set the current stack pointer, but don't generate any code.
799 inline void SetStackPointer(const Register& stack_pointer) {
800 DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
804 // Return the current stack pointer, as set by SetStackPointer.
805 inline const Register& StackPointer() const {
809 // Align csp for a frame, as per ActivationFrameAlignment, and make it the
810 // current stack pointer.
811 inline void AlignAndSetCSPForFrame() {
812 int sp_alignment = ActivationFrameAlignment();
813 // AAPCS64 mandates at least 16-byte alignment.
814 DCHECK(sp_alignment >= 16);
815 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
816 Bic(csp, StackPointer(), sp_alignment - 1);
817 SetStackPointer(csp);
820 // Push the system stack pointer (csp) down to allow the same to be done to
821 // the current stack pointer (according to StackPointer()). This must be
822 // called _before_ accessing the memory.
824 // This is necessary when pushing or otherwise adding things to the stack, to
825 // satisfy the AAPCS64 constraint that the memory below the system stack
826 // pointer is not accessed. The amount pushed will be increased as necessary
827 // to ensure csp remains aligned to 16 bytes.
829 // This method asserts that StackPointer() is not csp, since the call does
830 // not make sense in that context.
831 inline void BumpSystemStackPointer(const Operand& space);
833 // Re-synchronizes the system stack pointer (csp) with the current stack
834 // pointer (according to StackPointer()).
836 // This method asserts that StackPointer() is not csp, since the call does
837 // not make sense in that context.
838 inline void SyncSystemStackPointer();
840 // Helpers ------------------------------------------------------------------
842 inline void InitializeRootRegister();
844 void AssertFPCRState(Register fpcr = NoReg);
845 void ConfigureFPCR();
846 void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
847 void CanonicalizeNaN(const FPRegister& reg) {
848 CanonicalizeNaN(reg, reg);
851 // Load an object from the root table.
852 void LoadRoot(CPURegister destination,
853 Heap::RootListIndex index);
854 // Store an object to the root table.
855 void StoreRoot(Register source,
856 Heap::RootListIndex index);
858 // Load both TrueValue and FalseValue roots.
859 void LoadTrueFalseRoots(Register true_root, Register false_root);
861 void LoadHeapObject(Register dst, Handle<HeapObject> object);
863 void LoadObject(Register result, Handle<Object> object) {
864 AllowDeferredHandleDereference heap_object_check;
865 if (object->IsHeapObject()) {
866 LoadHeapObject(result, Handle<HeapObject>::cast(object));
868 DCHECK(object->IsSmi());
869 Mov(result, Operand(object));
  // Map a register code to its index within a safepoint register dump.
  static int SafepointRegisterStackIndex(int reg_code);

  // This is required for compatibility with architecture independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }
879 void LoadInstanceDescriptors(Register map,
880 Register descriptors);
881 void EnumLengthUntagged(Register dst, Register map);
882 void EnumLengthSmi(Register dst, Register map);
883 void NumberOfOwnDescriptors(Register dst, Register map);
884 void LoadAccessor(Register dst, Register holder, int accessor_index,
885 AccessorComponent accessor);
887 template<typename Field>
888 void DecodeField(Register dst, Register src) {
889 static const uint64_t shift = Field::kShift;
890 static const uint64_t setbits = CountSetBits(Field::kMask, 32);
891 Ubfx(dst, src, shift, setbits);
894 template<typename Field>
895 void DecodeField(Register reg) {
896 DecodeField<Field>(reg, reg);
// ---- SMI and Number Utilities ----
// NOTE(review): several multi-line declarations in this section have lost
// parameter lines in this copy (unbalanced parentheses, e.g. the source
// operand of SmiUntagToDouble/Float, the second value register of the
// JumpIf*Smi helpers, and most of LookupNumberStringCache's parameters) —
// restore them from the authoritative source before compiling.
inline void SmiTag(Register dst, Register src);
inline void SmiTag(Register smi);
inline void SmiUntag(Register dst, Register src);
inline void SmiUntag(Register smi);
inline void SmiUntagToDouble(FPRegister dst,
UntagMode mode = kNotSpeculativeUntag);
inline void SmiUntagToFloat(FPRegister dst,
UntagMode mode = kNotSpeculativeUntag);
// Tag and push in one step.
inline void SmiTagAndPush(Register src);
inline void SmiTagAndPush(Register src1, Register src2);
inline void JumpIfSmi(Register value,
Label* not_smi_label = NULL);
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1,
Label* both_smi_label,
Label* not_smi_label = NULL);
inline void JumpIfEitherSmi(Register value1,
Label* either_smi_label,
Label* not_smi_label = NULL);
inline void JumpIfEitherNotSmi(Register value1,
Label* not_smi_label);
inline void JumpIfBothNotSmi(Register value1,
Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
// Sets the vs flag if the input is -0.0.
void TestForMinusZero(DoubleRegister input);
// Jump to label if the input double register contains -0.0.
void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
// Jump to label if the input integer register contains the double precision
// floating point representation of -0.0.
void JumpIfMinusZero(Register input, Label* on_negative_zero);
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache the code jumps to
// the label not_found with only the content of register object unchanged.
void LookupNumberStringCache(Register object,
// Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
void ClampInt32ToUint8(Register in_out);
void ClampInt32ToUint8(Register output, Register input);
// Saturate a double in input to an unsigned 8-bit integer in output.
void ClampDoubleToUint8(Register output,
DoubleRegister input,
DoubleRegister dbl_scratch);
989 // Try to represent a double as a signed 32-bit int.
990 // This succeeds if the result compares equal to the input, so inputs of -0.0
991 // are represented as 0 and handled as a success.
993 // On output the Z flag is set if the operation was successful.
994 void TryRepresentDoubleAsInt32(Register as_int,
996 FPRegister scratch_d,
997 Label* on_successful_conversion = NULL,
998 Label* on_failed_conversion = NULL) {
999 DCHECK(as_int.Is32Bits());
1000 TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
1001 on_failed_conversion);
1004 // Try to represent a double as a signed 64-bit int.
1005 // This succeeds if the result compares equal to the input, so inputs of -0.0
1006 // are represented as 0 and handled as a success.
1008 // On output the Z flag is set if the operation was successful.
1009 void TryRepresentDoubleAsInt64(Register as_int,
1011 FPRegister scratch_d,
1012 Label* on_successful_conversion = NULL,
1013 Label* on_failed_conversion = NULL) {
1014 DCHECK(as_int.Is64Bits());
1015 TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
1016 on_failed_conversion);
// ---- Object Utilities ----
// NOTE(review): several multi-line declarations below have lost parameter
// lines in this copy (e.g. FillFields' filler register, CopyBytes' length,
// Throw/ThrowUncatchable's extra arguments, CallRuntime's argument count) —
// restore them before compiling.
// Copy fields from 'src' to 'dst', where both are tagged objects.
// The 'temps' list is a list of X registers which can be used for scratch
// values. The temps list must include at least one register.
// Currently, CopyFields cannot make use of more than three registers from
// the 'temps' list.
// CopyFields expects to be able to take at least two registers from
// MacroAssembler::TmpList().
void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
// Starting at address in dst, initialize field_count 64-bit fields with
// 64-bit value in register filler. Register dst is corrupted.
void FillFields(Register dst,
Register field_count,
// Copies a number of bytes from src to dst. All passed registers are
// clobbered. On exit src and dst will point to the place just after where the
// last byte was read or written and length will be zero. Hint may be used to
// determine which is the most efficient algorithm to use for copying.
void CopyBytes(Register dst,
CopyHint hint = kCopyUnknown);
// ---- String Utilities ----
// Jump to label if either object is not a sequential one-byte string.
// Optionally perform a smi check on the objects first.
void JumpIfEitherIsNotSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
// Check if instance type is sequential one-byte string and jump to label if
void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
// ---- Calling / Jumping helpers ----
// This is required for compatibility in architecture independent code.
inline void jmp(Label* L) { B(L); }
// Passes thrown value to the handler of top of the try handler chain.
// Register value must be x0.
void Throw(Register value,
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain. Register value must be x0.
void ThrowUncatchable(Register value,
void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
void TailCallStub(CodeStub* stub);
void CallRuntime(const Runtime::Function* f,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1104 void CallRuntime(Runtime::FunctionId id,
1106 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1107 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1110 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1111 const Runtime::Function* function = Runtime::FunctionForId(id);
1112 CallRuntime(function, function->nargs, kSaveFPRegs);
// NOTE(review): TailCallRuntime and several other multi-line declarations
// below are missing parameter lines in this copy (unbalanced parentheses) —
// restore them before compiling.
void TailCallRuntime(Runtime::FunctionId fid,
int ActivationFrameAlignment();
// Calls a C function.
// The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
void CallCFunction(ExternalReference function,
int num_reg_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
void TailCallExternalReference(const ExternalReference& ext,
void CallExternalReference(const ExternalReference& ext,
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in the function register.
void GetBuiltinEntry(Register target,
Builtins::JavaScript id);
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode);
void Jump(Handle<Code> code, RelocInfo::Mode rmode);
void Jump(intptr_t target, RelocInfo::Mode rmode);
void Call(Register target);
void Call(Label* target);
void Call(Address target, RelocInfo::Mode rmode);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None());
// For every Call variant, there is a matching CallSize function that returns
// the size (in bytes) of the call sequence.
static int CallSize(Register target);
static int CallSize(Label* target);
static int CallSize(Address target, RelocInfo::Mode rmode);
static int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None());
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
// 'function' must be x1.
// 'actual' must use an immediate or x0.
// 'expected' must use an immediate or x2.
// 'call_kind' must be x5.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
bool* definitely_mismatches,
const CallWrapper& call_wrapper);
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register.
// Changes the current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
const CallWrapper& call_wrapper);
// ---- Floating point helpers ----
// Perform a conversion from a double to a signed int64. If the input fits in
// range of the 64-bit result, execution branches to done. Otherwise,
// execution falls through, and the sign of the result can be used to
// determine if overflow was towards positive or negative infinity.
// On successful conversion, the least significant 32 bits of the result are
// equivalent to the ECMA-262 operation "ToInt32".
// Only public for the test code in test-code-stubs-arm64.cc.
void TryConvertDoubleToInt64(Register result,
DoubleRegister input,
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Register result, DoubleRegister double_input);
// Performs a truncating conversion of a heap number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
// must be different registers. Exits with 'result' holding the answer.
void TruncateHeapNumberToI(Register result, Register object);
// Converts the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
// different registers.
void TruncateNumberToI(Register object,
Register heap_number_map,
// ---- Code generation helpers ----
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() const { return generating_stub_; }
1260 void set_allow_macro_instructions(bool value) {
1261 allow_macro_instructions_ = value;
bool allow_macro_instructions() const { return allow_macro_instructions_; }
bool use_real_aborts() const { return use_real_aborts_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
// Presumably checks whether 'stub' may be called in the current state —
// confirm against the implementation in the .cc file.
bool AllowThisStubCall(CodeStub* stub);
1270 class NoUseRealAbortsScope {
1272 explicit NoUseRealAbortsScope(MacroAssembler* masm) :
1273 saved_(masm->use_real_aborts_), masm_(masm) {
1274 masm_->use_real_aborts_ = false;
1276 ~NoUseRealAbortsScope() {
1277 masm_->use_real_aborts_ = saved_;
1281 MacroAssembler* masm_;
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// Exception handling
// Push a new try handler and link into try handler chain.
void PushTryHandler(StackHandler::Kind kind, int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
void PopTryHandler();
// ---------------------------------------------------------------------------
// Allocation support
// NOTE(review): several multi-line declarations in this section have lost
// parameter lines in this copy (e.g. the Allocate overloads and the string
// allocation helpers) — restore them before compiling.
// Allocate an object in new space or old pointer space. The object_size is
// specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. The allocated object is returned in result.
// If the new space is exhausted control continues at the gc_required label.
// In this case, the result and scratch registers may still be clobbered.
// If flags includes TAG_OBJECT, the result is tagged as a heap object.
void Allocate(Register object_size,
AllocationFlags flags);
void Allocate(int object_size,
AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
// are left to the object(s) no longer allocated as they would be invalid when
// allocation is undone.
void UndoAllocationInNewSpace(Register object, Register scratch);
void AllocateTwoByteString(Register result,
Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Label* gc_required);
void AllocateOneByteConsString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
// All registers are clobbered.
// If no heap_number_map register is provided, the function will take care of
void AllocateHeapNumber(Register result,
CPURegister value = NoFPReg,
CPURegister heap_number_map = NoReg,
MutableMode mode = IMMUTABLE);
// ---------------------------------------------------------------------------
// Support functions.
// Controls whether TryGetFunctionPrototype misses on bound functions.
// NOTE(review): the closing '};' was missing in this copy and has been
// restored.
enum BoundFunctionAction {
  kMissOnBoundFunction,
  kDontMissOnBoundFunction
};
// NOTE(review): many multi-line declarations below have lost parameter lines
// in this copy (unbalanced parentheses, e.g. TryGetFunctionPrototype,
// CompareObjectType, CompareInstanceType, the CheckMap overloads,
// CheckAccessGlobalProxy and LoadFromNumberDictionary) — restore them before
// compiling.
void TryGetFunctionPrototype(Register function,
BoundFunctionAction action =
kDontMissOnBoundFunction);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
// It leaves the map in the map register (unless the type_reg and map register
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
void CompareObjectType(Register heap_object,
// Compare object type for heap object, and branch if equal (or not.)
// heap_object contains a non-Smi whose object type should be compared with
// the given type. This both sets the flags and leaves the object type in
// the type_reg register. It leaves the map in the map register (unless the
// type_reg and map register are the same register). It leaves the heap
// object in the heap_object register unless the heap_object register is the
// same register as one of the other registers.
void JumpIfObjectType(Register object,
Label* if_cond_pass,
Condition cond = eq);
void JumpIfNotObjectType(Register object,
Label* if_not_object);
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map,
// Compare an object's map with the specified map. Condition flags are set
// with result of map compare.
void CompareObjectMap(Register obj, Heap::RootListIndex index);
// Compare an object's map with the specified map. Condition flags are set
// with result of map compare.
void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
void CompareMap(Register obj_map,
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
SmiCheckType smi_check_type);
void CheckMap(Register obj,
Heap::RootListIndex index,
SmiCheckType smi_check_type);
// As above, but the map of the object is already loaded into obj_map, and is
void CheckMap(Register obj_map,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
// to a specified target if equal. Skip the smi check if not required
// (object is known to be a heap object)
void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Test the bitfield of the heap object map with mask and set the condition
// flags. The object register is preserved.
void TestMapBitfield(Register object, uint64_t mask);
// Load the elements kind field from a map, and return it in the result
void LoadElementsKindFromMap(Register result, Register map);
// Compare the object in a register to a value from the root list.
void CompareRoot(const Register& obj, Heap::RootListIndex index);
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(const Register& obj,
Heap::RootListIndex index,
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(const Register& obj,
Heap::RootListIndex index,
Label* if_not_equal);
// Load and check the instance type of an object for being a unique name.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
// will be overwritten with the type.
// Fall-through if the object was a string and jump on fail otherwise.
inline void IsObjectNameType(Register object, Register type, Label* fail);
inline void IsObjectJSObjectType(Register heap_object,
// Check the instance type in the given map to see if it corresponds to a
// JS object type. Jump to the fail label if this is not the case and fall
// through otherwise. However if fail label is NULL, no branch will be
// performed and the flag will be updated. You can test the flag for "le"
// condition to test if it is a valid JS object type.
inline void IsInstanceJSObjectType(Register map,
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
// will be overwritten with the type.
// Jumps to not_string or string appropriate. If the appropriate label is
// NULL, fall through.
inline void IsObjectJSStringType(Register object, Register type,
Label* not_string, Label* string = NULL);
// Compare the contents of a register with an operand, and branch to true,
// false or fall through, depending on condition.
void CompareAndSplit(const Register& lhs,
Label* fall_through);
// Test the bits of register defined by bit_pattern, and branch to
// if_any_set, if_all_clear or fall_through accordingly.
void TestAndSplit(const Register& reg,
uint64_t bit_pattern,
Label* if_all_clear,
Label* fall_through);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map, Register scratch, Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
// Check to see if number can be stored as a double in FastDoubleElements.
// If it can, store it at the index specified by key_reg in the array,
// otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register elements_reg,
FPRegister fpscratch1,
int elements_offset = 0);
// Picks out an array index from the hash field.
// hash - holds the index's hash. Clobbered.
// index - holds the overwritten index on exit.
void IndexFromHash(Register hash, Register index);
// ---------------------------------------------------------------------------
// Inline caching support.
void EmitSeqStringSetCharCheck(Register string,
SeqStringSetCharCheckIndexType index_type,
uint32_t encoding_mask);
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, whereas both scratch registers are clobbered.
void CheckAccessGlobalProxy(Register holder_reg,
// Hash the integer value in 'key' register.
// It uses the same algorithm as ComputeIntegerHash in utils.h.
void GetNumberHash(Register key, Register scratch);
// Load value from the dictionary.
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'result'.
// Unchanged on bailout so 'key' or 'result' can be used
// in further computation.
void LoadFromNumberDictionary(Label* miss,
// ---------------------------------------------------------------------------
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
// Returns map with validated enum cache in object register.
void CheckEnumCache(Register object,
Register null_value,
Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
// On entry, receiver should point to the array object.
// If allocation info is present, the Z flag is set (so that the eq
// condition will pass).
void TestJSArrayForAllocationMemento(Register receiver,
Label* no_memento_found);
1644 void JumpIfJSArrayHasAllocationMemento(Register receiver,
1647 Label* memento_found) {
1648 Label no_memento_found;
1649 TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
1651 B(eq, memento_found);
1652 Bind(&no_memento_found);
// The stack pointer has to switch between csp and jssp when setting up and
// destroying the exit frame. Hence preserving/restoring the registers is
// slightly more complicated than simple push/pop operations.
void ExitFramePreserveFPRegs();
void ExitFrameRestoreFPRegs();
// Generates function and stub prologue code.
void StubPrologue();
void Prologue(bool code_pre_aging);
// Enter exit frame. Exit frames are used when calling C code from generated
// (JavaScript) code.
// The stack pointer must be jssp on entry, and will be set to csp by this
// function. The frame pointer is also configured, but the only other
// registers modified by this function are the provided scratch register, and
// The 'extra_space' argument can be used to allocate some space in the exit
// frame that will be ignored by the GC. This space will be reserved in the
// bottom of the frame immediately above the return address slot.
// Set up a stack frame and registers as follows:
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: SPOffset (new csp)
// fp[-16]: CodeObject()
// fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
// csp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.
// This function also stores the new frame information in the top frame, so
// that the new frame becomes the current frame.
void EnterExitFrame(bool save_doubles,
const Register& scratch,
int extra_space = 0);
// Leave the current exit frame, after a C function has returned to generated
// (JavaScript) code.
// This effectively unwinds the operation of EnterExitFrame:
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
// * The stack pointer is reset to jssp.
// The stack pointer must be csp on entry.
void LeaveExitFrame(bool save_doubles,
const Register& scratch,
bool restore_context);
void LoadContext(Register dst, int context_chain_length);
// Emit code for a truncating division by a constant. The dividend register is
// unchanged. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
// ---------------------------------------------------------------------------
// StatsCounter support
// NOTE(review): the three counter helpers below appear to be missing their
// final parameter line in this copy — restore before compiling.
void SetCounter(StatsCounter* counter, int value, Register scratch1,
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
// ---------------------------------------------------------------------------
// Garbage collector support (GC).
1726 enum RememberedSetFinalAction {
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// NOTE(review): RememberedSetHelper's middle parameter lines are missing in
// this copy — restore before compiling.
void RememberedSetHelper(Register object, // Used for debug code.
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
1748 // Store value in register src in the safepoint stack slot for register dst.
1749 void StoreToSafepointRegisterSlot(Register src, Register dst) {
1750 Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
1753 // Load the value of the src register from its safepoint stack slot
1754 // into register dst.
1755 void LoadFromSafepointRegisterSlot(Register dst, Register src) {
1756 Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
// Test page flags for the page containing 'object' and branch accordingly.
// NOTE(review): the mask and label parameter lines of CheckPageFlagSet (and
// the mask line of CheckPageFlagClear) are missing in this copy.
void CheckPageFlagSet(const Register& object,
const Register& scratch,
void CheckPageFlagClear(const Register& object,
const Register& scratch,
Label* if_all_clear);
1769 // Check if object is in new space and jump accordingly.
1770 // Register 'object' is preserved.
1771 void JumpIfNotInNewSpace(Register object,
1773 InNewSpace(object, ne, branch);
1776 void JumpIfInNewSpace(Register object,
1778 InNewSpace(object, eq, branch);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
// NOTE(review): the leading parameter lines (object, offset, value, scratch)
// are missing in this copy — restore before compiling.
void RecordWriteField(
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
1800 inline void RecordWriteContextSlot(
1805 LinkRegisterStatus lr_status,
1806 SaveFPRegsMode save_fp,
1807 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1808 SmiCheck smi_check = INLINE_SMI_CHECK,
1809 PointersToHereCheck pointers_to_here_check_for_value =
1810 kPointersToHereMaybeInteresting) {
1811 RecordWriteField(context,
1812 offset + kHeapObjectTag,
1817 remembered_set_action,
1819 pointers_to_here_check_for_value);
1822 void RecordWriteForMap(
1826 LinkRegisterStatus lr_status,
1827 SaveFPRegsMode save_fp);
1829 // For a given |object| notify the garbage collector that the slot |address|
1830 // has been written. |value| is the object being stored. The value and
1831 // address registers are clobbered by the operation.
1836 LinkRegisterStatus lr_status,
1837 SaveFPRegsMode save_fp,
1838 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1839 SmiCheck smi_check = INLINE_SMI_CHECK,
1840 PointersToHereCheck pointers_to_here_check_for_value =
1841 kPointersToHereMaybeInteresting);
1843 // Checks the color of an object. If the object is already grey or black
1844 // then we just fall through, since it is already live. If it is white and
1845 // we can determine that it doesn't need to be scanned, then we just mark it
1846 // black and fall through. For the rest we jump to the label so the
1847 // incremental marker can fix its assumptions.
1848 void EnsureNotWhite(Register object,
1853 Label* object_is_white_and_not_data);
1855 // Detects conservatively whether an object is data-only, i.e. it does need to
1856 // be scanned by the garbage collector.
1857 void JumpIfDataObject(Register value,
1859 Label* not_data_object);
1861 // Helper for finding the mark bits for an address.
1862 // Note that the behaviour slightly differs from other architectures.
1864 // - addr_reg is unchanged.
1865 // - The bitmap register points at the word with the mark bits.
1866 // - The shift register contains the index of the first color bit for this
1867 // object in the bitmap.
1868 inline void GetMarkBits(Register addr_reg,
1869 Register bitmap_reg,
1870 Register shift_reg);
1872 // Check if an object has a given incremental marking color.
1873 void HasColor(Register object,
1880 void JumpIfBlack(Register object,
1886 // Get the location of a relocated constant (its address in the constant pool)
1887 // from its load site.
1888 void GetRelocatedValueLocation(Register ldr_location,
1892 // ---------------------------------------------------------------------------
1895 // Calls Abort(msg) if the condition cond is not satisfied.
1896 // Use --debug_code to enable.
1897 void Assert(Condition cond, BailoutReason reason);
1898 void AssertRegisterIsClear(Register reg, BailoutReason reason);
1899 void AssertRegisterIsRoot(
1901 Heap::RootListIndex index,
1902 BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
1903 void AssertFastElements(Register elements);
1905 // Abort if the specified register contains the invalid color bit pattern.
1906 // The pattern must be in bits [1:0] of 'reg' register.
1908 // If emit_debug_code() is false, this emits no code.
1909 void AssertHasValidColor(const Register& reg);
1911 // Abort if 'object' register doesn't point to a string object.
1913 // If emit_debug_code() is false, this emits no code.
1914 void AssertIsString(const Register& object);
1916 // Like Assert(), but always enabled.
1917 void Check(Condition cond, BailoutReason reason);
1918 void CheckRegisterIsClear(Register reg, BailoutReason reason);
1920 // Print a message to stderr and abort execution.
1921 void Abort(BailoutReason reason);
1923 // Conditionally load the cached Array transitioned map of type
1924 // transitioned_kind from the native context if the map in register
1925 // map_in_out is the cached Array map in the native context of
1927 void LoadTransitionedArrayMapConditional(
1928 ElementsKind expected_kind,
1929 ElementsKind transitioned_kind,
1930 Register map_in_out,
1933 Label* no_map_match);
1935 void LoadGlobalFunction(int index, Register function);
1937 // Load the initial map from the global function. The registers function and
1938 // map can be the same, function is then overwritten.
1939 void LoadGlobalFunctionInitialMap(Register function,
1943 CPURegList* TmpList() { return &tmp_list_; }
1944 CPURegList* FPTmpList() { return &fptmp_list_; }
1946 static CPURegList DefaultTmpList();
1947 static CPURegList DefaultFPTmpList();
1949 // Like printf, but print at run-time from generated code.
1951 // The caller must ensure that arguments for floating-point placeholders
1952 // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
1953 // placeholders are Registers.
1955 // At the moment it is only possible to print the value of csp if it is the
1956 // current stack pointer. Otherwise, the MacroAssembler will automatically
1957 // update csp on every push (using BumpSystemStackPointer), so determining its
1958 // value is difficult.
1960 // Format placeholders that refer to more than one argument, or to a specific
1961 // argument, are not supported. This includes formats like "%1$d" or "%.*d".
1963 // This function automatically preserves caller-saved registers so that
1964 // calling code can use Printf at any point without having to worry about
1965 // corruption. The preservation mechanism generates a lot of code. If this is
1966 // a problem, preserve the important registers manually and then call
1967 // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
1968 // implicitly preserved.
1969 void Printf(const char * format,
1970 CPURegister arg0 = NoCPUReg,
1971 CPURegister arg1 = NoCPUReg,
1972 CPURegister arg2 = NoCPUReg,
1973 CPURegister arg3 = NoCPUReg);
1975 // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
1977 // The return code from the system printf call will be returned in x0.
1978 void PrintfNoPreserve(const char * format,
1979 const CPURegister& arg0 = NoCPUReg,
1980 const CPURegister& arg1 = NoCPUReg,
1981 const CPURegister& arg2 = NoCPUReg,
1982 const CPURegister& arg3 = NoCPUReg);
1984 // Code ageing support functions.
1986 // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
1987 // function as old, it replaces some of the function prologue (generated by
1988 // FullCodeGenerator::Generate) with a call to a special stub (ultimately
1989 // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
1990 // function prologue to its initial young state (indicating that it has been
1991 // recently run) and continues. A young function is therefore one which has a
1992 // normal frame setup sequence, and an old function has a code age sequence
1993 // which calls a code ageing stub.
1995 // Set up a basic stack frame for young code (or code exempt from ageing) with
1996 // type FUNCTION. It may be patched later for code ageing support. This is
1997 // done by to Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
1999 // This function takes an Assembler so it can be called from either a
2000 // MacroAssembler or a PatchingAssembler context.
2001 static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
2003 // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
2004 void EmitFrameSetupForCodeAgePatching();
2006 // Emit a code age sequence that calls the relevant code age stub. The code
2007 // generated by this sequence is expected to replace the code generated by
2008 // EmitFrameSetupForCodeAgePatching, and represents an old function.
2010 // If stub is NULL, this function generates the code age sequence but omits
2011 // the stub address that is normally embedded in the instruction stream. This
2012 // can be used by debug code to verify code age sequences.
2013 static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
2015 // Call EmitCodeAgeSequence from a MacroAssembler context.
2016 void EmitCodeAgeSequence(Code* stub);
2018 // Return true if the sequence is a young sequence geneated by
2019 // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
2020 // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
2021 static bool IsYoungSequence(Isolate* isolate, byte* sequence);
2023 // Jumps to found label if a prototype map has dictionary elements.
2024 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
2025 Register scratch1, Label* found);
2027 // Perform necessary maintenance operations before a push or after a pop.
2029 // Note that size is specified in bytes.
2030 void PushPreamble(Operand total_size);
2031 void PopPostamble(Operand total_size);
2033 void PushPreamble(int count, int size) { PushPreamble(count * size); }
2034 void PopPostamble(int count, int size) { PopPostamble(count * size); }
2037 // Helpers for CopyFields.
2038 // These each implement CopyFields in a different way.
2039 void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
2040 Register scratch1, Register scratch2,
2041 Register scratch3, Register scratch4,
2043 void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
2044 Register scratch1, Register scratch2,
2045 Register scratch3, Register scratch4);
2046 void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
2047 Register scratch1, Register scratch2,
2050 // The actual Push and Pop implementations. These don't generate any code
2051 // other than that required for the push or pop. This allows
2052 // (Push|Pop)CPURegList to bundle together run-time assertions for a large
2053 // block of registers.
2055 // Note that size is per register, and is specified in bytes.
2056 void PushHelper(int count, int size,
2057 const CPURegister& src0, const CPURegister& src1,
2058 const CPURegister& src2, const CPURegister& src3);
2059 void PopHelper(int count, int size,
2060 const CPURegister& dst0, const CPURegister& dst1,
2061 const CPURegister& dst2, const CPURegister& dst3);
2063 // Call Printf. On a native build, a simple call will be generated, but if the
2064 // simulator is being used then a suitable pseudo-instruction is used. The
2065 // arguments and stack (csp) must be prepared by the caller as for a normal
2066 // AAPCS64 call to 'printf'.
2068 // The 'args' argument should point to an array of variable arguments in their
2069 // proper PCS registers (and in calling order). The argument registers can
2070 // have mixed types. The format string (x0) should not be included.
2071 void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
2073 // Helper for throwing exceptions. Compute a handler address and jump to
2074 // it. See the implementation for register usage.
2075 void JumpToHandlerEntry(Register exception,
2081 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
2082 void InNewSpace(Register object,
2083 Condition cond, // eq for new space, ne otherwise.
2086 // Try to represent a double as an int so that integer fast-paths may be
2087 // used. Not every valid integer value is guaranteed to be caught.
2088 // It supports both 32-bit and 64-bit integers depending whether 'as_int'
2089 // is a W or X register.
2091 // This does not distinguish between +0 and -0, so if this distinction is
2092 // important it must be checked separately.
2094 // On output the Z flag is set if the operation was successful.
2095 void TryRepresentDoubleAsInt(Register as_int,
2097 FPRegister scratch_d,
2098 Label* on_successful_conversion = NULL,
2099 Label* on_failed_conversion = NULL);
2101 bool generating_stub_;
2103 // Tell whether any of the macro instruction can be used. When false the
2104 // MacroAssembler will assert if a method which can emit a variable number
2105 // of instructions is called.
2106 bool allow_macro_instructions_;
2110 // The Abort method should call a V8 runtime function, but the CallRuntime
2111 // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
2112 // use a simpler abort mechanism that doesn't depend on CEntryStub.
2114 // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
2116 bool use_real_aborts_;
2118 // This handle will be patched with the code object on installation.
2119 Handle<Object> code_object_;
2121 // The register to use as a stack pointer for stack operations.
2124 // Scratch registers available for use by the MacroAssembler.
2125 CPURegList tmp_list_;
2126 CPURegList fptmp_list_;
2128 void InitializeNewString(Register string,
2130 Heap::RootListIndex map_index,
2135 // Far branches resolving.
2137 // The various classes of branch instructions with immediate offsets have
2138 // different ranges. While the Assembler will fail to assemble a branch
2139 // exceeding its range, the MacroAssembler offers a mechanism to resolve
2140 // branches to too distant targets, either by tweaking the generated code to
2141 // use branch instructions with wider ranges or generating veneers.
2143 // Currently branches to distant targets are resolved using unconditional
2144 // branch isntructions with a range of +-128MB. If that becomes too little
2145 // (!), the mechanism can be extended to generate special veneers for really
2148 // Helps resolve branching to labels potentially out of range.
2149 // If the label is not bound, it registers the information necessary to later
2150 // be able to emit a veneer for this branch if necessary.
2151 // If the label is bound, it returns true if the label (or the previous link
2152 // in the label chain) is out of range. In that case the caller is responsible
2153 // for generating appropriate code.
2154 // Otherwise it returns false.
2155 // This function also checks wether veneers need to be emitted.
2156 bool NeedExtraInstructionsOrRegisterBranch(Label *label,
2157 ImmBranchType branch_type);
2161 // Use this scope when you need a one-to-one mapping bewteen methods and
2162 // instructions. This scope prevents the MacroAssembler from being called and
2163 // literal pools from being emitted. It also asserts the number of instructions
2164 // emitted is what you specified when creating the scope.
2165 class InstructionAccurateScope BASE_EMBEDDED {
2167 explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
2171 size_(count * kInstructionSize)
2174 // Before blocking the const pool, see if it needs to be emitted.
2175 masm_->CheckConstPool(false, true);
2176 masm_->CheckVeneerPool(false, true);
2178 masm_->StartBlockPools();
2181 masm_->bind(&start_);
2183 previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
2184 masm_->set_allow_macro_instructions(false);
2188 ~InstructionAccurateScope() {
2189 masm_->EndBlockPools();
2191 if (start_.is_bound()) {
2192 DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
2194 masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
2199 MacroAssembler* masm_;
2203 bool previous_allow_macro_instructions_;
2208 // This scope utility allows scratch registers to be managed safely. The
2209 // MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
2210 // registers. These registers can be allocated on demand, and will be returned
2211 // at the end of the scope.
2213 // When the scope ends, the MacroAssembler's lists will be restored to their
2214 // original state, even if the lists were modified by some other means.
2215 class UseScratchRegisterScope {
2217 explicit UseScratchRegisterScope(MacroAssembler* masm)
2218 : available_(masm->TmpList()),
2219 availablefp_(masm->FPTmpList()),
2220 old_available_(available_->list()),
2221 old_availablefp_(availablefp_->list()) {
2222 DCHECK(available_->type() == CPURegister::kRegister);
2223 DCHECK(availablefp_->type() == CPURegister::kFPRegister);
2226 ~UseScratchRegisterScope();
2228 // Take a register from the appropriate temps list. It will be returned
2229 // automatically when the scope ends.
2230 Register AcquireW() { return AcquireNextAvailable(available_).W(); }
2231 Register AcquireX() { return AcquireNextAvailable(available_).X(); }
2232 FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
2233 FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
2235 Register UnsafeAcquire(const Register& reg) {
2236 return Register(UnsafeAcquire(available_, reg));
2239 Register AcquireSameSizeAs(const Register& reg);
2240 FPRegister AcquireSameSizeAs(const FPRegister& reg);
2243 static CPURegister AcquireNextAvailable(CPURegList* available);
2244 static CPURegister UnsafeAcquire(CPURegList* available,
2245 const CPURegister& reg);
2247 // Available scratch registers.
2248 CPURegList* available_; // kRegister
2249 CPURegList* availablefp_; // kFPRegister
2251 // The state of the available lists at the start of this scope.
2252 RegList old_available_; // kRegister
2253 RegList old_availablefp_; // kFPRegister
2257 inline MemOperand ContextMemOperand(Register context, int index) {
2258 return MemOperand(context, Context::SlotOffset(index));
2261 inline MemOperand GlobalObjectMemOperand() {
2262 return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
2266 // Encode and decode information about patchable inline SMI checks.
2267 class InlineSmiCheckInfo {
2269 explicit InlineSmiCheckInfo(Address info);
2271 bool HasSmiCheck() const {
2272 return smi_check_ != NULL;
2275 const Register& SmiRegister() const {
2279 Instruction* SmiCheck() const {
2283 // Use MacroAssembler::InlineData to emit information about patchable inline
2284 // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
2285 // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
2287 // The generated patch information can be read using the InlineSMICheckInfo
2289 static void Emit(MacroAssembler* masm, const Register& reg,
2290 const Label* smi_check);
2292 // Emit information to indicate that there is no inline SMI check.
2293 static void EmitNotInlined(MacroAssembler* masm) {
2295 Emit(masm, NoReg, &unbound);
2300 Instruction* smi_check_;
2302 // Fields in the data encoded by InlineData.
2304 // A width of 5 (Rd_width) for the SMI register preclues the use of csp,
2305 // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
2306 // used in a patchable check. The Emit() method checks this.
2308 // Note that the total size of the fields is restricted by the underlying
2309 // storage size handled by the BitField class, which is a uint32_t.
2310 class RegisterBits : public BitField<unsigned, 0, 5> {};
2311 class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
2314 } } // namespace v8::internal
2316 #ifdef GENERATED_CODE_COVERAGE
2317 #error "Unsupported option"
2318 #define CODE_COVERAGE_STRINGIFY(x) #x
2319 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
2320 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
2321 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
2323 #define ACCESS_MASM(masm) masm->
2326 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_