// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"


namespace v8 {
namespace internal {

// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __


MacroAssembler::MacroAssembler(Isolate* arg_isolate,
                               byte* buffer,
                               unsigned buffer_size)
    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
#if DEBUG
      allow_macro_instructions_(true),
#endif
      has_frame_(false),
      use_real_aborts_(true),
      sp_(jssp),
      tmp_list_(DefaultTmpList()),
      fptmp_list_(DefaultFPTmpList()) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


CPURegList MacroAssembler::DefaultTmpList() {
  return CPURegList(ip0, ip1);
}


CPURegList MacroAssembler::DefaultFPTmpList() {
  return CPURegList(fp_scratch1, fp_scratch2);
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
    }

    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
             ((immediate >> kWRegSizeInBits) == -1));
      immediate &= kWRegMask;
    }

    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
      if (rd.Is(csp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, imm_operand, op);
        Mov(csp, temp);
        AssertStackConsistency();
      } else {
        Logical(rd, rn, imm_operand, op);
      }
    }

  } else if (operand.IsExtendedRegister()) {
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

  } else {
    // The operand can be encoded in the instruction.
    DCHECK(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}


void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  DCHECK(allow_macro_instructions_);
  DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  DCHECK(!rd.IsZero());

  // TODO(all) extend to support more immediates.
  //
  // Immediates on Aarch64 can be produced using an initial value, and zero to
  // three move keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half-words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  // Try to move the immediate in one instruction, and if that fails, switch to
  // using multiple instructions.
  if (!TryOneInstrMoveImmediate(rd, imm)) {
    unsigned reg_size = rd.SizeInBits();

    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    DCHECK((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, (~imm16) & 0xffffL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    DCHECK(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
      AssertStackConsistency();
    }
  }
}
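
// Illustrative example (not part of the original source): moving
// imm = 0x0000cafe00001234 into x0 takes the multi-instruction path above,
// since no single-instruction encoding exists. Two of the four halfwords are
// 0x0000, so ignored_halfword stays 0 and the macro emits:
//   movz x0, #0x1234             // halfword 0
//   movk x0, #0xcafe, lsl #32    // halfword 2; halfwords 1 and 3 are skipped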


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(this)) {
    Ldr(dst, operand.immediate());

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.ImmediateValue());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If csp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    DCHECK(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  DCHECK(allow_macro_instructions_);

  if (operand.NeedsRelocation(this)) {
    Ldr(rd, operand.immediate());
    mvn(rd, rd);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, ~operand.ImmediateValue());

  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, rd);

  } else {
    mvn(rd, operand);
  }
}


unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}
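
// For example, CountClearHalfWords(0x0000123400000000, 64) is 3: only the
// halfword at bits [47:32] (0x1234) is non-zero.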


// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
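
// For example, IsImmMovz(0x0000123400000000, 64) is true (three of the four
// halfwords are clear), while IsImmMovz(0x0000123400005678, 64) is false.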


// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}
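
// For example, IsImmMovn(0xffff1234ffffffff, 64) is true: inverting gives
// 0x0000edcb00000000, which movz can generate, so movn can generate the
// original value.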


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  DCHECK((cond != al) && (cond != nv));
  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);

  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.ImmediateValue()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);

  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());
  DCHECK((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // registers.
    int64_t imm = operand.ImmediateValue();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, imm);
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}


bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
                                              int64_t imm) {
  unsigned n, imm_s, imm_r;
  int reg_size = dst.SizeInBits();
  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't write
    // to the stack pointer.
    movz(dst, imm);
    return true;
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move not instruction. Movn can't write
    // to the stack pointer.
    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
    return true;
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
    return true;
  }
  return false;
}
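
// For example, 0x0000000000001234 is handled by movz, 0xffffffffffff1234 by
// movn, and a repeating bit pattern such as 0x5555555555555555 by an
// orr-immediate with the zero register.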


Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
                                                  int64_t imm) {
  int reg_size = dst.SizeInBits();

  // Encode the immediate in a single move instruction, if possible.
  if (TryOneInstrMoveImmediate(dst, imm)) {
    // The move was successful; nothing to do here.
  } else {
    // Pre-shift the immediate to the least-significant bits of the register.
    int shift_low = CountTrailingZeros(imm, reg_size);
    int64_t imm_low = imm >> shift_low;

    // Pre-shift the immediate to the most-significant bits of the register. We
    // insert set bits in the least-significant bits, as this creates a
    // different immediate that may be encodable using movn or orr-immediate.
    // If this new immediate is encodable, the set bits will be eliminated by
    // the post shift on the following instruction.
    int shift_high = CountLeadingZeros(imm, reg_size);
    int64_t imm_high = (imm << shift_high) | ((1L << shift_high) - 1);

    if (TryOneInstrMoveImmediate(dst, imm_low)) {
      // The new immediate has been moved into the destination's low bits:
      // return a new leftward-shifting operand.
      return Operand(dst, LSL, shift_low);
    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
      // The new immediate has been moved into the destination's high bits:
      // return a new rightward-shifting operand.
      return Operand(dst, LSR, shift_high);
    } else {
      // Use the generic move operation to set up the immediate.
      Mov(dst, imm);
    }
  }
  return Operand(dst);
}
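
// Illustrative example (not part of the original source): for
// imm = 0x0000aaaa00000000, no single instruction applies, but shifting right
// by the 33 trailing zeros leaves 0x5555, which movz can encode. The macro
// then emits "movz dst, #0x5555" and returns Operand(dst, LSL, 33), letting
// the following add/sub or logical instruction apply the shift for free.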


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() &&
              !IsImmAddSub(operand.ImmediateValue())) ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
      AddSub(rd, rn, imm_operand, S, op);
    } else {
      Mov(temp, operand);
      AddSub(rd, rn, temp, S, op);
    }
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    DCHECK(operand.shift() != ROR);
    DCHECK(is_uintn(operand.shift_amount(),
                    rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                                       : kWRegSizeInBitsLog2));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes here.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}
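
// Illustrative example (not part of the original source): for
// Ldr(x0, MemOperand(x1, 0x123456)), the offset fits neither the scaled
// unsigned immediate field nor the signed 9-bit unscaled field, so the macro
// emits "Mov(temp, 0x123456)" followed by "ldr x0, [x1, temp]".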


void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
                                        const CPURegister& rt2,
                                        const MemOperand& addr,
                                        LoadStorePairOp op) {
  // TODO(all): Should we support register offset for load-store-pair?
  DCHECK(!addr.IsRegisterOffset());

  int64_t offset = addr.offset();
  LSDataSize size = CalcLSPairDataSize(op);

  // Check if the offset fits in the immediate field of the appropriate
  // instruction. If not, emit two instructions to perform the operation.
  if (IsImmLSPair(offset, size)) {
    // Encodable in one load/store pair instruction.
    LoadStorePair(rt, rt2, addr, op);
  } else {
    Register base = addr.base();
    if (addr.IsImmediateOffset()) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(base);
      Add(temp, base, offset);
      LoadStorePair(rt, rt2, MemOperand(temp), op);
    } else if (addr.IsPostIndex()) {
      LoadStorePair(rt, rt2, MemOperand(base), op);
      Add(base, base, offset);
    } else {
      DCHECK(addr.IsPreIndex());
      Add(base, base, offset);
      LoadStorePair(rt, rt2, MemOperand(base), op);
    }
  }
}


void MacroAssembler::Load(const Register& rt,
                          const MemOperand& addr,
                          Representation r) {
  DCHECK(!r.IsDouble());

  if (r.IsInteger8()) {
    Ldrsb(rt, addr);
  } else if (r.IsUInteger8()) {
    Ldrb(rt, addr);
  } else if (r.IsInteger16()) {
    Ldrsh(rt, addr);
  } else if (r.IsUInteger16()) {
    Ldrh(rt, addr);
  } else if (r.IsInteger32()) {
    Ldr(rt.W(), addr);
  } else {
    DCHECK(rt.Is64Bits());
    Ldr(rt, addr);
  }
}


void MacroAssembler::Store(const Register& rt,
                           const MemOperand& addr,
                           Representation r) {
  DCHECK(!r.IsDouble());

  if (r.IsInteger8() || r.IsUInteger8()) {
    Strb(rt, addr);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    Strh(rt, addr);
  } else if (r.IsInteger32()) {
    Str(rt.W(), addr);
  } else {
    DCHECK(rt.Is64Bits());
    if (r.IsHeapObject()) {
      AssertNotSmi(rt);
    } else if (r.IsSmi()) {
      AssertSmi(rt);
    }
    Str(rt, addr);
  }
}


bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
    Label *label, ImmBranchType b_type) {
  bool need_longer_range = false;
  // There are two situations in which we care about the offset being out of
  // range:
  //  - The label is bound but too far away.
  //  - The label is not bound but linked, and the previous branch
  //    instruction in the chain is too far away.
  if (label->is_bound() || label->is_linked()) {
    need_longer_range =
        !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
  }
  if (!need_longer_range && !label->is_bound()) {
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    // Also maintain the next pool check.
    next_veneer_pool_check_ =
        Min(next_veneer_pool_check_,
            max_reachable_pc - kVeneerDistanceCheckMargin);
  }
  return need_longer_range;
}


void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  if (hint == kAdrNear) {
    adr(rd, label);
    return;
  }

  DCHECK(hint == kAdrFar);
  if (label->is_bound()) {
    int label_offset = label->pos() - pc_offset();
    if (Instruction::IsValidPCRelOffset(label_offset)) {
      adr(rd, label);
    } else {
      DCHECK(label_offset <= 0);
      int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
      adr(rd, min_adr_offset);
      Add(rd, rd, label_offset - min_adr_offset);
    }
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.AcquireX();

    InstructionAccurateScope scope(
        this, PatchingAssembler::kAdrFarPatchableNInstrs);
    adr(rd, label);
    for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
      nop(ADR_FAR_NOP);
    }
    movz(scratch, 0);
  }
}


void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
         (bit == -1 || type >= kBranchTypeFirstUsingBit));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always:        B(label);              break;
      case never:         break;
      case reg_zero:      Cbz(reg, label);       break;
      case reg_not_zero:  Cbnz(reg, label);      break;
      case reg_bit_clear: Tbz(reg, bit, label);  break;
      case reg_bit_set:   Tbnz(reg, bit, label); break;
      default:
        UNREACHABLE();
    }
  }
}


void MacroAssembler::B(Label* label, Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK((cond != al) && (cond != nv));

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);

  if (need_extra_instructions) {
    b(&done, NegateCondition(cond));
    B(label);
  } else {
    b(label, cond);
  }
  bind(&done);
}
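
// Illustrative example (not part of the original source): if "label" is out
// of the +/-1MB conditional-branch range, "B(label, eq)" expands to
//   b.ne done    // inverted condition, short branch over the next instruction
//   b   label    // unconditional branch, +/-128MB range
//   done: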


void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    B(label);
  } else {
    tbnz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    B(label);
  } else {
    tbz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbz(rt, &done);
    B(label);
  } else {
    cbnz(rt, label);
  }
  bind(&done);
}


void MacroAssembler::Cbz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbnz(rt, &done);
    B(label);
  } else {
    cbz(rt, label);
  }
  bind(&done);
}


// Pseudo-instructions.


void MacroAssembler::Abs(const Register& rd, const Register& rm,
                         Label* is_not_representable,
                         Label* is_representable) {
  DCHECK(allow_macro_instructions_);
  DCHECK(AreSameSizeAndType(rd, rm));

  Cmp(rm, 1);
  Cneg(rd, rm, lt);

  // If the comparison sets the v flag, the input was the smallest value
  // representable by rm, and the mathematical result of abs(rm) is not
  // representable using two's complement.
  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
    B(is_representable);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
  }
}
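
// Note: the only input for which the V flag is set is INT64_MIN (or INT32_MIN
// for W registers); Cmp(rm, 1) overflows for that value, and its absolute
// value is not representable in two's complement.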


// Abstracted stack operations.


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));

  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(count, size, dst0, dst1, dst2, dst3);
  PopPostamble(count, size);
}


void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
  int size = src0.SizeInBytes() + src1.SizeInBytes();

  PushPreamble(size);
  // Reserve room for src0 and push src1.
  str(src1, MemOperand(StackPointer(), -size, PreIndex));
  // Fill the gap with src0.
  str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
}


void MacroAssembler::PushPopQueue::PushQueued(
    PreambleDirective preamble_directive) {
  if (queued_.empty()) return;

  if (preamble_directive == WITH_PREAMBLE) {
    masm_->PushPreamble(size_);
  }

  int count = queued_.size();
  int index = 0;
  while (index < count) {
    // PushHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}


void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

  int count = queued_.size();
  int index = 0;
  while (index < count) {
    // PopHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);
  }

  masm_->PopPostamble(size_);
  queued_.clear();
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PushPreamble(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in order
  // to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  // Pop up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
  PopPostamble(registers.Count(), size);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PushPreamble(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Label loop;
    __ Mov(temp, count / 2);
    __ Bind(&loop);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);
    __ B(ne, &loop);

    count %= 2;
  }

  // Push up to four registers at a time if possible because if the current
  // stack pointer is csp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for csp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  DCHECK(count == 0);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Label loop, done;

    Subs(temp, count, 1);
    B(mi, &done);

    // Push all registers individually, to save code size.
    Bind(&loop);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
    B(pl, &loop);

    Bind(&done);
  } else {
    Label loop, leftover2, leftover1, done;

    Subs(temp, count, 4);
    B(mi, &leftover2);

    // Push groups of four first.
    Bind(&loop);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    B(pl, &loop);

    // Push groups of two.
    Bind(&leftover2);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    // Push the last one (if required).
    Bind(&leftover1);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Bind(&done);
  }
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
  DCHECK(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
  switch (count) {
    case 1:
      DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      DCHECK(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      DCHECK(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using csp, whilst maintaining 16-byte alignment for csp
      // at all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      UNREACHABLE();
  }
}
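
// Illustrative example (not part of the original source): PushHelper with
// count 2 emits "stp src1, src0, [sp, #-2*size]!", so src0 ends up at the
// higher address, exactly as if Push(src0) had been followed by Push(src1).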


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      DCHECK(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      DCHECK(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using csp, whilst maintaining 16-byte alignment
      // for csp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PushPreamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else {
    // Even if the current stack pointer is not the system stack pointer (csp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(total_size);
  }
}


void MacroAssembler::PopPostamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else if (emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    SyncSystemStackPointer();
  }
}


void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PokePair(const CPURegister& src1,
                              const CPURegister& src2,
                              int offset) {
  DCHECK(AreSameSizeAndType(src1, src2));
  DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
  Stp(src1, src2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PeekPair(const CPURegister& dst1,
                              const CPURegister& dst2,
                              int offset) {
  DCHECK(AreSameSizeAndType(dst1, dst2));
  DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  MemOperand tos(csp, -2 * kXRegSize, PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);    // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  MemOperand tos(csp, 2 * kXRegSize, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);    // x28 = jssp
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}


void MacroAssembler::AssertStackConsistency() {
  // Avoid emitting code when !use_real_abort() since non-real aborts cause too
  // much code to be generated.
  if (emit_debug_code() && use_real_aborts()) {
    if (csp.Is(StackPointer())) {
      // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
      // can't check the alignment of csp without using a scratch register (or
      // clobbering the flags), but the processor (or simulator) will abort if
      // it is not properly aligned during a load.
      ldr(xzr, MemOperand(csp, 0));
    }
    if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
      Label ok;
      // Check that csp <= StackPointer(), preserving all registers and NZCV.
      sub(StackPointer(), csp, StackPointer());
      cbz(StackPointer(), &ok);                 // Ok if csp == StackPointer().
      tbnz(StackPointer(), kXSignBit, &ok);     // Ok if csp < StackPointer().

      // Avoid generating AssertStackConsistency checks for the Push in Abort.
      { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
        Abort(kTheCurrentStackPointerIsBelowCsp);
      }

      Bind(&ok);
      // Restore StackPointer().
      sub(StackPointer(), csp, StackPointer());
    }
  }
}


void MacroAssembler::AssertFPCRState(Register fpcr) {
  if (emit_debug_code()) {
    Label unexpected_mode, done;
    UseScratchRegisterScope temps(this);
    if (fpcr.IsNone()) {
      fpcr = temps.AcquireX();
      Mrs(fpcr, FPCR);
    }

    // Settings overridden by ConfigureFPCR():
    //   - Assert that default-NaN mode is set.
    Tbz(fpcr, DN_offset, &unexpected_mode);

    // Settings left to their default values:
    //   - Assert that flush-to-zero is not set.
    Tbnz(fpcr, FZ_offset, &unexpected_mode);
    //   - Assert that the rounding mode is nearest-with-ties-to-even.
    STATIC_ASSERT(FPTieEven == 0);
    Tst(fpcr, RMode_mask);
    B(eq, &done);

    Bind(&unexpected_mode);
    Abort(kUnexpectedFPCRMode);

    Bind(&done);
  }
}


void MacroAssembler::ConfigureFPCR() {
  UseScratchRegisterScope temps(this);
  Register fpcr = temps.AcquireX();
  Mrs(fpcr, FPCR);

  // If necessary, enable default-NaN mode. The default values of the other FPCR
  // options should be suitable, and AssertFPCRState will verify that.
  Label no_write_required;
  Tbnz(fpcr, DN_offset, &no_write_required);

  Orr(fpcr, fpcr, DN_mask);
  Msr(FPCR, fpcr);

  Bind(&no_write_required);
  AssertFPCRState(fpcr);
}


void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
                                     const FPRegister& src) {
  AssertFPCRState();

  // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
  // for NaNs, which become the default NaN. We use fsub rather than fadd
  // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  Fsub(dst, src, fp_zero);
}
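
// Note: with default-NaN mode enabled, every NaN input (quiet or signaling,
// and whatever its payload) produces the same default NaN, 0x7ff8000000000000
// for doubles; all other values pass through Fsub unchanged.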


void MacroAssembler::LoadRoot(CPURegister destination,
                              Heap::RootListIndex index) {
  // TODO(jbramley): Most root values are constants, and can be synthesized
  // without a load. Refer to the ARM back end for details.
  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  Str(source, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,
      MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
}


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Mov(result, Operand(cell));
    Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  } else {
    Mov(result, Operand(object));
  }
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Map::EnumLengthBits::kMask);
}


void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  EnumLengthUntagged(dst, map);
  SmiTag(dst, dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  Ldr(dst,
      FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  Ldr(dst, FieldMemOperand(dst, offset));
}


void MacroAssembler::CheckEnumCache(Register object,
                                    Register null_value,
                                    Register scratch0,
                                    Register scratch1,
                                    Register scratch2,
                                    Register scratch3,
                                    Label* call_runtime) {
  DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
                     scratch3));

  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;

  Mov(current_object, object);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  Register map = scratch2;
  Register enum_length = scratch3;
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  EnumLengthUntagged(enum_length, map);
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  B(&start);

  Bind(&next);
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

  Bind(&start);

  // Check that there are no elements. Register current_object contains the
  // current JS object we've reached through the prototype chain.
  Label no_elements;
  Ldr(current_object, FieldMemOperand(current_object,
                                      JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Bind(&no_elements);
  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
  Cmp(current_object, null_value);
  B(ne, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  Add(scratch1, receiver,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
  Cmp(scratch1, new_space_start);
  B(lt, no_memento_found);

  Mov(scratch2, new_space_allocation_top);
  Ldr(scratch2, MemOperand(scratch2));
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);

  Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
  Cmp(scratch1,
      Operand(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::JumpToHandlerEntry(Register exception,
                                        Register object,
                                        Register state,
                                        Register scratch1,
                                        Register scratch2) {
  // Handler expects argument in x0.
  DCHECK(exception.Is(x0));

  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
  Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
  Lsr(scratch2, state, StackHandler::kKindWidth);
  Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
  Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
  Br(scratch1);
}


void MacroAssembler::InNewSpace(Register object,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  And(temp, object, ExternalReference::new_space_mask(isolate()));
  Cmp(temp, ExternalReference::new_space_start(isolate()));
  B(cond, branch);
}


void MacroAssembler::Throw(Register value,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Register scratch4) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The handler expects the exception in x0.
  DCHECK(value.Is(x0));

  // Drop the stack pointer to the top of the top handler.
  DCHECK(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                          isolate())));
  Ldr(jssp, MemOperand(scratch1));
  // Restore the next handler.
  Pop(scratch2);
  Str(scratch2, MemOperand(scratch1));

  // Get the code object and state. Restore the context and frame pointer.
  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  Label not_js_frame;
  Cbz(cp, &not_js_frame);
  Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  Bind(&not_js_frame);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}


void MacroAssembler::ThrowUncatchable(Register value,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      Register scratch4) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The handler expects the exception in x0.
  DCHECK(value.Is(x0));

  // Drop the stack pointer to the top of the top stack handler.
  DCHECK(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                          isolate())));
  Ldr(jssp, MemOperand(scratch1));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  B(&check_kind);
  Bind(&fetch_next);
  Peek(jssp, StackHandlerConstants::kNextOffset);

  Bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  Peek(scratch2, StackHandlerConstants::kStateOffset);
  TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);

  // Set the top handler address to next handler past the top ENTRY handler.
  Pop(scratch2);
  Str(scratch2, MemOperand(scratch1));

  // Get the code object and state. Clear the context and frame pointer (0 was
  // saved in the handler).
  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}


void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, reason);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAName);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, LAST_NAME_TYPE);
    Check(ls, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
    Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    Bind(&done_checking);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsASmiAndNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All arguments must be on the stack before this function is called.
  // x0 holds the return value after the call.

  // Check that the number of arguments matches what the function expects.
  // If f->nargs is -1, the function can accept a variable number of arguments.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // Place the necessary arguments.
  Mov(x0, num_arguments);
  Mov(x1, ExternalReference(f, isolate()));

  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Mov(x0, num_arguments);
  Mov(x1, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  Mov(x1, builtin);
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  Ldr(target, GlobalObjectMemOperand());
  Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  Ldr(target, FieldMemOperand(target,
                              JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target,
                                     Register function,
                                     Builtins::JavaScript id) {
  DCHECK(!AreAliased(target, function));
  GetBuiltinFunction(function, id);
  // Load the code entry point from the builtins object.
  Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Get the builtin entry in x2 and setup the function object in x1.
  GetBuiltinEntry(x2, x1, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(x2));
    Call(x2);
    call_wrapper.AfterCall();
  } else {
    DCHECK(flag == JUMP_FUNCTION);
    Jump(x2);
  }
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Mov(x0, num_arguments);
  JumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  DCHECK(!AreAliased(string, length, scratch1, scratch2));
  LoadRoot(scratch2, map_index);
  SmiTag(scratch1, length);
  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));

  Mov(scratch2, String::kEmptyHashField);
  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM64
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM64
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args) {
  CallCFunction(function, num_of_reg_args, 0);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);
  CallCFunction(temp, num_of_reg_args, num_of_double_args);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  DCHECK(has_frame());
  // We can pass 8 integer arguments in registers. If we need to pass more than
  // that, we'll need to implement support for passing them on the stack.
  DCHECK(num_of_reg_args <= 8);

  // If we're passing doubles, we're limited to the following prototypes
  // (defined by ExternalReference::Type):
  //  BUILTIN_COMPARE_CALL: int f(double, double)
  //  BUILTIN_FP_FP_CALL:   double f(double, double)
  //  BUILTIN_FP_CALL:      double f(double)
  //  BUILTIN_FP_INT_CALL:  double f(double, int)
  if (num_of_double_args > 0) {
    DCHECK(num_of_reg_args <= 1);
    DCHECK((num_of_double_args + num_of_reg_args) <= 2);
  }

  // If the stack pointer is not csp, we need to derive an aligned csp from the
  // current stack pointer.
  const Register old_stack_pointer = StackPointer();
  if (!csp.Is(old_stack_pointer)) {
    AssertStackConsistency();

    int sp_alignment = ActivationFrameAlignment();
    // The ABI mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));

    // The current stack pointer is a callee saved register, and is preserved
    // across the call.
    DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));

    // Align and synchronize the system stack pointer with jssp.
    Bic(csp, old_stack_pointer, sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Call directly. The function called cannot cause a GC, or allow preemption,
  // so the return address in the link register stays correct.
  Call(function);

  if (!csp.Is(old_stack_pointer)) {
    if (emit_debug_code()) {
      // Because the stack pointer must be aligned on a 16-byte boundary, the
      // aligned csp can be up to 12 bytes below the jssp. This is the case
      // where we only pushed one W register on top of an aligned jssp.
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireX();
      DCHECK(ActivationFrameAlignment() == 16);
      Sub(temp, csp, old_stack_pointer);
      // We want temp <= 0 && temp >= -12.
      Cmp(temp, 0);
      Ccmp(temp, -12, NFlag, le);
      Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
    }
    SetStackPointer(old_stack_pointer);
  }
}


void MacroAssembler::Jump(Register target) {
  Br(target);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, Operand(target, rmode));
  Br(temp);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
}
1981 void MacroAssembler::Call(Register target) {
1982 BlockPoolsScope scope(this);
1983 #ifdef DEBUG
1984 Label start_call;
1985 Bind(&start_call);
1986 #endif
1988 Blr(target);
1990 #ifdef DEBUG
1991 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1992 #endif
1993 }
1996 void MacroAssembler::Call(Label* target) {
1997 BlockPoolsScope scope(this);
1998 #ifdef DEBUG
1999 Label start_call;
2000 Bind(&start_call);
2001 #endif
2003 Bl(target);
2005 #ifdef DEBUG
2006 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
2007 #endif
2008 }
2011 // MacroAssembler::CallSize is sensitive to changes in this function, as it
2012 // requires to know how many instructions are used to branch to the target.
2013 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
2014 BlockPoolsScope scope(this);
2015 #ifdef DEBUG
2016 Label start_call;
2017 Bind(&start_call);
2018 #endif
2019 // Statement positions are expected to be recorded when the target
2020 // address is loaded.
2021 positions_recorder()->WriteRecordedPositions();
2023 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2024 DCHECK(rmode != RelocInfo::NONE32);
2026 UseScratchRegisterScope temps(this);
2027 Register temp = temps.AcquireX();
2029 if (rmode == RelocInfo::NONE64) {
2030 // Addresses are 48 bits so we never need to load the upper 16 bits.
2031 uint64_t imm = reinterpret_cast<uint64_t>(target);
2032 // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
2033 DCHECK(((imm >> 48) & 0xffff) == 0);
2034 movz(temp, (imm >> 0) & 0xffff, 0);
2035 movk(temp, (imm >> 16) & 0xffff, 16);
2036 movk(temp, (imm >> 32) & 0xffff, 32);
2037 } else {
2038 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
2039 }
2040 Blr(temp);
2041 #ifdef DEBUG
2042 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
2043 #endif
2044 }
2047 void MacroAssembler::Call(Handle<Code> code,
2048 RelocInfo::Mode rmode,
2049 TypeFeedbackId ast_id) {
2050 #ifdef DEBUG
2051 Label start_call;
2052 Bind(&start_call);
2053 #endif
2055 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
2056 SetRecordedAstId(ast_id);
2057 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2058 }
2060 AllowDeferredHandleDereference embedding_raw_address;
2061 Call(reinterpret_cast<Address>(code.location()), rmode);
2063 #ifdef DEBUG
2064 // Check the size of the code generated.
2065 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
2066 #endif
2067 }
2070 int MacroAssembler::CallSize(Register target) {
2071 USE(target);
2072 return kInstructionSize;
2073 }
2076 int MacroAssembler::CallSize(Label* target) {
2077 USE(target);
2078 return kInstructionSize;
2079 }
2082 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2085 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2086 DCHECK(rmode != RelocInfo::NONE32);
2088 if (rmode == RelocInfo::NONE64) {
2089 return kCallSizeWithoutRelocation;
2090 } else {
2091 return kCallSizeWithRelocation;
2092 }
2093 }
2096 int MacroAssembler::CallSize(Handle<Code> code,
2097 RelocInfo::Mode rmode,
2098 TypeFeedbackId ast_id) {
2102 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2103 DCHECK(rmode != RelocInfo::NONE32);
2105 if (rmode == RelocInfo::NONE64) {
2106 return kCallSizeWithoutRelocation;
2107 } else {
2108 return kCallSizeWithRelocation;
2109 }
2110 }
2113 void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
2114 SmiCheckType smi_check_type) {
2115 Label on_not_heap_number;
2117 if (smi_check_type == DO_SMI_CHECK) {
2118 JumpIfSmi(object, &on_not_heap_number);
2119 }
2121 AssertNotSmi(object);
2123 UseScratchRegisterScope temps(this);
2124 Register temp = temps.AcquireX();
2125 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2126 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
2128 Bind(&on_not_heap_number);
2129 }
2132 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2133 Label* on_not_heap_number,
2134 SmiCheckType smi_check_type) {
2135 if (smi_check_type == DO_SMI_CHECK) {
2136 JumpIfSmi(object, on_not_heap_number);
2137 }
2139 AssertNotSmi(object);
2141 UseScratchRegisterScope temps(this);
2142 Register temp = temps.AcquireX();
2143 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2144 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
2145 }
2148 void MacroAssembler::LookupNumberStringCache(Register object,
2149 Register result,
2150 Register scratch1,
2151 Register scratch2,
2152 Register scratch3,
2153 Label* not_found) {
2154 DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
2156 // Use of registers. Register result is used as a temporary.
2157 Register number_string_cache = result;
2158 Register mask = scratch3;
2160 // Load the number string cache.
2161 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2163 // Make the hash mask from the length of the number string cache. It
2164 // contains two elements (number and string) for each cache entry.
2165 Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
2166 FixedArray::kLengthOffset));
2167 Asr(mask, mask, 1); // Divide length by two.
2168 Sub(mask, mask, 1); // Make mask.
2170 // Calculate the entry in the number string cache. The hash value in the
2171 // number string cache for smis is just the smi value, and the hash for
2172 // doubles is the xor of the upper and lower words. See
2173 // Heap::GetNumberStringCache.
2174 Label is_smi;
2175 Label load_result_from_cache;
2177 JumpIfSmi(object, &is_smi);
2178 JumpIfNotHeapNumber(object, not_found);
2180 STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
2181 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
2182 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
2183 Eor(scratch1, scratch1, scratch2);
2184 And(scratch1, scratch1, mask);
2186 // Calculate address of entry in string cache: each entry consists of two
2187 // pointer sized fields.
2188 Add(scratch1, number_string_cache,
2189 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
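// Each cache entry occupies two tagged pointers (the number and its cached
// string), so entry i lives at byte offset i * 2 * kPointerSize; the shift by
// kPointerSizeLog2 + 1 above multiplies the masked hash by exactly that
// entry size.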
2191 Register probe = mask;
2192 Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2193 JumpIfSmi(probe, not_found);
2194 Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
2195 Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
2196 Fcmp(d0, d1);
2197 B(ne, not_found);
2198 B(&load_result_from_cache);
2200 Bind(&is_smi);
2201 Register scratch = scratch1;
2202 And(scratch, mask, Operand::UntagSmi(object));
2203 // Calculate address of entry in string cache: each entry consists
2204 // of two pointer sized fields.
2205 Add(scratch, number_string_cache,
2206 Operand(scratch, LSL, kPointerSizeLog2 + 1));
2208 // Check if the entry is the smi we are looking for.
2209 Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2210 Cmp(object, probe);
2211 B(ne, not_found);
2213 // Get the result from the cache.
2214 Bind(&load_result_from_cache);
2215 Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
2216 IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
2217 scratch1, scratch2);
2218 }
2221 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2222 FPRegister value,
2223 FPRegister scratch_d,
2224 Label* on_successful_conversion,
2225 Label* on_failed_conversion) {
2226 // Convert to an int and back again, then compare with the original value.
2227 Fcvtzs(as_int, value);
2228 Scvtf(scratch_d, as_int);
2229 Fcmp(value, scratch_d);
2231 if (on_successful_conversion) {
2232 B(on_successful_conversion, eq);
2233 }
2234 if (on_failed_conversion) {
2235 B(on_failed_conversion, ne);
2236 }
2237 }
2240 void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2241 UseScratchRegisterScope temps(this);
2242 Register temp = temps.AcquireX();
2243 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
2244 // cause overflow.
2245 Fmov(temp, input);
2246 Cmp(temp, 1);
2247 }
2250 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2251 Label* on_negative_zero) {
2252 TestForMinusZero(input);
2253 B(vs, on_negative_zero);
2254 }
2257 void MacroAssembler::JumpIfMinusZero(Register input,
2258 Label* on_negative_zero) {
2259 DCHECK(input.Is64Bits());
2260 // Floating point value is in an integer register. Detect -0.0 by subtracting
2261 // 1 (cmp), which will cause overflow.
2262 Cmp(input, 1);
2263 B(vs, on_negative_zero);
2264 }
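// This relies on the bit pattern of -0.0: reinterpreted as an integer it is
// 0x8000000000000000, i.e. INT64_MIN, the only value for which subtracting 1
// causes signed overflow (vs). No other input can take this branch.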
2267 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2268 // Clamp the value to [0..255].
2269 Cmp(input.W(), Operand(input.W(), UXTB));
2270 // If input < input & 0xff, it must be < 0, so saturate to 0.
2271 Csel(output.W(), wzr, input.W(), lt);
2272 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2273 Csel(output.W(), output.W(), 255, le);
2274 }
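// A sketch of the three cases above, using assumed example values: for input
// 300, 300 > (300 & 0xff) so neither lt nor le holds and the output saturates
// to 255; for input -5, the signed compare against the zero-extended low byte
// sets lt, so the output becomes wzr (0); for input 100, the input equals its
// low byte, so both Csel instructions leave 100 in place.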
2277 void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2278 ClampInt32ToUint8(in_out, in_out);
2279 }
2282 void MacroAssembler::ClampDoubleToUint8(Register output,
2283 DoubleRegister input,
2284 DoubleRegister dbl_scratch) {
2285 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2286 // - Inputs lower than 0 (including -infinity) produce 0.
2287 // - Inputs higher than 255 (including +infinity) produce 255.
2288 // Also, it seems that PIXEL types use round-to-nearest rather than
2289 // round-towards-zero.
2291 // Squash +infinity before the conversion, since Fcvtnu will normally
2292 // convert it to 0.
2293 Fmov(dbl_scratch, 255);
2294 Fmin(dbl_scratch, dbl_scratch, input);
2296 // Convert double to unsigned integer. Values less than zero become zero.
2297 // Values greater than 255 have already been clamped to 255.
2298 Fcvtnu(output, dbl_scratch);
2299 }
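// The remaining edge cases also come out right: negative inputs (including
// -infinity) saturate to 0 in Fcvtnu, and a NaN input propagates through
// Fmin and is then converted to 0 as well.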
2302 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
2303 Register src,
2304 unsigned count,
2305 Register scratch1,
2306 Register scratch2,
2307 Register scratch3,
2308 Register scratch4,
2309 Register scratch5) {
2310 // Untag src and dst into scratch registers.
2311 // Copy src->dst in a tight loop.
2312 DCHECK(!AreAliased(dst, src,
2313 scratch1, scratch2, scratch3, scratch4, scratch5));
2316 const Register& remaining = scratch3;
2317 Mov(remaining, count / 2);
2319 const Register& dst_untagged = scratch1;
2320 const Register& src_untagged = scratch2;
2321 Sub(dst_untagged, dst, kHeapObjectTag);
2322 Sub(src_untagged, src, kHeapObjectTag);
2324 // Copy fields in pairs.
2325 Label loop;
2326 Bind(&loop);
2327 Ldp(scratch4, scratch5,
2328 MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2329 Stp(scratch4, scratch5,
2330 MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2331 Sub(remaining, remaining, 1);
2332 Cbnz(remaining, &loop);
2334 // Handle the leftovers.
2335 if (count & 1) {
2336 Ldr(scratch4, MemOperand(src_untagged));
2337 Str(scratch4, MemOperand(dst_untagged));
2338 }
2339 }
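// Because count is a compile-time constant here, parity is resolved at code
// generation time: the Ldp/Stp loop runs count / 2 iterations and the single
// trailing Ldr/Str above is only emitted when count is odd, so no runtime
// parity check is needed.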
2342 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
2343 Register src,
2344 unsigned count,
2345 Register scratch1,
2346 Register scratch2,
2347 Register scratch3,
2348 Register scratch4) {
2349 // Untag src and dst into scratch registers.
2350 // Copy src->dst in an unrolled loop.
2351 DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
2353 const Register& dst_untagged = scratch1;
2354 const Register& src_untagged = scratch2;
2355 sub(dst_untagged, dst, kHeapObjectTag);
2356 sub(src_untagged, src, kHeapObjectTag);
2358 // Copy fields in pairs.
2359 for (unsigned i = 0; i < count / 2; i++) {
2360 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2361 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2364 // Handle the leftovers.
2365 if (count & 1) {
2366 Ldr(scratch3, MemOperand(src_untagged));
2367 Str(scratch3, MemOperand(dst_untagged));
2368 }
2369 }
2372 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
2373 Register src,
2374 unsigned count,
2375 Register scratch1,
2376 Register scratch2,
2377 Register scratch3) {
2378 // Untag src and dst into scratch registers.
2379 // Copy src->dst in an unrolled loop.
2380 DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
2382 const Register& dst_untagged = scratch1;
2383 const Register& src_untagged = scratch2;
2384 Sub(dst_untagged, dst, kHeapObjectTag);
2385 Sub(src_untagged, src, kHeapObjectTag);
2387 // Copy fields one by one.
2388 for (unsigned i = 0; i < count; i++) {
2389 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
2390 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
2391 }
2392 }
2395 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
2396 unsigned count) {
2397 // One of two methods is used:
2399 // For high 'count' values where many scratch registers are available:
2400 // Untag src and dst into scratch registers.
2401 // Copy src->dst in a tight loop.
2403 // For low 'count' values or where few scratch registers are available:
2404 // Untag src and dst into scratch registers.
2405 // Copy src->dst in an unrolled loop.
2407 // In both cases, fields are copied in pairs if possible, and left-overs are
2408 // handled separately.
2409 DCHECK(!AreAliased(dst, src));
2410 DCHECK(!temps.IncludesAliasOf(dst));
2411 DCHECK(!temps.IncludesAliasOf(src));
2412 DCHECK(!temps.IncludesAliasOf(xzr));
2414 if (emit_debug_code()) {
2415 Cmp(dst, src);
2416 Check(ne, kTheSourceAndDestinationAreTheSame);
2417 }
2419 // The value of 'count' at which a loop will be generated (if there are
2420 // enough scratch registers).
2421 static const unsigned kLoopThreshold = 8;
2423 UseScratchRegisterScope masm_temps(this);
2424 if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
2425 CopyFieldsLoopPairsHelper(dst, src, count,
2426 Register(temps.PopLowestIndex()),
2427 Register(temps.PopLowestIndex()),
2428 Register(temps.PopLowestIndex()),
2429 masm_temps.AcquireX(),
2430 masm_temps.AcquireX());
2431 } else if (temps.Count() >= 2) {
2432 CopyFieldsUnrolledPairsHelper(dst, src, count,
2433 Register(temps.PopLowestIndex()),
2434 Register(temps.PopLowestIndex()),
2435 masm_temps.AcquireX(),
2436 masm_temps.AcquireX());
2437 } else if (temps.Count() == 1) {
2438 CopyFieldsUnrolledHelper(dst, src, count,
2439 Register(temps.PopLowestIndex()),
2440 masm_temps.AcquireX(),
2441 masm_temps.AcquireX());
2442 } else {
2443 UNREACHABLE();
2444 }
2445 }
2448 void MacroAssembler::CopyBytes(Register dst,
2449 Register src,
2450 Register length,
2451 Register scratch,
2452 CopyHint hint) {
2453 UseScratchRegisterScope temps(this);
2454 Register tmp1 = temps.AcquireX();
2455 Register tmp2 = temps.AcquireX();
2456 DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2457 DCHECK(!AreAliased(src, dst, csp));
2459 if (emit_debug_code()) {
2460 // Check copy length.
2461 Cmp(length, 0);
2462 Assert(ge, kUnexpectedNegativeValue);
2464 // Check src and dst buffers don't overlap.
2465 Add(scratch, src, length); // Calculate end of src buffer.
2466 Cmp(scratch, dst);
2467 Add(scratch, dst, length); // Calculate end of dst buffer.
2468 Ccmp(scratch, src, ZFlag, gt);
2469 Assert(le, kCopyBuffersOverlap);
2470 }
2472 Label short_copy, short_loop, bulk_loop, done;
2474 if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
2475 Register bulk_length = scratch;
2476 int pair_size = 2 * kXRegSize;
2477 int pair_mask = pair_size - 1;
2479 Bic(bulk_length, length, pair_mask);
2480 Cbz(bulk_length, &short_copy);
2481 Bind(&bulk_loop);
2482 Sub(bulk_length, bulk_length, pair_size);
2483 Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
2484 Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
2485 Cbnz(bulk_length, &bulk_loop);
2487 And(length, length, pair_mask);
2488 }
2490 Bind(&short_copy);
2491 Cbz(length, &done);
2492 Bind(&short_loop);
2493 Sub(length, length, 1);
2494 Ldrb(tmp1, MemOperand(src, 1, PostIndex));
2495 Strb(tmp1, MemOperand(dst, 1, PostIndex));
2496 Cbnz(length, &short_loop);
2498 Bind(&done);
2499 }
2503 void MacroAssembler::FillFields(Register dst,
2504 Register field_count,
2505 Register filler) {
2506 DCHECK(!dst.Is(csp));
2507 UseScratchRegisterScope temps(this);
2508 Register field_ptr = temps.AcquireX();
2509 Register counter = temps.AcquireX();
2510 Label done;
2512 // Decrement count. If the result < zero, count was zero, and there's nothing
2513 // to do. If count was one, flags are set to fail the gt condition at the end
2514 // of the pairs loop.
2515 Subs(counter, field_count, 1);
2516 B(lt, &done);
2518 // There's at least one field to fill, so do this unconditionally.
2519 Str(filler, MemOperand(dst, kPointerSize, PostIndex));
2521 // If the bottom bit of counter is set, there are an even number of fields to
2522 // fill, so pull the start pointer back by one field, allowing the pairs loop
2523 // to overwrite the field that was stored above.
2524 And(field_ptr, counter, 1);
2525 Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
2527 // Store filler to memory in pairs.
2528 Label entry, loop;
2529 B(&entry);
2530 Bind(&loop);
2531 Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
2532 Subs(counter, counter, 2);
2533 Bind(&entry);
2534 B(gt, &loop);
2536 Bind(&done);
2537 }
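// The bottom-of-loop branch consumes flags from the most recent Subs. On the
// first pass these come from Subs(counter, field_count, 1), so (assumed
// example values) field_count == 1 gives counter == 0 and the gt check fails
// before any Stp runs, while field_count == 4 gives counter == 3: the write
// pointer is pulled back one field and two Stp iterations (counter
// 3 -> 1 -> -1) rewrite the field stored above plus the remaining three.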
2540 void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
2541 Register first, Register second, Register scratch1, Register scratch2,
2542 Label* failure, SmiCheckType smi_check) {
2543 if (smi_check == DO_SMI_CHECK) {
2544 JumpIfEitherSmi(first, second, failure);
2545 } else if (emit_debug_code()) {
2546 DCHECK(smi_check == DONT_DO_SMI_CHECK);
2547 Label not_smi;
2548 JumpIfEitherSmi(first, second, NULL, &not_smi);
2550 // At least one input is a smi, but the flags indicated a smi check wasn't
2551 // requested.
2552 Abort(kUnexpectedSmi);
2554 Bind(&not_smi);
2555 }
2557 // Test that both first and second are sequential one-byte strings.
2558 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2559 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2560 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2561 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2563 JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
2564 scratch2, failure);
2565 }
2568 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
2569 Register first, Register second, Register scratch1, Register scratch2,
2570 Label* failure) {
2571 DCHECK(!AreAliased(scratch1, second));
2572 DCHECK(!AreAliased(scratch1, scratch2));
2573 static const int kFlatOneByteStringMask =
2574 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2575 static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
2576 And(scratch1, first, kFlatOneByteStringMask);
2577 And(scratch2, second, kFlatOneByteStringMask);
2578 Cmp(scratch1, kFlatOneByteStringTag);
2579 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2580 B(ne, failure);
2581 }
2584 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2585 Register scratch,
2586 Label* failure) {
2587 const int kFlatOneByteStringMask =
2588 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2589 const int kFlatOneByteStringTag =
2590 kStringTag | kOneByteStringTag | kSeqStringTag;
2591 And(scratch, type, kFlatOneByteStringMask);
2592 Cmp(scratch, kFlatOneByteStringTag);
2593 B(ne, failure);
2594 }
2597 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2598 Register first, Register second, Register scratch1, Register scratch2,
2599 Label* failure) {
2600 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2601 const int kFlatOneByteStringMask =
2602 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2603 const int kFlatOneByteStringTag =
2604 kStringTag | kOneByteStringTag | kSeqStringTag;
2605 And(scratch1, first, kFlatOneByteStringMask);
2606 And(scratch2, second, kFlatOneByteStringMask);
2607 Cmp(scratch1, kFlatOneByteStringTag);
2608 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2609 B(ne, failure);
2610 }
2613 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
2614 Label* not_unique_name) {
2615 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2616 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2617 // continue
2618 // } else {
2619 // goto not_unique_name
2620 // }
2621 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2622 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2623 B(ne, not_unique_name);
2624 }
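// The Tst/Ccmp pair fuses both tests without an intermediate branch: Tst
// leaves Z set only for internalized strings; Ccmp then performs the
// SYMBOL_TYPE comparison only when Z is clear (ne) and otherwise forces
// ZFlag, so the final B(ne, ...) fires exactly when the type is neither an
// internalized string nor a symbol.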
2627 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2628 const ParameterCount& actual,
2629 Handle<Code> code_constant,
2630 Register code_reg,
2631 Label* done,
2632 InvokeFlag flag,
2633 bool* definitely_mismatches,
2634 const CallWrapper& call_wrapper) {
2635 bool definitely_matches = false;
2636 *definitely_mismatches = false;
2637 Label regular_invoke;
2639 // Check whether the expected and actual arguments count match. If not,
2640 // setup registers according to contract with ArgumentsAdaptorTrampoline:
2641 // x0: actual arguments count.
2642 // x1: function (passed through to callee).
2643 // x2: expected arguments count.
2645 // The code below is made a lot easier because the calling code already sets
2646 // up actual and expected registers according to the contract if values are
2647 // passed in registers.
2648 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2649 DCHECK(expected.is_immediate() || expected.reg().is(x2));
2650 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
2652 if (expected.is_immediate()) {
2653 DCHECK(actual.is_immediate());
2654 if (expected.immediate() == actual.immediate()) {
2655 definitely_matches = true;
2656 } else {
2658 Mov(x0, actual.immediate());
2659 if (expected.immediate() ==
2660 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2661 // Don't worry about adapting arguments for builtins that
2662 // don't want that done. Skip adaption code by making it look
2663 // like we have a match between expected and actual number of
2664 // arguments.
2665 definitely_matches = true;
2666 } else {
2667 *definitely_mismatches = true;
2668 // Set up x2 for the argument adaptor.
2669 Mov(x2, expected.immediate());
2670 }
2671 }
2673 } else { // expected is a register.
2674 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2675 : Operand(actual.reg());
2676 // If actual == expected perform a regular invocation.
2677 Cmp(expected.reg(), actual_op);
2678 B(eq, &regular_invoke);
2679 // Otherwise set up x0 for the argument adaptor.
2680 Mov(x0, actual_op);
2681 }
2683 // If the argument counts may mismatch, generate a call to the argument
2684 // adaptor.
2685 if (!definitely_matches) {
2686 if (!code_constant.is_null()) {
2687 Mov(x3, Operand(code_constant));
2688 Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
2689 }
2691 Handle<Code> adaptor =
2692 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2693 if (flag == CALL_FUNCTION) {
2694 call_wrapper.BeforeCall(CallSize(adaptor));
2695 Call(adaptor);
2696 call_wrapper.AfterCall();
2697 if (!*definitely_mismatches) {
2698 // If the arg counts don't match, no extra code is emitted by
2699 // MAsm::InvokeCode and we can just fall through.
2700 B(done);
2701 }
2702 } else {
2703 Jump(adaptor, RelocInfo::CODE_TARGET);
2704 }
2705 }
2706 Bind(&regular_invoke);
2707 }
2710 void MacroAssembler::InvokeCode(Register code,
2711 const ParameterCount& expected,
2712 const ParameterCount& actual,
2713 InvokeFlag flag,
2714 const CallWrapper& call_wrapper) {
2715 // You can't call a function without a valid frame.
2716 DCHECK(flag == JUMP_FUNCTION || has_frame());
2718 Label done;
2720 bool definitely_mismatches = false;
2721 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2722 &definitely_mismatches, call_wrapper);
2724 // If we are certain that actual != expected, then we know InvokePrologue will
2725 // have handled the call through the argument adaptor mechanism.
2726 // The called function expects the call kind in x5.
2727 if (!definitely_mismatches) {
2728 if (flag == CALL_FUNCTION) {
2729 call_wrapper.BeforeCall(CallSize(code));
2730 Call(code);
2731 call_wrapper.AfterCall();
2732 } else {
2733 DCHECK(flag == JUMP_FUNCTION);
2734 Jump(code);
2735 }
2736 }
2738 // Continue here if InvokePrologue does handle the invocation due to
2739 // mismatched parameter counts.
2740 Bind(&done);
2741 }
2744 void MacroAssembler::InvokeFunction(Register function,
2745 const ParameterCount& actual,
2746 InvokeFlag flag,
2747 const CallWrapper& call_wrapper) {
2748 // You can't call a function without a valid frame.
2749 DCHECK(flag == JUMP_FUNCTION || has_frame());
2751 // Contract with called JS functions requires that function is passed in x1.
2752 // (See FullCodeGenerator::Generate().)
2753 DCHECK(function.is(x1));
2755 Register expected_reg = x2;
2756 Register code_reg = x3;
2758 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2759 // The number of arguments is stored as an int32_t, and -1 is a marker
2760 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2761 // extension to correctly handle it.
2762 Ldr(expected_reg, FieldMemOperand(function,
2763 JSFunction::kSharedFunctionInfoOffset));
2764 Ldrsw(expected_reg,
2765 FieldMemOperand(expected_reg,
2766 SharedFunctionInfo::kFormalParameterCountOffset));
2767 Ldr(code_reg,
2768 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2770 ParameterCount expected(expected_reg);
2771 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2772 }
2775 void MacroAssembler::InvokeFunction(Register function,
2776 const ParameterCount& expected,
2777 const ParameterCount& actual,
2778 InvokeFlag flag,
2779 const CallWrapper& call_wrapper) {
2780 // You can't call a function without a valid frame.
2781 DCHECK(flag == JUMP_FUNCTION || has_frame());
2783 // Contract with called JS functions requires that function is passed in x1.
2784 // (See FullCodeGenerator::Generate().)
2785 DCHECK(function.Is(x1));
2787 Register code_reg = x3;
2789 // Set up the context.
2790 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2792 // We call indirectly through the code field in the function to
2793 // allow recompilation to take effect without changing any of the
2794 // call sites.
2795 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2796 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2797 }
2800 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2801 const ParameterCount& expected,
2802 const ParameterCount& actual,
2803 InvokeFlag flag,
2804 const CallWrapper& call_wrapper) {
2805 // Contract with called JS functions requires that function is passed in x1.
2806 // (See FullCodeGenerator::Generate().)
2807 __ LoadObject(x1, function);
2808 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2809 }
2812 void MacroAssembler::TryConvertDoubleToInt64(Register result,
2813 DoubleRegister double_input,
2814 Label* done) {
2815 // Try to convert with an FPU convert instruction. It's trivial to compute
2816 // the modulo operation on an integer register so we convert to a 64-bit
2817 // integer.
2819 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2820 // when the double is out of range. NaNs and infinities will be converted to 0
2821 // (as ECMA-262 requires).
2822 Fcvtzs(result.X(), double_input);
2824 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2825 // representable using a double, so if the result is one of those then we know
2826 // that saturation occured, and we need to manually handle the conversion.
2828 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2829 // 1 will cause signed overflow.
2830 Cmp(result.X(), 1);
2831 Ccmp(result.X(), -1, VFlag, vc);
2833 B(vc, done);
2834 }
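// Worked example for the check above: a saturated result of INT64_MIN fails
// Cmp(result, 1) with signed overflow (V set), so Ccmp skips the compare and
// forces VFlag; a saturated INT64_MAX passes the first check but overflows on
// the implied result - (-1). Every in-range result leaves V clear after both,
// so B(vc, done) falls through only for the two saturated values.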
2837 void MacroAssembler::TruncateDoubleToI(Register result,
2838 DoubleRegister double_input) {
2839 Label done;
2841 // Try to convert the double to an int64. If successful, the bottom 32 bits
2842 // contain our truncated int32 result.
2843 TryConvertDoubleToInt64(result, double_input, &done);
2845 const Register old_stack_pointer = StackPointer();
2846 if (csp.Is(old_stack_pointer)) {
2847 // This currently only happens during compiler-unittest. If it arises
2848 // during regular code generation the DoubleToI stub should be updated to
2849 // cope with csp and have an extra parameter indicating which stack pointer
2850 // it should use.
2851 Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
2852 Mov(jssp, csp);
2853 SetStackPointer(jssp);
2854 }
2856 // If we fell through then inline version didn't succeed - call stub instead.
2857 Push(lr, double_input);
2859 DoubleToIStub stub(isolate(),
2860 jssp,
2861 result,
2862 0,
2863 true, // is_truncating
2864 true); // skip_fastpath
2865 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2867 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2868 Pop(xzr, lr); // xzr to drop the double input on the stack.
2870 if (csp.Is(old_stack_pointer)) {
2871 Mov(csp, jssp);
2872 SetStackPointer(csp);
2873 AssertStackConsistency();
2874 Pop(xzr, jssp);
2875 }
2877 Bind(&done);
2878 }
2881 void MacroAssembler::TruncateHeapNumberToI(Register result,
2882 Register object) {
2883 Label done;
2884 DCHECK(!result.is(object));
2885 DCHECK(jssp.Is(StackPointer()));
2887 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2889 // Try to convert the double to an int64. If successful, the bottom 32 bits
2890 // contain our truncated int32 result.
2891 TryConvertDoubleToInt64(result, fp_scratch, &done);
2893 // If we fell through then inline version didn't succeed - call stub instead.
2895 DoubleToIStub stub(isolate(),
2896 object,
2897 result,
2898 HeapNumber::kValueOffset - kHeapObjectTag,
2899 true, // is_truncating
2900 true); // skip_fastpath
2901 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2903 Bind(&done);
2904 }
2908 void MacroAssembler::StubPrologue() {
2909 DCHECK(StackPointer().Is(jssp));
2910 UseScratchRegisterScope temps(this);
2911 Register temp = temps.AcquireX();
2912 __ Mov(temp, Smi::FromInt(StackFrame::STUB));
2913 // Compiled stubs don't age, and so they don't need the predictable code
2914 // ageing sequence.
2915 __ Push(lr, fp, cp, temp);
2916 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
2917 }
2920 void MacroAssembler::Prologue(bool code_pre_aging) {
2921 if (code_pre_aging) {
2922 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2923 __ EmitCodeAgeSequence(stub);
2924 } else {
2925 __ EmitFrameSetupForCodeAgePatching();
2926 }
2927 }
2930 void MacroAssembler::EnterFrame(StackFrame::Type type,
2931 bool load_constant_pool_pointer_reg) {
2932 // Out-of-line constant pool not implemented on arm64.
2933 UNREACHABLE();
2934 }
2937 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2938 DCHECK(jssp.Is(StackPointer()));
2939 UseScratchRegisterScope temps(this);
2940 Register type_reg = temps.AcquireX();
2941 Register code_reg = temps.AcquireX();
2943 Push(lr, fp, cp);
2944 Mov(type_reg, Smi::FromInt(type));
2945 Mov(code_reg, Operand(CodeObject()));
2946 Push(type_reg, code_reg);
2947 // jssp[4] : lr
2948 // jssp[3] : fp
2949 // jssp[2] : cp
2950 // jssp[1] : type
2951 // jssp[0] : code object
2953 // Adjust FP to point to saved FP.
2954 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
2955 }
2958 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2959 DCHECK(jssp.Is(StackPointer()));
2960 // Drop the execution stack down to the frame pointer and restore
2961 // the caller frame pointer and return address.
2963 AssertStackConsistency();
2968 void MacroAssembler::ExitFramePreserveFPRegs() {
2969 PushCPURegList(kCallerSavedFP);
2970 }
2973 void MacroAssembler::ExitFrameRestoreFPRegs() {
2974 // Read the registers from the stack without popping them. The stack pointer
2975 // will be reset as part of the unwinding process.
2976 CPURegList saved_fp_regs = kCallerSavedFP;
2977 DCHECK(saved_fp_regs.Count() % 2 == 0);
2979 int offset = ExitFrameConstants::kLastExitFrameField;
2980 while (!saved_fp_regs.IsEmpty()) {
2981 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2982 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2983 offset -= 2 * kDRegSize;
2984 Ldp(dst1, dst0, MemOperand(fp, offset));
2985 }
2986 }
2989 void MacroAssembler::EnterExitFrame(bool save_doubles,
2990 const Register& scratch,
2991 int extra_space) {
2992 DCHECK(jssp.Is(StackPointer()));
2994 // Set up the new stack frame.
2995 Mov(scratch, Operand(CodeObject()));
2996 Push(lr, fp);
2997 Mov(fp, StackPointer());
2998 Push(xzr, scratch);
2999 // fp[8]: CallerPC (lr)
3000 // fp -> fp[0]: CallerFP (old fp)
3001 // fp[-8]: Space reserved for SPOffset.
3002 // jssp -> fp[-16]: CodeObject()
3003 STATIC_ASSERT((2 * kPointerSize) ==
3004 ExitFrameConstants::kCallerSPDisplacement);
3005 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
3006 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
3007 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
3008 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
3010 // Save the frame pointer and context pointer in the top frame.
3011 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
3012 isolate())));
3013 Str(fp, MemOperand(scratch));
3014 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3015 isolate())));
3016 Str(cp, MemOperand(scratch));
3018 STATIC_ASSERT((-2 * kPointerSize) ==
3019 ExitFrameConstants::kLastExitFrameField);
3020 if (save_doubles) {
3021 ExitFramePreserveFPRegs();
3022 }
3024 // Reserve space for the return address and for user requested memory.
3025 // We do this before aligning to make sure that we end up correctly
3026 // aligned with the minimum of wasted space.
3027 Claim(extra_space + 1, kXRegSize);
3028 // fp[8]: CallerPC (lr)
3029 // fp -> fp[0]: CallerFP (old fp)
3030 // fp[-8]: Space reserved for SPOffset.
3031 // fp[-16]: CodeObject()
3032 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
3033 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
3034 // jssp -> jssp[0]: Space reserved for the return address.
3036 // Align and synchronize the system stack pointer with jssp.
3037 AlignAndSetCSPForFrame();
3038 DCHECK(csp.Is(StackPointer()));
3040 // fp[8]: CallerPC (lr)
3041 // fp -> fp[0]: CallerFP (old fp)
3042 // fp[-8]: Space reserved for SPOffset.
3043 // fp[-16]: CodeObject()
3044 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
3045 // csp[8]: Memory reserved for the caller if extra_space != 0.
3046 // Alignment padding, if necessary.
3047 // csp -> csp[0]: Space reserved for the return address.
3049 // ExitFrame::GetStateForFramePointer expects to find the return address at
3050 // the memory address immediately below the pointer stored in SPOffset.
3051 // It is not safe to derive much else from SPOffset, because the size of the
3052 // padding can vary.
3053 Add(scratch, csp, kXRegSize);
3054 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
3055 }
3058 // Leave the current exit frame.
3059 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
3060 const Register& scratch,
3061 bool restore_context) {
3062 DCHECK(csp.Is(StackPointer()));
3064 if (restore_doubles) {
3065 ExitFrameRestoreFPRegs();
3066 }
3068 // Restore the context pointer from the top frame.
3069 if (restore_context) {
3070 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3071 isolate())));
3072 Ldr(cp, MemOperand(scratch));
3075 if (emit_debug_code()) {
3076 // Also emit debug code to clear the cp in the top frame.
3077 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3078 isolate())));
3079 Str(xzr, MemOperand(scratch));
3080 }
3081 // Clear the frame pointer from the top frame.
3082 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
3083 isolate())));
3084 Str(xzr, MemOperand(scratch));
3086 // Pop the exit frame.
3087 // fp[8]: CallerPC (lr)
3088 // fp -> fp[0]: CallerFP (old fp)
3089 // fp[...]: The rest of the frame.
3090 Mov(jssp, fp);
3091 SetStackPointer(jssp);
3092 AssertStackConsistency();
3093 Pop(fp, lr);
3094 }
3097 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
3098 Register scratch1, Register scratch2) {
3099 if (FLAG_native_code_counters && counter->Enabled()) {
3100 Mov(scratch1, value);
3101 Mov(scratch2, ExternalReference(counter));
3102 Str(scratch1, MemOperand(scratch2));
3103 }
3104 }
3107 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
3108 Register scratch1, Register scratch2) {
3109 DCHECK(value != 0);
3110 if (FLAG_native_code_counters && counter->Enabled()) {
3111 Mov(scratch2, ExternalReference(counter));
3112 Ldr(scratch1, MemOperand(scratch2));
3113 Add(scratch1, scratch1, value);
3114 Str(scratch1, MemOperand(scratch2));
3115 }
3116 }
3119 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
3120 Register scratch1, Register scratch2) {
3121 IncrementCounter(counter, -value, scratch1, scratch2);
3122 }
3125 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
3126 if (context_chain_length > 0) {
3127 // Move up the chain of contexts to the context containing the slot.
3128 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3129 for (int i = 1; i < context_chain_length; i++) {
3130 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3131 }
3132 } else {
3133 // Slot is in the current function context. Move it into the
3134 // destination register in case we store into it (the write barrier
3135 // cannot be allowed to destroy the context in cp).
3136 Mov(dst, cp);
3137 }
3138 }
3141 void MacroAssembler::DebugBreak() {
3142 Mov(x0, 0);
3143 Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
3144 CEntryStub ces(isolate(), 1);
3145 DCHECK(AllowThisStubCall(&ces));
3146 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3147 }
3150 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3151 int handler_index) {
3152 DCHECK(jssp.Is(StackPointer()));
3153 // Adjust this code if the asserts don't hold.
3154 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3155 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3156 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3157 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3158 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3159 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3161 // For the JSEntry handler, we must preserve the live registers x0-x4.
3162 // (See JSEntryStub::GenerateBody().)
3164 unsigned state =
3165 StackHandler::IndexField::encode(handler_index) |
3166 StackHandler::KindField::encode(kind);
3168 // Set up the code object and the state for pushing.
3169 Mov(x10, Operand(CodeObject()));
3170 Mov(x11, state);
3172 // Push the frame pointer, context, state, and code object.
3173 if (kind == StackHandler::JS_ENTRY) {
3174 DCHECK(Smi::FromInt(0) == 0);
3175 Push(xzr, xzr, x11, x10);
3177 Push(fp, cp, x11, x10);
3180 // Link the current handler as the next handler.
3181 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3182 Ldr(x10, MemOperand(x11));
3183 Push(x10);
3184 // Set this new handler as the current one.
3185 Str(jssp, MemOperand(x11));
3186 }
3189 void MacroAssembler::PopTryHandler() {
3190 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3191 Pop(x10);
3192 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3193 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
3194 Str(x10, MemOperand(x11));
3195 }
3198 void MacroAssembler::Allocate(int object_size,
3199 Register result,
3200 Register scratch1,
3201 Register scratch2,
3202 Label* gc_required,
3203 AllocationFlags flags) {
3204 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3205 if (!FLAG_inline_new) {
3206 if (emit_debug_code()) {
3207 // Trash the registers to simulate an allocation failure.
3208 // We apply salt to the original zap value to easily spot the values.
3209 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3210 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3211 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3212 }
3213 B(gc_required);
3214 return;
3215 }
3217 UseScratchRegisterScope temps(this);
3218 Register scratch3 = temps.AcquireX();
3220 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
3221 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3223 // Make object size into bytes.
3224 if ((flags & SIZE_IN_WORDS) != 0) {
3225 object_size *= kPointerSize;
3226 }
3227 DCHECK(0 == (object_size & kObjectAlignmentMask));
3229 // Check relative positions of allocation top and limit addresses.
3230 // The values must be adjacent in memory to allow the use of LDP.
3231 ExternalReference heap_allocation_top =
3232 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3233 ExternalReference heap_allocation_limit =
3234 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3235 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3236 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3237 DCHECK((limit - top) == kPointerSize);
3239 // Set up allocation top address and object size registers.
3240 Register top_address = scratch1;
3241 Register allocation_limit = scratch2;
3242 Mov(top_address, Operand(heap_allocation_top));
3244 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3245 // Load allocation top into result and the allocation limit.
3246 Ldp(result, allocation_limit, MemOperand(top_address));
3247 } else {
3248 if (emit_debug_code()) {
3249 // Assert that result actually contains top on entry.
3250 Ldr(scratch3, MemOperand(top_address));
3251 Cmp(result, scratch3);
3252 Check(eq, kUnexpectedAllocationTop);
3253 }
3254 // Load the allocation limit. 'result' already contains the allocation top.
3255 Ldr(allocation_limit, MemOperand(top_address, limit - top));
3256 }
3258 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3259 // the same alignment on ARM64.
3260 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3262 // Calculate new top and bail out if new space is exhausted.
3263 Adds(scratch3, result, object_size);
3264 Ccmp(scratch3, allocation_limit, CFlag, cc);
3265 B(hi, gc_required);
3266 Str(scratch3, MemOperand(top_address));
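// The conditional compare above doubles as a wrap-around guard: if the Adds
// carries out of 64 bits, the cc (carry clear) condition fails and Ccmp
// forces CFlag instead of comparing, so the hi condition holds and we branch
// to gc_required just as for a new top beyond the limit.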
3268 // Tag the object if requested.
3269 if ((flags & TAG_OBJECT) != 0) {
3270 ObjectTag(result, result);
3271 }
3272 }
3275 void MacroAssembler::Allocate(Register object_size,
3276 Register result,
3277 Register scratch1,
3278 Register scratch2,
3279 Label* gc_required,
3280 AllocationFlags flags) {
3281 if (!FLAG_inline_new) {
3282 if (emit_debug_code()) {
3283 // Trash the registers to simulate an allocation failure.
3284 // We apply salt to the original zap value to easily spot the values.
3285 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3286 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3287 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3288 }
3289 B(gc_required);
3290 return;
3291 }
3293 UseScratchRegisterScope temps(this);
3294 Register scratch3 = temps.AcquireX();
3296 DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
3297 DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
3298 scratch1.Is64Bits() && scratch2.Is64Bits());
3300 // Check relative positions of allocation top and limit addresses.
3301 // The values must be adjacent in memory to allow the use of LDP.
3302 ExternalReference heap_allocation_top =
3303 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3304 ExternalReference heap_allocation_limit =
3305 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3306 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3307 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3308 DCHECK((limit - top) == kPointerSize);
3310 // Set up allocation top address and object size registers.
3311 Register top_address = scratch1;
3312 Register allocation_limit = scratch2;
3313 Mov(top_address, heap_allocation_top);
3315 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3316 // Load allocation top into result and the allocation limit.
3317 Ldp(result, allocation_limit, MemOperand(top_address));
3318 } else {
3319 if (emit_debug_code()) {
3320 // Assert that result actually contains top on entry.
3321 Ldr(scratch3, MemOperand(top_address));
3322 Cmp(result, scratch3);
3323 Check(eq, kUnexpectedAllocationTop);
3324 }
3325 // Load the allocation limit. 'result' already contains the allocation top.
3326 Ldr(allocation_limit, MemOperand(top_address, limit - top));
3327 }
3329 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3330 // the same alignment on ARM64.
3331 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3333 // Calculate new top and bail out if new space is exhausted
3334 if ((flags & SIZE_IN_WORDS) != 0) {
3335 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
3336 } else {
3337 Adds(scratch3, result, object_size);
3338 }
3340 if (emit_debug_code()) {
3341 Tst(scratch3, kObjectAlignmentMask);
3342 Check(eq, kUnalignedAllocationInNewSpace);
3343 }
3345 Ccmp(scratch3, allocation_limit, CFlag, cc);
3346 B(hi, gc_required);
3347 Str(scratch3, MemOperand(top_address));
3349 // Tag the object if requested.
3350 if ((flags & TAG_OBJECT) != 0) {
3351 ObjectTag(result, result);
3352 }
3353 }
3356 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3357 Register scratch) {
3358 ExternalReference new_space_allocation_top =
3359 ExternalReference::new_space_allocation_top_address(isolate());
3361 // Make sure the object has no tag before resetting top.
3362 Bic(object, object, kHeapObjectTagMask);
3364 // Check that the object un-allocated is below the current top.
3365 Mov(scratch, new_space_allocation_top);
3366 Ldr(scratch, MemOperand(scratch));
3367 Cmp(object, scratch);
3368 Check(lt, kUndoAllocationOfNonAllocatedMemory);
3370 // Write the address of the object to un-allocate as the current top.
3371 Mov(scratch, new_space_allocation_top);
3372 Str(object, MemOperand(scratch));
3373 }
3376 void MacroAssembler::AllocateTwoByteString(Register result,
3377 Register length,
3378 Register scratch1,
3379 Register scratch2,
3380 Register scratch3,
3381 Label* gc_required) {
3382 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3383 // Calculate the number of bytes needed for the characters in the string while
3384 // observing object alignment.
3385 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3386 Add(scratch1, length, length); // Length in bytes, not chars.
3387 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3388 Bic(scratch1, scratch1, kObjectAlignmentMask);
3390 // Allocate two-byte string in new space.
3391 Allocate(scratch1,
3392 result,
3393 scratch2,
3394 scratch3,
3395 gc_required,
3396 TAG_OBJECT);
3398 // Set the map, length and hash field.
3399 InitializeNewString(result,
3400 length,
3401 Heap::kStringMapRootIndex,
3402 scratch1,
3403 scratch2);
3404 }
3407 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3408 Register scratch1, Register scratch2,
3409 Register scratch3,
3410 Label* gc_required) {
3411 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3412 // Calculate the number of bytes needed for the characters in the string while
3413 // observing object alignment.
3414 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3415 STATIC_ASSERT(kCharSize == 1);
3416 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3417 Bic(scratch1, scratch1, kObjectAlignmentMask);
3419 // Allocate one-byte string in new space.
3420 Allocate(scratch1,
3421 result,
3422 scratch2,
3423 scratch3,
3424 gc_required,
3425 TAG_OBJECT);
3427 // Set the map, length and hash field.
3428 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3429 scratch1, scratch2);
3430 }
3433 void MacroAssembler::AllocateTwoByteConsString(Register result,
3434 Register length,
3435 Register scratch1,
3436 Register scratch2,
3437 Label* gc_required) {
3438 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3439 TAG_OBJECT);
3441 InitializeNewString(result,
3442 length,
3443 Heap::kConsStringMapRootIndex,
3444 scratch1,
3445 scratch2);
3446 }
3449 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3450 Register scratch1,
3451 Register scratch2,
3452 Label* gc_required) {
3453 Allocate(ConsString::kSize,
3454 result,
3455 scratch1,
3456 scratch2,
3457 gc_required,
3458 TAG_OBJECT);
3460 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3461 scratch1, scratch2);
3462 }
3465 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3466 Register length,
3467 Register scratch1,
3468 Register scratch2,
3469 Label* gc_required) {
3470 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3471 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3472 TAG_OBJECT);
3474 InitializeNewString(result,
3475 length,
3476 Heap::kSlicedStringMapRootIndex,
3477 scratch1,
3478 scratch2);
3479 }
3482 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3483 Register length,
3484 Register scratch1,
3485 Register scratch2,
3486 Label* gc_required) {
3487 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3488 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3489 TAG_OBJECT);
3491 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3492 scratch1, scratch2);
3493 }
3496 // Allocates a heap number or jumps to the need_gc label if the young space
3497 // is full and a scavenge is needed.
3498 void MacroAssembler::AllocateHeapNumber(Register result,
3499 Label* gc_required,
3500 Register scratch1,
3501 Register scratch2,
3502 CPURegister value,
3503 CPURegister heap_number_map,
3504 MutableMode mode) {
3505 DCHECK(!value.IsValid() || value.Is64Bits());
3506 UseScratchRegisterScope temps(this);
3508 // Allocate an object in the heap for the heap number and tag it as a heap
3509 // object.
3510 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3511 NO_ALLOCATION_FLAGS);
3513 Heap::RootListIndex map_index = mode == MUTABLE
3514 ? Heap::kMutableHeapNumberMapRootIndex
3515 : Heap::kHeapNumberMapRootIndex;
3517 // Prepare the heap number map.
3518 if (!heap_number_map.IsValid()) {
3519 // If we have a valid value register, use the same type of register to store
3520 // the map so we can use STP to store both in one instruction.
3521 if (value.IsValid() && value.IsFPRegister()) {
3522 heap_number_map = temps.AcquireD();
3523 } else {
3524 heap_number_map = scratch1;
3525 }
3526 LoadRoot(heap_number_map, map_index);
3527 }
3528 if (emit_debug_code()) {
3529 Register map;
3530 if (heap_number_map.IsFPRegister()) {
3531 map = temps.AcquireX();
3532 Fmov(map, DoubleRegister(heap_number_map));
3533 } else {
3534 map = Register(heap_number_map);
3535 }
3536 AssertRegisterIsRoot(map, map_index);
3537 }
3539 // Store the heap number map and the value in the allocated object.
3540 if (value.IsSameSizeAndType(heap_number_map)) {
3541 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3542 HeapNumber::kValueOffset);
3543 Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
3544 } else {
3545 Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3546 if (value.IsValid()) {
3547 Str(value, MemOperand(result, HeapNumber::kValueOffset));
3548 }
3549 }
3550 ObjectTag(result, result);
3551 }
3554 void MacroAssembler::JumpIfObjectType(Register object,
3555 Register map,
3556 Register type_reg,
3557 InstanceType type,
3558 Label* if_cond_pass,
3559 Condition cond) {
3560 CompareObjectType(object, map, type_reg, type);
3561 B(cond, if_cond_pass);
3562 }
3565 void MacroAssembler::JumpIfNotObjectType(Register object,
3566 Register map,
3567 Register type_reg,
3568 InstanceType type,
3569 Label* if_not_object) {
3570 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3571 }
3574 // Sets condition flags based on comparison, and returns type in type_reg.
3575 void MacroAssembler::CompareObjectType(Register object,
3576 Register map,
3577 Register type_reg,
3578 InstanceType type) {
3579 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3580 CompareInstanceType(map, type_reg, type);
3581 }
3584 // Sets condition flags based on comparison, and returns type in type_reg.
3585 void MacroAssembler::CompareInstanceType(Register map,
3586 Register type_reg,
3587 InstanceType type) {
3588 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3589 Cmp(type_reg, type);
3590 }
3593 void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3594 UseScratchRegisterScope temps(this);
3595 Register obj_map = temps.AcquireX();
3596 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
3597 CompareRoot(obj_map, index);
3598 }
3601 void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3602 Handle<Map> map) {
3603 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3604 CompareMap(scratch, map);
3605 }
3608 void MacroAssembler::CompareMap(Register obj_map,
3609 Handle<Map> map) {
3610 Cmp(obj_map, Operand(map));
3611 }
3614 void MacroAssembler::CheckMap(Register obj,
3615 Register scratch,
3616 Handle<Map> map,
3617 Label* fail,
3618 SmiCheckType smi_check_type) {
3619 if (smi_check_type == DO_SMI_CHECK) {
3620 JumpIfSmi(obj, fail);
3621 }
3623 CompareObjectMap(obj, scratch, map);
3624 B(ne, fail);
3625 }
3628 void MacroAssembler::CheckMap(Register obj,
3629 Register scratch,
3630 Heap::RootListIndex index,
3631 Label* fail,
3632 SmiCheckType smi_check_type) {
3633 if (smi_check_type == DO_SMI_CHECK) {
3634 JumpIfSmi(obj, fail);
3635 }
3636 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3637 JumpIfNotRoot(scratch, index, fail);
3638 }
3641 void MacroAssembler::CheckMap(Register obj_map,
3642 Handle<Map> map,
3643 Label* fail,
3644 SmiCheckType smi_check_type) {
3645 if (smi_check_type == DO_SMI_CHECK) {
3646 JumpIfSmi(obj_map, fail);
3647 }
3649 CompareMap(obj_map, map);
3650 B(ne, fail);
3651 }
3654 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3655 Register scratch2, Handle<WeakCell> cell,
3656 Handle<Code> success,
3657 SmiCheckType smi_check_type) {
3658 Label fail;
3659 if (smi_check_type == DO_SMI_CHECK) {
3660 JumpIfSmi(obj, &fail);
3662 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3663 CmpWeakValue(scratch1, cell, scratch2);
3664 B(ne, &fail);
3665 Jump(success, RelocInfo::CODE_TARGET);
3666 Bind(&fail);
3667 }
3670 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3671 Register scratch) {
3672 Mov(scratch, Operand(cell));
3673 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
3674 Cmp(value, scratch);
3675 }
3678 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3679 Mov(value, Operand(cell));
3680 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
3681 }
3684 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3685 Label* miss) {
3686 GetWeakValue(value, cell);
3687 JumpIfSmi(value, miss);
3688 }
3691 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3692 UseScratchRegisterScope temps(this);
3693 Register temp = temps.AcquireX();
3694 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3695 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3696 Tst(temp, mask);
3697 }
3700 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3701 // Load the map's "bit field 2".
3702 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3703 // Retrieve elements_kind from bit field 2.
3704 DecodeField<Map::ElementsKindBits>(result);
3705 }
3708 void MacroAssembler::TryGetFunctionPrototype(Register function,
3709 Register result,
3710 Register scratch,
3711 Label* miss,
3712 BoundFunctionAction action) {
3713 DCHECK(!AreAliased(function, result, scratch));
3715 Label non_instance;
3716 if (action == kMissOnBoundFunction) {
3717 // Check that the receiver isn't a smi.
3718 JumpIfSmi(function, miss);
3720 // Check that the function really is a function. Load map into result reg.
3721 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
3723 Register scratch_w = scratch.W();
3724 Ldr(scratch,
3725 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3726 // On 64-bit platforms, compiler hints field is not a smi. See definition of
3727 // kCompilerHintsOffset in src/objects.h.
3728 Ldr(scratch_w,
3729 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3730 Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
3732 // Make sure that the function has an instance prototype.
3733 Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3734 Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
3735 }
3737 // Get the prototype or initial map from the function.
3738 Ldr(result,
3739 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3741 // If the prototype or initial map is the hole, don't return it and simply
3742 // miss the cache instead. This will allow us to allocate a prototype object
3743 // on-demand in the runtime system.
3744 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3746 // If the function does not have an initial map, we're done.
3747 Label done;
3748 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3750 // Get the prototype from the initial map.
3751 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3753 if (action == kMissOnBoundFunction) {
3754 B(&done);
3756 // Non-instance prototype: fetch prototype from constructor field in initial
3757 // map.
3758 Bind(&non_instance);
3759 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3760 }
3762 // All done.
3763 Bind(&done);
3764 }
3767 void MacroAssembler::CompareRoot(const Register& obj,
3768 Heap::RootListIndex index) {
3769 UseScratchRegisterScope temps(this);
3770 Register temp = temps.AcquireX();
3771 DCHECK(!AreAliased(obj, temp));
3772 LoadRoot(temp, index);
3773 Cmp(obj, temp);
3774 }
3777 void MacroAssembler::JumpIfRoot(const Register& obj,
3778 Heap::RootListIndex index,
3779 Label* if_equal) {
3780 CompareRoot(obj, index);
3781 B(eq, if_equal);
3782 }
3785 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3786 Heap::RootListIndex index,
3787 Label* if_not_equal) {
3788 CompareRoot(obj, index);
3789 B(ne, if_not_equal);
3790 }
3793 void MacroAssembler::CompareAndSplit(const Register& lhs,
3794 const Operand& rhs,
3795 Condition cond,
3796 Label* if_true,
3797 Label* if_false,
3798 Label* fall_through) {
3799 if ((if_true == if_false) && (if_false == fall_through)) {
3800 // Fall through.
3801 } else if (if_true == if_false) {
3802 B(if_true);
3803 } else if (if_false == fall_through) {
3804 CompareAndBranch(lhs, rhs, cond, if_true);
3805 } else if (if_true == fall_through) {
3806 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
3808 CompareAndBranch(lhs, rhs, cond, if_true);
3809 B(if_false);
3810 }
3811 }
3814 void MacroAssembler::TestAndSplit(const Register& reg,
3815 uint64_t bit_pattern,
3816 Label* if_all_clear,
3817 Label* if_any_set,
3818 Label* fall_through) {
3819 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3820 // Fall through.
3821 } else if (if_all_clear == if_any_set) {
3822 B(if_all_clear);
3823 } else if (if_all_clear == fall_through) {
3824 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3825 } else if (if_any_set == fall_through) {
3826 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3828 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3829 B(if_all_clear);
3830 }
3831 }
3834 void MacroAssembler::CheckFastElements(Register map,
3835 Register scratch,
3836 Label* fail) {
3837 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3838 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3839 STATIC_ASSERT(FAST_ELEMENTS == 2);
3840 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3841 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3842 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3843 B(hi, fail);
3844 }
3847 void MacroAssembler::CheckFastObjectElements(Register map,
3848 Register scratch,
3849 Label* fail) {
3850 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3851 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3852 STATIC_ASSERT(FAST_ELEMENTS == 2);
3853 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3854 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3855 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3856 // If cond==ls, set cond=hi, otherwise compare.
3857 Ccmp(scratch,
3858 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3859 B(hi, fail);
3860 }
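// Taken together, the Cmp/Ccmp pair above is an unsigned range check on the
// elements kind from bit field 2: kinds at or below the holey-smi maximum
// force CFlag (so hi holds and we fail), kinds above the holey-element
// maximum compare hi and also fail, and only FAST_ELEMENTS and
// FAST_HOLEY_ELEMENTS fall through.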
3863 // Note: The ARM version of this clobbers elements_reg, but this version does
3864 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3865 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3866 Register key_reg,
3867 Register elements_reg,
3868 Register scratch1,
3869 FPRegister fpscratch1,
3870 Label* fail,
3871 int elements_offset) {
3872 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3873 Label store_num;
3875 // Speculatively convert the smi to a double - all smis can be exactly
3876 // represented as a double.
3877 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3879 // If value_reg is a smi, we're done.
3880 JumpIfSmi(value_reg, &store_num);
3882 // Ensure that the object is a heap number.
3883 JumpIfNotHeapNumber(value_reg, fail);
3885 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3887 // Canonicalize NaNs.
3888 CanonicalizeNaN(fpscratch1);
3890 // Store the result.
3891 Bind(&store_num);
3892 Add(scratch1, elements_reg,
3893 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3894 Str(fpscratch1,
3895 FieldMemOperand(scratch1,
3896 FixedDoubleArray::kHeaderSize - elements_offset));
3897 }
3900 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3901 return has_frame_ || !stub->SometimesSetsUpAFrame();
3902 }
3905 void MacroAssembler::IndexFromHash(Register hash, Register index) {
3906 // If the hash field contains an array index pick it out. The assert checks
3907 // that the constants for the maximum number of digits for an array index
3908 // cached in the hash field and the number of bits reserved for it does not
3909 // conflict.
3910 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
3911 (1 << String::kArrayIndexValueBits));
3912 DecodeField<String::ArrayIndexValueBits>(index, hash);
3913 SmiTag(index, index);
3914 }
3917 void MacroAssembler::EmitSeqStringSetCharCheck(
3918 Register string,
3919 Register index,
3920 SeqStringSetCharCheckIndexType index_type,
3921 Register scratch,
3922 uint32_t encoding_mask) {
3923 DCHECK(!AreAliased(string, index, scratch));
3925 if (index_type == kIndexIsSmi) {
3926 AssertSmi(index);
3927 }
3929 // Check that string is an object.
3930 AssertNotSmi(string, kNonObject);
3932 // Check that string has an appropriate map.
3933 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3934 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3936 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3937 Cmp(scratch, encoding_mask);
3938 Check(eq, kUnexpectedStringType);
3940 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3941 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3942 Check(lt, kIndexIsTooLarge);
3944 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
3946 Check(ge, kIndexIsNegative);
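// For reference (not part of the emitted code), the map check above reduces
// to a single mask-and-compare; a sketch of the equivalent C logic:
//
//   int type = map->instance_type();
//   bool ok = (type & (kStringRepresentationMask | kStringEncodingMask)) ==
//             encoding_mask;
//
// so a caller checking a sequential one-byte string would pass
// kSeqStringTag | kOneByteStringTag as encoding_mask.
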
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss) {
  DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
  Label same_contexts;

  // Load current lexical context from the stack frame.
  Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Cmp(scratch1, 0);
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  Ldr(scratch1, FieldMemOperand(scratch1, offset));
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to the native_context_map.
    Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
    CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
    Check(eq, kExpectedNativeContext);
  }

  // Check if both contexts are the same.
  Ldr(scratch2, FieldMemOperand(holder_reg,
                                JSGlobalProxy::kNativeContextOffset));
  Cmp(scratch1, scratch2);
  B(&same_contexts, eq);

  // Check the context is a native context.
  if (emit_debug_code()) {
    // We're short on scratch registers here, so use holder_reg as a scratch.
    Push(holder_reg);
    Register scratch3 = holder_reg;

    CompareRoot(scratch2, Heap::kNullValueRootIndex);
    Check(ne, kExpectedNonNullContext);

    Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
    CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
    Check(eq, kExpectedNativeContext);
    Pop(holder_reg);
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
  Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
  Cmp(scratch1, scratch2);
  B(miss, ne);

  Bind(&same_contexts);
}

// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register key, Register scratch) {
  DCHECK(!AreAliased(key, scratch));

  // Xor original key with a seed.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  Eor(key, key, Operand::UntagSmi(scratch));

  // The algorithm uses 32-bit integer values.
  key = key.W();
  scratch = scratch.W();

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  Mvn(scratch, key);
  Add(key, scratch, Operand(key, LSL, 15));
  // hash = hash ^ (hash >> 12);
  Eor(key, key, Operand(key, LSR, 12));
  // hash = hash + (hash << 2);
  Add(key, key, Operand(key, LSL, 2));
  // hash = hash ^ (hash >> 4);
  Eor(key, key, Operand(key, LSR, 4));
  // hash = hash * 2057;
  Mov(scratch, Operand(key, LSL, 11));
  Add(key, key, Operand(key, LSL, 3));
  Add(key, key, scratch);
  // hash = hash ^ (hash >> 16);
  Eor(key, key, Operand(key, LSR, 16));
}

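// For reference (not part of the emitted code), a C sketch of the hash the
// instructions above compute; it must stay in sync with ComputeIntegerHash:
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // Emitted as hash + (hash << 3) + (hash << 11).
//   hash = hash ^ (hash >> 16);
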
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register scratch0,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3) {
  DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));

  Label done;

  SmiUntag(scratch0, key);
  GetNumberHash(scratch0, scratch1);

  // Compute the capacity mask.
  Ldrsw(scratch1,
        UntagSmiFieldMemOperand(elements,
                                SeededNumberDictionary::kCapacityOffset));
  Sub(scratch1, scratch1, 1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
    } else {
      Mov(scratch2, scratch0);
    }
    And(scratch2, scratch2, scratch1);

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
    Ldr(scratch3,
        FieldMemOperand(scratch2,
                        SeededNumberDictionary::kElementsStartOffset));
    Cmp(key, scratch3);
    if (i != (kNumberDictionaryProbes - 1)) {
      B(eq, &done);
    } else {
      B(ne, miss);
    }
  }

  Bind(&done);
  // Check that the value is a field property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  Ldr(result, FieldMemOperand(scratch2, kValueOffset));
}

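// For reference (not part of the emitted code), the unrolled probes above
// follow this C sketch of the open-addressed lookup:
//
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     int index = (hash + i + i * i) & mask;  // mask == capacity - 1
//     Entry* entry = &elements[index * 3];    // key, value, details
//     if (entry->key == key) goto found;
//   }
//   goto miss;  // Only the final probe branches to the miss label.
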
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch1,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  DCHECK(!AreAliased(object, address, scratch1));
  Label done, store_buffer_overflow;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, &ok);
    Abort(kRememberedSetPointerInNewSpace);
    Bind(&ok);
  }
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.AcquireX();

  // Load store buffer top.
  Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
  Ldr(scratch1, MemOperand(scratch2));
  // Store pointer to buffer and increment buffer top.
  Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
  // Write back new top of buffer.
  Str(scratch1, MemOperand(scratch2));
  // Call stub on end of buffer.
  // Check for end of buffer.
  DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
         (1 << (14 + kPointerSizeLog2)));
  if (and_then == kFallThroughAtEnd) {
    Tbz(scratch1, (14 + kPointerSizeLog2), &done);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
    Ret();
  }

  Bind(&store_buffer_overflow);
  Push(lr);
  StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
  CallStub(&store_buffer_overflow_stub);
  Pop(lr);

  Bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

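// For reference (not part of the emitted code): the store buffer is laid out
// so that filling it flips a single bit of the top pointer. With
// kPointerSizeLog2 == 3 the Tbz/Tbnz above effectively test
//
//   new_top & (1 << 17)  // StoreBuffer::kStoreBufferOverflowBit
//
// which is why one test-bit instruction replaces a compare against a limit.
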
void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  PopXRegList(kSafepointSavedRegisters);
  Drop(num_unsaved);
}


void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
  // adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  Claim(num_unsaved);
  PushXRegList(kSafepointSavedRegisters);
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                            FPRegister::kAllocatableFPRegisters));
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                           FPRegister::kAllocatableFPRegisters));
  PopSafepointRegisters();
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // Make sure the safepoint registers list is what we expect.
  DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);

  // Safepoint registers are stored contiguously on the stack, but not all the
  // registers are saved. The following registers are excluded:
  //  - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
  //    the macro assembler.
  //  - x28 (jssp) because JS stack pointer doesn't need to be included in
  //    safepoint registers.
  //  - x31 (csp) because the system stack pointer doesn't need to be included
  //    in safepoint registers.
  //
  // This function implements the mapping of register code to index into the
  // safepoint register slots.
  if ((reg_code >= 0) && (reg_code <= 15)) {
    return reg_code;
  } else if ((reg_code >= 18) && (reg_code <= 27)) {
    // Skip ip0 and ip1.
    return reg_code - 2;
  } else if ((reg_code == 29) || (reg_code == 30)) {
    // Also skip jssp.
    return reg_code - 3;
  } else {
    // This register has no safepoint register slot.
    UNREACHABLE();
    return -1;
  }
}

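// For reference, the mapping implemented above (registers without a slot are
// excluded):
//
//   x0-x15  -> slots 0-15
//   x18-x27 -> slots 16-25  (reg_code - 2, skipping ip0/ip1)
//   x29-x30 -> slots 26-27  (reg_code - 3, also skipping jssp)
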
void MacroAssembler::CheckPageFlagSet(const Register& object,
                                      const Register& scratch,
                                      int mask,
                                      Label* if_any_set) {
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAnySet(scratch, mask, if_any_set);
}


void MacroAssembler::CheckPageFlagClear(const Register& object,
                                        const Register& scratch,
                                        int mask,
                                        Label* if_all_clear) {
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}

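// For reference (not part of the emitted code), both page-flag checks follow
// this sketch: the MemoryChunk header is found by masking the object address
// down to the page boundary, and the flag word sits at a fixed offset:
//
//   chunk = object & ~Page::kPageAlignmentMask;
//   flags = *(chunk + MemoryChunk::kFlagsOffset);
//   // ...then branch on (flags & mask) being set or clear.
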
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register scratch,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(scratch, object, offset - kHeapObjectTag);
  if (emit_debug_code()) {
    Label ok;
    Tst(scratch, (1 << kPointerSizeLog2) - 1);
    B(eq, &ok);
    Abort(kUnalignedCellInWriteBarrier);
    Bind(&ok);
  }

  RecordWrite(object,
              scratch,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  Bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
    Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
  }
}

// Will clobber: object, map, dst.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  ASM_LOCATION("MacroAssembler::RecordWrite");
  DCHECK(!AreAliased(object, map));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectMap(map, temp, isolate()->factory()->meta_map());
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    Cmp(temp, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlagClear(map,
                     map,  // Used as scratch.
                     MemoryChunk::kPointersToHereAreInterestingMask,
                     &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(lr);
  }
  Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr);
  }

  Bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
                   dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
    Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}

// Will clobber: object, address, value.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  ASM_LOCATION("MacroAssembler::RecordWrite");
  DCHECK(!AreAliased(object, value));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, MemOperand(address));
    Cmp(temp, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlagClear(value,
                       value,  // Used as scratch.
                       MemoryChunk::kPointersToHereAreInterestingMask,
                       &done);
  }
  CheckPageFlagClear(object,
                     value,  // Used as scratch.
                     MemoryChunk::kPointersFromHereAreInterestingMask,
                     &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr);
  }

  Bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}

void MacroAssembler::AssertHasValidColor(const Register& reg) {
  if (emit_debug_code()) {
    // The bit sequence is backward. The first character in the string
    // represents the least significant bit.
    DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

    Label color_is_valid;
    Tbnz(reg, 0, &color_is_valid);
    Tbz(reg, 1, &color_is_valid);
    Abort(kUnexpectedColorFound);
    Bind(&color_is_valid);
  }
}

void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register shift_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
  DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
  // addr_reg is divided into fields:
  // |63  page base  20|19    high    8|7   shift   3|2  0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
  Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
  Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
  // bitmap_reg:
  // |63  page base  20|19 zeros 15|14    high    3|2  0|
  Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
}

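// For reference (not part of the emitted code), a C sketch of the split
// above, with kShiftBits == kPointerSizeLog2 + Bitmap::kBitsPerCellLog2:
//
//   cell   = bits [kShiftBits, kPageSizeBits) of addr;     // Ubfx
//   bitmap = (addr & ~Page::kPageAlignmentMask)            // Bic
//            + (cell << Bitmap::kBytesPerCellLog2);        // Add
//   shift  = bits [kPointerSizeLog2, kShiftBits) of addr;  // Ubfx
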
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register shift_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  // See mark-compact.h for color definitions.
  DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));

  GetMarkBits(object, bitmap_scratch, shift_scratch);
  Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  // Shift the bitmap down to get the color of the object in bits [1:0].
  Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);

  AssertHasValidColor(bitmap_scratch);

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);

  // Check for the color.
  if (first_bit == 0) {
    // Checking for white.
    DCHECK(second_bit == 0);
    // We only need to test the first bit.
    Tbz(bitmap_scratch, 0, has_color);
  } else {
    Label other_color;
    // Checking for grey or black.
    Tbz(bitmap_scratch, 0, &other_color);
    if (second_bit == 0) {
      Tbz(bitmap_scratch, 1, has_color);
    } else {
      Tbnz(bitmap_scratch, 1, has_color);
    }
    Bind(&other_color);
  }

  // Fall through if it does not have the right color.
}

void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
}

void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!AreAliased(object, scratch0, scratch1));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Scratch contains elements pointer.
  Mov(current, object);

  // Loop based on the map going up the prototype chain.
  Bind(&loop_again);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
}

void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  DCHECK(!result.Is(ldr_location));
  const uint32_t kLdrLitOffset_lsb = 5;
  const uint32_t kLdrLitOffset_width = 19;
  Ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    And(result, result, LoadLiteralFMask);
    Cmp(result, LoadLiteralFixed);
    Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
    // The instruction was clobbered. Reload it.
    Ldr(result, MemOperand(ldr_location));
  }
  Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
  Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
}

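// For reference (not part of the emitted code): an ldr (literal) instruction
// stores its target as a signed 19-bit word offset in bits [23:5], so the
// Sbfx/Add above recover
//
//   target = ldr_location + sign_extend(imm19) * 4
//
// which is the address of the patched literal.
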
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register shift_scratch,
    Register load_scratch,
    Register length_scratch,
    Label* value_is_white_and_not_data) {
  DCHECK(!AreAliased(
      value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);

  GetMarkBits(value, bitmap_scratch, shift_scratch);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Lsr(load_scratch, load_scratch, shift_scratch);

  AssertHasValidColor(load_scratch);

  // If the value is black or grey we don't need to do anything.
  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  Label done;
  Tbnz(load_scratch, 0, &done);

  // Value is white. We check whether it is data that doesn't need scanning.
  Register map = load_scratch;  // Holds map while checking type.
  Label is_data_object;

  // Check for heap-number.
  Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  Mov(length_scratch, HeapNumber::kSize);
  JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);

  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  TestAndBranchIfAnySet(instance_type,
                        kIsIndirectStringMask | kIsNotStringMask,
                        value_is_white_and_not_data);

  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  Mov(length_scratch, ExternalString::kSize);
  TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);

  // Sequential string, either Latin1 or UC16.
  // For Latin1 (char-size of 1) the untagged length is already the payload
  // size in bytes; for UC16 (char-size of 2) it is shifted left by one,
  // multiplying it by 2.
  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
                                                String::kLengthOffset));
  Tst(instance_type, kStringEncodingMask);
  Cset(load_scratch, eq);
  Lsl(length_scratch, length_scratch, load_scratch);
  Add(length_scratch, length_scratch,
      SeqString::kHeaderSize + kObjectAlignmentMask);
  Bic(length_scratch, length_scratch, kObjectAlignmentMask);

  Bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  Register mask = shift_scratch;
  Mov(load_scratch, 1);
  Lsl(mask, load_scratch, shift_scratch);

  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Orr(load_scratch, load_scratch, mask);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Add(load_scratch, load_scratch, length_scratch);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  Bind(&done);
}

void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
  if (emit_debug_code()) {
    Check(cond, reason);
  }
}


void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
  if (emit_debug_code()) {
    CheckRegisterIsClear(reg, reason);
  }
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index,
                                          BailoutReason reason) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Label ok;
    Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
    JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    Bind(&ok);
  }
}


void MacroAssembler::AssertIsString(const Register& object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}

void MacroAssembler::Check(Condition cond, BailoutReason reason) {
  Label ok;
  B(cond, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
  Label ok;
  Cbz(reg, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}

void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  if (FLAG_trap_on_abort) {
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we have
  // some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    Mov(x0, Smi::FromInt(reason));
    Push(x0);

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      CallRuntime(Runtime::kAbort, 1);
    } else {
      CallRuntime(Runtime::kAbort, 1);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    { BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}

void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  Ldr(scratch1, GlobalObjectMemOperand());
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(scratch2, FieldMemOperand(scratch1, offset));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
}

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  Ldr(function, GlobalObjectMemOperand());
  // Load the native context from the global or builtins object.
  Ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  Ldr(function, ContextMemOperand(function, index));
}

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    B(&ok);
    Bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    Bind(&ok);
  }
}

// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // The provided arguments, and their proper procedure-call standard registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);  // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedFP;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      DCHECK(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    DCHECK(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      DCHECK(pcs[i].IsFPRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(arg_count, pcs);
}

void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
  // A call to printf needs special handling for the simulator, since the system
  // printf function will use a different instruction set and the procedure-call
  // standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(arg_count);  // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        DCHECK(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}

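// For reference (not part of the emitted code), the simulator variant above
// emits this layout, which the simulator decodes when it traps on the hlt:
//
//   hlt   #kImmExceptionIsPrintf
//   .word arg_count         // At kPrintfArgCountOffset.
//   .word arg_pattern_list  // At kPrintfArgPatternListOffset; one
//                           // kPrintfArgW/X/D pattern per argument.
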
void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    DCHECK(!csp.Aliases(arg0));
    DCHECK(!csp.Aliases(arg1));
    DCHECK(!csp.Aliases(arg2));
    DCHECK(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}

void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}

#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}

void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
  }
}

bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(is_young ||
         isolate->code_aging_helper()->IsOld(sequence));
  return is_young;
}

void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!AreAliased(result, dividend));
  DCHECK(result.Is32Bits() && dividend.Is32Bits());
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  Mov(result, mag.multiplier);
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) Add(result, result, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
  if (mag.shift > 0) Asr(result, result, mag.shift);
  Add(result, result, Operand(dividend, LSR, 31));
}

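// For reference (not part of the emitted code), a sketch of the
// round-toward-zero division above, using the magic-number scheme from
// base/division-by-constant.h. For divisor == 3 the helper returns
// multiplier 0x55555556 and shift 0, giving:
//
//   result  = (int32_t)(((int64_t)dividend * 0x55555556) >> 32);  // Smull/Asr
//   result += (uint32_t)dividend >> 31;  // Fix up negative dividends.
//
// The conditional Add/Sub handle multipliers whose sign bit is set.
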
UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  DCHECK(!AreAliased(result, xzr, csp));
  return result;
}


CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  DCHECK(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}

#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // 32-bit integer.
    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}

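// For reference (not part of the emitted code), the payload written above
// packs two fields into one 32-bit value:
//
//   RegisterBits: the register code (x0-x30) in the low 5 bits.
//   DeltaBits:    the distance back to the smi check, in instructions,
//                 in the remaining bits.
//
// InlineSmiCheckInfo below decodes the same fields.
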
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
      int reg_code = RegisterBits::decode(payload);
      reg_ = Register::XRegFromCode(reg_code);
      uint64_t smi_check_delta = DeltaBits::decode(payload);
      DCHECK(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64