1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_ARM64
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/cpu-profiler.h"
14 #include "src/debug.h"
15 #include "src/isolate-inl.h"
16 #include "src/runtime/runtime.h"
21 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
25 MacroAssembler::MacroAssembler(Isolate* arg_isolate,
28 : Assembler(arg_isolate, buffer, buffer_size),
29 generating_stub_(false),
31 allow_macro_instructions_(true),
34 use_real_aborts_(true),
36 tmp_list_(DefaultTmpList()),
37 fptmp_list_(DefaultFPTmpList()) {
38 if (isolate() != NULL) {
39 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
45 CPURegList MacroAssembler::DefaultTmpList() {
46 return CPURegList(ip0, ip1);
50 CPURegList MacroAssembler::DefaultFPTmpList() {
51 return CPURegList(fp_scratch1, fp_scratch2);
55 void MacroAssembler::LogicalMacro(const Register& rd,
57 const Operand& operand,
59 UseScratchRegisterScope temps(this);
61 if (operand.NeedsRelocation(this)) {
62 Register temp = temps.AcquireX();
63 Ldr(temp, operand.immediate());
64 Logical(rd, rn, temp, op);
66 } else if (operand.IsImmediate()) {
67 int64_t immediate = operand.ImmediateValue();
68 unsigned reg_size = rd.SizeInBits();
70 // If the operation is NOT, invert the operation and immediate.
71 if ((op & NOT) == NOT) {
72 op = static_cast<LogicalOp>(op & ~NOT);
73 immediate = ~immediate;
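// For example (illustrative): Bic(x0, x1, 0xffff) carries the NOT flag, so it
// becomes an AND with ~0xffff = 0xffffffffffff0000, which is then encodable
// as a logical immediate.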
76 // Ignore the top 32 bits of an immediate if we're moving to a W register.
78 // Check that the top 32 bits are consistent.
79 DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
80 ((immediate >> kWRegSizeInBits) == -1));
81 immediate &= kWRegMask;
84 DCHECK(rd.Is64Bits() || is_uint32(immediate));
86 // Special cases for all-set or all-clear immediates.
92 case ORR: // Fall through.
96 case ANDS: // Fall through.
102 } else if ((rd.Is64Bits() && (immediate == -1L)) ||
103 (rd.Is32Bits() && (immediate == 0xffffffffL))) {
114 case ANDS: // Fall through.
122 unsigned n, imm_s, imm_r;
123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
124 // Immediate can be encoded in the instruction.
125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
127 // Immediate can't be encoded: synthesize using move immediate.
128 Register temp = temps.AcquireSameSizeAs(rn);
129 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
131 // If rd is the stack pointer we cannot use it as the destination
132 // register so we use the temp register as an intermediate again.
133 Logical(temp, rn, imm_operand, op);
135 AssertStackConsistency();
137 Logical(rd, rn, imm_operand, op);
141 } else if (operand.IsExtendedRegister()) {
142 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
143 // Add/sub extended supports shift <= 4. We want to support exactly the
144 // same modes here.
145 DCHECK(operand.shift_amount() <= 4);
146 DCHECK(operand.reg().Is64Bits() ||
147 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
148 Register temp = temps.AcquireSameSizeAs(rn);
149 EmitExtendShift(temp, operand.reg(), operand.extend(),
150 operand.shift_amount());
151 Logical(rd, rn, temp, op);
154 // The operand can be encoded in the instruction.
155 DCHECK(operand.IsShiftedRegister());
156 Logical(rd, rn, operand, op);
161 void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
162 DCHECK(allow_macro_instructions_);
163 DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
164 DCHECK(!rd.IsZero());
166 // TODO(all) extend to support more immediates.
168 // Immediates on AArch64 can be produced using an initial value, and zero to
169 // three move-keep operations.
171 // Initial values can be generated with:
172 // 1. 64-bit move zero (movz).
173 // 2. 32-bit move inverted (movn).
174 // 3. 64-bit move inverted.
175 // 4. 32-bit orr immediate.
176 // 5. 64-bit orr immediate.
177 // Move-keep may then be used to modify each of the 16-bit half-words.
179 // The code below supports all five initial value generators, and
180 // applying move-keep operations to move-zero and move-inverted initial
181 // values.
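//
// For example (illustrative, with arbitrarily chosen constants):
// Mov(x0, 0x0000cafe00001234) can be synthesized as
//   movz x0, #0x1234
//   movk x0, #0xcafe, lsl #32
// while Mov(x0, 0xffffffff00001234) is cheaper as a move-inverted sequence:
//   movn x0, #0xedcb            // x0 = 0xffffffffffff1234
//   movk x0, #0x0000, lsl #16   // x0 = 0xffffffff00001234
// instead of a movz followed by two movk instructions.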
183 // Try to move the immediate in one instruction, and if that fails, switch to
184 // using multiple instructions.
185 if (!TryOneInstrMoveImmediate(rd, imm)) {
186 unsigned reg_size = rd.SizeInBits();
188 // Generic immediate case. Imm will be represented by
189 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
190 // A move-zero or move-inverted is generated for the first non-zero or
191 // non-0xffff immX, and a move-keep for subsequent non-ignored immX.
193 uint64_t ignored_halfword = 0;
194 bool invert_move = false;
195 // If the number of 0xffff halfwords is greater than the number of 0x0000
196 // halfwords, it's more efficient to use move-inverted.
197 if (CountClearHalfWords(~imm, reg_size) >
198 CountClearHalfWords(imm, reg_size)) {
199 ignored_halfword = 0xffffL;
203 // Mov instructions can't move immediate values into the stack pointer, so
204 // set up a temporary register, if needed.
205 UseScratchRegisterScope temps(this);
206 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
208 // Iterate through the halfwords. Use movn/movz for the first non-ignored
209 // halfword, and movk for subsequent halfwords.
210 DCHECK((reg_size % 16) == 0);
211 bool first_mov_done = false;
212 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
213 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
214 if (imm16 != ignored_halfword) {
215 if (!first_mov_done) {
217 movn(temp, (~imm16) & 0xffffL, 16 * i);
219 movz(temp, imm16, 16 * i);
221 first_mov_done = true;
223 // Construct a wider constant.
224 movk(temp, imm16, 16 * i);
228 DCHECK(first_mov_done);
230 // Move the temporary if the original destination register was the stack
231 // pointer.
234 AssertStackConsistency();
240 void MacroAssembler::Mov(const Register& rd,
241 const Operand& operand,
242 DiscardMoveMode discard_mode) {
243 DCHECK(allow_macro_instructions_);
244 DCHECK(!rd.IsZero());
246 // Provide a swap register for instructions that need to write into the
247 // system stack pointer (and can't do this inherently).
248 UseScratchRegisterScope temps(this);
249 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
251 if (operand.NeedsRelocation(this)) {
252 Ldr(dst, operand.immediate());
254 } else if (operand.IsImmediate()) {
255 // Call the macro assembler for generic immediates.
256 Mov(dst, operand.ImmediateValue());
258 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
259 // Emit a shift instruction if moving a shifted register. This operation
260 // could also be achieved using an orr instruction (like orn used by Mvn),
261 // but using a shift instruction makes the disassembly clearer.
262 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
264 } else if (operand.IsExtendedRegister()) {
265 // Emit an extend instruction if moving an extended register. This handles
266 // extend with post-shift operations, too.
267 EmitExtendShift(dst, operand.reg(), operand.extend(),
268 operand.shift_amount());
271 // Otherwise, emit a register move only if the registers are distinct, or
272 // if they are not X registers.
274 // Note that mov(w0, w0) is not a no-op because it clears the top word of
275 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
276 // registers is not required to clear the top word of the X register. In
277 // this case, the instruction is discarded.
279 // If csp is an operand, add #0 is emitted, otherwise, orr #0.
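//
// For example (illustrative): Mov(w0, w0) emits a move that clears the upper
// 32 bits of x0, whereas Mov(w0, w0, kDiscardForSameWReg) emits nothing.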
280 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
281 (discard_mode == kDontDiscardForSameWReg))) {
282 Assembler::mov(rd, operand.reg());
284 // This case can handle writes into the system stack pointer directly.
288 // Copy the result to the system stack pointer.
291 Assembler::mov(rd, dst);
296 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
297 DCHECK(allow_macro_instructions_);
299 if (operand.NeedsRelocation(this)) {
300 Ldr(rd, operand.immediate());
303 } else if (operand.IsImmediate()) {
304 // Call the macro assembler for generic immediates.
305 Mov(rd, ~operand.ImmediateValue());
307 } else if (operand.IsExtendedRegister()) {
308 // Emit two instructions for the extend case. This differs from Mov, as
309 // the extend and invert can't be achieved in one instruction.
310 EmitExtendShift(rd, operand.reg(), operand.extend(),
311 operand.shift_amount());
320 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
321 DCHECK((reg_size % 8) == 0);
323 for (unsigned i = 0; i < (reg_size / 16); i++) {
324 if ((imm & 0xffff) == 0) {
333 // The movz instruction can generate immediates containing an arbitrary 16-bit
334 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
335 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
336 DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
337 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
341 // The movn instruction can generate immediates containing an arbitrary 16-bit
342 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
343 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
344 return IsImmMovz(~imm, reg_size);
348 void MacroAssembler::ConditionalCompareMacro(const Register& rn,
349 const Operand& operand,
352 ConditionalCompareOp op) {
353 DCHECK((cond != al) && (cond != nv));
354 if (operand.NeedsRelocation(this)) {
355 UseScratchRegisterScope temps(this);
356 Register temp = temps.AcquireX();
357 Ldr(temp, operand.immediate());
358 ConditionalCompareMacro(rn, temp, nzcv, cond, op);
360 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
361 (operand.IsImmediate() &&
362 IsImmConditionalCompare(operand.ImmediateValue()))) {
363 // The immediate can be encoded in the instruction, or the operand is an
364 // unshifted register: call the assembler.
365 ConditionalCompare(rn, operand, nzcv, cond, op);
368 // The operand isn't directly supported by the instruction: perform the
369 // operation on a temporary register.
370 UseScratchRegisterScope temps(this);
371 Register temp = temps.AcquireSameSizeAs(rn);
373 ConditionalCompare(rn, temp, nzcv, cond, op);
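// For example (illustrative): Ccmp(x0, 0x1234, NoFlag, eq) cannot encode
// 0x1234 in the five-bit ccmp immediate field, so it moves the value into a
// scratch register (e.g. x16) and emits "ccmp x0, x16, #0, eq".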
378 void MacroAssembler::Csel(const Register& rd,
380 const Operand& operand,
382 DCHECK(allow_macro_instructions_);
383 DCHECK(!rd.IsZero());
384 DCHECK((cond != al) && (cond != nv));
385 if (operand.IsImmediate()) {
386 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
387 // registers.
388 int64_t imm = operand.ImmediateValue();
389 Register zr = AppropriateZeroRegFor(rn);
391 csel(rd, rn, zr, cond);
392 } else if (imm == 1) {
393 csinc(rd, rn, zr, cond);
394 } else if (imm == -1) {
395 csinv(rd, rn, zr, cond);
397 UseScratchRegisterScope temps(this);
398 Register temp = temps.AcquireSameSizeAs(rn);
400 csel(rd, rn, temp, cond);
402 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
403 // Unshifted register argument.
404 csel(rd, rn, operand.reg(), cond);
406 // All other arguments.
407 UseScratchRegisterScope temps(this);
408 Register temp = temps.AcquireSameSizeAs(rn);
410 csel(rd, rn, temp, cond);
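// For example (illustrative): Csel(x0, x1, 1, eq) emits
// "csinc x0, x1, xzr, eq" and Csel(x0, x1, -1, eq) emits
// "csinv x0, x1, xzr, eq"; neither needs a scratch register or an extra move.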
415 bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
417 unsigned n, imm_s, imm_r;
418 int reg_size = dst.SizeInBits();
419 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
420 // Immediate can be represented in a move zero instruction. Movz can't write
421 // to the stack pointer.
424 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
425 // Immediate can be represented in a move not instruction. Movn can't write
426 // to the stack pointer.
427 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
429 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
430 // Immediate can be represented in a logical orr instruction.
431 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
438 Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
440 int reg_size = dst.SizeInBits();
442 // Encode the immediate in a single move instruction, if possible.
443 if (TryOneInstrMoveImmediate(dst, imm)) {
444 // The move was successful; nothing to do here.
446 // Pre-shift the immediate to the least-significant bits of the register.
447 int shift_low = CountTrailingZeros(imm, reg_size);
448 int64_t imm_low = imm >> shift_low;
450 // Pre-shift the immediate to the most-significant bits of the register. We
451 // insert set bits in the least-significant bits, as this creates a
452 // different immediate that may be encodable using movn or orr-immediate.
453 // If this new immediate is encodable, the set bits will be eliminated by
454 // the post shift on the following instruction.
455 int shift_high = CountLeadingZeros(imm, reg_size);
456 int64_t imm_high = (imm << shift_high) | ((static_cast<int64_t>(1) << shift_high) - 1);
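// For example (illustrative): imm = 0x12340 cannot be encoded directly, but
// imm_low = 0x48d (imm >> 6) fits in a single movz, so the caller receives
// Operand(dst, LSL, 6) and folds the pre-shift into its own instruction.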
458 if (TryOneInstrMoveImmediate(dst, imm_low)) {
459 // The new immediate has been moved into the destination's low bits:
460 // return a new leftward-shifting operand.
461 return Operand(dst, LSL, shift_low);
462 } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
463 // The new immediate has been moved into the destination's high bits:
464 // return a new rightward-shifting operand.
465 return Operand(dst, LSR, shift_high);
467 // Use the generic move operation to set up the immediate.
475 void MacroAssembler::AddSubMacro(const Register& rd,
477 const Operand& operand,
480 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
481 !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
482 // The instruction would be a nop. Avoid generating useless code.
486 if (operand.NeedsRelocation(this)) {
487 UseScratchRegisterScope temps(this);
488 Register temp = temps.AcquireX();
489 Ldr(temp, operand.immediate());
490 AddSubMacro(rd, rn, temp, S, op);
491 } else if ((operand.IsImmediate() &&
492 !IsImmAddSub(operand.ImmediateValue())) ||
493 (rn.IsZero() && !operand.IsShiftedRegister()) ||
494 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
495 UseScratchRegisterScope temps(this);
496 Register temp = temps.AcquireSameSizeAs(rn);
497 if (operand.IsImmediate()) {
498 Operand imm_operand =
499 MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
500 AddSub(rd, rn, imm_operand, S, op);
503 AddSub(rd, rn, temp, S, op);
506 AddSub(rd, rn, operand, S, op);
511 void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
513 const Operand& operand,
515 AddSubWithCarryOp op) {
516 DCHECK(rd.SizeInBits() == rn.SizeInBits());
517 UseScratchRegisterScope temps(this);
519 if (operand.NeedsRelocation(this)) {
520 Register temp = temps.AcquireX();
521 Ldr(temp, operand.immediate());
522 AddSubWithCarryMacro(rd, rn, temp, S, op);
524 } else if (operand.IsImmediate() ||
525 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
526 // Add/sub with carry (immediate or ROR shifted register).
527 Register temp = temps.AcquireSameSizeAs(rn);
529 AddSubWithCarry(rd, rn, temp, S, op);
531 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
532 // Add/sub with carry (shifted register).
533 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
534 DCHECK(operand.shift() != ROR);
535 DCHECK(is_uintn(operand.shift_amount(),
536 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
537 : kWRegSizeInBitsLog2));
538 Register temp = temps.AcquireSameSizeAs(rn);
539 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
540 AddSubWithCarry(rd, rn, temp, S, op);
542 } else if (operand.IsExtendedRegister()) {
543 // Add/sub with carry (extended register).
544 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
545 // Add/sub extended supports a shift <= 4. We want to support exactly the
546 // same modes here.
547 DCHECK(operand.shift_amount() <= 4);
548 DCHECK(operand.reg().Is64Bits() ||
549 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
550 Register temp = temps.AcquireSameSizeAs(rn);
551 EmitExtendShift(temp, operand.reg(), operand.extend(),
552 operand.shift_amount());
553 AddSubWithCarry(rd, rn, temp, S, op);
556 // The addressing mode is directly supported by the instruction.
557 AddSubWithCarry(rd, rn, operand, S, op);
562 void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
563 const MemOperand& addr,
565 int64_t offset = addr.offset();
566 LSDataSize size = CalcLSDataSize(op);
568 // Check if an immediate offset fits in the immediate field of the
569 // appropriate instruction. If not, emit two instructions to perform
570 // the operation.
571 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
572 !IsImmLSUnscaled(offset)) {
573 // Immediate offset that can't be encoded using unsigned or unscaled
574 // addressing modes.
575 UseScratchRegisterScope temps(this);
576 Register temp = temps.AcquireSameSizeAs(addr.base());
577 Mov(temp, addr.offset());
578 LoadStore(rt, MemOperand(addr.base(), temp), op);
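// For example (illustrative): Ldr(x0, MemOperand(x1, 0x20000)) has an offset
// that is neither a scaled uint12 nor an unscaled int9, so it expands to
//   movz x16, #0x2, lsl #16
//   ldr  x0, [x1, x16]
// where x16 stands in for the acquired scratch register.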
579 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
580 // Post-index beyond unscaled addressing range.
581 LoadStore(rt, MemOperand(addr.base()), op);
582 add(addr.base(), addr.base(), offset);
583 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
584 // Pre-index beyond unscaled addressing range.
585 add(addr.base(), addr.base(), offset);
586 LoadStore(rt, MemOperand(addr.base()), op);
588 // Encodable in one load/store instruction.
589 LoadStore(rt, addr, op);
593 void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
594 const CPURegister& rt2,
595 const MemOperand& addr,
596 LoadStorePairOp op) {
597 // TODO(all): Should we support register offset for load-store-pair?
598 DCHECK(!addr.IsRegisterOffset());
600 int64_t offset = addr.offset();
601 LSDataSize size = CalcLSPairDataSize(op);
603 // Check if the offset fits in the immediate field of the appropriate
604 // instruction. If not, emit two instructions to perform the operation.
605 if (IsImmLSPair(offset, size)) {
606 // Encodable in one load/store pair instruction.
607 LoadStorePair(rt, rt2, addr, op);
609 Register base = addr.base();
610 if (addr.IsImmediateOffset()) {
611 UseScratchRegisterScope temps(this);
612 Register temp = temps.AcquireSameSizeAs(base);
613 Add(temp, base, offset);
614 LoadStorePair(rt, rt2, MemOperand(temp), op);
615 } else if (addr.IsPostIndex()) {
616 LoadStorePair(rt, rt2, MemOperand(base), op);
617 Add(base, base, offset);
619 DCHECK(addr.IsPreIndex());
620 Add(base, base, offset);
621 LoadStorePair(rt, rt2, MemOperand(base), op);
627 void MacroAssembler::Load(const Register& rt,
628 const MemOperand& addr,
630 DCHECK(!r.IsDouble());
632 if (r.IsInteger8()) {
634 } else if (r.IsUInteger8()) {
636 } else if (r.IsInteger16()) {
638 } else if (r.IsUInteger16()) {
640 } else if (r.IsInteger32()) {
643 DCHECK(rt.Is64Bits());
649 void MacroAssembler::Store(const Register& rt,
650 const MemOperand& addr,
652 DCHECK(!r.IsDouble());
654 if (r.IsInteger8() || r.IsUInteger8()) {
656 } else if (r.IsInteger16() || r.IsUInteger16()) {
658 } else if (r.IsInteger32()) {
661 DCHECK(rt.Is64Bits());
662 if (r.IsHeapObject()) {
664 } else if (r.IsSmi()) {
672 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
673 Label *label, ImmBranchType b_type) {
674 bool need_longer_range = false;
675 // There are two situations in which we care about the offset being out of
676 // range:
677 // - The label is bound but too far away.
678 // - The label is not bound but linked, and the previous branch
679 // instruction in the chain is too far away.
680 if (label->is_bound() || label->is_linked()) {
682 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
684 if (!need_longer_range && !label->is_bound()) {
685 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
686 unresolved_branches_.insert(
687 std::pair<int, FarBranchInfo>(max_reachable_pc,
688 FarBranchInfo(pc_offset(), label)));
689 // Also maintain the next pool check.
690 next_veneer_pool_check_ =
691 Min(next_veneer_pool_check_,
692 max_reachable_pc - kVeneerDistanceCheckMargin);
694 return need_longer_range;
698 void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
699 DCHECK(allow_macro_instructions_);
700 DCHECK(!rd.IsZero());
702 if (hint == kAdrNear) {
707 DCHECK(hint == kAdrFar);
708 if (label->is_bound()) {
709 int label_offset = label->pos() - pc_offset();
710 if (Instruction::IsValidPCRelOffset(label_offset)) {
713 DCHECK(label_offset <= 0);
714 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
715 adr(rd, min_adr_offset);
716 Add(rd, rd, label_offset - min_adr_offset);
719 UseScratchRegisterScope temps(this);
720 Register scratch = temps.AcquireX();
722 InstructionAccurateScope scope(
723 this, PatchingAssembler::kAdrFarPatchableNInstrs);
725 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
733 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
734 DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
735 (bit == -1 || type >= kBranchTypeFirstUsingBit));
736 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
737 B(static_cast<Condition>(type), label);
740 case always: B(label); break;
742 case reg_zero: Cbz(reg, label); break;
743 case reg_not_zero: Cbnz(reg, label); break;
744 case reg_bit_clear: Tbz(reg, bit, label); break;
745 case reg_bit_set: Tbnz(reg, bit, label); break;
753 void MacroAssembler::B(Label* label, Condition cond) {
754 DCHECK(allow_macro_instructions_);
755 DCHECK((cond != al) && (cond != nv));
758 bool need_extra_instructions =
759 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
761 if (need_extra_instructions) {
762 b(&done, NegateCondition(cond));
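// The synthesized far-branch sequence is effectively:
//   b.<!cond> done   // The inverted condition skips over the far branch.
//   b label          // Unconditional b reaches +/-128MB; b.cond only +/-1MB.
//   done: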
771 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
772 DCHECK(allow_macro_instructions_);
775 bool need_extra_instructions =
776 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
778 if (need_extra_instructions) {
779 tbz(rt, bit_pos, &done);
782 tbnz(rt, bit_pos, label);
788 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
789 DCHECK(allow_macro_instructions_);
792 bool need_extra_instructions =
793 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
795 if (need_extra_instructions) {
796 tbnz(rt, bit_pos, &done);
799 tbz(rt, bit_pos, label);
805 void MacroAssembler::Cbnz(const Register& rt, Label* label) {
806 DCHECK(allow_macro_instructions_);
809 bool need_extra_instructions =
810 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
812 if (need_extra_instructions) {
822 void MacroAssembler::Cbz(const Register& rt, Label* label) {
823 DCHECK(allow_macro_instructions_);
826 bool need_extra_instructions =
827 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
829 if (need_extra_instructions) {
839 // Pseudo-instructions.
842 void MacroAssembler::Abs(const Register& rd, const Register& rm,
843 Label* is_not_representable,
844 Label* is_representable) {
845 DCHECK(allow_macro_instructions_);
846 DCHECK(AreSameSizeAndType(rd, rm));
851 // If the comparison sets the v flag, the input was the smallest value
852 // representable by rm, and the mathematical result of abs(rm) is not
853 // representable using two's complement.
854 if ((is_not_representable != NULL) && (is_representable != NULL)) {
855 B(is_not_representable, vs);
857 } else if (is_not_representable != NULL) {
858 B(is_not_representable, vs);
859 } else if (is_representable != NULL) {
860 B(is_representable, vc);
865 // Abstracted stack operations.
868 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
869 const CPURegister& src2, const CPURegister& src3) {
870 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
872 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
873 int size = src0.SizeInBytes();
875 PushPreamble(count, size);
876 PushHelper(count, size, src0, src1, src2, src3);
880 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
881 const CPURegister& src2, const CPURegister& src3,
882 const CPURegister& src4, const CPURegister& src5,
883 const CPURegister& src6, const CPURegister& src7) {
884 DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
886 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
887 int size = src0.SizeInBytes();
889 PushPreamble(count, size);
890 PushHelper(4, size, src0, src1, src2, src3);
891 PushHelper(count - 4, size, src4, src5, src6, src7);
895 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
896 const CPURegister& dst2, const CPURegister& dst3) {
897 // It is not valid to pop into the same register more than once in one
898 // instruction, not even into the zero register.
899 DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
900 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
901 DCHECK(dst0.IsValid());
903 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
904 int size = dst0.SizeInBytes();
906 PopHelper(count, size, dst0, dst1, dst2, dst3);
907 PopPostamble(count, size);
911 void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
912 int size = src0.SizeInBytes() + src1.SizeInBytes();
915 // Reserve room for src0 and push src1.
916 str(src1, MemOperand(StackPointer(), -size, PreIndex));
917 // Fill the gap with src0.
918 str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
922 void MacroAssembler::PushPopQueue::PushQueued(
923 PreambleDirective preamble_directive) {
924 if (queued_.empty()) return;
926 if (preamble_directive == WITH_PREAMBLE) {
927 masm_->PushPreamble(size_);
930 int count = queued_.size();
932 while (index < count) {
933 // PushHelper can only handle registers with the same size and type, and it
934 // can handle only four at a time. Batch them up accordingly.
935 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
938 batch[batch_index++] = queued_[index++];
939 } while ((batch_index < 4) && (index < count) &&
940 batch[0].IsSameSizeAndType(queued_[index]));
942 masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
943 batch[0], batch[1], batch[2], batch[3]);
950 void MacroAssembler::PushPopQueue::PopQueued() {
951 if (queued_.empty()) return;
953 int count = queued_.size();
955 while (index < count) {
956 // PopHelper can only handle registers with the same size and type, and it
957 // can handle only four at a time. Batch them up accordingly.
958 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
961 batch[batch_index++] = queued_[index++];
962 } while ((batch_index < 4) && (index < count) &&
963 batch[0].IsSameSizeAndType(queued_[index]));
965 masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
966 batch[0], batch[1], batch[2], batch[3]);
969 masm_->PopPostamble(size_);
974 void MacroAssembler::PushCPURegList(CPURegList registers) {
975 int size = registers.RegisterSizeInBytes();
977 PushPreamble(registers.Count(), size);
978 // Push up to four registers at a time because if the current stack pointer is
979 // csp and reg_size is 32, registers must be pushed in blocks of four in order
980 // to maintain the 16-byte alignment for csp.
981 while (!registers.IsEmpty()) {
982 int count_before = registers.Count();
983 const CPURegister& src0 = registers.PopHighestIndex();
984 const CPURegister& src1 = registers.PopHighestIndex();
985 const CPURegister& src2 = registers.PopHighestIndex();
986 const CPURegister& src3 = registers.PopHighestIndex();
987 int count = count_before - registers.Count();
988 PushHelper(count, size, src0, src1, src2, src3);
993 void MacroAssembler::PopCPURegList(CPURegList registers) {
994 int size = registers.RegisterSizeInBytes();
996 // Pop up to four registers at a time because if the current stack pointer is
997 // csp and reg_size is 32, registers must be pushed in blocks of four in
998 // order to maintain the 16-byte alignment for csp.
999 while (!registers.IsEmpty()) {
1000 int count_before = registers.Count();
1001 const CPURegister& dst0 = registers.PopLowestIndex();
1002 const CPURegister& dst1 = registers.PopLowestIndex();
1003 const CPURegister& dst2 = registers.PopLowestIndex();
1004 const CPURegister& dst3 = registers.PopLowestIndex();
1005 int count = count_before - registers.Count();
1006 PopHelper(count, size, dst0, dst1, dst2, dst3);
1008 PopPostamble(registers.Count(), size);
1012 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
1013 int size = src.SizeInBytes();
1015 PushPreamble(count, size);
1017 if (FLAG_optimize_for_size && count > 8) {
1018 UseScratchRegisterScope temps(this);
1019 Register temp = temps.AcquireX();
1022 __ Mov(temp, count / 2);
1024 PushHelper(2, size, src, src, NoReg, NoReg);
1025 __ Subs(temp, temp, 1);
1031 // Push up to four registers at a time if possible because if the current
1032 // stack pointer is csp and the register size is 32, registers must be pushed
1033 // in blocks of four in order to maintain the 16-byte alignment for csp.
1034 while (count >= 4) {
1035 PushHelper(4, size, src, src, src, src);
1039 PushHelper(2, size, src, src, NoReg, NoReg);
1043 PushHelper(1, size, src, NoReg, NoReg, NoReg);
1050 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1051 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
1053 UseScratchRegisterScope temps(this);
1054 Register temp = temps.AcquireSameSizeAs(count);
1056 if (FLAG_optimize_for_size) {
1059 Subs(temp, count, 1);
1062 // Push all registers individually, to save code size.
1064 Subs(temp, temp, 1);
1065 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1070 Label loop, leftover2, leftover1, done;
1072 Subs(temp, count, 4);
1075 // Push groups of four first.
1077 Subs(temp, temp, 4);
1078 PushHelper(4, src.SizeInBytes(), src, src, src, src);
1081 // Push groups of two.
1083 Tbz(count, 1, &leftover1);
1084 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1086 // Push the last one (if required).
1088 Tbz(count, 0, &done);
1089 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1096 void MacroAssembler::PushHelper(int count, int size,
1097 const CPURegister& src0,
1098 const CPURegister& src1,
1099 const CPURegister& src2,
1100 const CPURegister& src3) {
1101 // Ensure that we don't unintentionally modify scratch or debug registers.
1102 InstructionAccurateScope scope(this);
1104 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1105 DCHECK(size == src0.SizeInBytes());
1107 // When pushing multiple registers, the store order is chosen such that
1108 // Push(a, b) is equivalent to Push(a) followed by Push(b).
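//
// For example (illustrative): Push(x0, x1) emits one stp that stores x1 at
// the lower address and x0 above it, matching Push(x0) then Push(x1).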
1111 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1112 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1115 DCHECK(src2.IsNone() && src3.IsNone());
1116 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1119 DCHECK(src3.IsNone());
1120 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1121 str(src0, MemOperand(StackPointer(), 2 * size));
1124 // Skip over 4 * size, then fill in the gap. This allows four W registers
1125 // to be pushed using csp, whilst maintaining 16-byte alignment for csp
1126 // at all times.
1127 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1128 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
1136 void MacroAssembler::PopHelper(int count, int size,
1137 const CPURegister& dst0,
1138 const CPURegister& dst1,
1139 const CPURegister& dst2,
1140 const CPURegister& dst3) {
1141 // Ensure that we don't unintentionally modify scratch or debug registers.
1142 InstructionAccurateScope scope(this);
1144 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1145 DCHECK(size == dst0.SizeInBytes());
1147 // When popping multiple registers, the load order is chosen such that
1148 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1151 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1152 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1155 DCHECK(dst2.IsNone() && dst3.IsNone());
1156 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1159 DCHECK(dst3.IsNone());
1160 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1161 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1164 // Load the higher addresses first, then load the lower addresses and
1165 // skip the whole block in the second instruction. This allows four W
1166 // registers to be popped using csp, whilst maintaining 16-byte alignment
1167 // for csp at all times.
1168 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1169 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1177 void MacroAssembler::PushPreamble(Operand total_size) {
1178 if (csp.Is(StackPointer())) {
1179 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1180 // on entry and the total size of the specified registers must also be a
1181 // multiple of 16 bytes.
1182 if (total_size.IsImmediate()) {
1183 DCHECK((total_size.ImmediateValue() % 16) == 0);
1186 // Don't check access size for non-immediate sizes. It's difficult to do
1187 // well, and it will be caught by hardware (or the simulator) anyway.
1189 // Even if the current stack pointer is not the system stack pointer (csp),
1190 // the system stack pointer will still be modified in order to comply with
1191 // ABI rules about accessing memory below the system stack pointer.
1192 BumpSystemStackPointer(total_size);
1197 void MacroAssembler::PopPostamble(Operand total_size) {
1198 if (csp.Is(StackPointer())) {
1199 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1200 // on entry and the total size of the specified registers must also be a
1201 // multiple of 16 bytes.
1202 if (total_size.IsImmediate()) {
1203 DCHECK((total_size.ImmediateValue() % 16) == 0);
1206 // Don't check access size for non-immediate sizes. It's difficult to do
1207 // well, and it will be caught by hardware (or the simulator) anyway.
1208 } else if (emit_debug_code()) {
1209 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1210 // but if we keep it matching StackPointer, the simulator can detect memory
1211 // accesses in the now-free part of the stack.
1212 SyncSystemStackPointer();
1217 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
1218 if (offset.IsImmediate()) {
1219 DCHECK(offset.ImmediateValue() >= 0);
1220 } else if (emit_debug_code()) {
1222 Check(le, kStackAccessBelowStackPointer);
1225 Str(src, MemOperand(StackPointer(), offset));
1229 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1230 if (offset.IsImmediate()) {
1231 DCHECK(offset.ImmediateValue() >= 0);
1232 } else if (emit_debug_code()) {
1234 Check(le, kStackAccessBelowStackPointer);
1237 Ldr(dst, MemOperand(StackPointer(), offset));
1241 void MacroAssembler::PokePair(const CPURegister& src1,
1242 const CPURegister& src2,
1244 DCHECK(AreSameSizeAndType(src1, src2));
1245 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1246 Stp(src1, src2, MemOperand(StackPointer(), offset));
1250 void MacroAssembler::PeekPair(const CPURegister& dst1,
1251 const CPURegister& dst2,
1253 DCHECK(AreSameSizeAndType(dst1, dst2));
1254 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1255 Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1259 void MacroAssembler::PushCalleeSavedRegisters() {
1260 // Ensure that the macro-assembler doesn't use any scratch registers.
1261 InstructionAccurateScope scope(this);
1263 // This method must not be called unless the current stack pointer is the
1264 // system stack pointer (csp).
1265 DCHECK(csp.Is(StackPointer()));
1267 MemOperand tos(csp, -2 * kXRegSize, PreIndex);
1275 stp(x27, x28, tos); // x28 = jssp
1283 void MacroAssembler::PopCalleeSavedRegisters() {
1284 // Ensure that the macro-assembler doesn't use any scratch registers.
1285 InstructionAccurateScope scope(this);
1287 // This method must not be called unless the current stack pointer is the
1288 // system stack pointer (csp).
1289 DCHECK(csp.Is(StackPointer()));
1291 MemOperand tos(csp, 2 * kXRegSize, PostIndex);
1297 ldp(x27, x28, tos); // x28 = jssp
1307 void MacroAssembler::AssertStackConsistency() {
1308 // Avoid emitting code when !use_real_aborts() since non-real aborts cause too
1309 // much code to be generated.
1310 if (emit_debug_code() && use_real_aborts()) {
1311 if (csp.Is(StackPointer())) {
1312 // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
1313 // can't check the alignment of csp without using a scratch register (or
1314 // clobbering the flags), but the processor (or simulator) will abort if
1315 // it is not properly aligned during a load.
1316 ldr(xzr, MemOperand(csp, 0));
1318 if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
1320 // Check that csp <= StackPointer(), preserving all registers and NZCV.
1321 sub(StackPointer(), csp, StackPointer());
1322 cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
1323 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
1325 // Avoid generating AssertStackConsistency checks for the Push in Abort.
1326 { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
1327 Abort(kTheCurrentStackPointerIsBelowCsp);
1331 // Restore StackPointer().
1332 sub(StackPointer(), csp, StackPointer());
1338 void MacroAssembler::AssertFPCRState(Register fpcr) {
1339 if (emit_debug_code()) {
1340 Label unexpected_mode, done;
1341 UseScratchRegisterScope temps(this);
1342 if (fpcr.IsNone()) {
1343 fpcr = temps.AcquireX();
1347 // Settings overridden by ConfigureFPCR():
1348 // - Assert that default-NaN mode is set.
1349 Tbz(fpcr, DN_offset, &unexpected_mode);
1351 // Settings left to their default values:
1352 // - Assert that flush-to-zero is not set.
1353 Tbnz(fpcr, FZ_offset, &unexpected_mode);
1354 // - Assert that the rounding mode is nearest-with-ties-to-even.
1355 STATIC_ASSERT(FPTieEven == 0);
1356 Tst(fpcr, RMode_mask);
1359 Bind(&unexpected_mode);
1360 Abort(kUnexpectedFPCRMode);
1367 void MacroAssembler::ConfigureFPCR() {
1368 UseScratchRegisterScope temps(this);
1369 Register fpcr = temps.AcquireX();
1372 // If necessary, enable default-NaN mode. The default values of the other FPCR
1373 // options should be suitable, and AssertFPCRState will verify that.
1374 Label no_write_required;
1375 Tbnz(fpcr, DN_offset, &no_write_required);
1377 Orr(fpcr, fpcr, DN_mask);
1380 Bind(&no_write_required);
1381 AssertFPCRState(fpcr);
1385 void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
1386 const FPRegister& src) {
1389 // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
1390 // for NaNs, which become the default NaN. We use fsub rather than fadd
1391 // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
1392 Fsub(dst, src, fp_zero);
1396 void MacroAssembler::LoadRoot(CPURegister destination,
1397 Heap::RootListIndex index) {
1398 // TODO(jbramley): Most root values are constants, and can be synthesized
1399 // without a load. Refer to the ARM back end for details.
1400 Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
1404 void MacroAssembler::StoreRoot(Register source,
1405 Heap::RootListIndex index) {
1406 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
1407 Str(source, MemOperand(root, index << kPointerSizeLog2));
1411 void MacroAssembler::LoadTrueFalseRoots(Register true_root,
1412 Register false_root) {
1413 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
1414 Ldp(true_root, false_root,
1415 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
1419 void MacroAssembler::LoadHeapObject(Register result,
1420 Handle<HeapObject> object) {
1421 AllowDeferredHandleDereference using_raw_address;
1422 if (isolate()->heap()->InNewSpace(*object)) {
1423 Handle<Cell> cell = isolate()->factory()->NewCell(object);
1424 Mov(result, Operand(cell));
1425 Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
1427 Mov(result, Operand(object));
1432 void MacroAssembler::LoadInstanceDescriptors(Register map,
1433 Register descriptors) {
1434 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
1438 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
1439 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
1440 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
1444 void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
1445 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
1446 Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
1447 And(dst, dst, Map::EnumLengthBits::kMask);
1451 void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
1452 EnumLengthUntagged(dst, map);
1457 void MacroAssembler::LoadAccessor(Register dst, Register holder,
1459 AccessorComponent accessor) {
1460 Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
1461 LoadInstanceDescriptors(dst, dst);
1463 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
1464 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
1465 : AccessorPair::kSetterOffset;
1466 Ldr(dst, FieldMemOperand(dst, offset));
1470 void MacroAssembler::CheckEnumCache(Register object,
1471 Register null_value,
1476 Label* call_runtime) {
1477 DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
1480 Register empty_fixed_array_value = scratch0;
1481 Register current_object = scratch1;
1483 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
1486 Mov(current_object, object);
1488 // Check if the enum length field is properly initialized, indicating that
1489 // there is an enum cache.
1490 Register map = scratch2;
1491 Register enum_length = scratch3;
1492 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1494 EnumLengthUntagged(enum_length, map);
1495 Cmp(enum_length, kInvalidEnumCacheSentinel);
1496 B(eq, call_runtime);
1501 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1503 // For all objects but the receiver, check that the cache is empty.
1504 EnumLengthUntagged(enum_length, map);
1505 Cbnz(enum_length, call_runtime);
1509 // Check that there are no elements. Register current_object contains the
1510 // current JS object we've reached through the prototype chain.
1512 Ldr(current_object, FieldMemOperand(current_object,
1513 JSObject::kElementsOffset));
1514 Cmp(current_object, empty_fixed_array_value);
1515 B(eq, &no_elements);
1517 // Second chance, the object may be using the empty slow element dictionary.
1518 CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
1519 B(ne, call_runtime);
1522 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
1523 Cmp(current_object, null_value);
1528 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
1531 Label* no_memento_found) {
1532 ExternalReference new_space_start =
1533 ExternalReference::new_space_start(isolate());
1534 ExternalReference new_space_allocation_top =
1535 ExternalReference::new_space_allocation_top_address(isolate());
1537 Add(scratch1, receiver,
1538 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
1539 Cmp(scratch1, new_space_start);
1540 B(lt, no_memento_found);
1542 Mov(scratch2, new_space_allocation_top);
1543 Ldr(scratch2, MemOperand(scratch2));
1544 Cmp(scratch1, scratch2);
1545 B(gt, no_memento_found);
1547 Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
1549 Operand(isolate()->factory()->allocation_memento_map()));
1553 void MacroAssembler::InNewSpace(Register object,
1556 DCHECK(cond == eq || cond == ne);
1557 UseScratchRegisterScope temps(this);
1558 Register temp = temps.AcquireX();
1559 And(temp, object, ExternalReference::new_space_mask(isolate()));
1560 Cmp(temp, ExternalReference::new_space_start(isolate()));
1565 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1566 if (emit_debug_code()) {
1567 STATIC_ASSERT(kSmiTag == 0);
1568 Tst(object, kSmiTagMask);
1574 void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
1575 if (emit_debug_code()) {
1576 STATIC_ASSERT(kSmiTag == 0);
1577 Tst(object, kSmiTagMask);
1583 void MacroAssembler::AssertName(Register object) {
1584 if (emit_debug_code()) {
1585 AssertNotSmi(object, kOperandIsASmiAndNotAName);
1587 UseScratchRegisterScope temps(this);
1588 Register temp = temps.AcquireX();
1590 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1591 CompareInstanceType(temp, temp, LAST_NAME_TYPE);
1592 Check(ls, kOperandIsNotAName);
1597 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1599 if (emit_debug_code()) {
1600 Label done_checking;
1601 AssertNotSmi(object);
1602 JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
1603 Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1604 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
1605 Assert(eq, kExpectedUndefinedOrCell);
1606 Bind(&done_checking);
1611 void MacroAssembler::AssertString(Register object) {
1612 if (emit_debug_code()) {
1613 UseScratchRegisterScope temps(this);
1614 Register temp = temps.AcquireX();
1615 STATIC_ASSERT(kSmiTag == 0);
1616 Tst(object, kSmiTagMask);
1617 Check(ne, kOperandIsASmiAndNotAString);
1618 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1619 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1620 Check(lo, kOperandIsNotAString);
1625 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1626 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1627 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1631 void MacroAssembler::TailCallStub(CodeStub* stub) {
1632 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1636 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1638 SaveFPRegsMode save_doubles) {
1639 // All arguments must be on the stack before this function is called.
1640 // x0 holds the return value after the call.
1642 // Check that the number of arguments matches what the function expects.
1643 // If f->nargs is -1, the function can accept a variable number of arguments.
1644 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1646 // Place the necessary arguments.
1647 Mov(x0, num_arguments);
1648 Mov(x1, ExternalReference(f, isolate()));
1650 CEntryStub stub(isolate(), 1, save_doubles);
1655 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1656 int num_arguments) {
1657 Mov(x0, num_arguments);
1660 CEntryStub stub(isolate(), 1);
1665 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1667 CEntryStub stub(isolate(), 1);
1668 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1672 void MacroAssembler::GetBuiltinFunction(Register target,
1673 Builtins::JavaScript id) {
1674 // Load the builtins object into target register.
1675 Ldr(target, GlobalObjectMemOperand());
1676 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
1677 // Load the JavaScript builtin function from the builtins object.
1678 Ldr(target, FieldMemOperand(target,
1679 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
1683 void MacroAssembler::GetBuiltinEntry(Register target,
1685 Builtins::JavaScript id) {
1686 DCHECK(!AreAliased(target, function));
1687 GetBuiltinFunction(function, id);
1688 // Load the code entry point from the builtins object.
1689 Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1693 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
1695 const CallWrapper& call_wrapper) {
1696 ASM_LOCATION("MacroAssembler::InvokeBuiltin");
1697 // You can't call a builtin without a valid frame.
1698 DCHECK(flag == JUMP_FUNCTION || has_frame());
1700 // Get the builtin entry in x2 and setup the function object in x1.
1701 GetBuiltinEntry(x2, x1, id);
1702 if (flag == CALL_FUNCTION) {
1703 call_wrapper.BeforeCall(CallSize(x2));
1705 call_wrapper.AfterCall();
1707 DCHECK(flag == JUMP_FUNCTION);
1713 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1716 // TODO(1236192): Most runtime routines don't need the number of
1717 // arguments passed in because it is constant. At some point we
1718 // should remove this need and make the runtime routine entry code
1719 // smarter.
1720 Mov(x0, num_arguments);
1721 JumpToExternalReference(ext);
1725 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1728 TailCallExternalReference(ExternalReference(fid, isolate()),
1734 void MacroAssembler::InitializeNewString(Register string,
1736 Heap::RootListIndex map_index,
1738 Register scratch2) {
1739 DCHECK(!AreAliased(string, length, scratch1, scratch2));
1740 LoadRoot(scratch2, map_index);
1741 SmiTag(scratch1, length);
1742 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1744 Mov(scratch2, String::kEmptyHashField);
1745 Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1746 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1750 int MacroAssembler::ActivationFrameAlignment() {
1751 #if V8_HOST_ARCH_ARM64
1752 // Running on the real platform. Use the alignment as mandated by the local
1753 // environment.
1754 // Note: This will break if we ever start generating snapshots on one ARM
1755 // platform for another ARM platform with a different alignment.
1756 return base::OS::ActivationFrameAlignment();
1757 #else // V8_HOST_ARCH_ARM64
1758 // If we are using the simulator then we should always align to the expected
1759 // alignment. As the simulator is used to generate snapshots we do not know
1760 // if the target platform will need alignment, so this is controlled from a
1761 // flag.
1762 return FLAG_sim_stack_alignment;
1763 #endif // V8_HOST_ARCH_ARM64
1767 void MacroAssembler::CallCFunction(ExternalReference function,
1768 int num_of_reg_args) {
1769 CallCFunction(function, num_of_reg_args, 0);
1773 void MacroAssembler::CallCFunction(ExternalReference function,
1774 int num_of_reg_args,
1775 int num_of_double_args) {
1776 UseScratchRegisterScope temps(this);
1777 Register temp = temps.AcquireX();
1778 Mov(temp, function);
1779 CallCFunction(temp, num_of_reg_args, num_of_double_args);
1783 void MacroAssembler::CallCFunction(Register function,
1784 int num_of_reg_args,
1785 int num_of_double_args) {
1786 DCHECK(has_frame());
1787 // We can pass 8 integer arguments in registers. If we need to pass more than
1788 // that, we'll need to implement support for passing them on the stack.
1789 DCHECK(num_of_reg_args <= 8);
1791 // If we're passing doubles, we're limited to the following prototypes
1792 // (defined by ExternalReference::Type):
1793 // BUILTIN_COMPARE_CALL: int f(double, double)
1794 // BUILTIN_FP_FP_CALL: double f(double, double)
1795 // BUILTIN_FP_CALL: double f(double)
1796 // BUILTIN_FP_INT_CALL: double f(double, int)
1797 if (num_of_double_args > 0) {
1798 DCHECK(num_of_reg_args <= 1);
1799 DCHECK((num_of_double_args + num_of_reg_args) <= 2);
1803 // If the stack pointer is not csp, we need to derive an aligned csp from the
1804 // current stack pointer.
1805 const Register old_stack_pointer = StackPointer();
1806 if (!csp.Is(old_stack_pointer)) {
1807 AssertStackConsistency();
1809 int sp_alignment = ActivationFrameAlignment();
1810 // The ABI mandates at least 16-byte alignment.
1811 DCHECK(sp_alignment >= 16);
1812 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
1814 // The current stack pointer is a callee saved register, and is preserved
1815 // across the call.
1816 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1818 // Align and synchronize the system stack pointer with jssp.
1819 Bic(csp, old_stack_pointer, sp_alignment - 1);
1820 SetStackPointer(csp);
1823 // Call directly. The function called cannot cause a GC, or allow preemption,
1824 // so the return address in the link register stays correct.
1827 if (!csp.Is(old_stack_pointer)) {
1828 if (emit_debug_code()) {
1829 // Because the stack pointer must be aligned on a 16-byte boundary, the
1830 // aligned csp can be up to 12 bytes below the jssp. This is the case
1831 // where we only pushed one W register on top of an aligned jssp.
1832 UseScratchRegisterScope temps(this);
1833 Register temp = temps.AcquireX();
1834 DCHECK(ActivationFrameAlignment() == 16);
1835 Sub(temp, csp, old_stack_pointer);
1836 // We want temp <= 0 && temp >= -12.
1838 Ccmp(temp, -12, NFlag, le);
1839 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1841 SetStackPointer(old_stack_pointer);
1846 void MacroAssembler::Jump(Register target) {
1851 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
1852 UseScratchRegisterScope temps(this);
1853 Register temp = temps.AcquireX();
1854 Mov(temp, Operand(target, rmode));
1859 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
1860 DCHECK(!RelocInfo::IsCodeTarget(rmode));
1861 Jump(reinterpret_cast<intptr_t>(target), rmode);
1865 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
1866 DCHECK(RelocInfo::IsCodeTarget(rmode));
1867 AllowDeferredHandleDereference embedding_raw_address;
1868 Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
1872 void MacroAssembler::Call(Register target) {
1873 BlockPoolsScope scope(this);
1882 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1887 void MacroAssembler::Call(Label* target) {
1888 BlockPoolsScope scope(this);
1897 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1902 // MacroAssembler::CallSize is sensitive to changes in this function, as it
1903 // needs to know how many instructions are used to branch to the target.
1904 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
1905 BlockPoolsScope scope(this);
1910 // Statement positions are expected to be recorded when the target
1911 // address is loaded.
1912 positions_recorder()->WriteRecordedPositions();
1914 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1915 DCHECK(rmode != RelocInfo::NONE32);
1917 UseScratchRegisterScope temps(this);
1918 Register temp = temps.AcquireX();
1920 if (rmode == RelocInfo::NONE64) {
1921 // Addresses are 48 bits so we never need to load the upper 16 bits.
1922 uint64_t imm = reinterpret_cast<uint64_t>(target);
1923 // If we don't use ARM tagged addresses, the upper 16 bits must be 0.
1924 DCHECK(((imm >> 48) & 0xffff) == 0);
1925 movz(temp, (imm >> 0) & 0xffff, 0);
1926 movk(temp, (imm >> 16) & 0xffff, 16);
1927 movk(temp, (imm >> 32) & 0xffff, 32);
1929 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
1933 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
1938 void MacroAssembler::Call(Handle<Code> code,
1939 RelocInfo::Mode rmode,
1940 TypeFeedbackId ast_id) {
1946 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
1947 SetRecordedAstId(ast_id);
1948 rmode = RelocInfo::CODE_TARGET_WITH_ID;
1951 AllowDeferredHandleDereference embedding_raw_address;
1952 Call(reinterpret_cast<Address>(code.location()), rmode);
1955 // Check the size of the code generated.
1956 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
1961 int MacroAssembler::CallSize(Register target) {
1963 return kInstructionSize;
1967 int MacroAssembler::CallSize(Label* target) {
1969 return kInstructionSize;
1973 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
1976 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1977 DCHECK(rmode != RelocInfo::NONE32);
1979 if (rmode == RelocInfo::NONE64) {
1980 return kCallSizeWithoutRelocation;
1982 return kCallSizeWithRelocation;
1987 int MacroAssembler::CallSize(Handle<Code> code,
1988 RelocInfo::Mode rmode,
1989 TypeFeedbackId ast_id) {
1993 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1994 DCHECK(rmode != RelocInfo::NONE32);
1996 if (rmode == RelocInfo::NONE64) {
1997 return kCallSizeWithoutRelocation;
1999 return kCallSizeWithRelocation;
2004 void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
2005 SmiCheckType smi_check_type) {
2006 Label on_not_heap_number;
2008 if (smi_check_type == DO_SMI_CHECK) {
2009 JumpIfSmi(object, &on_not_heap_number);
2012 AssertNotSmi(object);
2014 UseScratchRegisterScope temps(this);
2015 Register temp = temps.AcquireX();
2016 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2017 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
2019 Bind(&on_not_heap_number);
2023 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2024 Label* on_not_heap_number,
2025 SmiCheckType smi_check_type) {
2026 if (smi_check_type == DO_SMI_CHECK) {
2027 JumpIfSmi(object, on_not_heap_number);
2030 AssertNotSmi(object);
2032 UseScratchRegisterScope temps(this);
2033 Register temp = temps.AcquireX();
2034 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2035 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
2039 void MacroAssembler::LookupNumberStringCache(Register object,
2045 DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
2047 // Register usage: 'result' is used as a temporary.
2048 Register number_string_cache = result;
2049 Register mask = scratch3;
2051 // Load the number string cache.
2052 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2054 // Make the hash mask from the length of the number string cache. It
2055 // contains two elements (number and string) for each cache entry.
2056 Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
2057 FixedArray::kLengthOffset));
2058 Asr(mask, mask, 1); // Divide length by two.
2059 Sub(mask, mask, 1); // Make mask.
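// Worked example (illustrative only): a cache backing store of length 128
// holds 64 (number, string) entry pairs, so after the Asr and Sub above
// mask == 63, and any hash can be reduced to a valid entry index with a
// single And.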
2061 // Calculate the entry in the number string cache. The hash value in the
2062 // number string cache for smis is just the smi value, and the hash for
2063 // doubles is the xor of the upper and lower words. See
2064 // Heap::GetNumberStringCache.
2066 Label load_result_from_cache;
2068 JumpIfSmi(object, &is_smi);
2069 JumpIfNotHeapNumber(object, not_found);
2071 STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
2072 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
2073 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
2074 Eor(scratch1, scratch1, scratch2);
2075 And(scratch1, scratch1, mask);
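// For example (illustrative only): the double 1.5 has the raw bits
// 0x3ff8000000000000, so scratch1 = 0x00000000 ^ 0x3ff80000 = 0x3ff80000
// before masking.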
2077 // Calculate address of entry in string cache: each entry consists of two
2078 // pointer-sized fields.
2079 Add(scratch1, number_string_cache,
2080 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
2082 Register probe = mask;
2083 Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2084 JumpIfSmi(probe, not_found);
2085 Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
2086 Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
2089 B(&load_result_from_cache);
2092 Register scratch = scratch1;
2093 And(scratch, mask, Operand::UntagSmi(object));
2094 // Calculate address of entry in string cache: each entry consists
2095 // of two pointer-sized fields.
2096 Add(scratch, number_string_cache,
2097 Operand(scratch, LSL, kPointerSizeLog2 + 1));
2099 // Check if the entry is the smi we are looking for.
2100 Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2104 // Get the result from the cache.
2105 Bind(&load_result_from_cache);
2106 Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
2107 IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
2108 scratch1, scratch2);
2112 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2114 FPRegister scratch_d,
2115 Label* on_successful_conversion,
2116 Label* on_failed_conversion) {
2117 // Convert to an int and back again, then compare with the original value.
2118 Fcvtzs(as_int, value);
2119 Scvtf(scratch_d, as_int);
2120 Fcmp(value, scratch_d);
2122 if (on_successful_conversion) {
2123 B(on_successful_conversion, eq);
2125 if (on_failed_conversion) {
2126 B(on_failed_conversion, ne);
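// Usage sketch (illustrative only): for value == 3.0 the round trip yields
// 3.0 again and the eq branch is taken; for value == 3.5 the round trip
// yields 3.0, the comparison is ne, and the conversion fails. Note that -0.0
// converts to 0, which converts back to +0.0 and compares equal, so -0.0
// takes the success path; callers that must reject it need a separate check
// (e.g. JumpIfMinusZero).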
2131 void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2132 UseScratchRegisterScope temps(this);
2133 Register temp = temps.AcquireX();
2134 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will cause overflow.
2141 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2142 Label* on_negative_zero) {
2143 TestForMinusZero(input);
2144 B(vs, on_negative_zero);
2148 void MacroAssembler::JumpIfMinusZero(Register input,
2149 Label* on_negative_zero) {
2150 DCHECK(input.Is64Bits());
2151 // Floating point value is in an integer register. Detect -0.0 by subtracting
2152 // 1 (cmp), which will cause overflow.
2154 B(vs, on_negative_zero);
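// Why this works (sketch): the bit pattern of -0.0 is 0x8000000000000000,
// which is INT64_MIN when reinterpreted as a signed integer. Comparing with
// 1 computes input - 1, and INT64_MIN is the only input for which that
// subtraction sets the V (overflow) flag, so B(vs, ...) fires exactly when
// the register holds the bits of -0.0.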
2158 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2159 // Clamp the value to [0..255].
2160 Cmp(input.W(), Operand(input.W(), UXTB));
2161 // If input < input & 0xff, it must be < 0, so saturate to 0.
2162 Csel(output.W(), wzr, input.W(), lt);
2163 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2164 Csel(output.W(), output.W(), 255, le);
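// Worked traces (illustrative only):
//   input = 300:  300 > (300 & 0xff) == 44, so neither lt nor le holds and
//                 the second Csel saturates the result to 255.
//   input = -5:   -5 < (-5 & 0xff) == 251, so the first Csel selects 0 (and
//                 le then keeps it).
//   input = 200:  200 == (200 & 0xff), so both Csels keep 200.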
2168 void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2169 ClampInt32ToUint8(in_out, in_out);
2173 void MacroAssembler::ClampDoubleToUint8(Register output,
2174 DoubleRegister input,
2175 DoubleRegister dbl_scratch) {
2176 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2177 // - Inputs lower than 0 (including -infinity) produce 0.
2178 // - Inputs higher than 255 (including +infinity) produce 255.
2179 // Also, it seems that PIXEL types use round-to-nearest rather than
2180 // round-towards-zero.
2182 // Squash +infinity before the conversion, since Fcvtnu would otherwise saturate it to UINT32_MAX rather than 255.
2184 Fmov(dbl_scratch, 255);
2185 Fmin(dbl_scratch, dbl_scratch, input);
2187 // Convert double to unsigned integer. Values less than zero become zero.
2188 // Values greater than 255 have already been clamped to 255.
2189 Fcvtnu(output, dbl_scratch);
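// Resulting behaviour on a few inputs (illustrative only, assuming Fcvtnu
// rounds to nearest with ties to even):
//   input = -3.2      -> 0   (negative values convert to 0)
//   input = 254.5     -> 254 (the tie rounds to the even neighbour)
//   input = +infinity -> 255 (squashed to 255.0 by the Fmin above)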
2193 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
2200 Register scratch5) {
2201 // Untag src and dst into scratch registers.
2202 // Copy src->dst in a tight loop.
2203 DCHECK(!AreAliased(dst, src,
2204 scratch1, scratch2, scratch3, scratch4, scratch5));
2207 const Register& remaining = scratch3;
2208 Mov(remaining, count / 2);
2210 const Register& dst_untagged = scratch1;
2211 const Register& src_untagged = scratch2;
2212 Sub(dst_untagged, dst, kHeapObjectTag);
2213 Sub(src_untagged, src, kHeapObjectTag);
2215 // Copy fields in pairs.
2218 Ldp(scratch4, scratch5,
2219 MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2220 Stp(scratch4, scratch5,
2221 MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2222 Sub(remaining, remaining, 1);
2223 Cbnz(remaining, &loop);
2225 // Handle the leftovers.
2227 Ldr(scratch4, MemOperand(src_untagged));
2228 Str(scratch4, MemOperand(dst_untagged));
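// Worked example (illustrative only): for count == 5, 'remaining' starts at
// 2, so the loop copies two pairs (fields 0-3) and the trailing Ldr/Str
// copies the single leftover field.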
2233 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
2239 Register scratch4) {
2240 // Untag src and dst into scratch registers.
2241 // Copy src->dst in an unrolled loop.
2242 DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
2244 const Register& dst_untagged = scratch1;
2245 const Register& src_untagged = scratch2;
2246 sub(dst_untagged, dst, kHeapObjectTag);
2247 sub(src_untagged, src, kHeapObjectTag);
2249 // Copy fields in pairs.
2250 for (unsigned i = 0; i < count / 2; i++) {
2251 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2252 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2255 // Handle the leftovers.
2257 Ldr(scratch3, MemOperand(src_untagged));
2258 Str(scratch3, MemOperand(dst_untagged));
2263 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
2268 Register scratch3) {
2269 // Untag src and dst into scratch registers.
2270 // Copy src->dst in an unrolled loop.
2271 DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
2273 const Register& dst_untagged = scratch1;
2274 const Register& src_untagged = scratch2;
2275 Sub(dst_untagged, dst, kHeapObjectTag);
2276 Sub(src_untagged, src, kHeapObjectTag);
2278 // Copy fields one by one.
2279 for (unsigned i = 0; i < count; i++) {
2280 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
2281 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
2286 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
2288 // One of two methods is used:
2290 // For high 'count' values where many scratch registers are available:
2291 // Untag src and dst into scratch registers.
2292 // Copy src->dst in a tight loop.
2294 // For low 'count' values or where few scratch registers are available:
2295 // Untag src and dst into scratch registers.
2296 // Copy src->dst in an unrolled loop.
2298 // In both cases, fields are copied in pairs if possible, and leftovers are
2299 // handled separately.
2300 DCHECK(!AreAliased(dst, src));
2301 DCHECK(!temps.IncludesAliasOf(dst));
2302 DCHECK(!temps.IncludesAliasOf(src));
2303 DCHECK(!temps.IncludesAliasOf(xzr));
2305 if (emit_debug_code()) {
2307 Check(ne, kTheSourceAndDestinationAreTheSame);
2310 // The value of 'count' at which a loop will be generated (if there are
2311 // enough scratch registers).
2312 static const unsigned kLoopThreshold = 8;
2314 UseScratchRegisterScope masm_temps(this);
2315 if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
2316 CopyFieldsLoopPairsHelper(dst, src, count,
2317 Register(temps.PopLowestIndex()),
2318 Register(temps.PopLowestIndex()),
2319 Register(temps.PopLowestIndex()),
2320 masm_temps.AcquireX(),
2321 masm_temps.AcquireX());
2322 } else if (temps.Count() >= 2) {
2323 CopyFieldsUnrolledPairsHelper(dst, src, count,
2324 Register(temps.PopLowestIndex()),
2325 Register(temps.PopLowestIndex()),
2326 masm_temps.AcquireX(),
2327 masm_temps.AcquireX());
2328 } else if (temps.Count() == 1) {
2329 CopyFieldsUnrolledHelper(dst, src, count,
2330 Register(temps.PopLowestIndex()),
2331 masm_temps.AcquireX(),
2332 masm_temps.AcquireX());
2339 void MacroAssembler::CopyBytes(Register dst,
2344 UseScratchRegisterScope temps(this);
2345 Register tmp1 = temps.AcquireX();
2346 Register tmp2 = temps.AcquireX();
2347 DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2348 DCHECK(!AreAliased(src, dst, csp));
2350 if (emit_debug_code()) {
2351 // Check copy length.
2353 Assert(ge, kUnexpectedNegativeValue);
2355 // Check src and dst buffers don't overlap.
2356 Add(scratch, src, length); // Calculate end of src buffer.
2358 Add(scratch, dst, length); // Calculate end of dst buffer.
2359 Ccmp(scratch, src, ZFlag, gt);
2360 Assert(le, kCopyBuffersOverlap);
2363 Label short_copy, short_loop, bulk_loop, done;
2365 if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
2366 Register bulk_length = scratch;
2367 int pair_size = 2 * kXRegSize;
2368 int pair_mask = pair_size - 1;
2370 Bic(bulk_length, length, pair_mask);
2371 Cbz(bulk_length, &short_copy);
2373 Sub(bulk_length, bulk_length, pair_size);
2374 Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
2375 Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
2376 Cbnz(bulk_length, &bulk_loop);
2378 And(length, length, pair_mask);
2384 Sub(length, length, 1);
2385 Ldrb(tmp1, MemOperand(src, 1, PostIndex));
2386 Strb(tmp1, MemOperand(dst, 1, PostIndex));
2387 Cbnz(length, &short_loop);
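// Worked example (illustrative only): copying length == 19 with kCopyLong
// takes one bulk iteration (16 bytes via Ldp/Stp), leaving
// length & 15 == 3 bytes for three iterations of the byte-wise short loop.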
2394 void MacroAssembler::FillFields(Register dst,
2395 Register field_count,
2397 DCHECK(!dst.Is(csp));
2398 UseScratchRegisterScope temps(this);
2399 Register field_ptr = temps.AcquireX();
2400 Register counter = temps.AcquireX();
2403 // Decrement count. If the result is negative, count was zero, and there's nothing
2404 // to do. If count was one, flags are set to fail the gt condition at the end
2405 // of the pairs loop.
2406 Subs(counter, field_count, 1);
2409 // There's at least one field to fill, so do this unconditionally.
2410 Str(filler, MemOperand(dst, kPointerSize, PostIndex));
2412 // If the bottom bit of counter is set, there are an even number of fields to
2413 // fill, so pull the start pointer back by one field, allowing the pairs loop
2414 // to overwrite the field that was stored above.
2415 And(field_ptr, counter, 1);
2416 Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
2418 // Store filler to memory in pairs.
2422 Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
2423 Subs(counter, counter, 2);
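// Worked example (illustrative only): for field_count == 3, 'counter' starts
// at 2; one filler is stored unconditionally, counter & 1 == 0 means no
// pull-back, and a single Stp fills the remaining two fields. For
// field_count == 4, counter & 1 == 1 pulls field_ptr back by one field, so
// two Stps cover fields 0-3, harmlessly rewriting the first field.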
2431 void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
2432 Register first, Register second, Register scratch1, Register scratch2,
2433 Label* failure, SmiCheckType smi_check) {
2434 if (smi_check == DO_SMI_CHECK) {
2435 JumpIfEitherSmi(first, second, failure);
2436 } else if (emit_debug_code()) {
2437 DCHECK(smi_check == DONT_DO_SMI_CHECK);
2439 JumpIfEitherSmi(first, second, NULL, &not_smi);
2441 // At least one input is a smi, but the flags indicated a smi check wasn't needed.
2443 Abort(kUnexpectedSmi);
2448 // Test that both first and second are sequential one-byte strings.
2449 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2450 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2451 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2452 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2454 JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
2459 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
2460 Register first, Register second, Register scratch1, Register scratch2,
2462 DCHECK(!AreAliased(scratch1, second));
2463 DCHECK(!AreAliased(scratch1, scratch2));
2464 static const int kFlatOneByteStringMask =
2465 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2466 static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
2467 And(scratch1, first, kFlatOneByteStringMask);
2468 And(scratch2, second, kFlatOneByteStringMask);
2469 Cmp(scratch1, kFlatOneByteStringTag);
2470 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2475 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2478 const int kFlatOneByteStringMask =
2479 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2480 const int kFlatOneByteStringTag =
2481 kStringTag | kOneByteStringTag | kSeqStringTag;
2482 And(scratch, type, kFlatOneByteStringMask);
2483 Cmp(scratch, kFlatOneByteStringTag);
2488 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2489 Register first, Register second, Register scratch1, Register scratch2,
2491 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2492 const int kFlatOneByteStringMask =
2493 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2494 const int kFlatOneByteStringTag =
2495 kStringTag | kOneByteStringTag | kSeqStringTag;
2496 And(scratch1, first, kFlatOneByteStringMask);
2497 And(scratch2, second, kFlatOneByteStringMask);
2498 Cmp(scratch1, kFlatOneByteStringTag);
2499 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2504 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
2505 Label* not_unique_name) {
2506 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2507 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2510 // goto not_unique_name
2512 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2513 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2514 B(ne, not_unique_name);
2518 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2519 const ParameterCount& actual,
2520 Handle<Code> code_constant,
2524 bool* definitely_mismatches,
2525 const CallWrapper& call_wrapper) {
2526 bool definitely_matches = false;
2527 *definitely_mismatches = false;
2528 Label regular_invoke;
2530 // Check whether the expected and actual arguments count match. If not,
2531 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
2532 // x0: actual arguments count.
2533 // x1: function (passed through to callee).
2534 // x2: expected arguments count.
2536 // The code below is made a lot easier because the calling code already sets
2537 // up actual and expected registers according to the contract if values are
2538 // passed in registers.
2539 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2540 DCHECK(expected.is_immediate() || expected.reg().is(x2));
2541 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
2543 if (expected.is_immediate()) {
2544 DCHECK(actual.is_immediate());
2545 if (expected.immediate() == actual.immediate()) {
2546 definitely_matches = true;
2549 Mov(x0, actual.immediate());
2550 if (expected.immediate() ==
2551 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2552 // Don't worry about adapting arguments for builtins that
2553 // don't want that done. Skip the adaptation code by making it look
2554 // like we have a match between the expected and actual number of arguments.
2556 definitely_matches = true;
2558 *definitely_mismatches = true;
2559 // Set up x2 for the argument adaptor.
2560 Mov(x2, expected.immediate());
2564 } else { // expected is a register.
2565 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2566 : Operand(actual.reg());
2567 // If actual == expected perform a regular invocation.
2568 Cmp(expected.reg(), actual_op);
2569 B(eq, &regular_invoke);
2570 // Otherwise set up x0 for the argument adaptor.
2574 // If the argument counts may mismatch, generate a call to the argument adaptor.
2576 if (!definitely_matches) {
2577 if (!code_constant.is_null()) {
2578 Mov(x3, Operand(code_constant));
2579 Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
2582 Handle<Code> adaptor =
2583 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2584 if (flag == CALL_FUNCTION) {
2585 call_wrapper.BeforeCall(CallSize(adaptor));
2587 call_wrapper.AfterCall();
2588 if (!*definitely_mismatches) {
2589 // If the arg counts don't match, no extra code is emitted by
2590 // MAsm::InvokeCode and we can just fall through.
2594 Jump(adaptor, RelocInfo::CODE_TARGET);
2597 Bind(&regular_invoke);
2601 void MacroAssembler::InvokeCode(Register code,
2602 const ParameterCount& expected,
2603 const ParameterCount& actual,
2605 const CallWrapper& call_wrapper) {
2606 // You can't call a function without a valid frame.
2607 DCHECK(flag == JUMP_FUNCTION || has_frame());
2611 bool definitely_mismatches = false;
2612 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2613 &definitely_mismatches, call_wrapper);
2615 // If we are certain that actual != expected, then we know InvokePrologue will
2616 // have handled the call through the argument adaptor mechanism.
2617 // The called function expects the call kind in x5.
2618 if (!definitely_mismatches) {
2619 if (flag == CALL_FUNCTION) {
2620 call_wrapper.BeforeCall(CallSize(code));
2622 call_wrapper.AfterCall();
2624 DCHECK(flag == JUMP_FUNCTION);
2629 // Continue here if InvokePrologue does handle the invocation due to
2630 // mismatched parameter counts.
2635 void MacroAssembler::InvokeFunction(Register function,
2636 const ParameterCount& actual,
2638 const CallWrapper& call_wrapper) {
2639 // You can't call a function without a valid frame.
2640 DCHECK(flag == JUMP_FUNCTION || has_frame());
2642 // Contract with called JS functions requires that function is passed in x1.
2643 // (See FullCodeGenerator::Generate().)
2644 DCHECK(function.is(x1));
2646 Register expected_reg = x2;
2647 Register code_reg = x3;
2649 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2650 // The number of arguments is stored as an int32_t, and -1 is a marker
2651 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2652 // extension to correctly handle it.
2653 Ldr(expected_reg, FieldMemOperand(function,
2654 JSFunction::kSharedFunctionInfoOffset));
2656 FieldMemOperand(expected_reg,
2657 SharedFunctionInfo::kFormalParameterCountOffset));
2659 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2661 ParameterCount expected(expected_reg);
2662 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2666 void MacroAssembler::InvokeFunction(Register function,
2667 const ParameterCount& expected,
2668 const ParameterCount& actual,
2670 const CallWrapper& call_wrapper) {
2671 // You can't call a function without a valid frame.
2672 DCHECK(flag == JUMP_FUNCTION || has_frame());
2674 // Contract with called JS functions requires that function is passed in x1.
2675 // (See FullCodeGenerator::Generate().)
2676 DCHECK(function.Is(x1));
2678 Register code_reg = x3;
2680 // Set up the context.
2681 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2683 // We call indirectly through the code field in the function to
2684 // allow recompilation to take effect without changing any of the call sites.
2686 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2687 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2691 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2692 const ParameterCount& expected,
2693 const ParameterCount& actual,
2695 const CallWrapper& call_wrapper) {
2696 // Contract with called JS functions requires that function is passed in x1.
2697 // (See FullCodeGenerator::Generate().)
2698 __ LoadObject(x1, function);
2699 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2703 void MacroAssembler::TryConvertDoubleToInt64(Register result,
2704 DoubleRegister double_input,
2706 // Try to convert with an FPU convert instruction. It's trivial to compute
2707 // the modulo operation on an integer register so we convert to a 64-bit integer.
2710 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2711 // when the double is out of range; infinities saturate in the same way. NaNs
2712 // will be converted to 0 (as ECMA-262 requires).
2713 Fcvtzs(result.X(), double_input);
2715 // A result of INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) may indicate
2716 // saturation rather than an exact conversion (INT64_MAX is not even
2717 // representable as a double), so in that case we manually handle the conversion.
2719 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2720 // 1 will cause signed overflow.
2722 Ccmp(result.X(), -1, VFlag, vc);
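// Sketch of the check: comparing result with 1 (result - 1) overflows only
// for INT64_MIN, and the Ccmp above (result - (-1), i.e. result + 1)
// overflows only for INT64_MAX; the VFlag fallback keeps V set when the
// first comparison already overflowed. A clear V flag therefore means the
// Fcvtzs result did not saturate.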
2728 void MacroAssembler::TruncateDoubleToI(Register result,
2729 DoubleRegister double_input) {
2732 // Try to convert the double to an int64. If successful, the bottom 32 bits
2733 // contain our truncated int32 result.
2734 TryConvertDoubleToInt64(result, double_input, &done);
2736 const Register old_stack_pointer = StackPointer();
2737 if (csp.Is(old_stack_pointer)) {
2738 // This currently only happens during compiler-unittest. If it arises
2739 // during regular code generation the DoubleToI stub should be updated to
2740 // cope with csp and have an extra parameter indicating which stack pointer it is using.
2742 Push(jssp, xzr); // Push xzr to maintain csp's required 16-byte alignment.
2744 SetStackPointer(jssp);
2747 // If we fell through, the inline version didn't succeed, so call the stub instead.
2748 Push(lr, double_input);
2750 DoubleToIStub stub(isolate(),
2754 true, // is_truncating
2755 true); // skip_fastpath
2756 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2758 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2759 Pop(xzr, lr); // xzr to drop the double input on the stack.
2761 if (csp.Is(old_stack_pointer)) {
2763 SetStackPointer(csp);
2764 AssertStackConsistency();
2772 void MacroAssembler::TruncateHeapNumberToI(Register result,
2775 DCHECK(!result.is(object));
2776 DCHECK(jssp.Is(StackPointer()));
2778 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2780 // Try to convert the double to an int64. If successful, the bottom 32 bits
2781 // contain our truncated int32 result.
2782 TryConvertDoubleToInt64(result, fp_scratch, &done);
2784 // If we fell through, the inline version didn't succeed, so call the stub instead.
2786 DoubleToIStub stub(isolate(),
2789 HeapNumber::kValueOffset - kHeapObjectTag,
2790 true, // is_truncating
2791 true); // skip_fastpath
2792 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2799 void MacroAssembler::StubPrologue() {
2800 DCHECK(StackPointer().Is(jssp));
2801 UseScratchRegisterScope temps(this);
2802 Register temp = temps.AcquireX();
2803 __ Mov(temp, Smi::FromInt(StackFrame::STUB));
2804 // Compiled stubs don't age, and so they don't need the predictable code ageing sequence.
2806 __ Push(lr, fp, cp, temp);
2807 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
2811 void MacroAssembler::Prologue(bool code_pre_aging) {
2812 if (code_pre_aging) {
2813 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2814 __ EmitCodeAgeSequence(stub);
2816 __ EmitFrameSetupForCodeAgePatching();
2821 void MacroAssembler::EnterFrame(StackFrame::Type type,
2822 bool load_constant_pool_pointer_reg) {
2823 // Out-of-line constant pool not implemented on arm64.
2828 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2829 DCHECK(jssp.Is(StackPointer()));
2830 UseScratchRegisterScope temps(this);
2831 Register type_reg = temps.AcquireX();
2832 Register code_reg = temps.AcquireX();
2835 Mov(type_reg, Smi::FromInt(type));
2836 Mov(code_reg, Operand(CodeObject()));
2837 Push(type_reg, code_reg);
2842 // jssp[0] : code object
2844 // Adjust FP to point to saved FP.
2845 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
2849 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2850 DCHECK(jssp.Is(StackPointer()));
2851 // Drop the execution stack down to the frame pointer and restore
2852 // the caller frame pointer and return address.
2854 AssertStackConsistency();
2859 void MacroAssembler::ExitFramePreserveFPRegs() {
2860 PushCPURegList(kCallerSavedFP);
2864 void MacroAssembler::ExitFrameRestoreFPRegs() {
2865 // Read the registers from the stack without popping them. The stack pointer
2866 // will be reset as part of the unwinding process.
2867 CPURegList saved_fp_regs = kCallerSavedFP;
2868 DCHECK(saved_fp_regs.Count() % 2 == 0);
2870 int offset = ExitFrameConstants::kLastExitFrameField;
2871 while (!saved_fp_regs.IsEmpty()) {
2872 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2873 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2874 offset -= 2 * kDRegSize;
2875 Ldp(dst1, dst0, MemOperand(fp, offset));
2880 void MacroAssembler::EnterExitFrame(bool save_doubles,
2881 const Register& scratch,
2883 DCHECK(jssp.Is(StackPointer()));
2885 // Set up the new stack frame.
2886 Mov(scratch, Operand(CodeObject()));
2888 Mov(fp, StackPointer());
2890 // fp[8]: CallerPC (lr)
2891 // fp -> fp[0]: CallerFP (old fp)
2892 // fp[-8]: Space reserved for SPOffset.
2893 // jssp -> fp[-16]: CodeObject()
2894 STATIC_ASSERT((2 * kPointerSize) ==
2895 ExitFrameConstants::kCallerSPDisplacement);
2896 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2897 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
2898 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
2899 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
2901 // Save the frame pointer and context pointer in the top frame.
2902 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2904 Str(fp, MemOperand(scratch));
2905 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2907 Str(cp, MemOperand(scratch));
2909 STATIC_ASSERT((-2 * kPointerSize) ==
2910 ExitFrameConstants::kLastExitFrameField);
2912 ExitFramePreserveFPRegs();
2915 // Reserve space for the return address and for user-requested memory.
2916 // We do this before aligning to make sure that we end up correctly
2917 // aligned with the minimum of wasted space.
2918 Claim(extra_space + 1, kXRegSize);
2919 // fp[8]: CallerPC (lr)
2920 // fp -> fp[0]: CallerFP (old fp)
2921 // fp[-8]: Space reserved for SPOffset.
2922 // fp[-16]: CodeObject()
2923 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
2924 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
2925 // jssp -> jssp[0]: Space reserved for the return address.
2927 // Align and synchronize the system stack pointer with jssp.
2928 AlignAndSetCSPForFrame();
2929 DCHECK(csp.Is(StackPointer()));
2931 // fp[8]: CallerPC (lr)
2932 // fp -> fp[0]: CallerFP (old fp)
2933 // fp[-8]: Space reserved for SPOffset.
2934 // fp[-16]: CodeObject()
2935 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
2936 // csp[8]: Memory reserved for the caller if extra_space != 0.
2937 // Alignment padding, if necessary.
2938 // csp -> csp[0]: Space reserved for the return address.
2940 // ExitFrame::GetStateForFramePointer expects to find the return address at
2941 // the memory address immediately below the pointer stored in SPOffset.
2942 // It is not safe to derive much else from SPOffset, because the size of the
2943 // padding can vary.
2944 Add(scratch, csp, kXRegSize);
2945 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2949 // Leave the current exit frame.
2950 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2951 const Register& scratch,
2952 bool restore_context) {
2953 DCHECK(csp.Is(StackPointer()));
2955 if (restore_doubles) {
2956 ExitFrameRestoreFPRegs();
2959 // Restore the context pointer from the top frame.
2960 if (restore_context) {
2961 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2963 Ldr(cp, MemOperand(scratch));
2966 if (emit_debug_code()) {
2967 // Also emit debug code to clear the cp in the top frame.
2968 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2970 Str(xzr, MemOperand(scratch));
2972 // Clear the frame pointer from the top frame.
2973 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2975 Str(xzr, MemOperand(scratch));
2977 // Pop the exit frame.
2978 // fp[8]: CallerPC (lr)
2979 // fp -> fp[0]: CallerFP (old fp)
2980 // fp[...]: The rest of the frame.
2982 SetStackPointer(jssp);
2983 AssertStackConsistency();
2988 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2989 Register scratch1, Register scratch2) {
2990 if (FLAG_native_code_counters && counter->Enabled()) {
2991 Mov(scratch1, value);
2992 Mov(scratch2, ExternalReference(counter));
2993 Str(scratch1, MemOperand(scratch2));
2998 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2999 Register scratch1, Register scratch2) {
3001 if (FLAG_native_code_counters && counter->Enabled()) {
3002 Mov(scratch2, ExternalReference(counter));
3003 Ldr(scratch1, MemOperand(scratch2));
3004 Add(scratch1, scratch1, value);
3005 Str(scratch1, MemOperand(scratch2));
3010 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
3011 Register scratch1, Register scratch2) {
3012 IncrementCounter(counter, -value, scratch1, scratch2);
3016 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
3017 if (context_chain_length > 0) {
3018 // Move up the chain of contexts to the context containing the slot.
3019 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3020 for (int i = 1; i < context_chain_length; i++) {
3021 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3024 // Slot is in the current function context. Move it into the
3025 // destination register in case we store into it (the write barrier
3026 // cannot be allowed to destroy the context in cp).
3032 void MacroAssembler::DebugBreak() {
3034 Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
3035 CEntryStub ces(isolate(), 1);
3036 DCHECK(AllowThisStubCall(&ces));
3037 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3041 void MacroAssembler::PushStackHandler() {
3042 DCHECK(jssp.Is(StackPointer()));
3043 // Adjust this code if the asserts don't hold.
3044 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3045 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3047 // For the JSEntry handler, we must preserve the live registers x0-x4.
3048 // (See JSEntryStub::GenerateBody().)
3050 // Link the current handler as the next handler.
3051 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3052 Ldr(x10, MemOperand(x11));
3055 // Set this new handler as the current one.
3056 Str(jssp, MemOperand(x11));
3060 void MacroAssembler::PopStackHandler() {
3061 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3063 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3064 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
3065 Str(x10, MemOperand(x11));
3069 void MacroAssembler::Allocate(int object_size,
3074 AllocationFlags flags) {
3075 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3076 if (!FLAG_inline_new) {
3077 if (emit_debug_code()) {
3078 // Trash the registers to simulate an allocation failure.
3079 // We apply salt to the original zap value to easily spot the values.
3080 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3081 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3082 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
3088 UseScratchRegisterScope temps(this);
3089 Register scratch3 = temps.AcquireX();
3091 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
3092 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3094 // Make object size into bytes.
3095 if ((flags & SIZE_IN_WORDS) != 0) {
3096 object_size *= kPointerSize;
3098 DCHECK(0 == (object_size & kObjectAlignmentMask));
3100 // Check relative positions of allocation top and limit addresses.
3101 // The values must be adjacent in memory to allow the use of LDP.
3102 ExternalReference heap_allocation_top =
3103 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3104 ExternalReference heap_allocation_limit =
3105 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3106 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3107 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3108 DCHECK((limit - top) == kPointerSize);
3110 // Set up allocation top address and object size registers.
3111 Register top_address = scratch1;
3112 Register allocation_limit = scratch2;
3113 Mov(top_address, Operand(heap_allocation_top));
3115 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3116 // Load allocation top into result and the allocation limit.
3117 Ldp(result, allocation_limit, MemOperand(top_address));
3119 if (emit_debug_code()) {
3120 // Assert that result actually contains top on entry.
3121 Ldr(scratch3, MemOperand(top_address));
3122 Cmp(result, scratch3);
3123 Check(eq, kUnexpectedAllocationTop);
3125 // Load the allocation limit. 'result' already contains the allocation top.
3126 Ldr(allocation_limit, MemOperand(top_address, limit - top));
3129 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3130 // the same alignment on ARM64.
3131 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3133 // Calculate new top and bail out if new space is exhausted.
3134 Adds(scratch3, result, object_size);
3135 Ccmp(scratch3, allocation_limit, CFlag, cc);
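// Sketch of the bailout logic: the Adds sets the carry flag if computing the
// new top wrapped around, in which case the Ccmp's CFlag fallback forces an
// unsigned "higher" result; otherwise the new top is compared against the
// allocation limit directly. Branching on hi therefore catches both an
// overflowing add and an allocation that exceeds the limit.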
3137 Str(scratch3, MemOperand(top_address));
3139 // Tag the object if requested.
3140 if ((flags & TAG_OBJECT) != 0) {
3141 ObjectTag(result, result);
3146 void MacroAssembler::Allocate(Register object_size,
3151 AllocationFlags flags) {
3152 if (!FLAG_inline_new) {
3153 if (emit_debug_code()) {
3154 // Trash the registers to simulate an allocation failure.
3155 // We apply salt to the original zap value to easily spot the values.
3156 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3157 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3158 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
3164 UseScratchRegisterScope temps(this);
3165 Register scratch3 = temps.AcquireX();
3167 DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
3168 DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
3169 scratch1.Is64Bits() && scratch2.Is64Bits());
3171 // Check relative positions of allocation top and limit addresses.
3172 // The values must be adjacent in memory to allow the use of LDP.
3173 ExternalReference heap_allocation_top =
3174 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3175 ExternalReference heap_allocation_limit =
3176 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3177 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3178 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3179 DCHECK((limit - top) == kPointerSize);
3181 // Set up allocation top address and object size registers.
3182 Register top_address = scratch1;
3183 Register allocation_limit = scratch2;
3184 Mov(top_address, heap_allocation_top);
3186 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3187 // Load allocation top into result and the allocation limit.
3188 Ldp(result, allocation_limit, MemOperand(top_address));
3190 if (emit_debug_code()) {
3191 // Assert that result actually contains top on entry.
3192 Ldr(scratch3, MemOperand(top_address));
3193 Cmp(result, scratch3);
3194 Check(eq, kUnexpectedAllocationTop);
3196 // Load the allocation limit. 'result' already contains the allocation top.
3197 Ldr(allocation_limit, MemOperand(top_address, limit - top));
3200 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3201 // the same alignment on ARM64.
3202 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3204 // Calculate new top and bail out if new space is exhausted.
3205 if ((flags & SIZE_IN_WORDS) != 0) {
3206 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
3208 Adds(scratch3, result, object_size);
3211 if (emit_debug_code()) {
3212 Tst(scratch3, kObjectAlignmentMask);
3213 Check(eq, kUnalignedAllocationInNewSpace);
3216 Ccmp(scratch3, allocation_limit, CFlag, cc);
3218 Str(scratch3, MemOperand(top_address));
3220 // Tag the object if requested.
3221 if ((flags & TAG_OBJECT) != 0) {
3222 ObjectTag(result, result);
3227 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3229 ExternalReference new_space_allocation_top =
3230 ExternalReference::new_space_allocation_top_address(isolate());
3232 // Make sure the object has no tag before resetting top.
3233 Bic(object, object, kHeapObjectTagMask);
3235 // Check that the object being un-allocated is below the current top.
3236 Mov(scratch, new_space_allocation_top);
3237 Ldr(scratch, MemOperand(scratch));
3238 Cmp(object, scratch);
3239 Check(lt, kUndoAllocationOfNonAllocatedMemory);
3241 // Write the address of the object to un-allocate as the current top.
3242 Mov(scratch, new_space_allocation_top);
3243 Str(object, MemOperand(scratch));
3247 void MacroAssembler::AllocateTwoByteString(Register result,
3252 Label* gc_required) {
3253 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3254 // Calculate the number of bytes needed for the characters in the string while
3255 // observing object alignment.
3256 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3257 Add(scratch1, length, length); // Length in bytes, not chars.
3258 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3259 Bic(scratch1, scratch1, kObjectAlignmentMask);
3261 // Allocate two-byte string in new space.
3269 // Set the map, length and hash field.
3270 InitializeNewString(result,
3272 Heap::kStringMapRootIndex,
3278 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3279 Register scratch1, Register scratch2,
3281 Label* gc_required) {
3282 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3283 // Calculate the number of bytes needed for the characters in the string while
3284 // observing object alignment.
3285 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3286 STATIC_ASSERT(kCharSize == 1);
3287 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3288 Bic(scratch1, scratch1, kObjectAlignmentMask);
3290 // Allocate one-byte string in new space.
3298 // Set the map, length and hash field.
3299 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3300 scratch1, scratch2);
3304 void MacroAssembler::AllocateTwoByteConsString(Register result,
3308 Label* gc_required) {
3309 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3312 InitializeNewString(result,
3314 Heap::kConsStringMapRootIndex,
3320 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3323 Label* gc_required) {
3324 Allocate(ConsString::kSize,
3331 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3332 scratch1, scratch2);
3336 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3340 Label* gc_required) {
3341 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3342 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3345 InitializeNewString(result,
3347 Heap::kSlicedStringMapRootIndex,
3353 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3357 Label* gc_required) {
3358 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3359 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3362 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3363 scratch1, scratch2);
3367 // Allocates a heap number or jumps to the gc_required label if the young space
3368 // is full and a scavenge is needed.
3369 void MacroAssembler::AllocateHeapNumber(Register result,
3374 CPURegister heap_number_map,
3376 DCHECK(!value.IsValid() || value.Is64Bits());
3377 UseScratchRegisterScope temps(this);
3379 // Allocate an object in the heap for the heap number and tag it as a heap
3381 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3382 NO_ALLOCATION_FLAGS);
3384 Heap::RootListIndex map_index = mode == MUTABLE
3385 ? Heap::kMutableHeapNumberMapRootIndex
3386 : Heap::kHeapNumberMapRootIndex;
3388 // Prepare the heap number map.
3389 if (!heap_number_map.IsValid()) {
3390 // If we have a valid value register, use the same type of register to store
3391 // the map so we can use STP to store both in one instruction.
3392 if (value.IsValid() && value.IsFPRegister()) {
3393 heap_number_map = temps.AcquireD();
3395 heap_number_map = scratch1;
3397 LoadRoot(heap_number_map, map_index);
3399 if (emit_debug_code()) {
3401 if (heap_number_map.IsFPRegister()) {
3403 Fmov(map, DoubleRegister(heap_number_map));
3405 map = Register(heap_number_map);
3407 AssertRegisterIsRoot(map, map_index);
3410 // Store the heap number map and the value in the allocated object.
3411 if (value.IsSameSizeAndType(heap_number_map)) {
3412 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3413 HeapNumber::kValueOffset);
3414 Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
3416 Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3417 if (value.IsValid()) {
3418 Str(value, MemOperand(result, HeapNumber::kValueOffset));
3421 ObjectTag(result, result);
3425 void MacroAssembler::JumpIfObjectType(Register object,
3429 Label* if_cond_pass,
3431 CompareObjectType(object, map, type_reg, type);
3432 B(cond, if_cond_pass);
3436 void MacroAssembler::JumpIfNotObjectType(Register object,
3440 Label* if_not_object) {
3441 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3445 // Sets condition flags based on comparison, and returns type in type_reg.
3446 void MacroAssembler::CompareObjectType(Register object,
3449 InstanceType type) {
3450 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3451 CompareInstanceType(map, type_reg, type);
3455 // Sets condition flags based on comparison, and returns type in type_reg.
3456 void MacroAssembler::CompareInstanceType(Register map,
3458 InstanceType type) {
3459 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3460 Cmp(type_reg, type);
3464 void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3465 UseScratchRegisterScope temps(this);
3466 Register obj_map = temps.AcquireX();
3467 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
3468 CompareRoot(obj_map, index);
3472 void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3474 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3475 CompareMap(scratch, map);
3479 void MacroAssembler::CompareMap(Register obj_map,
3481 Cmp(obj_map, Operand(map));
3485 void MacroAssembler::CheckMap(Register obj,
3489 SmiCheckType smi_check_type) {
3490 if (smi_check_type == DO_SMI_CHECK) {
3491 JumpIfSmi(obj, fail);
3494 CompareObjectMap(obj, scratch, map);
3499 void MacroAssembler::CheckMap(Register obj,
3501 Heap::RootListIndex index,
3503 SmiCheckType smi_check_type) {
3504 if (smi_check_type == DO_SMI_CHECK) {
3505 JumpIfSmi(obj, fail);
3507 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3508 JumpIfNotRoot(scratch, index, fail);
3512 void MacroAssembler::CheckMap(Register obj_map,
3515 SmiCheckType smi_check_type) {
3516 if (smi_check_type == DO_SMI_CHECK) {
3517 JumpIfSmi(obj_map, fail);
3520 CompareMap(obj_map, map);
3525 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3526 Register scratch2, Handle<WeakCell> cell,
3527 Handle<Code> success,
3528 SmiCheckType smi_check_type) {
3530 if (smi_check_type == DO_SMI_CHECK) {
3531 JumpIfSmi(obj, &fail);
3533 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3534 CmpWeakValue(scratch1, cell, scratch2);
3536 Jump(success, RelocInfo::CODE_TARGET);
3541 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3543 Mov(scratch, Operand(cell));
3544 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
3545 Cmp(value, scratch);
3549 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3550 Mov(value, Operand(cell));
3551 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
3555 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3557 GetWeakValue(value, cell);
3558 JumpIfSmi(value, miss);
3562 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3563 UseScratchRegisterScope temps(this);
3564 Register temp = temps.AcquireX();
3565 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3566 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3571 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3572 // Load the map's "bit field 2".
3573 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3574 // Retrieve elements_kind from bit field 2.
3575 DecodeField<Map::ElementsKindBits>(result);
3579 void MacroAssembler::GetMapConstructor(Register result, Register map,
3580 Register temp, Register temp2) {
3582 Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
3584 JumpIfSmi(result, &done);
3585 CompareObjectType(result, temp, temp2, MAP_TYPE);
3587 Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
3593 void MacroAssembler::TryGetFunctionPrototype(Register function,
3597 BoundFunctionAction action) {
3598 DCHECK(!AreAliased(function, result, scratch));
3601 if (action == kMissOnBoundFunction) {
3602 // Check that the receiver isn't a smi.
3603 JumpIfSmi(function, miss);
3605 // Check that the function really is a function. Load map into result reg.
3606 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
3608 Register scratch_w = scratch.W();
3610 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3611 // On 64-bit platforms, the compiler hints field is not a smi. See definition of
3612 // kCompilerHintsOffset in src/objects.h.
3614 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3615 Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
3617 // Make sure that the function has an instance prototype.
3618 Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3619 Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
3622 // Get the prototype or initial map from the function.
3624 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3626 // If the prototype or initial map is the hole, don't return it and simply
3627 // miss the cache instead. This will allow us to allocate a prototype object
3628 // on-demand in the runtime system.
3629 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3631 // If the function does not have an initial map, we're done.
3633 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3635 // Get the prototype from the initial map.
3636 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3638 if (action == kMissOnBoundFunction) {
3641 // Non-instance prototype: fetch prototype from constructor field in the initial map.
3643 Bind(&non_instance);
3644 GetMapConstructor(result, result, scratch, scratch);
3652 void MacroAssembler::CompareRoot(const Register& obj,
3653 Heap::RootListIndex index) {
3654 UseScratchRegisterScope temps(this);
3655 Register temp = temps.AcquireX();
3656 DCHECK(!AreAliased(obj, temp));
3657 LoadRoot(temp, index);
3662 void MacroAssembler::JumpIfRoot(const Register& obj,
3663 Heap::RootListIndex index,
3665 CompareRoot(obj, index);
3670 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3671 Heap::RootListIndex index,
3672 Label* if_not_equal) {
3673 CompareRoot(obj, index);
3674 B(ne, if_not_equal);
3678 void MacroAssembler::CompareAndSplit(const Register& lhs,
3683 Label* fall_through) {
3684 if ((if_true == if_false) && (if_false == fall_through)) {
3686 } else if (if_true == if_false) {
3688 } else if (if_false == fall_through) {
3689 CompareAndBranch(lhs, rhs, cond, if_true);
3690 } else if (if_true == fall_through) {
3691 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
3693 CompareAndBranch(lhs, rhs, cond, if_true);
3699 void MacroAssembler::TestAndSplit(const Register& reg,
3700 uint64_t bit_pattern,
3701 Label* if_all_clear,
3703 Label* fall_through) {
3704 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3706 } else if (if_all_clear == if_any_set) {
3708 } else if (if_all_clear == fall_through) {
3709 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3710 } else if (if_any_set == fall_through) {
3711 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3713 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3719 void MacroAssembler::CheckFastElements(Register map,
3722 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3723 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3724 STATIC_ASSERT(FAST_ELEMENTS == 2);
3725 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3726 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3727 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3732 void MacroAssembler::CheckFastObjectElements(Register map,
3735 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3736 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3737 STATIC_ASSERT(FAST_ELEMENTS == 2);
3738 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3739 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3740 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3741 // If cond==ls, set cond=hi, otherwise compare.
3743 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3748 // Note: The ARM version of this clobbers elements_reg, but this version does
3749 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3750 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3752 Register elements_reg,
3754 FPRegister fpscratch1,
3756 int elements_offset) {
3757 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3760 // Speculatively convert the smi to a double - all smis can be exactly
3761 // represented as a double.
3762 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3764 // If value_reg is a smi, we're done.
3765 JumpIfSmi(value_reg, &store_num);
3767 // Ensure that the object is a heap number.
3768 JumpIfNotHeapNumber(value_reg, fail);
3770 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3772 // Canonicalize NaNs.
3773 CanonicalizeNaN(fpscratch1);
3775 // Store the result.
3777 Add(scratch1, elements_reg,
3778 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3780 FieldMemOperand(scratch1,
3781 FixedDoubleArray::kHeaderSize - elements_offset));
3785 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3786 return has_frame_ || !stub->SometimesSetsUpAFrame();
3790 void MacroAssembler::IndexFromHash(Register hash, Register index) {
3791 // If the hash field contains an array index pick it out. The assert checks
3792 // that the constants for the maximum number of digits for an array index
3793 // cached in the hash field and the number of bits reserved for it do not conflict.
3795 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
3796 (1 << String::kArrayIndexValueBits));
3797 DecodeField<String::ArrayIndexValueBits>(index, hash);
3798 SmiTag(index, index);
3802 void MacroAssembler::EmitSeqStringSetCharCheck(
3805 SeqStringSetCharCheckIndexType index_type,
3807 uint32_t encoding_mask) {
3808 DCHECK(!AreAliased(string, index, scratch));
3810 if (index_type == kIndexIsSmi) {
3814 // Check that string is an object.
3815 AssertNotSmi(string, kNonObject);
3817 // Check that string has an appropriate map.
3818 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3819 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3821 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3822 Cmp(scratch, encoding_mask);
3823 Check(eq, kUnexpectedStringType);
3825 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3826 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3827 Check(lt, kIndexIsTooLarge);
3829 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
3831 Check(ge, kIndexIsNegative);
3835 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3839 DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
3840 Label same_contexts;
3842 // Load current lexical context from the stack frame.
3843 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
3844 // In debug mode, make sure the lexical context is set.
3847 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
3850 // Load the native context of the current context.
3852 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3853 Ldr(scratch1, FieldMemOperand(scratch1, offset));
3854 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
3856 // Check the context is a native context.
3857 if (emit_debug_code()) {
3858 // Read the first word and compare to the native_context_map.
3859 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
3860 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
3861 Check(eq, kExpectedNativeContext);
3864 // Check if both contexts are the same.
3865 Ldr(scratch2, FieldMemOperand(holder_reg,
3866 JSGlobalProxy::kNativeContextOffset));
3867 Cmp(scratch1, scratch2);
3868 B(&same_contexts, eq);
3870 // Check the context is a native context.
3871 if (emit_debug_code()) {
3872 // We're short on scratch registers here, so use holder_reg as a scratch.
3874 Register scratch3 = holder_reg;
3876 CompareRoot(scratch2, Heap::kNullValueRootIndex);
3877 Check(ne, kExpectedNonNullContext);
3879 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
3880 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
3881 Check(eq, kExpectedNativeContext);
3885 // Check that the security token in the calling global object is
3886 // compatible with the security token in the receiving global object.
3888 int token_offset = Context::kHeaderSize +
3889 Context::SECURITY_TOKEN_INDEX * kPointerSize;
3891 Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
3892 Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
3893 Cmp(scratch1, scratch2);
3896 Bind(&same_contexts);
3900 // Compute the hash code from the untagged key. This must be kept in sync with
3901 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3902 // code-stub-hydrogen.cc
3903 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3904 DCHECK(!AreAliased(key, scratch));
3906 // Xor original key with a seed.
3907 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3908 Eor(key, key, Operand::UntagSmi(scratch));
3910 // The algorithm uses 32-bit integer values.
3912 scratch = scratch.W();
3914 // Compute the hash code from the untagged key. This must be kept in sync
3915 // with ComputeIntegerHash in utils.h.
3917 // hash = ~hash + (hash << 15);
3919 Add(key, scratch, Operand(key, LSL, 15));
3920 // hash = hash ^ (hash >> 12);
3921 Eor(key, key, Operand(key, LSR, 12));
3922 // hash = hash + (hash << 2);
3923 Add(key, key, Operand(key, LSL, 2));
3924 // hash = hash ^ (hash >> 4);
3925 Eor(key, key, Operand(key, LSR, 4));
3926 // hash = hash * 2057;
3927 Mov(scratch, Operand(key, LSL, 11));
3928 Add(key, key, Operand(key, LSL, 3));
3929 Add(key, key, scratch);
3930 // hash = hash ^ (hash >> 16);
3931 Eor(key, key, Operand(key, LSR, 16));
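// For reference, the same computation as plain C++ (a sketch mirroring the
// instruction sequence above; ComputeIntegerHashSketch is a hypothetical
// helper, not part of this file):
//
//   uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11)
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }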
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register scratch0,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3) {
  DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));

  Label done;

  SmiUntag(scratch0, key);
  GetNumberHash(scratch0, scratch1);

  // Compute the capacity mask.
  Ldrsw(scratch1,
        UntagSmiFieldMemOperand(elements,
                                SeededNumberDictionary::kCapacityOffset));
  Sub(scratch1, scratch1, 1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
    } else {
      Mov(scratch2, scratch0);
    }
    And(scratch2, scratch2, scratch1);

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
    Ldr(scratch3,
        FieldMemOperand(scratch2,
                        SeededNumberDictionary::kElementsStartOffset));
    Cmp(key, scratch3);
    if (i != (kNumberDictionaryProbes - 1)) {
      B(eq, &done);
    } else {
      B(ne, miss);
    }
  }

  Bind(&done);
  // Check that the value is a field property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  Ldr(result, FieldMemOperand(scratch2, kValueOffset));
}


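// The unrolled probe sequence above corresponds to this logic (a sketch;
// each entry is three words: key, value, details, hence the kEntrySize == 3
// scaling):
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     int index = (hash + probe_offset(i)) & capacity_mask;
//     if (entries[index].key == key) goto found;  // last probe jumps to miss
//   }
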
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch1,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  DCHECK(!AreAliased(object, address, scratch1));
  Label done, store_buffer_overflow;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, &ok);
    Abort(kRememberedSetPointerInNewSpace);
    Bind(&ok);
  }
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.AcquireX();

  // Load store buffer top.
  Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
  Ldr(scratch1, MemOperand(scratch2));
  // Store pointer to buffer and increment buffer top.
  Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
  // Write back new top of buffer.
  Str(scratch1, MemOperand(scratch2));
  // Call stub on end of buffer.
  // Check for end of buffer.
  DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
         (1 << (14 + kPointerSizeLog2)));
  if (and_then == kFallThroughAtEnd) {
    Tbz(scratch1, (14 + kPointerSizeLog2), &done);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
    Ret();
  }

  Bind(&store_buffer_overflow);
  Push(lr);
  StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
  CallStub(&store_buffer_overflow_stub);
  Pop(lr);

  Bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


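// In effect, the fast path above appends to the store buffer (a sketch of
// the emitted logic, not runnable code):
//   *store_buffer_top++ = address;
//   if ((uintptr_t)store_buffer_top & kStoreBufferOverflowBit) {
//     CallStoreBufferOverflowStub();
//   }
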
void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  PopXRegList(kSafepointSavedRegisters);
  Drop(num_unsaved);
}


void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the stack,
  // so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  Claim(num_unsaved);
  PushXRegList(kSafepointSavedRegisters);
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                            FPRegister::kAllocatableFPRegisters));
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                           FPRegister::kAllocatableFPRegisters));
  PopSafepointRegisters();
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // Make sure the safepoint registers list is what we expect.
  DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);

  // Safepoint registers are stored contiguously on the stack, but not all the
  // registers are saved. The following registers are excluded:
  //  - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside
  //    of the macro assembler.
  //  - x28 (jssp) because the JS stack pointer doesn't need to be included in
  //    safepoint registers.
  //  - x31 (csp) because the system stack pointer doesn't need to be included
  //    in safepoint registers.
  //
  // This function implements the mapping of register code to index into the
  // safepoint register slots.
  if ((reg_code >= 0) && (reg_code <= 15)) {
    return reg_code;
  } else if ((reg_code >= 18) && (reg_code <= 27)) {
    // Skip ip0 and ip1.
    return reg_code - 2;
  } else if ((reg_code == 29) || (reg_code == 30)) {
    // Also skip jssp.
    return reg_code - 3;
  } else {
    // This register has no safepoint register slot.
    UNREACHABLE();
    return -1;
  }
}


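// Worked examples of this mapping: x0-x15 keep their own indices, x18 maps
// to slot 16 and x27 to slot 25 (ip0 and ip1 are skipped), and fp (x29) and
// lr (x30) map to slots 26 and 27 (jssp is skipped as well).
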
void MacroAssembler::CheckPageFlagSet(const Register& object,
                                      const Register& scratch,
                                      int mask,
                                      Label* if_any_set) {
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAnySet(scratch, mask, if_any_set);
}


void MacroAssembler::CheckPageFlagClear(const Register& object,
                                        const Register& scratch,
                                        int mask,
                                        Label* if_all_clear) {
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}


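// Both helpers exploit the fact that pages are aligned allocations, so the
// page header can be found by masking the object's address (a sketch):
//   flags = ((MemoryChunk*)(object & ~kPageAlignmentMask))->flags;
//   CheckPageFlagSet branches if (flags & mask) != 0;
//   CheckPageFlagClear branches if (flags & mask) == 0.
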
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register scratch,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(scratch, object, offset - kHeapObjectTag);
  if (emit_debug_code()) {
    Label ok;
    Tst(scratch, (1 << kPointerSizeLog2) - 1);
    B(eq, &ok);
    Abort(kUnalignedCellInWriteBarrier);
    Bind(&ok);
  }

  RecordWrite(object,
              scratch,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  Bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
    Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
  }
}


// Will clobber: object, map, dst.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  ASM_LOCATION("MacroAssembler::RecordWriteForMap");
  DCHECK(!AreAliased(object, map));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectMap(map, temp, isolate()->factory()->meta_map());
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    Cmp(temp, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlagClear(map,
                     map,  // Used as scratch.
                     MemoryChunk::kPointersToHereAreInterestingMask,
                     &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(lr);
  }
  Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr);
  }

  Bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
                   dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
    Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}


// Will clobber: object, address, value.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  ASM_LOCATION("MacroAssembler::RecordWrite");
  DCHECK(!AreAliased(object, value));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, MemOperand(address));
    Cmp(temp, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlagClear(value,
                       value,  // Used as scratch.
                       MemoryChunk::kPointersToHereAreInterestingMask,
                       &done);
  }
  CheckPageFlagClear(object,
                     value,  // Used as scratch.
                     MemoryChunk::kPointersFromHereAreInterestingMask,
                     &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr);
  }

  Bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}


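// Overall barrier logic emitted above (an illustrative sketch):
//   if (is_smi(value)) return;                                  // optional
//   if (!page(value)->pointers_to_here_interesting) return;     // optional
//   if (!page(object)->pointers_from_here_interesting) return;
//   RecordWriteStub(...);  // slow path: remembered set / marking updates
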
void MacroAssembler::AssertHasValidColor(const Register& reg) {
  if (emit_debug_code()) {
    // The bit sequence is backward. The first character in the string
    // represents the least significant bit.
    DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

    Label color_is_valid;
    Tbnz(reg, 0, &color_is_valid);
    Tbz(reg, 1, &color_is_valid);
    Abort(kUnexpectedColorFound);
    Bind(&color_is_valid);
  }
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register shift_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
  DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
  // addr_reg is divided into fields:
  // |63   page base   20|19   high   8|7   shift   3|2  0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
  Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
  Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));

  // |63   page base   20|19 zeros 15|14   high   3|2  0|
  Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
}


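// Address decomposition performed above (a sketch, assuming this build's
// constants: 3-bit pointer shift, 32-bit bitmap cells):
//   cell_index = (addr >> 8) & ((1 << (kPageSizeBits - 8)) - 1);
//   bit_shift  = (addr >> 3) & 31;  // position of the color bits in the cell
//   cell_addr  = (addr & ~kPageAlignmentMask) + cell_index * kBytesPerCell;
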
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register shift_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  // See mark-compact.h for color definitions.
  DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));

  GetMarkBits(object, bitmap_scratch, shift_scratch);
  Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  // Shift the bitmap down to get the color of the object in bits [1:0].
  Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);

  AssertHasValidColor(bitmap_scratch);

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);

  // Check for the color.
  if (first_bit == 0) {
    // Checking for white.
    DCHECK(second_bit == 0);
    // We only need to test the first bit.
    Tbz(bitmap_scratch, 0, has_color);
  } else {
    Label other_color;
    // Checking for grey or black.
    Tbz(bitmap_scratch, 0, &other_color);
    if (second_bit == 0) {
      Tbz(bitmap_scratch, 1, has_color);
    } else {
      Tbnz(bitmap_scratch, 1, has_color);
    }
    Bind(&other_color);
  }

  // Fall through if it does not have the right color.
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!AreAliased(object, scratch0, scratch1));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Scratch contains elements pointer.
  Mov(current, object);

  // Loop based on the map going up the prototype chain.
  Bind(&loop_again);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
}


void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  DCHECK(!result.Is(ldr_location));
  const uint32_t kLdrLitOffset_lsb = 5;
  const uint32_t kLdrLitOffset_width = 19;
  Ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    And(result, result, LoadLiteralFMask);
    Cmp(result, LoadLiteralFixed);
    Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
    // The instruction was clobbered. Reload it.
    Ldr(result, MemOperand(ldr_location));
  }
  Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
  Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
}


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register shift_scratch,
    Register load_scratch,
    Register length_scratch,
    Label* value_is_white_and_not_data) {
  DCHECK(!AreAliased(
      value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);

  GetMarkBits(value, bitmap_scratch, shift_scratch);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Lsr(load_scratch, load_scratch, shift_scratch);

  AssertHasValidColor(load_scratch);

  // If the value is black or grey we don't need to do anything.
  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  Label done;
  Tbnz(load_scratch, 0, &done);

  // Value is white. We check whether it is data that doesn't need scanning.
  Register map = load_scratch;  // Holds map while checking type.
  Label is_data_object;

  // Check for heap-number.
  Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  Mov(length_scratch, HeapNumber::kSize);
  JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);

  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  TestAndBranchIfAnySet(instance_type,
                        kIsIndirectStringMask | kIsNotStringMask,
                        value_is_white_and_not_data);

  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  Mov(length_scratch, ExternalString::kSize);
  TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);

  // Sequential string, either Latin1 or UC16.
  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
                                                String::kLengthOffset));
  Tst(instance_type, kStringEncodingMask);
  Cset(load_scratch, eq);
  Lsl(length_scratch, length_scratch, load_scratch);
  Add(length_scratch, length_scratch,
      SeqString::kHeaderSize + kObjectAlignmentMask);
  Bic(length_scratch, length_scratch, kObjectAlignmentMask);

  Bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  Register mask = shift_scratch;
  Mov(load_scratch, 1);
  Lsl(mask, load_scratch, shift_scratch);

  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Orr(load_scratch, load_scratch, mask);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Add(load_scratch, load_scratch, length_scratch);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  Bind(&done);
}


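// Sequential-string size computed above (a sketch): the Tst/Cset/Lsl triple
// multiplies the untagged length by the character size (1 or 2), then the
// result is rounded up to the allocation alignment:
//   size = (SeqString::kHeaderSize + length * char_size + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask;
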
void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
  if (emit_debug_code()) {
    Check(cond, reason);
  }
}


void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
  if (emit_debug_code()) {
    CheckRegisterIsClear(reg, reason);
  }
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index,
                                          BailoutReason reason) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Label ok;
    Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
    JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    Bind(&ok);
  }
}


void MacroAssembler::AssertIsString(const Register& object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::Check(Condition cond, BailoutReason reason) {
  Label ok;
  B(cond, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
  Label ok;
  Cbz(reg, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  if (FLAG_trap_on_abort) {
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we
  // have some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    Mov(x0, Smi::FromInt(reason));
    Push(x0);

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      CallRuntime(Runtime::kAbort, 1);
    } else {
      CallRuntime(Runtime::kAbort, 1);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    {
      BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  Ldr(scratch1, GlobalObjectMemOperand());
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(scratch2, FieldMemOperand(scratch1, offset));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  Ldr(function, GlobalObjectMemOperand());
  // Load the native context from the global or builtins object.
  Ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  Ldr(function, ContextMemOperand(function, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    B(&ok);
    Bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    Bind(&ok);
  }
}


// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // The provided arguments, and their proper procedure-call standard registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);  // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedFP;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      DCHECK(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    DCHECK(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      DCHECK(pcs[i].IsFPRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(arg_count, pcs);
}


void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
  // A call to printf needs special handling for the simulator, since the
  // system printf function will use a different instruction set and the
  // procedure-call standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(arg_count);  // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        DCHECK(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}


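// Under USE_SIMULATOR, the printf call site is therefore laid out as data
// that the simulator decodes (a sketch):
//   hlt #kImmExceptionIsPrintf
//   .word arg_count         ; at kPrintfArgCountOffset
//   .word arg_pattern_list  ; at kPrintfArgPatternListOffset
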
void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    DCHECK(!csp.Aliases(arg0));
    DCHECK(!csp.Aliases(arg1));
    DCHECK(!csp.Aliases(arg2));
    DCHECK(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}


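// Example use from a code generator (illustrative only; Printf is a debug
// aid and its format string is passed straight through to printf):
//   __ Printf("x0: 0x%" PRIx64 ", d1: %g\n", x0, d1);
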
void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}


#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}


void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);

  __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}


bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(is_young ||
         isolate->code_aging_helper()->IsOld(sequence));
  return is_young;
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!AreAliased(result, dividend));
  DCHECK(result.Is32Bits() && dividend.Is32Bits());
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  Mov(result, mag.multiplier);
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) Add(result, result, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
  if (mag.shift > 0) Asr(result, result, mag.shift);
  Add(result, result, Operand(dividend, LSR, 31));
}


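// Worked example (a sketch): for divisor == 3, SignedDivisionByConstant
// yields multiplier 0x55555556 with shift 0, so the code above computes
//   result = (int32_t)(((int64_t)dividend * 0x55555556) >> 32)
//            + ((uint32_t)dividend >> 31);
// which is dividend / 3 truncated toward zero (e.g. 7 -> 2, -7 -> -2).
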
#undef __


UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  DCHECK(!AreAliased(result, xzr, csp));
  return result;
}


CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  DCHECK(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}


#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
      int reg_code = RegisterBits::decode(payload);
      reg_ = Register::XRegFromCode(reg_code);
      uint64_t smi_check_delta = DeltaBits::decode(payload);
      DCHECK(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64