1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #if V8_TARGET_ARCH_ARM64
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/cpu-profiler.h"
12 #include "src/debug/debug.h"
13 #include "src/runtime/runtime.h"
15 #include "src/arm64/frames-arm64.h"
16 #include "src/arm64/macro-assembler-arm64.h"
21 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
25 MacroAssembler::MacroAssembler(Isolate* arg_isolate,
28 : Assembler(arg_isolate, buffer, buffer_size),
29 generating_stub_(false),
31 allow_macro_instructions_(true),
34 use_real_aborts_(true),
36 tmp_list_(DefaultTmpList()),
37 fptmp_list_(DefaultFPTmpList()) {
38 if (isolate() != NULL) {
39 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
45 CPURegList MacroAssembler::DefaultTmpList() {
46 return CPURegList(ip0, ip1);
50 CPURegList MacroAssembler::DefaultFPTmpList() {
51 return CPURegList(fp_scratch1, fp_scratch2);
55 void MacroAssembler::LogicalMacro(const Register& rd,
57 const Operand& operand,
59 UseScratchRegisterScope temps(this);
61 if (operand.NeedsRelocation(this)) {
62 Register temp = temps.AcquireX();
63 Ldr(temp, operand.immediate());
64 Logical(rd, rn, temp, op);
66 } else if (operand.IsImmediate()) {
67 int64_t immediate = operand.ImmediateValue();
68 unsigned reg_size = rd.SizeInBits();
70 // If the operation is NOT, invert the operation and immediate.
71 if ((op & NOT) == NOT) {
72 op = static_cast<LogicalOp>(op & ~NOT);
73 immediate = ~immediate;
76 // Ignore the top 32 bits of an immediate if we're moving to a W register.
78 // Check that the top 32 bits are consistent.
79 DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
80 ((immediate >> kWRegSizeInBits) == -1));
81 immediate &= kWRegMask;
84 DCHECK(rd.Is64Bits() || is_uint32(immediate));
86 // Special cases for all set or all clear immediates.
92 case ORR: // Fall through.
96 case ANDS: // Fall through.
102 } else if ((rd.Is64Bits() && (immediate == -1L)) ||
103 (rd.Is32Bits() && (immediate == 0xffffffffL))) {
114 case ANDS: // Fall through.
122 unsigned n, imm_s, imm_r;
123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
124 // Immediate can be encoded in the instruction.
125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
127 // Immediate can't be encoded: synthesize using move immediate.
128 Register temp = temps.AcquireSameSizeAs(rn);
129 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
131 // If rd is the stack pointer we cannot use it as the destination
132 // register so we use the temp register as an intermediate again.
133 Logical(temp, rn, imm_operand, op);
135 AssertStackConsistency();
137 Logical(rd, rn, imm_operand, op);
141 } else if (operand.IsExtendedRegister()) {
142 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
143 // Add/sub extended supports shift <= 4. We want to support exactly the same modes here.
145 DCHECK(operand.shift_amount() <= 4);
146 DCHECK(operand.reg().Is64Bits() ||
147 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
148 Register temp = temps.AcquireSameSizeAs(rn);
149 EmitExtendShift(temp, operand.reg(), operand.extend(),
150 operand.shift_amount());
151 Logical(rd, rn, temp, op);
154 // The operand can be encoded in the instruction.
155 DCHECK(operand.IsShiftedRegister());
156 Logical(rd, rn, operand, op);
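// Worked example (a sketch of what the paths above produce, not emitted
// verbatim): And(w0, w1, 0xff0) is a valid bitmask immediate and maps onto the
// logical-immediate form directly, whereas And(x0, x1, 0x123456789) has no
// bitmask encoding, so the value is synthesized into a scratch register
// (typically ip0) and a register-form instruction is emitted, possibly with a
// shift folded in by MoveImmediateForShiftedOp.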
161 void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
162 DCHECK(allow_macro_instructions_);
163 DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
164 DCHECK(!rd.IsZero());
166 // TODO(all) extend to support more immediates.
168 // Immediates on AArch64 can be produced using an initial value, and zero to
169 // three move-keep operations.
171 // Initial values can be generated with:
172 // 1. 64-bit move zero (movz).
173 // 2. 32-bit move inverted (movn).
174 // 3. 64-bit move inverted.
175 // 4. 32-bit orr immediate.
176 // 5. 64-bit orr immediate.
177 // Move-keep may then be used to modify each of the 16-bit half-words.
179 // The code below supports all five initial value generators, and
180 // applying move-keep operations to move-zero and move-inverted initial values.
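//
// Illustrative sketch (the macro picks the shortest sequence at run time, so
// the exact encoding may differ): Mov(x0, 0x0000cafe00001234) could be
// synthesized as
//   movz x0, #0x1234              // halfword 0
//   movk x0, #0xcafe, lsl #32     // halfword 2, keeping the others
// while Mov(x0, 0xfffffffffffffffe) fits a single movn, and
// Mov(x0, 0x00ff00ff00ff00ff) fits a single orr-immediate.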
183 // Try to move the immediate in one instruction, and if that fails, switch to
184 // using multiple instructions.
185 if (!TryOneInstrMoveImmediate(rd, imm)) {
186 unsigned reg_size = rd.SizeInBits();
188 // Generic immediate case. Imm will be represented by
189 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
190 // A move-zero or move-inverted is generated for the first non-zero or
191 // non-0xffff immX, and a move-keep for subsequent non-zero immX.
193 uint64_t ignored_halfword = 0;
194 bool invert_move = false;
195 // If the number of 0xffff halfwords is greater than the number of 0x0000
196 // halfwords, it's more efficient to use move-inverted.
197 if (CountClearHalfWords(~imm, reg_size) >
198 CountClearHalfWords(imm, reg_size)) {
199 ignored_halfword = 0xffffL;
203 // Mov instructions can't move immediate values into the stack pointer, so
204 // set up a temporary register, if needed.
205 UseScratchRegisterScope temps(this);
206 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
208 // Iterate through the halfwords. Use movn/movz for the first non-ignored
209 // halfword, and movk for subsequent halfwords.
210 DCHECK((reg_size % 16) == 0);
211 bool first_mov_done = false;
212 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
213 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
214 if (imm16 != ignored_halfword) {
215 if (!first_mov_done) {
217 movn(temp, (~imm16) & 0xffffL, 16 * i);
219 movz(temp, imm16, 16 * i);
221 first_mov_done = true;
223 // Construct a wider constant.
224 movk(temp, imm16, 16 * i);
228 DCHECK(first_mov_done);
230 // Move the temporary if the original destination register was the stack pointer.
234 AssertStackConsistency();
240 void MacroAssembler::Mov(const Register& rd,
241 const Operand& operand,
242 DiscardMoveMode discard_mode) {
243 DCHECK(allow_macro_instructions_);
244 DCHECK(!rd.IsZero());
246 // Provide a swap register for instructions that need to write into the
247 // system stack pointer (and can't do this inherently).
248 UseScratchRegisterScope temps(this);
249 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
251 if (operand.NeedsRelocation(this)) {
252 Ldr(dst, operand.immediate());
254 } else if (operand.IsImmediate()) {
255 // Call the macro assembler for generic immediates.
256 Mov(dst, operand.ImmediateValue());
258 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
259 // Emit a shift instruction if moving a shifted register. This operation
260 // could also be achieved using an orr instruction (like orn used by Mvn),
261 // but using a shift instruction makes the disassembly clearer.
262 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
264 } else if (operand.IsExtendedRegister()) {
265 // Emit an extend instruction if moving an extended register. This handles
266 // extend with post-shift operations, too.
267 EmitExtendShift(dst, operand.reg(), operand.extend(),
268 operand.shift_amount());
271 // Otherwise, emit a register move only if the registers are distinct, or
272 // if they are not X registers.
274 // Note that mov(w0, w0) is not a no-op because it clears the top word of
275 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
276 // registers is not required to clear the top word of the X register. In
277 // this case, the instruction is discarded.
279 // If csp is an operand, add #0 is emitted, otherwise, orr #0.
280 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
281 (discard_mode == kDontDiscardForSameWReg))) {
282 Assembler::mov(rd, operand.reg());
284 // This case can handle writes into the system stack pointer directly.
288 // Copy the result to the system stack pointer.
291 Assembler::mov(rd, dst);
296 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
297 DCHECK(allow_macro_instructions_);
299 if (operand.NeedsRelocation(this)) {
300 Ldr(rd, operand.immediate());
303 } else if (operand.IsImmediate()) {
304 // Call the macro assembler for generic immediates.
305 Mov(rd, ~operand.ImmediateValue());
307 } else if (operand.IsExtendedRegister()) {
308 // Emit two instructions for the extend case. This differs from Mov, as
309 // the extend and invert can't be achieved in one instruction.
310 EmitExtendShift(rd, operand.reg(), operand.extend(),
311 operand.shift_amount());
320 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
321 DCHECK((reg_size % 8) == 0);
323 for (unsigned i = 0; i < (reg_size / 16); i++) {
324 if ((imm & 0xffff) == 0) {
333 // The movz instruction can generate immediates containing an arbitrary 16-bit
334 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
335 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
336 DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
337 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
341 // The movn instruction can generate immediates containing an arbitrary 16-bit
342 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
343 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
344 return IsImmMovz(~imm, reg_size);
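// For example, 0x0000ffff00000000 has a single non-zero halfword and so
// satisfies IsImmMovz; its complement, 0xffff0000ffffffff, therefore satisfies
// IsImmMovn.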
348 void MacroAssembler::ConditionalCompareMacro(const Register& rn,
349 const Operand& operand,
352 ConditionalCompareOp op) {
353 DCHECK((cond != al) && (cond != nv));
354 if (operand.NeedsRelocation(this)) {
355 UseScratchRegisterScope temps(this);
356 Register temp = temps.AcquireX();
357 Ldr(temp, operand.immediate());
358 ConditionalCompareMacro(rn, temp, nzcv, cond, op);
360 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
361 (operand.IsImmediate() &&
362 IsImmConditionalCompare(operand.ImmediateValue()))) {
363 // The immediate can be encoded in the instruction, or the operand is an
364 // unshifted register: call the assembler.
365 ConditionalCompare(rn, operand, nzcv, cond, op);
368 // The operand isn't directly supported by the instruction: perform the
369 // operation on a temporary register.
370 UseScratchRegisterScope temps(this);
371 Register temp = temps.AcquireSameSizeAs(rn);
373 ConditionalCompare(rn, temp, nzcv, cond, op);
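// Sketch of the two paths above: Ccmp(x0, 17, ZFlag, eq) fits the 5-bit ccmp
// immediate and is emitted directly, whereas Ccmp(x0, 0x100, ZFlag, eq) does
// not, so 0x100 is first moved into a scratch register and the register form
// of ccmp is used.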
378 void MacroAssembler::Csel(const Register& rd,
380 const Operand& operand,
382 DCHECK(allow_macro_instructions_);
383 DCHECK(!rd.IsZero());
384 DCHECK((cond != al) && (cond != nv));
385 if (operand.IsImmediate()) {
386 // Immediate argument. Handle special cases of 0, 1 and -1 using the zero register.
388 int64_t imm = operand.ImmediateValue();
389 Register zr = AppropriateZeroRegFor(rn);
391 csel(rd, rn, zr, cond);
392 } else if (imm == 1) {
393 csinc(rd, rn, zr, cond);
394 } else if (imm == -1) {
395 csinv(rd, rn, zr, cond);
397 UseScratchRegisterScope temps(this);
398 Register temp = temps.AcquireSameSizeAs(rn);
400 csel(rd, rn, temp, cond);
402 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
403 // Unshifted register argument.
404 csel(rd, rn, operand.reg(), cond);
406 // All other arguments.
407 UseScratchRegisterScope temps(this);
408 Register temp = temps.AcquireSameSizeAs(rn);
410 csel(rd, rn, temp, cond);
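// For example (a sketch): Csel(x0, x1, 1, eq) becomes csinc x0, x1, xzr, eq,
// and Csel(x0, x1, -1, eq) becomes csinv x0, x1, xzr, eq; any other immediate
// is first moved into a scratch register and a plain csel is emitted.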
415 bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
417 unsigned n, imm_s, imm_r;
418 int reg_size = dst.SizeInBits();
419 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
420 // Immediate can be represented in a move zero instruction. Movz can't write
421 // to the stack pointer.
424 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
425 // Immediate can be represented in a move not instruction. Movn can't write
426 // to the stack pointer.
427 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
429 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
430 // Immediate can be represented in a logical orr instruction.
431 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
438 Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
440 int reg_size = dst.SizeInBits();
442 // Encode the immediate in a single move instruction, if possible.
443 if (TryOneInstrMoveImmediate(dst, imm)) {
444 // The move was successful; nothing to do here.
446 // Pre-shift the immediate to the least-significant bits of the register.
447 int shift_low = CountTrailingZeros(imm, reg_size);
448 int64_t imm_low = imm >> shift_low;
450 // Pre-shift the immediate to the most-significant bits of the register. We
451 // insert set bits in the least-significant bits, as this creates a
452 // different immediate that may be encodable using movn or orr-immediate.
453 // If this new immediate is encodable, the set bits will be eliminated by
454 // the post shift on the following instruction.
455 int shift_high = CountLeadingZeros(imm, reg_size);
456 int64_t imm_high = (imm << shift_high) | ((1ULL << shift_high) - 1);
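// Rough example of the low pre-shift: imm = 0x3039000 (0x3039 << 12) has no
// single-instruction encoding, but shift_low is 12 and imm_low is 0x3039,
// which one movz can produce; the returned operand is then (dst, LSL #12), to
// be folded into the caller's shifted-register instruction. The high pre-shift
// plays the same trick for values whose interesting bits sit at the top of the
// register.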
458 if (TryOneInstrMoveImmediate(dst, imm_low)) {
459 // The new immediate has been moved into the destination's low bits:
460 // return a new leftward-shifting operand.
461 return Operand(dst, LSL, shift_low);
462 } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
463 // The new immediate has been moved into the destination's high bits:
464 // return a new rightward-shifting operand.
465 return Operand(dst, LSR, shift_high);
467 // Use the generic move operation to set up the immediate.
475 void MacroAssembler::AddSubMacro(const Register& rd,
477 const Operand& operand,
480 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
481 !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
482 // The instruction would be a nop. Avoid generating useless code.
486 if (operand.NeedsRelocation(this)) {
487 UseScratchRegisterScope temps(this);
488 Register temp = temps.AcquireX();
489 Ldr(temp, operand.immediate());
490 AddSubMacro(rd, rn, temp, S, op);
491 } else if ((operand.IsImmediate() &&
492 !IsImmAddSub(operand.ImmediateValue())) ||
493 (rn.IsZero() && !operand.IsShiftedRegister()) ||
494 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
495 UseScratchRegisterScope temps(this);
496 Register temp = temps.AcquireSameSizeAs(rn);
497 if (operand.IsImmediate()) {
498 Operand imm_operand =
499 MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
500 AddSub(rd, rn, imm_operand, S, op);
503 AddSub(rd, rn, temp, S, op);
506 AddSub(rd, rn, operand, S, op);
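// Illustrative expansion (scratch register choice may vary; typically ip0):
// Add(x0, x1, 0x3039000) does not fit the 12-bit (optionally LSL #12) add/sub
// immediate field, so it becomes roughly
//   movz ip0, #0x3039
//   add  x0, x1, ip0, lsl #12
// while Add(x0, x1, 0xfff) uses the immediate form directly.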
511 void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
513 const Operand& operand,
515 AddSubWithCarryOp op) {
516 DCHECK(rd.SizeInBits() == rn.SizeInBits());
517 UseScratchRegisterScope temps(this);
519 if (operand.NeedsRelocation(this)) {
520 Register temp = temps.AcquireX();
521 Ldr(temp, operand.immediate());
522 AddSubWithCarryMacro(rd, rn, temp, S, op);
524 } else if (operand.IsImmediate() ||
525 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
526 // Add/sub with carry (immediate or ROR shifted register).
527 Register temp = temps.AcquireSameSizeAs(rn);
529 AddSubWithCarry(rd, rn, temp, S, op);
531 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
532 // Add/sub with carry (shifted register).
533 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
534 DCHECK(operand.shift() != ROR);
535 DCHECK(is_uintn(operand.shift_amount(),
536 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
537 : kWRegSizeInBitsLog2));
538 Register temp = temps.AcquireSameSizeAs(rn);
539 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
540 AddSubWithCarry(rd, rn, temp, S, op);
542 } else if (operand.IsExtendedRegister()) {
543 // Add/sub with carry (extended register).
544 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
545 // Add/sub extended supports a shift <= 4. We want to support exactly the same modes here.
547 DCHECK(operand.shift_amount() <= 4);
548 DCHECK(operand.reg().Is64Bits() ||
549 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
550 Register temp = temps.AcquireSameSizeAs(rn);
551 EmitExtendShift(temp, operand.reg(), operand.extend(),
552 operand.shift_amount());
553 AddSubWithCarry(rd, rn, temp, S, op);
556 // The addressing mode is directly supported by the instruction.
557 AddSubWithCarry(rd, rn, operand, S, op);
562 void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
563 const MemOperand& addr,
565 int64_t offset = addr.offset();
566 LSDataSize size = CalcLSDataSize(op);
568 // Check if an immediate offset fits in the immediate field of the
569 // appropriate instruction. If not, emit two instructions to perform the operation.
571 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
572 !IsImmLSUnscaled(offset)) {
573 // Immediate offset that can't be encoded using unsigned or unscaled addressing modes.
575 UseScratchRegisterScope temps(this);
576 Register temp = temps.AcquireSameSizeAs(addr.base());
577 Mov(temp, addr.offset());
578 LoadStore(rt, MemOperand(addr.base(), temp), op);
579 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
580 // Post-index beyond unscaled addressing range.
581 LoadStore(rt, MemOperand(addr.base()), op);
582 add(addr.base(), addr.base(), offset);
583 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
584 // Pre-index beyond unscaled addressing range.
585 add(addr.base(), addr.base(), offset);
586 LoadStore(rt, MemOperand(addr.base()), op);
588 // Encodable in one load/store instruction.
589 LoadStore(rt, addr, op);
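// Sketch of the register-offset fallback above: Ldr(x0, MemOperand(x1,
// 0x123457)) fits neither the scaled unsigned 12-bit field nor the unscaled
// signed 9-bit field, so it becomes roughly
//   mov ip0, #0x123457
//   ldr x0, [x1, ip0]
// Pre- and post-index offsets outside the unscaled range are instead split
// into a separate add plus an unindexed access, as handled above.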
593 void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
594 const CPURegister& rt2,
595 const MemOperand& addr,
596 LoadStorePairOp op) {
597 // TODO(all): Should we support register offset for load-store-pair?
598 DCHECK(!addr.IsRegisterOffset());
600 int64_t offset = addr.offset();
601 LSDataSize size = CalcLSPairDataSize(op);
603 // Check if the offset fits in the immediate field of the appropriate
604 // instruction. If not, emit two instructions to perform the operation.
605 if (IsImmLSPair(offset, size)) {
606 // Encodable in one load/store pair instruction.
607 LoadStorePair(rt, rt2, addr, op);
609 Register base = addr.base();
610 if (addr.IsImmediateOffset()) {
611 UseScratchRegisterScope temps(this);
612 Register temp = temps.AcquireSameSizeAs(base);
613 Add(temp, base, offset);
614 LoadStorePair(rt, rt2, MemOperand(temp), op);
615 } else if (addr.IsPostIndex()) {
616 LoadStorePair(rt, rt2, MemOperand(base), op);
617 Add(base, base, offset);
619 DCHECK(addr.IsPreIndex());
620 Add(base, base, offset);
621 LoadStorePair(rt, rt2, MemOperand(base), op);
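// Sketch: the pair offset is a signed 7-bit field scaled by the access size,
// so for X registers Ldp(x0, x1, MemOperand(x2, 496)) is encodable directly,
// whereas an offset of 1024 lies outside the [-512, 504] range and is folded
// into a scratch base first, roughly:
//   add ip0, x2, #1024
//   ldp x0, x1, [ip0]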
627 void MacroAssembler::Load(const Register& rt,
628 const MemOperand& addr,
630 DCHECK(!r.IsDouble());
632 if (r.IsInteger8()) {
634 } else if (r.IsUInteger8()) {
636 } else if (r.IsInteger16()) {
638 } else if (r.IsUInteger16()) {
640 } else if (r.IsInteger32()) {
643 DCHECK(rt.Is64Bits());
649 void MacroAssembler::Store(const Register& rt,
650 const MemOperand& addr,
652 DCHECK(!r.IsDouble());
654 if (r.IsInteger8() || r.IsUInteger8()) {
656 } else if (r.IsInteger16() || r.IsUInteger16()) {
658 } else if (r.IsInteger32()) {
661 DCHECK(rt.Is64Bits());
662 if (r.IsHeapObject()) {
664 } else if (r.IsSmi()) {
672 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
673 Label *label, ImmBranchType b_type) {
674 bool need_longer_range = false;
675 // There are two situations in which we care about the offset being out of range:
677 // - The label is bound but too far away.
678 // - The label is not bound but linked, and the previous branch
679 // instruction in the chain is too far away.
680 if (label->is_bound() || label->is_linked()) {
682 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
684 if (!need_longer_range && !label->is_bound()) {
685 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
686 unresolved_branches_.insert(
687 std::pair<int, FarBranchInfo>(max_reachable_pc,
688 FarBranchInfo(pc_offset(), label)));
689 // Also maintain the next pool check.
690 next_veneer_pool_check_ =
691 Min(next_veneer_pool_check_,
692 max_reachable_pc - kVeneerDistanceCheckMargin);
694 return need_longer_range;
698 void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
699 DCHECK(allow_macro_instructions_);
700 DCHECK(!rd.IsZero());
702 if (hint == kAdrNear) {
707 DCHECK(hint == kAdrFar);
708 if (label->is_bound()) {
709 int label_offset = label->pos() - pc_offset();
710 if (Instruction::IsValidPCRelOffset(label_offset)) {
713 DCHECK(label_offset <= 0);
714 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
715 adr(rd, min_adr_offset);
716 Add(rd, rd, label_offset - min_adr_offset);
719 UseScratchRegisterScope temps(this);
720 Register scratch = temps.AcquireX();
722 InstructionAccurateScope scope(
723 this, PatchingAssembler::kAdrFarPatchableNInstrs);
725 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
733 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
734 DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
735 (bit == -1 || type >= kBranchTypeFirstUsingBit));
736 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
737 B(static_cast<Condition>(type), label);
740 case always: B(label); break;
742 case reg_zero: Cbz(reg, label); break;
743 case reg_not_zero: Cbnz(reg, label); break;
744 case reg_bit_clear: Tbz(reg, bit, label); break;
745 case reg_bit_set: Tbnz(reg, bit, label); break;
753 void MacroAssembler::B(Label* label, Condition cond) {
754 DCHECK(allow_macro_instructions_);
755 DCHECK((cond != al) && (cond != nv));
758 bool need_extra_instructions =
759 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
761 if (need_extra_instructions) {
762 b(&done, NegateCondition(cond));
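// In other words, a far conditional branch is emitted (roughly) as
//   b.ne  done      // inverted condition, +/-1MB range
//   b     label     // unconditional branch, +/-128MB range
//  done:
// using eq/ne here purely as an example; the actual condition is whatever was
// passed in.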
771 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
772 DCHECK(allow_macro_instructions_);
775 bool need_extra_instructions =
776 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
778 if (need_extra_instructions) {
779 tbz(rt, bit_pos, &done);
782 tbnz(rt, bit_pos, label);
788 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
789 DCHECK(allow_macro_instructions_);
792 bool need_extra_instructions =
793 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
795 if (need_extra_instructions) {
796 tbnz(rt, bit_pos, &done);
799 tbz(rt, bit_pos, label);
805 void MacroAssembler::Cbnz(const Register& rt, Label* label) {
806 DCHECK(allow_macro_instructions_);
809 bool need_extra_instructions =
810 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
812 if (need_extra_instructions) {
822 void MacroAssembler::Cbz(const Register& rt, Label* label) {
823 DCHECK(allow_macro_instructions_);
826 bool need_extra_instructions =
827 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
829 if (need_extra_instructions) {
839 // Pseudo-instructions.
842 void MacroAssembler::Abs(const Register& rd, const Register& rm,
843 Label* is_not_representable,
844 Label* is_representable) {
845 DCHECK(allow_macro_instructions_);
846 DCHECK(AreSameSizeAndType(rd, rm));
851 // If the comparison sets the v flag, the input was the smallest value
852 // representable by rm, and the mathematical result of abs(rm) is not
853 // representable using two's complement.
854 if ((is_not_representable != NULL) && (is_representable != NULL)) {
855 B(is_not_representable, vs);
857 } else if (is_not_representable != NULL) {
858 B(is_not_representable, vs);
859 } else if (is_representable != NULL) {
860 B(is_representable, vc);
865 // Abstracted stack operations.
868 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
869 const CPURegister& src2, const CPURegister& src3) {
870 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
872 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
873 int size = src0.SizeInBytes();
875 PushPreamble(count, size);
876 PushHelper(count, size, src0, src1, src2, src3);
880 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
881 const CPURegister& src2, const CPURegister& src3,
882 const CPURegister& src4, const CPURegister& src5,
883 const CPURegister& src6, const CPURegister& src7) {
884 DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
886 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
887 int size = src0.SizeInBytes();
889 PushPreamble(count, size);
890 PushHelper(4, size, src0, src1, src2, src3);
891 PushHelper(count - 4, size, src4, src5, src6, src7);
895 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
896 const CPURegister& dst2, const CPURegister& dst3) {
897 // It is not valid to pop into the same register more than once in one
898 // instruction, not even into the zero register.
899 DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
900 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
901 DCHECK(dst0.IsValid());
903 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
904 int size = dst0.SizeInBytes();
906 PopHelper(count, size, dst0, dst1, dst2, dst3);
907 PopPostamble(count, size);
911 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
912 const CPURegister& dst2, const CPURegister& dst3,
913 const CPURegister& dst4, const CPURegister& dst5,
914 const CPURegister& dst6, const CPURegister& dst7) {
915 // It is not valid to pop into the same register more than once in one
916 // instruction, not even into the zero register.
917 DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
918 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
919 DCHECK(dst0.IsValid());
921 int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
922 int size = dst0.SizeInBytes();
924 PopHelper(4, size, dst0, dst1, dst2, dst3);
925 PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
926 PopPostamble(count, size);
930 void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
931 int size = src0.SizeInBytes() + src1.SizeInBytes();
934 // Reserve room for src0 and push src1.
935 str(src1, MemOperand(StackPointer(), -size, PreIndex));
936 // Fill the gap with src0.
937 str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
941 void MacroAssembler::PushPopQueue::PushQueued(
942 PreambleDirective preamble_directive) {
943 if (queued_.empty()) return;
945 if (preamble_directive == WITH_PREAMBLE) {
946 masm_->PushPreamble(size_);
949 size_t count = queued_.size();
951 while (index < count) {
952 // PushHelper can only handle registers with the same size and type, and it
953 // can handle only four at a time. Batch them up accordingly.
954 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
957 batch[batch_index++] = queued_[index++];
958 } while ((batch_index < 4) && (index < count) &&
959 batch[0].IsSameSizeAndType(queued_[index]));
961 masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
962 batch[0], batch[1], batch[2], batch[3]);
969 void MacroAssembler::PushPopQueue::PopQueued() {
970 if (queued_.empty()) return;
972 size_t count = queued_.size();
974 while (index < count) {
975 // PopHelper can only handle registers with the same size and type, and it
976 // can handle only four at a time. Batch them up accordingly.
977 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
980 batch[batch_index++] = queued_[index++];
981 } while ((batch_index < 4) && (index < count) &&
982 batch[0].IsSameSizeAndType(queued_[index]));
984 masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
985 batch[0], batch[1], batch[2], batch[3]);
988 masm_->PopPostamble(size_);
993 void MacroAssembler::PushCPURegList(CPURegList registers) {
994 int size = registers.RegisterSizeInBytes();
996 PushPreamble(registers.Count(), size);
997 // Push up to four registers at a time because if the current stack pointer is
998 // csp and reg_size is 32, registers must be pushed in blocks of four in order
999 // to maintain the 16-byte alignment for csp.
1000 while (!registers.IsEmpty()) {
1001 int count_before = registers.Count();
1002 const CPURegister& src0 = registers.PopHighestIndex();
1003 const CPURegister& src1 = registers.PopHighestIndex();
1004 const CPURegister& src2 = registers.PopHighestIndex();
1005 const CPURegister& src3 = registers.PopHighestIndex();
1006 int count = count_before - registers.Count();
1007 PushHelper(count, size, src0, src1, src2, src3);
1012 void MacroAssembler::PopCPURegList(CPURegList registers) {
1013 int size = registers.RegisterSizeInBytes();
1015 // Pop up to four registers at a time because if the current stack pointer is
1016 // csp and reg_size is 32, registers must be pushed in blocks of four in
1017 // order to maintain the 16-byte alignment for csp.
1018 while (!registers.IsEmpty()) {
1019 int count_before = registers.Count();
1020 const CPURegister& dst0 = registers.PopLowestIndex();
1021 const CPURegister& dst1 = registers.PopLowestIndex();
1022 const CPURegister& dst2 = registers.PopLowestIndex();
1023 const CPURegister& dst3 = registers.PopLowestIndex();
1024 int count = count_before - registers.Count();
1025 PopHelper(count, size, dst0, dst1, dst2, dst3);
1027 PopPostamble(registers.Count(), size);
1031 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
1032 int size = src.SizeInBytes();
1034 PushPreamble(count, size);
1036 if (FLAG_optimize_for_size && count > 8) {
1037 UseScratchRegisterScope temps(this);
1038 Register temp = temps.AcquireX();
1041 __ Mov(temp, count / 2);
1043 PushHelper(2, size, src, src, NoReg, NoReg);
1044 __ Subs(temp, temp, 1);
1050 // Push up to four registers at a time if possible because if the current
1051 // stack pointer is csp and the register size is 32, registers must be pushed
1052 // in blocks of four in order to maintain the 16-byte alignment for csp.
1053 while (count >= 4) {
1054 PushHelper(4, size, src, src, src, src);
1058 PushHelper(2, size, src, src, NoReg, NoReg);
1062 PushHelper(1, size, src, NoReg, NoReg, NoReg);
1069 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1070 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
1072 UseScratchRegisterScope temps(this);
1073 Register temp = temps.AcquireSameSizeAs(count);
1075 if (FLAG_optimize_for_size) {
1078 Subs(temp, count, 1);
1081 // Push all registers individually, to save code size.
1083 Subs(temp, temp, 1);
1084 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1089 Label loop, leftover2, leftover1, done;
1091 Subs(temp, count, 4);
1094 // Push groups of four first.
1096 Subs(temp, temp, 4);
1097 PushHelper(4, src.SizeInBytes(), src, src, src, src);
1100 // Push groups of two.
1102 Tbz(count, 1, &leftover1);
1103 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1105 // Push the last one (if required).
1107 Tbz(count, 0, &done);
1108 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1115 void MacroAssembler::PushHelper(int count, int size,
1116 const CPURegister& src0,
1117 const CPURegister& src1,
1118 const CPURegister& src2,
1119 const CPURegister& src3) {
1120 // Ensure that we don't unintentionally modify scratch or debug registers.
1121 InstructionAccurateScope scope(this);
1123 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1124 DCHECK(size == src0.SizeInBytes());
1126 // When pushing multiple registers, the store order is chosen such that
1127 // Push(a, b) is equivalent to Push(a) followed by Push(b).
1130 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1131 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1134 DCHECK(src2.IsNone() && src3.IsNone());
1135 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1138 DCHECK(src3.IsNone());
1139 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1140 str(src0, MemOperand(StackPointer(), 2 * size));
1143 // Skip over 4 * size, then fill in the gap. This allows four W registers
1144 // to be pushed using csp, whilst maintaining 16-byte alignment for csp at all times.
1146 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1147 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
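// For instance, Push(x0, x1, x2, x3) reaches this four-register case and
// emits, in effect (with sp_reg standing for whatever StackPointer() is):
//   stp x3, x2, [sp_reg, #-32]!
//   stp x1, x0, [sp_reg, #16]
// which leaves x0 at the highest address, exactly as if the registers had been
// pushed one at a time.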
1155 void MacroAssembler::PopHelper(int count, int size,
1156 const CPURegister& dst0,
1157 const CPURegister& dst1,
1158 const CPURegister& dst2,
1159 const CPURegister& dst3) {
1160 // Ensure that we don't unintentionally modify scratch or debug registers.
1161 InstructionAccurateScope scope(this);
1163 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1164 DCHECK(size == dst0.SizeInBytes());
1166 // When popping multiple registers, the load order is chosen such that
1167 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1170 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1171 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1174 DCHECK(dst2.IsNone() && dst3.IsNone());
1175 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1178 DCHECK(dst3.IsNone());
1179 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1180 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1183 // Load the higher addresses first, then load the lower addresses and
1184 // skip the whole block in the second instruction. This allows four W
1185 // registers to be popped using csp, whilst maintaining 16-byte alignment
1186 // for csp at all times.
1187 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1188 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1196 void MacroAssembler::PushPreamble(Operand total_size) {
1197 if (csp.Is(StackPointer())) {
1198 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1199 // on entry and the total size of the specified registers must also be a
1200 // multiple of 16 bytes.
1201 if (total_size.IsImmediate()) {
1202 DCHECK((total_size.ImmediateValue() % 16) == 0);
1205 // Don't check access size for non-immediate sizes. It's difficult to do
1206 // well, and it will be caught by hardware (or the simulator) anyway.
1208 // Even if the current stack pointer is not the system stack pointer (csp),
1209 // the system stack pointer will still be modified in order to comply with
1210 // ABI rules about accessing memory below the system stack pointer.
1211 BumpSystemStackPointer(total_size);
1216 void MacroAssembler::PopPostamble(Operand total_size) {
1217 if (csp.Is(StackPointer())) {
1218 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1219 // on entry and the total size of the specified registers must also be a
1220 // multiple of 16 bytes.
1221 if (total_size.IsImmediate()) {
1222 DCHECK((total_size.ImmediateValue() % 16) == 0);
1225 // Don't check access size for non-immediate sizes. It's difficult to do
1226 // well, and it will be caught by hardware (or the simulator) anyway.
1227 } else if (emit_debug_code()) {
1228 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1229 // but if we keep it matching StackPointer, the simulator can detect memory
1230 // accesses in the now-free part of the stack.
1231 SyncSystemStackPointer();
1236 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
1237 if (offset.IsImmediate()) {
1238 DCHECK(offset.ImmediateValue() >= 0);
1239 } else if (emit_debug_code()) {
1241 Check(le, kStackAccessBelowStackPointer);
1244 Str(src, MemOperand(StackPointer(), offset));
1248 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1249 if (offset.IsImmediate()) {
1250 DCHECK(offset.ImmediateValue() >= 0);
1251 } else if (emit_debug_code()) {
1253 Check(le, kStackAccessBelowStackPointer);
1256 Ldr(dst, MemOperand(StackPointer(), offset));
1260 void MacroAssembler::PokePair(const CPURegister& src1,
1261 const CPURegister& src2,
1263 DCHECK(AreSameSizeAndType(src1, src2));
1264 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1265 Stp(src1, src2, MemOperand(StackPointer(), offset));
1269 void MacroAssembler::PeekPair(const CPURegister& dst1,
1270 const CPURegister& dst2,
1272 DCHECK(AreSameSizeAndType(dst1, dst2));
1273 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1274 Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1278 void MacroAssembler::PushCalleeSavedRegisters() {
1279 // Ensure that the macro-assembler doesn't use any scratch registers.
1280 InstructionAccurateScope scope(this);
1282 // This method must not be called unless the current stack pointer is the
1283 // system stack pointer (csp).
1284 DCHECK(csp.Is(StackPointer()));
1286 MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
1294 stp(x27, x28, tos); // x28 = jssp
1302 void MacroAssembler::PopCalleeSavedRegisters() {
1303 // Ensure that the macro-assembler doesn't use any scratch registers.
1304 InstructionAccurateScope scope(this);
1306 // This method must not be called unless the current stack pointer is the
1307 // system stack pointer (csp).
1308 DCHECK(csp.Is(StackPointer()));
1310 MemOperand tos(csp, 2 * kXRegSize, PostIndex);
1316 ldp(x27, x28, tos); // x28 = jssp
1326 void MacroAssembler::AssertStackConsistency() {
1327 // Avoid emitting code when !use_real_aborts() since non-real aborts cause too
1328 // much code to be generated.
1329 if (emit_debug_code() && use_real_aborts()) {
1330 if (csp.Is(StackPointer())) {
1331 // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
1332 // can't check the alignment of csp without using a scratch register (or
1333 // clobbering the flags), but the processor (or simulator) will abort if
1334 // it is not properly aligned during a load.
1335 ldr(xzr, MemOperand(csp, 0));
1337 if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
1339 // Check that csp <= StackPointer(), preserving all registers and NZCV.
1340 sub(StackPointer(), csp, StackPointer());
1341 cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
1342 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
1344 // Avoid generating AssertStackConsistency checks for the Push in Abort.
1345 { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
1346 Abort(kTheCurrentStackPointerIsBelowCsp);
1350 // Restore StackPointer().
1351 sub(StackPointer(), csp, StackPointer());
1357 void MacroAssembler::AssertFPCRState(Register fpcr) {
1358 if (emit_debug_code()) {
1359 Label unexpected_mode, done;
1360 UseScratchRegisterScope temps(this);
1361 if (fpcr.IsNone()) {
1362 fpcr = temps.AcquireX();
1366 // Settings overridden by ConfigureFPCR():
1367 // - Assert that default-NaN mode is set.
1368 Tbz(fpcr, DN_offset, &unexpected_mode);
1370 // Settings left to their default values:
1371 // - Assert that flush-to-zero is not set.
1372 Tbnz(fpcr, FZ_offset, &unexpected_mode);
1373 // - Assert that the rounding mode is nearest-with-ties-to-even.
1374 STATIC_ASSERT(FPTieEven == 0);
1375 Tst(fpcr, RMode_mask);
1378 Bind(&unexpected_mode);
1379 Abort(kUnexpectedFPCRMode);
1386 void MacroAssembler::ConfigureFPCR() {
1387 UseScratchRegisterScope temps(this);
1388 Register fpcr = temps.AcquireX();
1391 // If necessary, enable default-NaN mode. The default values of the other FPCR
1392 // options should be suitable, and AssertFPCRState will verify that.
1393 Label no_write_required;
1394 Tbnz(fpcr, DN_offset, &no_write_required);
1396 Orr(fpcr, fpcr, DN_mask);
1399 Bind(&no_write_required);
1400 AssertFPCRState(fpcr);
1404 void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
1405 const FPRegister& src) {
1408 // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
1409 // for NaNs, which become the default NaN. We use fsub rather than fadd
1410 // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
1411 Fsub(dst, src, fp_zero);
1415 void MacroAssembler::LoadRoot(CPURegister destination,
1416 Heap::RootListIndex index) {
1417 // TODO(jbramley): Most root values are constants, and can be synthesized
1418 // without a load. Refer to the ARM back end for details.
1419 Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
1423 void MacroAssembler::StoreRoot(Register source,
1424 Heap::RootListIndex index) {
1425 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
1426 Str(source, MemOperand(root, index << kPointerSizeLog2));
1430 void MacroAssembler::LoadTrueFalseRoots(Register true_root,
1431 Register false_root) {
1432 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
1433 Ldp(true_root, false_root,
1434 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
1438 void MacroAssembler::LoadHeapObject(Register result,
1439 Handle<HeapObject> object) {
1440 AllowDeferredHandleDereference using_raw_address;
1441 if (isolate()->heap()->InNewSpace(*object)) {
1442 Handle<Cell> cell = isolate()->factory()->NewCell(object);
1443 Mov(result, Operand(cell));
1444 Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
1446 Mov(result, Operand(object));
1451 void MacroAssembler::LoadInstanceDescriptors(Register map,
1452 Register descriptors) {
1453 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
1457 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
1458 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
1459 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
1463 void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
1464 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
1465 Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
1466 And(dst, dst, Map::EnumLengthBits::kMask);
1470 void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
1471 EnumLengthUntagged(dst, map);
1476 void MacroAssembler::LoadAccessor(Register dst, Register holder,
1478 AccessorComponent accessor) {
1479 Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
1480 LoadInstanceDescriptors(dst, dst);
1482 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
1483 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
1484 : AccessorPair::kSetterOffset;
1485 Ldr(dst, FieldMemOperand(dst, offset));
1489 void MacroAssembler::CheckEnumCache(Register object,
1490 Register null_value,
1495 Label* call_runtime) {
1496 DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
1499 Register empty_fixed_array_value = scratch0;
1500 Register current_object = scratch1;
1502 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
1505 Mov(current_object, object);
1507 // Check if the enum length field is properly initialized, indicating that
1508 // there is an enum cache.
1509 Register map = scratch2;
1510 Register enum_length = scratch3;
1511 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1513 EnumLengthUntagged(enum_length, map);
1514 Cmp(enum_length, kInvalidEnumCacheSentinel);
1515 B(eq, call_runtime);
1520 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1522 // For all objects but the receiver, check that the cache is empty.
1523 EnumLengthUntagged(enum_length, map);
1524 Cbnz(enum_length, call_runtime);
1528 // Check that there are no elements. Register current_object contains the
1529 // current JS object we've reached through the prototype chain.
1531 Ldr(current_object, FieldMemOperand(current_object,
1532 JSObject::kElementsOffset));
1533 Cmp(current_object, empty_fixed_array_value);
1534 B(eq, &no_elements);
1536 // Second chance, the object may be using the empty slow element dictionary.
1537 CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
1538 B(ne, call_runtime);
1541 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
1542 Cmp(current_object, null_value);
1547 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
1550 Label* no_memento_found) {
1551 ExternalReference new_space_start =
1552 ExternalReference::new_space_start(isolate());
1553 ExternalReference new_space_allocation_top =
1554 ExternalReference::new_space_allocation_top_address(isolate());
1556 Add(scratch1, receiver,
1557 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
1558 Cmp(scratch1, new_space_start);
1559 B(lt, no_memento_found);
1561 Mov(scratch2, new_space_allocation_top);
1562 Ldr(scratch2, MemOperand(scratch2));
1563 Cmp(scratch1, scratch2);
1564 B(gt, no_memento_found);
1566 Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
1568 Operand(isolate()->factory()->allocation_memento_map()));
1572 void MacroAssembler::InNewSpace(Register object,
1575 DCHECK(cond == eq || cond == ne);
1576 UseScratchRegisterScope temps(this);
1577 Register temp = temps.AcquireX();
1578 And(temp, object, ExternalReference::new_space_mask(isolate()));
1579 Cmp(temp, ExternalReference::new_space_start(isolate()));
1584 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1585 if (emit_debug_code()) {
1586 STATIC_ASSERT(kSmiTag == 0);
1587 Tst(object, kSmiTagMask);
1593 void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
1594 if (emit_debug_code()) {
1595 STATIC_ASSERT(kSmiTag == 0);
1596 Tst(object, kSmiTagMask);
1602 void MacroAssembler::AssertName(Register object) {
1603 if (emit_debug_code()) {
1604 AssertNotSmi(object, kOperandIsASmiAndNotAName);
1606 UseScratchRegisterScope temps(this);
1607 Register temp = temps.AcquireX();
1609 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1610 CompareInstanceType(temp, temp, LAST_NAME_TYPE);
1611 Check(ls, kOperandIsNotAName);
1616 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1618 if (emit_debug_code()) {
1619 Label done_checking;
1620 AssertNotSmi(object);
1621 JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
1622 Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1623 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
1624 Assert(eq, kExpectedUndefinedOrCell);
1625 Bind(&done_checking);
1630 void MacroAssembler::AssertString(Register object) {
1631 if (emit_debug_code()) {
1632 UseScratchRegisterScope temps(this);
1633 Register temp = temps.AcquireX();
1634 STATIC_ASSERT(kSmiTag == 0);
1635 Tst(object, kSmiTagMask);
1636 Check(ne, kOperandIsASmiAndNotAString);
1637 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1638 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1639 Check(lo, kOperandIsNotAString);
1644 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1645 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1646 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1650 void MacroAssembler::TailCallStub(CodeStub* stub) {
1651 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1655 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1657 SaveFPRegsMode save_doubles) {
1658 // All arguments must be on the stack before this function is called.
1659 // x0 holds the return value after the call.
1661 // Check that the number of arguments matches what the function expects.
1662 // If f->nargs is -1, the function can accept a variable number of arguments.
1663 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1665 // Place the necessary arguments.
1666 Mov(x0, num_arguments);
1667 Mov(x1, ExternalReference(f, isolate()));
1669 CEntryStub stub(isolate(), 1, save_doubles);
1674 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1675 int num_arguments) {
1676 Mov(x0, num_arguments);
1679 CEntryStub stub(isolate(), 1);
1684 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1686 CEntryStub stub(isolate(), 1);
1687 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1691 void MacroAssembler::GetBuiltinFunction(Register target,
1692 int native_context_index) {
1693 // Load the builtins object into target register.
1694 Ldr(target, GlobalObjectMemOperand());
1695 Ldr(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
1696 // Load the JavaScript builtin function from the builtins object.
1697 Ldr(target, ContextMemOperand(target, native_context_index));
1701 void MacroAssembler::GetBuiltinEntry(Register target, Register function,
1702 int native_context_index) {
1703 DCHECK(!AreAliased(target, function));
1704 GetBuiltinFunction(function, native_context_index);
1705 // Load the code entry point from the builtins object.
1706 Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1710 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
1711 const CallWrapper& call_wrapper) {
1712 ASM_LOCATION("MacroAssembler::InvokeBuiltin");
1713 // You can't call a builtin without a valid frame.
1714 DCHECK(flag == JUMP_FUNCTION || has_frame());
1716 // Get the builtin entry in x2 and setup the function object in x1.
1717 GetBuiltinEntry(x2, x1, native_context_index);
1718 if (flag == CALL_FUNCTION) {
1719 call_wrapper.BeforeCall(CallSize(x2));
1721 call_wrapper.AfterCall();
1723 DCHECK(flag == JUMP_FUNCTION);
1729 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1732 // TODO(1236192): Most runtime routines don't need the number of
1733 // arguments passed in because it is constant. At some point we
1734 // should remove this need and make the runtime routine entry code smarter.
1736 Mov(x0, num_arguments);
1737 JumpToExternalReference(ext);
1741 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1744 TailCallExternalReference(ExternalReference(fid, isolate()),
1750 void MacroAssembler::InitializeNewString(Register string,
1752 Heap::RootListIndex map_index,
1754 Register scratch2) {
1755 DCHECK(!AreAliased(string, length, scratch1, scratch2));
1756 LoadRoot(scratch2, map_index);
1757 SmiTag(scratch1, length);
1758 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1760 Mov(scratch2, String::kEmptyHashField);
1761 Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1762 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1766 int MacroAssembler::ActivationFrameAlignment() {
1767 #if V8_HOST_ARCH_ARM64
1768 // Running on the real platform. Use the alignment as mandated by the local environment.
1770 // Note: This will break if we ever start generating snapshots on one ARM
1771 // platform for another ARM platform with a different alignment.
1772 return base::OS::ActivationFrameAlignment();
1773 #else // V8_HOST_ARCH_ARM64
1774 // If we are using the simulator then we should always align to the expected
1775 // alignment. As the simulator is used to generate snapshots we do not know
1776 // if the target platform will need alignment, so this is controlled from a flag.
1778 return FLAG_sim_stack_alignment;
1779 #endif // V8_HOST_ARCH_ARM64
1783 void MacroAssembler::CallCFunction(ExternalReference function,
1784 int num_of_reg_args) {
1785 CallCFunction(function, num_of_reg_args, 0);
1789 void MacroAssembler::CallCFunction(ExternalReference function,
1790 int num_of_reg_args,
1791 int num_of_double_args) {
1792 UseScratchRegisterScope temps(this);
1793 Register temp = temps.AcquireX();
1794 Mov(temp, function);
1795 CallCFunction(temp, num_of_reg_args, num_of_double_args);
1799 void MacroAssembler::CallCFunction(Register function,
1800 int num_of_reg_args,
1801 int num_of_double_args) {
1802 DCHECK(has_frame());
1803 // We can pass 8 integer arguments in registers. If we need to pass more than
1804 // that, we'll need to implement support for passing them on the stack.
1805 DCHECK(num_of_reg_args <= 8);
1807 // If we're passing doubles, we're limited to the following prototypes
1808 // (defined by ExternalReference::Type):
1809 // BUILTIN_COMPARE_CALL: int f(double, double)
1810 // BUILTIN_FP_FP_CALL: double f(double, double)
1811 // BUILTIN_FP_CALL: double f(double)
1812 // BUILTIN_FP_INT_CALL: double f(double, int)
1813 if (num_of_double_args > 0) {
1814 DCHECK(num_of_reg_args <= 1);
1815 DCHECK((num_of_double_args + num_of_reg_args) <= 2);
1819 // If the stack pointer is not csp, we need to derive an aligned csp from the
1820 // current stack pointer.
1821 const Register old_stack_pointer = StackPointer();
1822 if (!csp.Is(old_stack_pointer)) {
1823 AssertStackConsistency();
1825 int sp_alignment = ActivationFrameAlignment();
1826 // The ABI mandates at least 16-byte alignment.
1827 DCHECK(sp_alignment >= 16);
1828 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
1830 // The current stack pointer is a callee saved register, and is preserved across the call.
1832 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1834 // Align and synchronize the system stack pointer with jssp.
1835 Bic(csp, old_stack_pointer, sp_alignment - 1);
1836 SetStackPointer(csp);
1839 // Call directly. The function called cannot cause a GC, or allow preemption,
1840 // so the return address in the link register stays correct.
1843 if (!csp.Is(old_stack_pointer)) {
1844 if (emit_debug_code()) {
1845 // Because the stack pointer must be aligned on a 16-byte boundary, the
1846 // aligned csp can be up to 12 bytes below the jssp. This is the case
1847 // where we only pushed one W register on top of an aligned jssp.
1848 UseScratchRegisterScope temps(this);
1849 Register temp = temps.AcquireX();
1850 DCHECK(ActivationFrameAlignment() == 16);
1851 Sub(temp, csp, old_stack_pointer);
1852 // We want temp <= 0 && temp >= -12.
1854 Ccmp(temp, -12, NFlag, le);
1855 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1857 SetStackPointer(old_stack_pointer);
1862 void MacroAssembler::Jump(Register target) {
1867 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
1868 UseScratchRegisterScope temps(this);
1869 Register temp = temps.AcquireX();
1870 Mov(temp, Operand(target, rmode));
1875 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
1876 DCHECK(!RelocInfo::IsCodeTarget(rmode));
1877 Jump(reinterpret_cast<intptr_t>(target), rmode);
1881 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
1882 DCHECK(RelocInfo::IsCodeTarget(rmode));
1883 AllowDeferredHandleDereference embedding_raw_address;
1884 Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
1888 void MacroAssembler::Call(Register target) {
1889 BlockPoolsScope scope(this);
1898 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1903 void MacroAssembler::Call(Label* target) {
1904 BlockPoolsScope scope(this);
1913 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1918 // MacroAssembler::CallSize is sensitive to changes in this function, as it
1919 // needs to know how many instructions are used to branch to the target.
1920 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
1921 BlockPoolsScope scope(this);
1926 // Statement positions are expected to be recorded when the target
1927 // address is loaded.
1928 positions_recorder()->WriteRecordedPositions();
1930 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1931 DCHECK(rmode != RelocInfo::NONE32);
1933 UseScratchRegisterScope temps(this);
1934 Register temp = temps.AcquireX();
1936 if (rmode == RelocInfo::NONE64) {
1937 // Addresses are 48 bits so we never need to load the upper 16 bits.
1938 uint64_t imm = reinterpret_cast<uint64_t>(target);
1939 // If we don't use ARM tagged addresses, the upper 16 bits must be 0.
1940 DCHECK(((imm >> 48) & 0xffff) == 0);
1941 movz(temp, (imm >> 0) & 0xffff, 0);
1942 movk(temp, (imm >> 16) & 0xffff, 16);
1943 movk(temp, (imm >> 32) & 0xffff, 32);
1945 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
1949 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
1954 void MacroAssembler::Call(Handle<Code> code,
1955 RelocInfo::Mode rmode,
1956 TypeFeedbackId ast_id) {
1962 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
1963 SetRecordedAstId(ast_id);
1964 rmode = RelocInfo::CODE_TARGET_WITH_ID;
1967 AllowDeferredHandleDereference embedding_raw_address;
1968 Call(reinterpret_cast<Address>(code.location()), rmode);
1971 // Check the size of the code generated.
1972 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
1977 int MacroAssembler::CallSize(Register target) {
1979 return kInstructionSize;
1983 int MacroAssembler::CallSize(Label* target) {
1985 return kInstructionSize;
1989 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
1992 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1993 DCHECK(rmode != RelocInfo::NONE32);
1995 if (rmode == RelocInfo::NONE64) {
1996 return kCallSizeWithoutRelocation;
1998 return kCallSizeWithRelocation;
2003 int MacroAssembler::CallSize(Handle<Code> code,
2004 RelocInfo::Mode rmode,
2005 TypeFeedbackId ast_id) {
2009 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2010 DCHECK(rmode != RelocInfo::NONE32);
2012 if (rmode == RelocInfo::NONE64) {
2013 return kCallSizeWithoutRelocation;
2015 return kCallSizeWithRelocation;
2020 void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
2021 SmiCheckType smi_check_type) {
2022 Label on_not_heap_number;
2024 if (smi_check_type == DO_SMI_CHECK) {
2025 JumpIfSmi(object, &on_not_heap_number);
2028 AssertNotSmi(object);
2030 UseScratchRegisterScope temps(this);
2031 Register temp = temps.AcquireX();
2032 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2033 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
2035 Bind(&on_not_heap_number);
2039 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2040 Label* on_not_heap_number,
2041 SmiCheckType smi_check_type) {
2042 if (smi_check_type == DO_SMI_CHECK) {
2043 JumpIfSmi(object, on_not_heap_number);
2046 AssertNotSmi(object);
2048 UseScratchRegisterScope temps(this);
2049 Register temp = temps.AcquireX();
2050 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2051 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
2055 void MacroAssembler::LookupNumberStringCache(Register object,
2061 DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
2063 // Register usage: 'result' is used as a temporary.
2064 Register number_string_cache = result;
2065 Register mask = scratch3;
2067 // Load the number string cache.
2068 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2070 // Make the hash mask from the length of the number string cache. It
2071 // contains two elements (number and string) for each cache entry.
2072 Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
2073 FixedArray::kLengthOffset));
2074 Asr(mask, mask, 1); // Divide length by two.
2075 Sub(mask, mask, 1); // Make mask.
2077 // Calculate the entry in the number string cache. The hash value in the
2078 // number string cache for smis is just the smi value, and the hash for
2079 // doubles is the xor of the upper and lower words. See
2080 // Heap::GetNumberStringCache.
2082 Label load_result_from_cache;
2084 JumpIfSmi(object, &is_smi);
2085 JumpIfNotHeapNumber(object, not_found);
2087 STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
2088 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
2089 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
2090 Eor(scratch1, scratch1, scratch2);
2091 And(scratch1, scratch1, mask);
2093 // Calculate address of entry in string cache: each entry consists of two
2094 // pointer sized fields.
2095 Add(scratch1, number_string_cache,
2096 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
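// Addressing sketch (assuming 64-bit pointers, so kPointerSizeLog2 == 3): each
// cache entry occupies 2 * kPointerSize == 16 bytes, so entry i starts at
// number_string_cache + (i << 4); the key is read at FixedArray::kHeaderSize
// and the cached string at FixedArray::kHeaderSize + kPointerSize below.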
2098 Register probe = mask;
2099 Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2100 JumpIfSmi(probe, not_found);
2101 Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
2102 Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
2105 B(&load_result_from_cache);
2108 Register scratch = scratch1;
2109 And(scratch, mask, Operand::UntagSmi(object));
2110 // Calculate address of entry in string cache: each entry consists
2111 // of two pointer sized fields.
2112 Add(scratch, number_string_cache,
2113 Operand(scratch, LSL, kPointerSizeLog2 + 1));
2115 // Check if the entry is the smi we are looking for.
2116 Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2120 // Get the result from the cache.
2121 Bind(&load_result_from_cache);
2122 Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
2123 IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
2124 scratch1, scratch2);
2128 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2130 FPRegister scratch_d,
2131 Label* on_successful_conversion,
2132 Label* on_failed_conversion) {
2133 // Convert to an int and back again, then compare with the original value.
2134 Fcvtzs(as_int, value);
2135 Scvtf(scratch_d, as_int);
2136 Fcmp(value, scratch_d);
2138 if (on_successful_conversion) {
2139 B(on_successful_conversion, eq);
2141 if (on_failed_conversion) {
2142 B(on_failed_conversion, ne);
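// Worked example (illustrative): 3.0 -> Fcvtzs gives 3 -> Scvtf gives 3.0 and
// Fcmp sets eq, so the conversion succeeded; 3.5 -> 3 -> 3.0 != 3.5 and Fcmp
// sets ne, so it failed. NaN inputs also take the 'ne' path because Fcmp on a
// NaN never sets eq.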
2147 void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2148 UseScratchRegisterScope temps(this);
2149 Register temp = temps.AcquireX();
2150 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will cause overflow.
2157 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2158 Label* on_negative_zero) {
2159 TestForMinusZero(input);
2160 B(vs, on_negative_zero);
2164 void MacroAssembler::JumpIfMinusZero(Register input,
2165 Label* on_negative_zero) {
2166 DCHECK(input.Is64Bits());
2167 // Floating point value is in an integer register. Detect -0.0 by subtracting
2168 // 1 (cmp), which will cause overflow.
2170 B(vs, on_negative_zero);
2174 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2175 // Clamp the value to [0..255].
2176 Cmp(input.W(), Operand(input.W(), UXTB));
2177 // If input < input & 0xff, it must be < 0, so saturate to 0.
2178 Csel(output.W(), wzr, input.W(), lt);
2179 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2180 Csel(output.W(), output.W(), 255, le);
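// Worked examples (illustrative): input -5 compares lt against (-5 & 0xff), so
// output becomes 0; input 300 compares higher than (300 & 0xff) == 44, so
// output saturates to 255; input 128 compares eq, so output stays 128.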
2184 void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2185 ClampInt32ToUint8(in_out, in_out);
2189 void MacroAssembler::ClampDoubleToUint8(Register output,
2190 DoubleRegister input,
2191 DoubleRegister dbl_scratch) {
2192 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2193 // - Inputs lower than 0 (including -infinity) produce 0.
2194 // - Inputs higher than 255 (including +infinity) produce 255.
2195 // Also, it seems that PIXEL types use round-to-nearest rather than
2196 // round-towards-zero.
2198 // Squash +infinity down to 255 before the conversion, since Fcvtnu on its own would not produce the clamped value.
2200 Fmov(dbl_scratch, 255);
2201 Fmin(dbl_scratch, dbl_scratch, input);
2203 // Convert double to unsigned integer. Values less than zero become zero.
2204 // Values greater than 255 have already been clamped to 255.
2205 Fcvtnu(output, dbl_scratch);
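// Worked examples (illustrative): -3.5 converts to 0 (negative inputs clamp to
// zero), 1e9 is squashed to 255.0 by the Fmin above, and 254.5 rounds to 254
// because Fcvtnu rounds to nearest with ties going to even.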
2209 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
2216 Register scratch5) {
2217 // Untag src and dst into scratch registers.
2218 // Copy src->dst in a tight loop.
2219 DCHECK(!AreAliased(dst, src,
2220 scratch1, scratch2, scratch3, scratch4, scratch5));
2223 const Register& remaining = scratch3;
2224 Mov(remaining, count / 2);
2226 const Register& dst_untagged = scratch1;
2227 const Register& src_untagged = scratch2;
2228 Sub(dst_untagged, dst, kHeapObjectTag);
2229 Sub(src_untagged, src, kHeapObjectTag);
2231 // Copy fields in pairs.
2234 Ldp(scratch4, scratch5,
2235 MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2236 Stp(scratch4, scratch5,
2237 MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2238 Sub(remaining, remaining, 1);
2239 Cbnz(remaining, &loop);
2241 // Handle the leftovers.
2243 Ldr(scratch4, MemOperand(src_untagged));
2244 Str(scratch4, MemOperand(dst_untagged));
2249 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
2255 Register scratch4) {
2256 // Untag src and dst into scratch registers.
2257 // Copy src->dst in an unrolled loop.
2258 DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
2260 const Register& dst_untagged = scratch1;
2261 const Register& src_untagged = scratch2;
2262 sub(dst_untagged, dst, kHeapObjectTag);
2263 sub(src_untagged, src, kHeapObjectTag);
2265 // Copy fields in pairs.
2266 for (unsigned i = 0; i < count / 2; i++) {
2267 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2268 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2271 // Handle the leftovers.
2273 Ldr(scratch3, MemOperand(src_untagged));
2274 Str(scratch3, MemOperand(dst_untagged));
2279 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
2284 Register scratch3) {
2285 // Untag src and dst into scratch registers.
2286 // Copy src->dst in an unrolled loop.
2287 DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
2289 const Register& dst_untagged = scratch1;
2290 const Register& src_untagged = scratch2;
2291 Sub(dst_untagged, dst, kHeapObjectTag);
2292 Sub(src_untagged, src, kHeapObjectTag);
2294 // Copy fields one by one.
2295 for (unsigned i = 0; i < count; i++) {
2296 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
2297 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
2302 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
2304 // One of two methods is used:
2306 // For high 'count' values where many scratch registers are available:
2307 // Untag src and dst into scratch registers.
2308 // Copy src->dst in a tight loop.
2310 // For low 'count' values or where few scratch registers are available:
2311 // Untag src and dst into scratch registers.
2312 // Copy src->dst in an unrolled loop.
2314 // In both cases, fields are copied in pairs if possible, and leftovers are
2315 // handled separately.
2316 DCHECK(!AreAliased(dst, src));
2317 DCHECK(!temps.IncludesAliasOf(dst));
2318 DCHECK(!temps.IncludesAliasOf(src));
2319 DCHECK(!temps.IncludesAliasOf(xzr));
2321 if (emit_debug_code()) {
2323 Check(ne, kTheSourceAndDestinationAreTheSame);
2326 // The value of 'count' at which a loop will be generated (if there are
2327 // enough scratch registers).
2328 static const unsigned kLoopThreshold = 8;
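// Dispatch sketch (illustrative): with count == 10 and three or more caller
// supplied temps, the loop-based pair copy below is used; with count == 10 but
// only two temps, or with count == 4, the unrolled pair copy is used; with a
// single temp the fields are copied one at a time.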
2330 UseScratchRegisterScope masm_temps(this);
2331 if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
2332 CopyFieldsLoopPairsHelper(dst, src, count,
2333 Register(temps.PopLowestIndex()),
2334 Register(temps.PopLowestIndex()),
2335 Register(temps.PopLowestIndex()),
2336 masm_temps.AcquireX(),
2337 masm_temps.AcquireX());
2338 } else if (temps.Count() >= 2) {
2339 CopyFieldsUnrolledPairsHelper(dst, src, count,
2340 Register(temps.PopLowestIndex()),
2341 Register(temps.PopLowestIndex()),
2342 masm_temps.AcquireX(),
2343 masm_temps.AcquireX());
2344 } else if (temps.Count() == 1) {
2345 CopyFieldsUnrolledHelper(dst, src, count,
2346 Register(temps.PopLowestIndex()),
2347 masm_temps.AcquireX(),
2348 masm_temps.AcquireX());
2355 void MacroAssembler::CopyBytes(Register dst,
2360 UseScratchRegisterScope temps(this);
2361 Register tmp1 = temps.AcquireX();
2362 Register tmp2 = temps.AcquireX();
2363 DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2364 DCHECK(!AreAliased(src, dst, csp));
2366 if (emit_debug_code()) {
2367 // Check copy length.
2369 Assert(ge, kUnexpectedNegativeValue);
2371 // Check src and dst buffers don't overlap.
2372 Add(scratch, src, length); // Calculate end of src buffer.
2374 Add(scratch, dst, length); // Calculate end of dst buffer.
2375 Ccmp(scratch, src, ZFlag, gt);
2376 Assert(le, kCopyBuffersOverlap);
2379 Label short_copy, short_loop, bulk_loop, done;
2381 if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
2382 Register bulk_length = scratch;
2383 int pair_size = 2 * kXRegSize;
2384 int pair_mask = pair_size - 1;
2386 Bic(bulk_length, length, pair_mask);
2387 Cbz(bulk_length, &short_copy);
2389 Sub(bulk_length, bulk_length, pair_size);
2390 Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
2391 Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
2392 Cbnz(bulk_length, &bulk_loop);
2394 And(length, length, pair_mask);
2400 Sub(length, length, 1);
2401 Ldrb(tmp1, MemOperand(src, 1, PostIndex));
2402 Strb(tmp1, MemOperand(dst, 1, PostIndex));
2403 Cbnz(length, &short_loop);
2410 void MacroAssembler::FillFields(Register dst,
2411 Register field_count,
2413 DCHECK(!dst.Is(csp));
2414 UseScratchRegisterScope temps(this);
2415 Register field_ptr = temps.AcquireX();
2416 Register counter = temps.AcquireX();
2419 // Decrement count. If the result < zero, count was zero, and there's nothing
2420 // to do. If count was one, flags are set to fail the gt condition at the end
2421 // of the pairs loop.
2422 Subs(counter, field_count, 1);
2425 // There's at least one field to fill, so do this unconditionally.
2426 Str(filler, MemOperand(dst, kPointerSize, PostIndex));
2428 // If the bottom bit of counter is set, there are an even number of fields to
2429 // fill, so pull the start pointer back by one field, allowing the pairs loop
2430 // to overwrite the field that was stored above.
2431 And(field_ptr, counter, 1);
2432 Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
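// Worked example (illustrative): for field_count == 4, counter is 3 after the
// Subs, one field is stored unconditionally, and the odd counter pulls
// field_ptr back so the pair loop overwrites field 0 and then fills fields
// 1-3; for field_count == 5, counter is 4, field_ptr stays at the second
// field, and the pair loop fills the remaining four fields.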
2434 // Store filler to memory in pairs.
2438 Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
2439 Subs(counter, counter, 2);
2447 void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
2448 Register first, Register second, Register scratch1, Register scratch2,
2449 Label* failure, SmiCheckType smi_check) {
2450 if (smi_check == DO_SMI_CHECK) {
2451 JumpIfEitherSmi(first, second, failure);
2452 } else if (emit_debug_code()) {
2453 DCHECK(smi_check == DONT_DO_SMI_CHECK);
2455 JumpIfEitherSmi(first, second, NULL, &not_smi);
2457 // At least one input is a smi, but the flags indicated a smi check wasn't needed.
2459 Abort(kUnexpectedSmi);
2464 // Test that both first and second are sequential one-byte strings.
2465 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2466 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2467 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2468 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2470 JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
2475 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
2476 Register first, Register second, Register scratch1, Register scratch2,
2478 DCHECK(!AreAliased(scratch1, second));
2479 DCHECK(!AreAliased(scratch1, scratch2));
2480 static const int kFlatOneByteStringMask =
2481 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2482 static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
2483 And(scratch1, first, kFlatOneByteStringMask);
2484 And(scratch2, second, kFlatOneByteStringMask);
2485 Cmp(scratch1, kFlatOneByteStringTag);
2486 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2491 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2494 const int kFlatOneByteStringMask =
2495 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2496 const int kFlatOneByteStringTag =
2497 kStringTag | kOneByteStringTag | kSeqStringTag;
2498 And(scratch, type, kFlatOneByteStringMask);
2499 Cmp(scratch, kFlatOneByteStringTag);
2504 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2505 Register first, Register second, Register scratch1, Register scratch2,
2507 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2508 const int kFlatOneByteStringMask =
2509 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2510 const int kFlatOneByteStringTag =
2511 kStringTag | kOneByteStringTag | kSeqStringTag;
2512 And(scratch1, first, kFlatOneByteStringMask);
2513 And(scratch2, second, kFlatOneByteStringMask);
2514 Cmp(scratch1, kFlatOneByteStringTag);
2515 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2520 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
2521 Label* not_unique_name) {
2522 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2523 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2524 //   continue
2525 // } else {
2526 //   goto not_unique_name
2527 // }
2528 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2529 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2530 B(ne, not_unique_name);
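// Informal reading of the flag logic above: the Tst sets Z when the type is an
// internalized string; in that case the Ccmp is skipped and its flags are
// forced to ZFlag, so the branch is not taken. Otherwise the Ccmp compares the
// type against SYMBOL_TYPE, and anything that is not a symbol branches to
// not_unique_name.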
2534 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2535 const ParameterCount& actual,
2536 Handle<Code> code_constant,
2540 bool* definitely_mismatches,
2541 const CallWrapper& call_wrapper) {
2542 bool definitely_matches = false;
2543 *definitely_mismatches = false;
2544 Label regular_invoke;
2546 // Check whether the expected and actual arguments count match. If not,
2547 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
2548 // x0: actual arguments count.
2549 // x1: function (passed through to callee).
2550 // x2: expected arguments count.
2552 // The code below is made a lot easier because the calling code already sets
2553 // up actual and expected registers according to the contract if values are
2554 // passed in registers.
2555 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2556 DCHECK(expected.is_immediate() || expected.reg().is(x2));
2557 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
2559 if (expected.is_immediate()) {
2560 DCHECK(actual.is_immediate());
2561 if (expected.immediate() == actual.immediate()) {
2562 definitely_matches = true;
2565 Mov(x0, actual.immediate());
2566 if (expected.immediate() ==
2567 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2568 // Don't worry about adapting arguments for builtins that don't want that
2569 // done. Skip the adaptation code by making it look like we have a match
2570 // between the expected and actual number of arguments.
2572 definitely_matches = true;
2574 *definitely_mismatches = true;
2575 // Set up x2 for the argument adaptor.
2576 Mov(x2, expected.immediate());
2580 } else { // expected is a register.
2581 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2582 : Operand(actual.reg());
2583 // If actual == expected perform a regular invocation.
2584 Cmp(expected.reg(), actual_op);
2585 B(eq, &regular_invoke);
2586 // Otherwise set up x0 for the argument adaptor.
2590 // If the argument counts may mismatch, generate a call to the argument adaptor.
2592 if (!definitely_matches) {
2593 if (!code_constant.is_null()) {
2594 Mov(x3, Operand(code_constant));
2595 Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
2598 Handle<Code> adaptor =
2599 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2600 if (flag == CALL_FUNCTION) {
2601 call_wrapper.BeforeCall(CallSize(adaptor));
2603 call_wrapper.AfterCall();
2604 if (!*definitely_mismatches) {
2605 // If the arg counts don't match, no extra code is emitted by
2606 // MAsm::InvokeCode and we can just fall through.
2610 Jump(adaptor, RelocInfo::CODE_TARGET);
2613 Bind(&regular_invoke);
2617 void MacroAssembler::InvokeCode(Register code,
2618 const ParameterCount& expected,
2619 const ParameterCount& actual,
2621 const CallWrapper& call_wrapper) {
2622 // You can't call a function without a valid frame.
2623 DCHECK(flag == JUMP_FUNCTION || has_frame());
2627 bool definitely_mismatches = false;
2628 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2629 &definitely_mismatches, call_wrapper);
2631 // If we are certain that actual != expected, then we know InvokePrologue will
2632 // have handled the call through the argument adaptor mechanism.
2633 // The called function expects the call kind in x5.
2634 if (!definitely_mismatches) {
2635 if (flag == CALL_FUNCTION) {
2636 call_wrapper.BeforeCall(CallSize(code));
2638 call_wrapper.AfterCall();
2640 DCHECK(flag == JUMP_FUNCTION);
2645 // Continue here if InvokePrologue handled the invocation due to
2646 // mismatched parameter counts.
2651 void MacroAssembler::InvokeFunction(Register function,
2652 const ParameterCount& actual,
2654 const CallWrapper& call_wrapper) {
2655 // You can't call a function without a valid frame.
2656 DCHECK(flag == JUMP_FUNCTION || has_frame());
2658 // Contract with called JS functions requires that function is passed in x1.
2659 // (See FullCodeGenerator::Generate().)
2660 DCHECK(function.is(x1));
2662 Register expected_reg = x2;
2663 Register code_reg = x3;
2665 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2666 // The number of arguments is stored as an int32_t, and -1 is a marker
2667 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2668 // extension to correctly handle it.
2669 Ldr(expected_reg, FieldMemOperand(function,
2670 JSFunction::kSharedFunctionInfoOffset));
2672 FieldMemOperand(expected_reg,
2673 SharedFunctionInfo::kFormalParameterCountOffset));
2675 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2677 ParameterCount expected(expected_reg);
2678 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2682 void MacroAssembler::InvokeFunction(Register function,
2683 const ParameterCount& expected,
2684 const ParameterCount& actual,
2686 const CallWrapper& call_wrapper) {
2687 // You can't call a function without a valid frame.
2688 DCHECK(flag == JUMP_FUNCTION || has_frame());
2690 // Contract with called JS functions requires that function is passed in x1.
2691 // (See FullCodeGenerator::Generate().)
2692 DCHECK(function.Is(x1));
2694 Register code_reg = x3;
2696 // Set up the context.
2697 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2699 // We call indirectly through the code field in the function to
2700 // allow recompilation to take effect without changing any of the call sites.
2702 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2703 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2707 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2708 const ParameterCount& expected,
2709 const ParameterCount& actual,
2711 const CallWrapper& call_wrapper) {
2712 // Contract with called JS functions requires that function is passed in x1.
2713 // (See FullCodeGenerator::Generate().)
2714 __ LoadObject(x1, function);
2715 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2719 void MacroAssembler::TryConvertDoubleToInt64(Register result,
2720 DoubleRegister double_input,
2722 // Try to convert with an FPU convert instruction. It's trivial to compute
2723 // the modulo operation on an integer register, so we convert to a 64-bit integer.
2726 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2727 // when the double is out of range. NaNs and infinities will be converted to 0
2728 // (as ECMA-262 requires).
2729 Fcvtzs(result.X(), double_input);
2731 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2732 // representable using a double, so if the result is one of those then we know
2733 // that saturation occurred, and we need to manually handle the conversion.
2735 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2736 // 1 will cause signed overflow.
2738 Ccmp(result.X(), -1, VFlag, vc);
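// Informal reading of the overflow check: the preceding compare subtracts 1,
// which only sets the overflow flag when result is INT64_MIN; the conditional
// compare against -1 then effectively adds 1, which only overflows when result
// is INT64_MAX. If neither operation overflows (vc), the Fcvtzs above did not
// saturate and the conversion result is usable.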
2744 void MacroAssembler::TruncateDoubleToI(Register result,
2745 DoubleRegister double_input) {
2748 // Try to convert the double to an int64. If successful, the bottom 32 bits
2749 // contain our truncated int32 result.
2750 TryConvertDoubleToInt64(result, double_input, &done);
2752 const Register old_stack_pointer = StackPointer();
2753 if (csp.Is(old_stack_pointer)) {
2754 // This currently only happens during compiler-unittest. If it arises
2755 // during regular code generation the DoubleToI stub should be updated to
2756 // cope with csp and have an extra parameter indicating which stack pointer it should use.
2758 Push(jssp, xzr); // Push xzr to maintain the 16-byte alignment that csp requires.
2760 SetStackPointer(jssp);
2763 // If we fell through then inline version didn't succeed - call stub instead.
2764 Push(lr, double_input);
2766 DoubleToIStub stub(isolate(),
2770 true, // is_truncating
2771 true); // skip_fastpath
2772 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2774 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2775 Pop(xzr, lr); // xzr to drop the double input on the stack.
2777 if (csp.Is(old_stack_pointer)) {
2779 SetStackPointer(csp);
2780 AssertStackConsistency();
2788 void MacroAssembler::TruncateHeapNumberToI(Register result,
2791 DCHECK(!result.is(object));
2792 DCHECK(jssp.Is(StackPointer()));
2794 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2796 // Try to convert the double to an int64. If successful, the bottom 32 bits
2797 // contain our truncated int32 result.
2798 TryConvertDoubleToInt64(result, fp_scratch, &done);
2800 // If we fell through then inline version didn't succeed - call stub instead.
2802 DoubleToIStub stub(isolate(),
2805 HeapNumber::kValueOffset - kHeapObjectTag,
2806 true, // is_truncating
2807 true); // skip_fastpath
2808 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2815 void MacroAssembler::StubPrologue() {
2816 DCHECK(StackPointer().Is(jssp));
2817 UseScratchRegisterScope temps(this);
2818 Register temp = temps.AcquireX();
2819 __ Mov(temp, Smi::FromInt(StackFrame::STUB));
2820 // Compiled stubs don't age, and so they don't need the predictable code ageing sequence.
2822 __ Push(lr, fp, cp, temp);
2823 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
2827 void MacroAssembler::Prologue(bool code_pre_aging) {
2828 if (code_pre_aging) {
2829 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2830 __ EmitCodeAgeSequence(stub);
2832 __ EmitFrameSetupForCodeAgePatching();
2837 void MacroAssembler::EnterFrame(StackFrame::Type type,
2838 bool load_constant_pool_pointer_reg) {
2839 // Out-of-line constant pool not implemented on arm64.
2844 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2845 DCHECK(jssp.Is(StackPointer()));
2846 UseScratchRegisterScope temps(this);
2847 Register type_reg = temps.AcquireX();
2848 Register code_reg = temps.AcquireX();
2851 Mov(type_reg, Smi::FromInt(type));
2852 Mov(code_reg, Operand(CodeObject()));
2853 Push(type_reg, code_reg);
2858 // jssp[0] : code object
2860 // Adjust FP to point to saved FP.
2861 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
2865 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2866 DCHECK(jssp.Is(StackPointer()));
2867 // Drop the execution stack down to the frame pointer and restore
2868 // the caller frame pointer and return address.
2870 AssertStackConsistency();
2875 void MacroAssembler::ExitFramePreserveFPRegs() {
2876 PushCPURegList(kCallerSavedFP);
2880 void MacroAssembler::ExitFrameRestoreFPRegs() {
2881 // Read the registers from the stack without popping them. The stack pointer
2882 // will be reset as part of the unwinding process.
2883 CPURegList saved_fp_regs = kCallerSavedFP;
2884 DCHECK(saved_fp_regs.Count() % 2 == 0);
2886 int offset = ExitFrameConstants::kLastExitFrameField;
2887 while (!saved_fp_regs.IsEmpty()) {
2888 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2889 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2890 offset -= 2 * kDRegSize;
2891 Ldp(dst1, dst0, MemOperand(fp, offset));
2896 void MacroAssembler::EnterExitFrame(bool save_doubles,
2897 const Register& scratch,
2899 DCHECK(jssp.Is(StackPointer()));
2901 // Set up the new stack frame.
2902 Mov(scratch, Operand(CodeObject()));
2904 Mov(fp, StackPointer());
2906 // fp[8]: CallerPC (lr)
2907 // fp -> fp[0]: CallerFP (old fp)
2908 // fp[-8]: Space reserved for SPOffset.
2909 // jssp -> fp[-16]: CodeObject()
2910 STATIC_ASSERT((2 * kPointerSize) ==
2911 ExitFrameConstants::kCallerSPDisplacement);
2912 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2913 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
2914 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
2915 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
2917 // Save the frame pointer and context pointer in the top frame.
2918 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2920 Str(fp, MemOperand(scratch));
2921 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2923 Str(cp, MemOperand(scratch));
2925 STATIC_ASSERT((-2 * kPointerSize) ==
2926 ExitFrameConstants::kLastExitFrameField);
2928 ExitFramePreserveFPRegs();
2931 // Reserve space for the return address and for user requested memory.
2932 // We do this before aligning to make sure that we end up correctly
2933 // aligned with the minimum of wasted space.
2934 Claim(extra_space + 1, kXRegSize);
2935 // fp[8]: CallerPC (lr)
2936 // fp -> fp[0]: CallerFP (old fp)
2937 // fp[-8]: Space reserved for SPOffset.
2938 // fp[-16]: CodeObject()
2939 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
2940 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
2941 // jssp -> jssp[0]: Space reserved for the return address.
2943 // Align and synchronize the system stack pointer with jssp.
2944 AlignAndSetCSPForFrame();
2945 DCHECK(csp.Is(StackPointer()));
2947 // fp[8]: CallerPC (lr)
2948 // fp -> fp[0]: CallerFP (old fp)
2949 // fp[-8]: Space reserved for SPOffset.
2950 // fp[-16]: CodeObject()
2951 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
2952 // csp[8]: Memory reserved for the caller if extra_space != 0.
2953 // Alignment padding, if necessary.
2954 // csp -> csp[0]: Space reserved for the return address.
2956 // ExitFrame::GetStateForFramePointer expects to find the return address at
2957 // the memory address immediately below the pointer stored in SPOffset.
2958 // It is not safe to derive much else from SPOffset, because the size of the
2959 // padding can vary.
2960 Add(scratch, csp, kXRegSize);
2961 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2965 // Leave the current exit frame.
2966 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2967 const Register& scratch,
2968 bool restore_context) {
2969 DCHECK(csp.Is(StackPointer()));
2971 if (restore_doubles) {
2972 ExitFrameRestoreFPRegs();
2975 // Restore the context pointer from the top frame.
2976 if (restore_context) {
2977 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2979 Ldr(cp, MemOperand(scratch));
2982 if (emit_debug_code()) {
2983 // Also emit debug code to clear the cp in the top frame.
2984 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2986 Str(xzr, MemOperand(scratch));
2988 // Clear the frame pointer from the top frame.
2989 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2991 Str(xzr, MemOperand(scratch));
2993 // Pop the exit frame.
2994 // fp[8]: CallerPC (lr)
2995 // fp -> fp[0]: CallerFP (old fp)
2996 // fp[...]: The rest of the frame.
2998 SetStackPointer(jssp);
2999 AssertStackConsistency();
3004 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
3005 Register scratch1, Register scratch2) {
3006 if (FLAG_native_code_counters && counter->Enabled()) {
3007 Mov(scratch1, value);
3008 Mov(scratch2, ExternalReference(counter));
3009 Str(scratch1, MemOperand(scratch2));
3014 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
3015 Register scratch1, Register scratch2) {
3017 if (FLAG_native_code_counters && counter->Enabled()) {
3018 Mov(scratch2, ExternalReference(counter));
3019 Ldr(scratch1, MemOperand(scratch2));
3020 Add(scratch1, scratch1, value);
3021 Str(scratch1, MemOperand(scratch2));
3026 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
3027 Register scratch1, Register scratch2) {
3028 IncrementCounter(counter, -value, scratch1, scratch2);
3032 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
3033 if (context_chain_length > 0) {
3034 // Move up the chain of contexts to the context containing the slot.
3035 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3036 for (int i = 1; i < context_chain_length; i++) {
3037 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3040 // Slot is in the current function context. Move it into the
3041 // destination register in case we store into it (the write barrier
3042 // cannot be allowed to destroy the context in cp).
3048 void MacroAssembler::DebugBreak() {
3050 Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3051 CEntryStub ces(isolate(), 1);
3052 DCHECK(AllowThisStubCall(&ces));
3053 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3057 void MacroAssembler::PushStackHandler() {
3058 DCHECK(jssp.Is(StackPointer()));
3059 // Adjust this code if the asserts don't hold.
3060 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3061 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3063 // For the JSEntry handler, we must preserve the live registers x0-x4.
3064 // (See JSEntryStub::GenerateBody().)
3066 // Link the current handler as the next handler.
3067 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3068 Ldr(x10, MemOperand(x11));
3071 // Set this new handler as the current one.
3072 Str(jssp, MemOperand(x11));
3076 void MacroAssembler::PopStackHandler() {
3077 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3079 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3080 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
3081 Str(x10, MemOperand(x11));
3085 void MacroAssembler::Allocate(int object_size,
3090 AllocationFlags flags) {
3091 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3092 if (!FLAG_inline_new) {
3093 if (emit_debug_code()) {
3094 // Trash the registers to simulate an allocation failure.
3095 // We apply salt to the original zap value to easily spot the values.
3096 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3097 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3098 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
3104 UseScratchRegisterScope temps(this);
3105 Register scratch3 = temps.AcquireX();
3107 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
3108 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3110 // Make object size into bytes.
3111 if ((flags & SIZE_IN_WORDS) != 0) {
3112 object_size *= kPointerSize;
3114 DCHECK(0 == (object_size & kObjectAlignmentMask));
3116 // Check relative positions of allocation top and limit addresses.
3117 // The values must be adjacent in memory to allow the use of LDP.
3118 ExternalReference heap_allocation_top =
3119 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3120 ExternalReference heap_allocation_limit =
3121 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3122 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3123 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3124 DCHECK((limit - top) == kPointerSize);
3126 // Set up allocation top address and object size registers.
3127 Register top_address = scratch1;
3128 Register allocation_limit = scratch2;
3129 Mov(top_address, Operand(heap_allocation_top));
3131 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3132 // Load allocation top into result and the allocation limit.
3133 Ldp(result, allocation_limit, MemOperand(top_address));
3135 if (emit_debug_code()) {
3136 // Assert that result actually contains top on entry.
3137 Ldr(scratch3, MemOperand(top_address));
3138 Cmp(result, scratch3);
3139 Check(eq, kUnexpectedAllocationTop);
3141 // Load the allocation limit. 'result' already contains the allocation top.
3142 Ldr(allocation_limit, MemOperand(top_address, limit - top));
3145 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3146 // the same alignment on ARM64.
3147 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3149 // Calculate new top and bail out if new space is exhausted.
3150 Adds(scratch3, result, object_size);
3151 Ccmp(scratch3, allocation_limit, CFlag, cc);
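// Informal reading: the Adds computes the candidate new top and sets the carry
// flag if the addition wraps; the Ccmp then compares it against the limit only
// when there was no carry (cc), and otherwise forces CFlag so that a wrapped
// top is treated exactly like an exhausted new space on the GC path.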
3153 Str(scratch3, MemOperand(top_address));
3155 // Tag the object if requested.
3156 if ((flags & TAG_OBJECT) != 0) {
3157 ObjectTag(result, result);
3162 void MacroAssembler::Allocate(Register object_size,
3167 AllocationFlags flags) {
3168 if (!FLAG_inline_new) {
3169 if (emit_debug_code()) {
3170 // Trash the registers to simulate an allocation failure.
3171 // We apply salt to the original zap value to easily spot the values.
3172 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3173 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3174 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
3180 UseScratchRegisterScope temps(this);
3181 Register scratch3 = temps.AcquireX();
3183 DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
3184 DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
3185 scratch1.Is64Bits() && scratch2.Is64Bits());
3187 // Check relative positions of allocation top and limit addresses.
3188 // The values must be adjacent in memory to allow the use of LDP.
3189 ExternalReference heap_allocation_top =
3190 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3191 ExternalReference heap_allocation_limit =
3192 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3193 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3194 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3195 DCHECK((limit - top) == kPointerSize);
3197 // Set up allocation top address and object size registers.
3198 Register top_address = scratch1;
3199 Register allocation_limit = scratch2;
3200 Mov(top_address, heap_allocation_top);
3202 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3203 // Load allocation top into result and the allocation limit.
3204 Ldp(result, allocation_limit, MemOperand(top_address));
3206 if (emit_debug_code()) {
3207 // Assert that result actually contains top on entry.
3208 Ldr(scratch3, MemOperand(top_address));
3209 Cmp(result, scratch3);
3210 Check(eq, kUnexpectedAllocationTop);
3212 // Load the allocation limit. 'result' already contains the allocation top.
3213 Ldr(allocation_limit, MemOperand(top_address, limit - top));
3216 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3217 // the same alignment on ARM64.
3218 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3220 // Calculate new top and bail out if new space is exhausted
3221 if ((flags & SIZE_IN_WORDS) != 0) {
3222 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
3224 Adds(scratch3, result, object_size);
3227 if (emit_debug_code()) {
3228 Tst(scratch3, kObjectAlignmentMask);
3229 Check(eq, kUnalignedAllocationInNewSpace);
3232 Ccmp(scratch3, allocation_limit, CFlag, cc);
3234 Str(scratch3, MemOperand(top_address));
3236 // Tag the object if requested.
3237 if ((flags & TAG_OBJECT) != 0) {
3238 ObjectTag(result, result);
3243 void MacroAssembler::AllocateTwoByteString(Register result,
3248 Label* gc_required) {
3249 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3250 // Calculate the number of bytes needed for the characters in the string while
3251 // observing object alignment.
3252 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3253 Add(scratch1, length, length); // Length in bytes, not chars.
3254 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3255 Bic(scratch1, scratch1, kObjectAlignmentMask);
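// Size computation sketch (illustrative): for a length of 3 characters the
// first Add produces 6 bytes of character data, the second adds the
// SeqTwoByteString header plus the alignment mask, and the Bic then yields the
// header-plus-data size rounded up to the object alignment.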
3257 // Allocate two-byte string in new space.
3265 // Set the map, length and hash field.
3266 InitializeNewString(result,
3268 Heap::kStringMapRootIndex,
3274 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3275 Register scratch1, Register scratch2,
3277 Label* gc_required) {
3278 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3279 // Calculate the number of bytes needed for the characters in the string while
3280 // observing object alignment.
3281 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3282 STATIC_ASSERT(kCharSize == 1);
3283 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3284 Bic(scratch1, scratch1, kObjectAlignmentMask);
3286 // Allocate one-byte string in new space.
3294 // Set the map, length and hash field.
3295 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3296 scratch1, scratch2);
3300 void MacroAssembler::AllocateTwoByteConsString(Register result,
3304 Label* gc_required) {
3305 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3308 InitializeNewString(result,
3310 Heap::kConsStringMapRootIndex,
3316 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3319 Label* gc_required) {
3320 Allocate(ConsString::kSize,
3327 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3328 scratch1, scratch2);
3332 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3336 Label* gc_required) {
3337 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3338 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3341 InitializeNewString(result,
3343 Heap::kSlicedStringMapRootIndex,
3349 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3353 Label* gc_required) {
3354 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3355 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3358 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3359 scratch1, scratch2);
3363 // Allocates a heap number or jumps to the need_gc label if the young space
3364 // is full and a scavenge is needed.
3365 void MacroAssembler::AllocateHeapNumber(Register result,
3370 CPURegister heap_number_map,
3372 DCHECK(!value.IsValid() || value.Is64Bits());
3373 UseScratchRegisterScope temps(this);
3375 // Allocate an object in the heap for the heap number and tag it as a heap
3377 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3378 NO_ALLOCATION_FLAGS);
3380 Heap::RootListIndex map_index = mode == MUTABLE
3381 ? Heap::kMutableHeapNumberMapRootIndex
3382 : Heap::kHeapNumberMapRootIndex;
3384 // Prepare the heap number map.
3385 if (!heap_number_map.IsValid()) {
3386 // If we have a valid value register, use the same type of register to store
3387 // the map so we can use STP to store both in one instruction.
3388 if (value.IsValid() && value.IsFPRegister()) {
3389 heap_number_map = temps.AcquireD();
3391 heap_number_map = scratch1;
3393 LoadRoot(heap_number_map, map_index);
3395 if (emit_debug_code()) {
3397 if (heap_number_map.IsFPRegister()) {
3399 Fmov(map, DoubleRegister(heap_number_map));
3401 map = Register(heap_number_map);
3403 AssertRegisterIsRoot(map, map_index);
3406 // Store the heap number map and the value in the allocated object.
3407 if (value.IsSameSizeAndType(heap_number_map)) {
3408 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3409 HeapNumber::kValueOffset);
3410 Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
3412 Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3413 if (value.IsValid()) {
3414 Str(value, MemOperand(result, HeapNumber::kValueOffset));
3417 ObjectTag(result, result);
3421 void MacroAssembler::JumpIfObjectType(Register object,
3425 Label* if_cond_pass,
3427 CompareObjectType(object, map, type_reg, type);
3428 B(cond, if_cond_pass);
3432 void MacroAssembler::JumpIfNotObjectType(Register object,
3436 Label* if_not_object) {
3437 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3441 // Sets condition flags based on comparison, and returns type in type_reg.
3442 void MacroAssembler::CompareObjectType(Register object,
3445 InstanceType type) {
3446 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3447 CompareInstanceType(map, type_reg, type);
3451 // Sets condition flags based on comparison, and returns type in type_reg.
3452 void MacroAssembler::CompareInstanceType(Register map,
3454 InstanceType type) {
3455 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3456 Cmp(type_reg, type);
3460 void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3461 UseScratchRegisterScope temps(this);
3462 Register obj_map = temps.AcquireX();
3463 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
3464 CompareRoot(obj_map, index);
3468 void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3470 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3471 CompareMap(scratch, map);
3475 void MacroAssembler::CompareMap(Register obj_map,
3477 Cmp(obj_map, Operand(map));
3481 void MacroAssembler::CheckMap(Register obj,
3485 SmiCheckType smi_check_type) {
3486 if (smi_check_type == DO_SMI_CHECK) {
3487 JumpIfSmi(obj, fail);
3490 CompareObjectMap(obj, scratch, map);
3495 void MacroAssembler::CheckMap(Register obj,
3497 Heap::RootListIndex index,
3499 SmiCheckType smi_check_type) {
3500 if (smi_check_type == DO_SMI_CHECK) {
3501 JumpIfSmi(obj, fail);
3503 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3504 JumpIfNotRoot(scratch, index, fail);
3508 void MacroAssembler::CheckMap(Register obj_map,
3511 SmiCheckType smi_check_type) {
3512 if (smi_check_type == DO_SMI_CHECK) {
3513 JumpIfSmi(obj_map, fail);
3516 CompareMap(obj_map, map);
3521 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3522 Register scratch2, Handle<WeakCell> cell,
3523 Handle<Code> success,
3524 SmiCheckType smi_check_type) {
3526 if (smi_check_type == DO_SMI_CHECK) {
3527 JumpIfSmi(obj, &fail);
3529 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3530 CmpWeakValue(scratch1, cell, scratch2);
3532 Jump(success, RelocInfo::CODE_TARGET);
3537 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3539 Mov(scratch, Operand(cell));
3540 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
3541 Cmp(value, scratch);
3545 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3546 Mov(value, Operand(cell));
3547 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
3551 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3553 GetWeakValue(value, cell);
3554 JumpIfSmi(value, miss);
3558 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3559 UseScratchRegisterScope temps(this);
3560 Register temp = temps.AcquireX();
3561 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3562 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3567 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3568 // Load the map's "bit field 2".
3569 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3570 // Retrieve elements_kind from bit field 2.
3571 DecodeField<Map::ElementsKindBits>(result);
3575 void MacroAssembler::GetMapConstructor(Register result, Register map,
3576 Register temp, Register temp2) {
3578 Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
3580 JumpIfSmi(result, &done);
3581 CompareObjectType(result, temp, temp2, MAP_TYPE);
3583 Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
3589 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
3590 Register scratch, Label* miss) {
3591 DCHECK(!AreAliased(function, result, scratch));
3593 // Get the prototype or initial map from the function.
3595 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3597 // If the prototype or initial map is the hole, don't return it and simply
3598 // miss the cache instead. This will allow us to allocate a prototype object
3599 // on-demand in the runtime system.
3600 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3602 // If the function does not have an initial map, we're done.
3604 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3606 // Get the prototype from the initial map.
3607 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3614 void MacroAssembler::CompareRoot(const Register& obj,
3615 Heap::RootListIndex index) {
3616 UseScratchRegisterScope temps(this);
3617 Register temp = temps.AcquireX();
3618 DCHECK(!AreAliased(obj, temp));
3619 LoadRoot(temp, index);
3624 void MacroAssembler::JumpIfRoot(const Register& obj,
3625 Heap::RootListIndex index,
3627 CompareRoot(obj, index);
3632 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3633 Heap::RootListIndex index,
3634 Label* if_not_equal) {
3635 CompareRoot(obj, index);
3636 B(ne, if_not_equal);
3640 void MacroAssembler::CompareAndSplit(const Register& lhs,
3645 Label* fall_through) {
3646 if ((if_true == if_false) && (if_false == fall_through)) {
3648 } else if (if_true == if_false) {
3650 } else if (if_false == fall_through) {
3651 CompareAndBranch(lhs, rhs, cond, if_true);
3652 } else if (if_true == fall_through) {
3653 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
3655 CompareAndBranch(lhs, rhs, cond, if_true);
3661 void MacroAssembler::TestAndSplit(const Register& reg,
3662 uint64_t bit_pattern,
3663 Label* if_all_clear,
3665 Label* fall_through) {
3666 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3668 } else if (if_all_clear == if_any_set) {
3670 } else if (if_all_clear == fall_through) {
3671 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3672 } else if (if_any_set == fall_through) {
3673 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3675 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3681 void MacroAssembler::CheckFastElements(Register map,
3684 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3685 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3686 STATIC_ASSERT(FAST_ELEMENTS == 2);
3687 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3688 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3689 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3694 void MacroAssembler::CheckFastObjectElements(Register map,
3697 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3698 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3699 STATIC_ASSERT(FAST_ELEMENTS == 2);
3700 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3701 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3702 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3703 // If cond==ls, set cond=hi, otherwise compare.
3705 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3710 // Note: The ARM version of this clobbers elements_reg, but this version does
3711 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3712 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3714 Register elements_reg,
3716 FPRegister fpscratch1,
3718 int elements_offset) {
3719 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3722 // Speculatively convert the smi to a double - all smis can be exactly
3723 // represented as a double.
3724 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3726 // If value_reg is a smi, we're done.
3727 JumpIfSmi(value_reg, &store_num);
3729 // Ensure that the object is a heap number.
3730 JumpIfNotHeapNumber(value_reg, fail);
3732 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3734 // Canonicalize NaNs.
3735 CanonicalizeNaN(fpscratch1);
3737 // Store the result.
3739 Add(scratch1, elements_reg,
3740 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3742 FieldMemOperand(scratch1,
3743 FixedDoubleArray::kHeaderSize - elements_offset));
3747 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3748 return has_frame_ || !stub->SometimesSetsUpAFrame();
3752 void MacroAssembler::IndexFromHash(Register hash, Register index) {
3753 // If the hash field contains an array index pick it out. The assert checks
3754 // that the constants for the maximum number of digits for an array index
3755 // cached in the hash field and the number of bits reserved for it do not conflict.
3757 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
3758 (1 << String::kArrayIndexValueBits));
3759 DecodeField<String::ArrayIndexValueBits>(index, hash);
3760 SmiTag(index, index);
3764 void MacroAssembler::EmitSeqStringSetCharCheck(
3767 SeqStringSetCharCheckIndexType index_type,
3769 uint32_t encoding_mask) {
3770 DCHECK(!AreAliased(string, index, scratch));
3772 if (index_type == kIndexIsSmi) {
3776 // Check that string is an object.
3777 AssertNotSmi(string, kNonObject);
3779 // Check that string has an appropriate map.
3780 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3781 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3783 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3784 Cmp(scratch, encoding_mask);
3785 Check(eq, kUnexpectedStringType);
3787 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3788 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3789 Check(lt, kIndexIsTooLarge);
3791 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
3793 Check(ge, kIndexIsNegative);
3797 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3801 DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
3802 Label same_contexts;
3804 // Load current lexical context from the stack frame.
3805 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
3806 // In debug mode, make sure the lexical context is set.
3809 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
3812 // Load the native context of the current context.
3814 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3815 Ldr(scratch1, FieldMemOperand(scratch1, offset));
3816 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
3818 // Check the context is a native context.
3819 if (emit_debug_code()) {
3820 // Read the first word and compare to the native_context_map.
3821 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
3822 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
3823 Check(eq, kExpectedNativeContext);
3826 // Check if both contexts are the same.
3827 Ldr(scratch2, FieldMemOperand(holder_reg,
3828 JSGlobalProxy::kNativeContextOffset));
3829 Cmp(scratch1, scratch2);
3830 B(&same_contexts, eq);
3832 // Check the context is a native context.
3833 if (emit_debug_code()) {
3834 // We're short on scratch registers here, so use holder_reg as a scratch.
3836 Register scratch3 = holder_reg;
3838 CompareRoot(scratch2, Heap::kNullValueRootIndex);
3839 Check(ne, kExpectedNonNullContext);
3841 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
3842 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
3843 Check(eq, kExpectedNativeContext);
3847 // Check that the security token in the calling global object is
3848 // compatible with the security token in the receiving global object.
3850 int token_offset = Context::kHeaderSize +
3851 Context::SECURITY_TOKEN_INDEX * kPointerSize;
3853 Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
3854 Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
3855 Cmp(scratch1, scratch2);
3858 Bind(&same_contexts);
3862 // Compute the hash code from the untagged key. This must be kept in sync with
3863 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3864 // code-stub-hydrogen.cc
3865 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3866 DCHECK(!AreAliased(key, scratch));
3868 // Xor original key with a seed.
3869 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3870 Eor(key, key, Operand::UntagSmi(scratch));
3872 // The algorithm uses 32-bit integer values.
3874 scratch = scratch.W();
3876 // Compute the hash code from the untagged key. This must be kept in sync
3877 // with ComputeIntegerHash in utils.h.
3879 // hash = ~hash + (hash << 15);
3881 Add(key, scratch, Operand(key, LSL, 15));
3882 // hash = hash ^ (hash >> 12);
3883 Eor(key, key, Operand(key, LSR, 12));
3884 // hash = hash + (hash << 2);
3885 Add(key, key, Operand(key, LSL, 2));
3886 // hash = hash ^ (hash >> 4);
3887 Eor(key, key, Operand(key, LSR, 4));
3888 // hash = hash * 2057;
3889 Mov(scratch, Operand(key, LSL, 11));
3890 Add(key, key, Operand(key, LSL, 3));
3891 Add(key, key, scratch);
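// The three instructions above compute hash * 2057 as hash * 9 + (hash << 11),
// since 2057 == 9 + 2048.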
3892 // hash = hash ^ (hash >> 16);
3893 Eor(key, key, Operand(key, LSR, 16));
3894 Bic(key, key, Operand(0xc0000000u));
3898 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3905 Register scratch3) {
3906 DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
3910 SmiUntag(scratch0, key);
3911 GetNumberHash(scratch0, scratch1);
3913 // Compute the capacity mask.
3915 UntagSmiFieldMemOperand(elements,
3916 SeededNumberDictionary::kCapacityOffset));
3917 Sub(scratch1, scratch1, 1);
3919 // Generate an unrolled loop that performs a few probes before giving up.
3920 for (int i = 0; i < kNumberDictionaryProbes; i++) {
3921 // Compute the masked index: (hash + i + i * i) & mask.
3923 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
3925 Mov(scratch2, scratch0);
3927 And(scratch2, scratch2, scratch1);
3929 // Scale the index by multiplying by the element size.
3930 DCHECK(SeededNumberDictionary::kEntrySize == 3);
3931 Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
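// i.e. scratch2 = scratch2 * 3, the number of pointer-sized fields per
// dictionary entry (kEntrySize).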
3933 // Check if the key is identical to the name.
3934 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
3936 FieldMemOperand(scratch2,
3937 SeededNumberDictionary::kElementsStartOffset));
3939 if (i != (kNumberDictionaryProbes - 1)) {
3947 // Check that the value is a field property.
3948 const int kDetailsOffset =
3949 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3950 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
3952 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
3954 // Get the value at the masked, scaled index and return.
3955 const int kValueOffset =
3956 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
3957 Ldr(result, FieldMemOperand(scratch2, kValueOffset));
3958 }
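// Sketch of the entry layout assumed by the offsets above (not authoritative):
// each SeededNumberDictionary entry is three tagged words starting at
// kElementsStartOffset -- key, then value (+ kPointerSize), then the details
// word (+ 2 * kPointerSize) -- which is why the probe index is multiplied by
// three (index + (index << 1)) before being scaled by kPointerSizeLog2.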
3961 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
3962 Register address,
3963 Register scratch1,
3964 SaveFPRegsMode fp_mode,
3965 RememberedSetFinalAction and_then) {
3966 DCHECK(!AreAliased(object, address, scratch1));
3967 Label done, store_buffer_overflow;
3968 if (emit_debug_code()) {
3969 Label ok;
3970 JumpIfNotInNewSpace(object, &ok);
3971 Abort(kRememberedSetPointerInNewSpace);
3972 Bind(&ok);
3973 }
3974 UseScratchRegisterScope temps(this);
3975 Register scratch2 = temps.AcquireX();
3977 // Load store buffer top.
3978 Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
3979 Ldr(scratch1, MemOperand(scratch2));
3980 // Store pointer to buffer and increment buffer top.
3981 Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
3982 // Write back new top of buffer.
3983 Str(scratch1, MemOperand(scratch2));
3984 // Call stub on end of buffer.
3985 // Check for end of buffer.
3986 DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
3987 (1 << (14 + kPointerSizeLog2)));
3988 if (and_then == kFallThroughAtEnd) {
3989 Tbz(scratch1, (14 + kPointerSizeLog2), &done);
3990 } else {
3991 DCHECK(and_then == kReturnAtEnd);
3992 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
3993 Ret();
3994 }
3996 Bind(&store_buffer_overflow);
3998 StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
3999 CallStub(&store_buffer_overflow_stub);
4002 Bind(&done);
4003 if (and_then == kReturnAtEnd) {
4004 Ret();
4005 }
4006 }
4009 void MacroAssembler::PopSafepointRegisters() {
4010 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4011 PopXRegList(kSafepointSavedRegisters);
4012 Drop(num_unsaved);
4013 }
4016 void MacroAssembler::PushSafepointRegisters() {
4017 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
4018 // adjust the stack for unsaved registers.
4019 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4020 DCHECK(num_unsaved >= 0);
4021 Claim(num_unsaved);
4022 PushXRegList(kSafepointSavedRegisters);
4026 void MacroAssembler::PushSafepointRegistersAndDoubles() {
4027 PushSafepointRegisters();
4028 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
4029 FPRegister::kAllocatableFPRegisters));
4033 void MacroAssembler::PopSafepointRegistersAndDoubles() {
4034 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
4035 FPRegister::kAllocatableFPRegisters));
4036 PopSafepointRegisters();
4040 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
4041 // Make sure the safepoint registers list is what we expect.
4042 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
4044 // Safepoint registers are stored contiguously on the stack, but not all the
4045 // registers are saved. The following registers are excluded:
4046 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
4047 // the macro assembler.
4048 // - x28 (jssp) because JS stack pointer doesn't need to be included in
4049 // safepoint registers.
4050 // - x31 (csp) because the system stack pointer doesn't need to be included
4051 // in safepoint registers.
4053 // This function implements the mapping of register code to index into the
4054 // safepoint register slots.
4055 if ((reg_code >= 0) && (reg_code <= 15)) {
4056 return reg_code;
4057 } else if ((reg_code >= 18) && (reg_code <= 27)) {
4058 // Skip ip0 and ip1.
4059 return reg_code - 2;
4060 } else if ((reg_code == 29) || (reg_code == 30)) {
4062 return reg_code - 3;
4063 } else {
4064 // This register has no safepoint register slot.
4065 UNREACHABLE();
4066 return -1;
4067 }
4068 }
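// Worked example of the mapping implemented above: x0-x15 keep their own
// index, x18 maps to slot 16 and x27 to slot 25 (ip0/ip1 are skipped), and
// fp (x29) / lr (x30) map to slots 26 / 27 (jssp is skipped as well).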
4071 void MacroAssembler::CheckPageFlagSet(const Register& object,
4072 const Register& scratch,
4073 int mask,
4074 Label* if_any_set) {
4075 And(scratch, object, ~Page::kPageAlignmentMask);
4076 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4077 TestAndBranchIfAnySet(scratch, mask, if_any_set);
4081 void MacroAssembler::CheckPageFlagClear(const Register& object,
4082 const Register& scratch,
4083 int mask,
4084 Label* if_all_clear) {
4085 And(scratch, object, ~Page::kPageAlignmentMask);
4086 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4087 TestAndBranchIfAllClear(scratch, mask, if_all_clear);
4088 }
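// Both helpers above use the same trick: masking the tagged object address
// with ~Page::kPageAlignmentMask yields the page start, where the MemoryChunk
// header (and its flags word at kFlagsOffset) lives, so a single load plus a
// masked test answers the page-flag question. The scratch register is
// clobbered in the process.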
4091 void MacroAssembler::RecordWriteField(
4092 Register object,
4093 int offset,
4094 Register value,
4095 Register scratch,
4096 LinkRegisterStatus lr_status,
4097 SaveFPRegsMode save_fp,
4098 RememberedSetAction remembered_set_action,
4099 SmiCheck smi_check,
4100 PointersToHereCheck pointers_to_here_check_for_value) {
4101 // First, check if a write barrier is even needed. The tests below
4102 // catch stores of Smis.
4103 Label done;
4105 // Skip the barrier if writing a smi.
4106 if (smi_check == INLINE_SMI_CHECK) {
4107 JumpIfSmi(value, &done);
4110 // Although the object register is tagged, the offset is relative to the start
4111 // of the object, so offset must be a multiple of kPointerSize.
4112 DCHECK(IsAligned(offset, kPointerSize));
4114 Add(scratch, object, offset - kHeapObjectTag);
4115 if (emit_debug_code()) {
4116 Label ok;
4117 Tst(scratch, (1 << kPointerSizeLog2) - 1);
4118 B(eq, &ok);
4119 Abort(kUnalignedCellInWriteBarrier);
4120 Bind(&ok);
4121 }
4123 RecordWrite(object,
4124 scratch,
4125 value,
4126 lr_status,
4127 save_fp,
4128 remembered_set_action,
4129 OMIT_SMI_CHECK,
4130 pointers_to_here_check_for_value);
4132 Bind(&done);
4134 // Clobber clobbered input registers when running with the debug-code flag
4135 // turned on to provoke errors.
4136 if (emit_debug_code()) {
4137 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
4138 Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
4143 // Will clobber: object, map, dst.
4144 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4145 void MacroAssembler::RecordWriteForMap(Register object,
4146 Register map,
4147 Register dst,
4148 LinkRegisterStatus lr_status,
4149 SaveFPRegsMode fp_mode) {
4150 ASM_LOCATION("MacroAssembler::RecordWrite");
4151 DCHECK(!AreAliased(object, map));
4153 if (emit_debug_code()) {
4154 UseScratchRegisterScope temps(this);
4155 Register temp = temps.AcquireX();
4157 CompareObjectMap(map, temp, isolate()->factory()->meta_map());
4158 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4161 if (!FLAG_incremental_marking) {
4162 return;
4163 }
4165 if (emit_debug_code()) {
4166 UseScratchRegisterScope temps(this);
4167 Register temp = temps.AcquireX();
4169 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4170 Cmp(temp, map);
4171 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4172 }
4174 // First, check if a write barrier is even needed. The tests below
4175 // catch stores of smis and stores into the young generation.
4176 Label done;
4178 // A single check of the map's pages interesting flag suffices, since it is
4179 // only set during incremental collection, and then it's also guaranteed that
4180 // the from object's page's interesting flag is also set. This optimization
4181 // relies on the fact that maps can never be in new space.
4182 CheckPageFlagClear(map,
4183 map, // Used as scratch.
4184 MemoryChunk::kPointersToHereAreInterestingMask,
4185 &done);
4187 // Record the actual write.
4188 if (lr_status == kLRHasNotBeenSaved) {
4189 Push(lr);
4190 }
4191 Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
4192 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
4193 fp_mode);
4194 CallStub(&stub);
4195 if (lr_status == kLRHasNotBeenSaved) {
4196 Pop(lr);
4197 }
4199 Bind(&done);
4201 // Count number of write barriers in generated code.
4202 isolate()->counters()->write_barriers_static()->Increment();
4203 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
4204 dst);
4206 // Clobber clobbered registers when running with the debug-code flag
4207 // turned on to provoke errors.
4208 if (emit_debug_code()) {
4209 Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
4210 Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
4215 // Will clobber: object, address, value.
4216 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4218 // The register 'object' contains a heap object pointer. The heap object tag is
4220 void MacroAssembler::RecordWrite(
4221 Register object,
4222 Register address,
4223 Register value,
4224 LinkRegisterStatus lr_status,
4225 SaveFPRegsMode fp_mode,
4226 RememberedSetAction remembered_set_action,
4227 SmiCheck smi_check,
4228 PointersToHereCheck pointers_to_here_check_for_value) {
4229 ASM_LOCATION("MacroAssembler::RecordWrite");
4230 DCHECK(!AreAliased(object, value));
4232 if (emit_debug_code()) {
4233 UseScratchRegisterScope temps(this);
4234 Register temp = temps.AcquireX();
4236 Ldr(temp, MemOperand(address));
4237 Cmp(temp, value);
4238 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4239 }
4241 // First, check if a write barrier is even needed. The tests below
4242 // catch stores of smis and stores into the young generation.
4243 Label done;
4245 if (smi_check == INLINE_SMI_CHECK) {
4246 DCHECK_EQ(0, kSmiTag);
4247 JumpIfSmi(value, &done);
4250 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
4251 CheckPageFlagClear(value,
4252 value, // Used as scratch.
4253 MemoryChunk::kPointersToHereAreInterestingMask,
4254 &done);
4255 }
4256 CheckPageFlagClear(object,
4257 value, // Used as scratch.
4258 MemoryChunk::kPointersFromHereAreInterestingMask,
4259 &done);
4261 // Record the actual write.
4262 if (lr_status == kLRHasNotBeenSaved) {
4263 Push(lr);
4264 }
4265 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
4266 fp_mode);
4267 CallStub(&stub);
4268 if (lr_status == kLRHasNotBeenSaved) {
4269 Pop(lr);
4270 }
4272 Bind(&done);
4274 // Count number of write barriers in generated code.
4275 isolate()->counters()->write_barriers_static()->Increment();
4276 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
4277 value);
4279 // Clobber clobbered registers when running with the debug-code flag
4280 // turned on to provoke errors.
4281 if (emit_debug_code()) {
4282 Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
4283 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
4288 void MacroAssembler::AssertHasValidColor(const Register& reg) {
4289 if (emit_debug_code()) {
4290 // The bit sequence is backward. The first character in the string
4291 // represents the least significant bit.
4292 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4294 Label color_is_valid;
4295 Tbnz(reg, 0, &color_is_valid);
4296 Tbz(reg, 1, &color_is_valid);
4297 Abort(kUnexpectedColorFound);
4298 Bind(&color_is_valid);
4303 void MacroAssembler::GetMarkBits(Register addr_reg,
4304 Register bitmap_reg,
4305 Register shift_reg) {
4306 DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
4307 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
4308 // addr_reg is divided into fields:
4309 // |63 page base 20|19 high 8|7 shift 3|2 0|
4310 // 'high' gives the index of the cell holding color bits for the object.
4311 // 'shift' gives the offset in the cell for this object's color.
4312 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
4313 UseScratchRegisterScope temps(this);
4314 Register temp = temps.AcquireX();
4315 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4316 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4317 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
4319 // |63 page base 20|19 zeros 15|14 high 3|2 0|
4320 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4321 }
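// Worked example of the field split above (assuming kPointerSizeLog2 == 3 and
// Bitmap::kBitsPerCellLog2 == 5, so kShiftBits == 8): for an object at
// page_base + 0x2468, 'high' is 0x2468 >> 8 = 0x24 and selects the bitmap
// cell (added to the page base after scaling by kBytesPerCellLog2), while
// 'shift' is (0x2468 >> 3) & 0x1f = 13, the position of the object's colour
// bits within that cell.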
4324 void MacroAssembler::HasColor(Register object,
4325 Register bitmap_scratch,
4326 Register shift_scratch,
4327 Label* has_color,
4328 int first_bit,
4329 int second_bit) {
4330 // See mark-compact.h for color definitions.
4331 DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
4333 GetMarkBits(object, bitmap_scratch, shift_scratch);
4334 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4335 // Shift the bitmap down to get the color of the object in bits [1:0].
4336 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4338 AssertHasValidColor(bitmap_scratch);
4340 // These bit sequences are backwards. The first character in the string
4341 // represents the least significant bit.
4342 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4343 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4344 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
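// Reading those patterns LSB-first: white = 00, black = 10, grey = 11, and 01
// is the impossible combination rejected by AssertHasValidColor. Testing bit 0
// therefore separates white from non-white, and bit 1 separates grey from
// black.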
4346 // Check for the color.
4347 if (first_bit == 0) {
4348 // Checking for white.
4349 DCHECK(second_bit == 0);
4350 // We only need to test the first bit.
4351 Tbz(bitmap_scratch, 0, has_color);
4352 } else {
4354 // Checking for grey or black.
4355 Tbz(bitmap_scratch, 0, &other_color);
4356 if (second_bit == 0) {
4357 Tbz(bitmap_scratch, 1, has_color);
4358 } else {
4359 Tbnz(bitmap_scratch, 1, has_color);
4360 }
4362 Bind(&other_color);
4363 }
4364 // Fall through if it does not have the right color.
4368 void MacroAssembler::JumpIfBlack(Register object,
4369 Register scratch0,
4370 Register scratch1,
4371 Label* on_black) {
4372 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4373 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
4377 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4378 Register object,
4379 Register scratch0,
4380 Register scratch1,
4381 Label* found) {
4382 DCHECK(!AreAliased(object, scratch0, scratch1));
4383 Register current = scratch0;
4384 Label loop_again, end;
4386 // Scratch contains elements pointer.
4387 Mov(current, object);
4388 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4389 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4390 CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
4392 // Loop based on the map going up the prototype chain.
4393 Bind(&loop_again);
4394 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4395 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
4396 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
4397 CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
4398 B(lo, found);
4399 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4400 DecodeField<Map::ElementsKindBits>(scratch1);
4401 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
4402 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4403 CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
4405 Bind(&end);
4406 }
4409 void MacroAssembler::EnsureNotWhite(
4410 Register value,
4411 Register bitmap_scratch,
4412 Register shift_scratch,
4413 Register load_scratch,
4414 Register length_scratch,
4415 Label* value_is_white_and_not_data) {
4416 DCHECK(!AreAliased(
4417 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4419 // These bit sequences are backwards. The first character in the string
4420 // represents the least significant bit.
4421 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4422 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4423 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
4425 GetMarkBits(value, bitmap_scratch, shift_scratch);
4426 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4427 Lsr(load_scratch, load_scratch, shift_scratch);
4429 AssertHasValidColor(load_scratch);
4431 // If the value is black or grey we don't need to do anything.
4432 // Since both black and grey have a 1 in the first position and white does
4433 // not have a 1 there we only need to check one bit.
4434 Label done;
4435 Tbnz(load_scratch, 0, &done);
4437 // Value is white. We check whether it is data that doesn't need scanning.
4438 Register map = load_scratch; // Holds map while checking type.
4439 Label is_data_object;
4441 // Check for heap-number.
4442 Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
4443 Mov(length_scratch, HeapNumber::kSize);
4444 JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
4446 // Check for strings.
4447 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4448 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4449 // If it's a string and it's not a cons string then it's an object containing
4450 // no GC pointers.
4451 Register instance_type = load_scratch;
4452 Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
4453 TestAndBranchIfAnySet(instance_type,
4454 kIsIndirectStringMask | kIsNotStringMask,
4455 value_is_white_and_not_data);
4457 // It's a non-indirect (non-cons and non-slice) string.
4458 // If it's external, the length is just ExternalString::kSize.
4459 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4460 // External strings are the only ones with the kExternalStringTag bit
4461 // set.
4462 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
4463 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
4464 Mov(length_scratch, ExternalString::kSize);
4465 TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
4467 // Sequential string, either Latin1 or UC16.
4468 // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
4469 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
4470 // getting the length multiplied by 2.
4471 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
4472 Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
4473 String::kLengthOffset));
4474 Tst(instance_type, kStringEncodingMask);
4475 Cset(load_scratch, eq);
4476 Lsl(length_scratch, length_scratch, load_scratch);
4478 Add(length_scratch, length_scratch,
4479 SeqString::kHeaderSize + kObjectAlignmentMask);
4480 Bic(length_scratch, length_scratch, kObjectAlignmentMask);
4482 Bind(&is_data_object);
4483 // Value is a data object, and it is white. Mark it black. Since we know
4484 // that the object is white we can make it black by flipping one bit.
4485 Register mask = shift_scratch;
4486 Mov(load_scratch, 1);
4487 Lsl(mask, load_scratch, shift_scratch);
4489 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4490 Orr(load_scratch, load_scratch, mask);
4491 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4493 Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
4494 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
4495 Add(load_scratch, load_scratch, length_scratch);
4496 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
4498 Bind(&done);
4499 }
4502 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
4503 if (emit_debug_code()) {
4504 Check(cond, reason);
4510 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
4511 if (emit_debug_code()) {
4512 CheckRegisterIsClear(reg, reason);
4517 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4518 Heap::RootListIndex index,
4519 BailoutReason reason) {
4520 if (emit_debug_code()) {
4521 CompareRoot(reg, index);
4522 Check(eq, reason);
4523 }
4527 void MacroAssembler::AssertFastElements(Register elements) {
4528 if (emit_debug_code()) {
4529 UseScratchRegisterScope temps(this);
4530 Register temp = temps.AcquireX();
4531 Label ok;
4532 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
4533 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
4534 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
4535 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
4536 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4537 Bind(&ok);
4538 }
4539 }
4542 void MacroAssembler::AssertIsString(const Register& object) {
4543 if (emit_debug_code()) {
4544 UseScratchRegisterScope temps(this);
4545 Register temp = temps.AcquireX();
4546 STATIC_ASSERT(kSmiTag == 0);
4547 Tst(object, kSmiTagMask);
4548 Check(ne, kOperandIsNotAString);
4549 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4550 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
4551 Check(lo, kOperandIsNotAString);
4556 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
4557 Label ok;
4558 B(cond, &ok);
4559 Abort(reason);
4560 // Will not return here.
4561 Bind(&ok);
4562 }
4565 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
4566 Label ok;
4567 Cbz(reg, &ok);
4568 Abort(reason);
4569 // Will not return here.
4570 Bind(&ok);
4571 }
4574 void MacroAssembler::Abort(BailoutReason reason) {
4576 RecordComment("Abort message: ");
4577 RecordComment(GetBailoutReason(reason));
4579 if (FLAG_trap_on_abort) {
4580 Brk(0);
4581 return;
4582 }
4585 // Abort is used in some contexts where csp is the stack pointer. In order to
4586 // simplify the CallRuntime code, make sure that jssp is the stack pointer.
4587 // There is no risk of register corruption here because Abort doesn't return.
4588 Register old_stack_pointer = StackPointer();
4589 SetStackPointer(jssp);
4590 Mov(jssp, old_stack_pointer);
4592 // We need some scratch registers for the MacroAssembler, so make sure we have
4593 // some. This is safe here because Abort never returns.
4594 RegList old_tmp_list = TmpList()->list();
4595 TmpList()->Combine(MacroAssembler::DefaultTmpList());
4597 if (use_real_aborts()) {
4598 // Avoid infinite recursion; Push contains some assertions that use Abort.
4599 NoUseRealAbortsScope no_real_aborts(this);
4601 Mov(x0, Smi::FromInt(reason));
4602 Push(x0);
4604 if (!has_frame_) {
4605 // We don't actually want to generate a pile of code for this, so just
4606 // claim there is a stack frame, without generating one.
4607 FrameScope scope(this, StackFrame::NONE);
4608 CallRuntime(Runtime::kAbort, 1);
4609 } else {
4610 CallRuntime(Runtime::kAbort, 1);
4611 }
4612 } else {
4613 // Load the string to pass to Printf.
4614 Label msg_address;
4615 Adr(x0, &msg_address);
4617 // Call Printf directly to report the error.
4618 CallPrintf();
4620 // We need a way to stop execution on both the simulator and real hardware,
4621 // and Unreachable() is the best option.
4622 Unreachable();
4624 // Emit the message string directly in the instruction stream.
4626 BlockPoolsScope scope(this);
4627 Bind(&msg_address);
4628 EmitStringData(GetBailoutReason(reason));
4629 }
4630 }
4632 SetStackPointer(old_stack_pointer);
4633 TmpList()->set_list(old_tmp_list);
4637 void MacroAssembler::LoadTransitionedArrayMapConditional(
4638 ElementsKind expected_kind,
4639 ElementsKind transitioned_kind,
4640 Register map_in_out,
4641 Register scratch1,
4642 Register scratch2,
4643 Label* no_map_match) {
4644 // Load the global or builtins object from the current context.
4645 Ldr(scratch1, GlobalObjectMemOperand());
4646 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
4648 // Check that the function's map is the same as the expected cached map.
4649 Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
4650 int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
4651 Ldr(scratch2, FieldMemOperand(scratch1, offset));
4652 Cmp(map_in_out, scratch2);
4653 B(ne, no_map_match);
4655 // Use the transitioned cached map.
4656 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
4657 Ldr(map_in_out, FieldMemOperand(scratch1, offset));
4661 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4662 // Load the global or builtins object from the current context.
4663 Ldr(function, GlobalObjectMemOperand());
4664 // Load the native context from the global or builtins object.
4665 Ldr(function, FieldMemOperand(function,
4666 GlobalObject::kNativeContextOffset));
4667 // Load the function from the native context.
4668 Ldr(function, ContextMemOperand(function, index));
4672 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4673 Register map,
4674 Register scratch) {
4675 // Load the initial map. The global functions all have initial maps.
4676 Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4677 if (emit_debug_code()) {
4678 Label ok, fail;
4679 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4680 B(&ok);
4681 Bind(&fail);
4682 Abort(kGlobalFunctionsMustHaveInitialMap);
4683 Bind(&ok);
4684 }
4685 }
4688 // This is the main Printf implementation. All other Printf variants call
4689 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
4690 void MacroAssembler::PrintfNoPreserve(const char * format,
4691 const CPURegister& arg0,
4692 const CPURegister& arg1,
4693 const CPURegister& arg2,
4694 const CPURegister& arg3) {
4695 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
4696 // in most cases anyway, so this restriction shouldn't be too serious.
4697 DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
4699 // The provided arguments, and their proper procedure-call standard registers.
4700 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
4701 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
4703 int arg_count = kPrintfMaxArgCount;
4705 // The PCS varargs registers for printf. Note that x0 is used for the printf
4706 // format string.
4707 static const CPURegList kPCSVarargs =
4708 CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
4709 static const CPURegList kPCSVarargsFP =
4710 CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
4712 // We can use caller-saved registers as scratch values, except for the
4713 // arguments and the PCS registers where they might need to go.
4714 CPURegList tmp_list = kCallerSaved;
4715 tmp_list.Remove(x0); // Used to pass the format string.
4716 tmp_list.Remove(kPCSVarargs);
4717 tmp_list.Remove(arg0, arg1, arg2, arg3);
4719 CPURegList fp_tmp_list = kCallerSavedFP;
4720 fp_tmp_list.Remove(kPCSVarargsFP);
4721 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
4723 // Override the MacroAssembler's scratch register list. The lists will be
4724 // reset automatically at the end of the UseScratchRegisterScope.
4725 UseScratchRegisterScope temps(this);
4726 TmpList()->set_list(tmp_list.list());
4727 FPTmpList()->set_list(fp_tmp_list.list());
4729 // Copies of the printf vararg registers that we can pop from.
4730 CPURegList pcs_varargs = kPCSVarargs;
4731 CPURegList pcs_varargs_fp = kPCSVarargsFP;
4733 // Place the arguments. There are lots of clever tricks and optimizations we
4734 // could use here, but Printf is a debug tool so instead we just try to keep
4735 // it simple: Move each input that isn't already in the right place to a
4736 // scratch register, then move everything back.
4737 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
4738 // Work out the proper PCS register for this argument.
4739 if (args[i].IsRegister()) {
4740 pcs[i] = pcs_varargs.PopLowestIndex().X();
4741 // We might only need a W register here. We need to know the size of the
4742 // argument so we can properly encode it for the simulator call.
4743 if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
4744 } else if (args[i].IsFPRegister()) {
4745 // In C, floats are always cast to doubles for varargs calls.
4746 pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
4747 } else {
4748 DCHECK(args[i].IsNone());
4749 arg_count = i;
4750 break;
4751 }
4753 // If the argument is already in the right place, leave it where it is.
4754 if (args[i].Aliases(pcs[i])) continue;
4756 // Otherwise, if the argument is in a PCS argument register, allocate an
4757 // appropriate scratch register and then move it out of the way.
4758 if (kPCSVarargs.IncludesAliasOf(args[i]) ||
4759 kPCSVarargsFP.IncludesAliasOf(args[i])) {
4760 if (args[i].IsRegister()) {
4761 Register old_arg = Register(args[i]);
4762 Register new_arg = temps.AcquireSameSizeAs(old_arg);
4763 Mov(new_arg, old_arg);
4764 args[i] = new_arg;
4765 } else {
4766 FPRegister old_arg = FPRegister(args[i]);
4767 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
4768 Fmov(new_arg, old_arg);
4769 args[i] = new_arg;
4770 }
4771 }
4772 }
4774 // Do a second pass to move values into their final positions and perform any
4775 // conversions that may be required.
4776 for (int i = 0; i < arg_count; i++) {
4777 DCHECK(pcs[i].type() == args[i].type());
4778 if (pcs[i].IsRegister()) {
4779 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
4780 } else {
4781 DCHECK(pcs[i].IsFPRegister());
4782 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
4783 Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
4784 } else {
4785 Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
4786 }
4787 }
4788 }
4790 // Load the format string into x0, as per the procedure-call standard.
4792 // To make the code as portable as possible, the format string is encoded
4793 // directly in the instruction stream. It might be cleaner to encode it in a
4794 // literal pool, but since Printf is usually used for debugging, it is
4795 // beneficial for it to be minimally dependent on other features.
4796 Label format_address;
4797 Adr(x0, &format_address);
4799 // Emit the format string directly in the instruction stream.
4800 { BlockPoolsScope scope(this);
4801 Label after_data;
4802 B(&after_data);
4803 Bind(&format_address);
4804 EmitStringData(format);
4805 Unreachable();
4806 Bind(&after_data);
4807 }
4809 // We don't pass any arguments on the stack, but we still need to align the C
4810 // stack pointer to a 16-byte boundary for PCS compliance.
4811 if (!csp.Is(StackPointer())) {
4812 Bic(csp, StackPointer(), 0xf);
4815 CallPrintf(arg_count, pcs);
4819 void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
4820 // A call to printf needs special handling for the simulator, since the system
4821 // printf function will use a different instruction set and the procedure-call
4822 // standard will not be compatible.
4823 #ifdef USE_SIMULATOR
4824 { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
4825 hlt(kImmExceptionIsPrintf);
4826 dc32(arg_count); // kPrintfArgCountOffset
4828 // Determine the argument pattern.
4829 uint32_t arg_pattern_list = 0;
4830 for (int i = 0; i < arg_count; i++) {
4831 uint32_t arg_pattern;
4832 if (args[i].IsRegister()) {
4833 arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
4834 } else {
4835 DCHECK(args[i].Is64Bits());
4836 arg_pattern = kPrintfArgD;
4837 }
4838 DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
4839 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
4841 dc32(arg_pattern_list); // kPrintfArgPatternListOffset
4842 }
4843 #else
4844 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
4845 #endif
4846 }
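// Under USE_SIMULATOR the printf call is replaced by a pseudo-instruction
// sequence that the simulator decodes: the hlt #kImmExceptionIsPrintf marker,
// then one data word holding arg_count and one holding the packed per-argument
// patterns (kPrintfArgPatternBits bits each, W/X/D), at the offsets named in
// the comments above. On real hardware the code simply calls the C library's
// printf through an external reference.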
4849 void MacroAssembler::Printf(const char * format,
4850 CPURegister arg0,
4851 CPURegister arg1,
4852 CPURegister arg2,
4853 CPURegister arg3) {
4854 // We can only print sp if it is the current stack pointer.
4855 if (!csp.Is(StackPointer())) {
4856 DCHECK(!csp.Aliases(arg0));
4857 DCHECK(!csp.Aliases(arg1));
4858 DCHECK(!csp.Aliases(arg2));
4859 DCHECK(!csp.Aliases(arg3));
4862 // Printf is expected to preserve all registers, so make sure that none are
4863 // available as scratch registers until we've preserved them.
4864 RegList old_tmp_list = TmpList()->list();
4865 RegList old_fp_tmp_list = FPTmpList()->list();
4866 TmpList()->set_list(0);
4867 FPTmpList()->set_list(0);
4869 // Preserve all caller-saved registers as well as NZCV.
4870 // If csp is the stack pointer, PushCPURegList asserts that the size of each
4871 // list is a multiple of 16 bytes.
4872 PushCPURegList(kCallerSaved);
4873 PushCPURegList(kCallerSavedFP);
4875 // We can use caller-saved registers as scratch values (except for argN).
4876 CPURegList tmp_list = kCallerSaved;
4877 CPURegList fp_tmp_list = kCallerSavedFP;
4878 tmp_list.Remove(arg0, arg1, arg2, arg3);
4879 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
4880 TmpList()->set_list(tmp_list.list());
4881 FPTmpList()->set_list(fp_tmp_list.list());
4883 { UseScratchRegisterScope temps(this);
4884 // If any of the arguments are the current stack pointer, allocate a new
4885 // register for them, and adjust the value to compensate for pushing the
4886 // caller-saved registers.
4887 bool arg0_sp = StackPointer().Aliases(arg0);
4888 bool arg1_sp = StackPointer().Aliases(arg1);
4889 bool arg2_sp = StackPointer().Aliases(arg2);
4890 bool arg3_sp = StackPointer().Aliases(arg3);
4891 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
4892 // Allocate a register to hold the original stack pointer value, to pass
4893 // to PrintfNoPreserve as an argument.
4894 Register arg_sp = temps.AcquireX();
4895 Add(arg_sp, StackPointer(),
4896 kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
4897 if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
4898 if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
4899 if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
4900 if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
4904 { UseScratchRegisterScope temps(this);
4905 Register tmp = temps.AcquireX();
4906 Mrs(tmp, NZCV);
4907 Push(tmp, xzr);
4908 }
4910 PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
4913 { UseScratchRegisterScope temps(this);
4914 Register tmp = temps.AcquireX();
4915 Pop(xzr, tmp);
4916 Msr(NZCV, tmp);
4917 }
4920 PopCPURegList(kCallerSavedFP);
4921 PopCPURegList(kCallerSaved);
4923 TmpList()->set_list(old_tmp_list);
4924 FPTmpList()->set_list(old_fp_tmp_list);
4925 }
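// Typical debug-only usage -- a sketch with a hypothetical call site; the
// format specifiers must match the register arguments that are handed over:
//   __ Printf("x0 = 0x%" PRIx64 ", d1 = %g\n", x0, d1);
// The format string itself is emitted into the instruction stream by
// PrintfNoPreserve, and the arguments are shuffled into the PCS varargs
// registers before the call.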
4928 void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
4929 // TODO(jbramley): Other architectures use the internal memcpy to copy the
4930 // sequence. If this is a performance bottleneck, we should consider caching
4931 // the sequence and copying it in the same way.
4932 InstructionAccurateScope scope(this,
4933 kNoCodeAgeSequenceLength / kInstructionSize);
4934 DCHECK(jssp.Is(StackPointer()));
4935 EmitFrameSetupForCodeAgePatching(this);
4940 void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
4941 InstructionAccurateScope scope(this,
4942 kNoCodeAgeSequenceLength / kInstructionSize);
4943 DCHECK(jssp.Is(StackPointer()));
4944 EmitCodeAgeSequence(this, stub);
4952 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
4953 Label start;
4954 __ bind(&start);
4956 // We can do this sequence using four instructions, but the code ageing
4957 // sequence that patches it needs five, so we use the extra space to try to
4958 // simplify some addressing modes and remove some dependencies (compared to
4959 // using two stp instructions with write-back).
4960 __ sub(jssp, jssp, 4 * kXRegSize);
4961 __ sub(csp, csp, 4 * kXRegSize);
4962 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
4963 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
4964 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
4966 __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
4970 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
4971 Code * stub) {
4972 Label start;
4973 __ bind(&start);
4974 // When the stub is called, the sequence is replaced with the young sequence
4975 // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
4976 // stub jumps to &start, stored in x0. The young sequence does not call the
4977 // stub so there is no infinite loop here.
4979 // A branch (br) is used rather than a call (blr) because this code replaces
4980 // the frame setup code that would normally preserve lr.
4981 __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
4982 __ adr(x0, &start);
4983 __ br(ip0);
4984 // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
4985 // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
4986 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
4988 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
4989 __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
4994 bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
4995 bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
4996 DCHECK(is_young ||
4997 isolate->code_aging_helper()->IsOld(sequence));
4998 return is_young;
4999 }
5002 void MacroAssembler::TruncatingDiv(Register result,
5003 Register dividend,
5004 int32_t divisor) {
5005 DCHECK(!AreAliased(result, dividend));
5006 DCHECK(result.Is32Bits() && dividend.Is32Bits());
5007 base::MagicNumbersForDivision<uint32_t> mag =
5008 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5009 Mov(result, mag.multiplier);
5010 Smull(result.X(), dividend, result);
5011 Asr(result.X(), result.X(), 32);
5012 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5013 if (divisor > 0 && neg) Add(result, result, dividend);
5014 if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
5015 if (mag.shift > 0) Asr(result, result, mag.shift);
5016 Add(result, result, Operand(dividend, LSR, 31));
5017 }
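// TruncatingDiv implements the usual "magic number" signed division: the
// quotient is approximated by the high 32 bits of dividend * multiplier,
// corrected for the multiplier's sign, shifted, and finally adjusted by the
// dividend's sign bit (the LSR #31 above) so the result truncates towards
// zero. As a sketch, assuming the classic constants produced by
// base::SignedDivisionByConstant: for divisor 3 the multiplier is 0x55555556
// with shift 0, so 7 -> (7 * 0x55555556) >> 32 = 2, and -7 -> -3 + 1 = -2.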
5023 UseScratchRegisterScope::~UseScratchRegisterScope() {
5024 available_->set_list(old_available_);
5025 availablefp_->set_list(old_availablefp_);
5029 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
5030 int code = AcquireNextAvailable(available_).code();
5031 return Register::Create(code, reg.SizeInBits());
5035 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
5036 int code = AcquireNextAvailable(availablefp_).code();
5037 return FPRegister::Create(code, reg.SizeInBits());
5041 CPURegister UseScratchRegisterScope::AcquireNextAvailable(
5042 CPURegList* available) {
5043 CHECK(!available->IsEmpty());
5044 CPURegister result = available->PopLowestIndex();
5045 DCHECK(!AreAliased(result, xzr, csp));
5046 return result;
5047 }
5050 CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
5051 const CPURegister& reg) {
5052 DCHECK(available->IncludesAliasOf(reg));
5053 available->Remove(reg);
5054 return reg;
5055 }
5061 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
5062 const Label* smi_check) {
5063 Assembler::BlockPoolsScope scope(masm);
5064 if (reg.IsValid()) {
5065 DCHECK(smi_check->is_bound());
5066 DCHECK(reg.Is64Bits());
5068 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
5069 // 'check' in the other bits. The possible offset is limited in that we
5070 // use BitField to pack the data, and the underlying data type is a
5071 // 32-bit integer.
5072 uint32_t delta =
5073 static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
5074 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
5075 } else {
5076 DCHECK(!smi_check->is_bound());
5078 // An offset of 0 indicates that there is no patch site.
5079 __ InlineData(0);
5080 }
5081 }
5084 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
5085 : reg_(NoReg), smi_check_(NULL) {
5086 InstructionSequence* inline_data = InstructionSequence::At(info);
5087 DCHECK(inline_data->IsInlineData());
5088 if (inline_data->IsInlineData()) {
5089 uint64_t payload = inline_data->InlineData();
5090 // We use BitField to decode the payload, and BitField can only handle
5091 // 32-bit values.
5092 DCHECK(is_uint32(payload));
5094 uint32_t payload32 = static_cast<uint32_t>(payload);
5095 int reg_code = RegisterBits::decode(payload32);
5096 reg_ = Register::XRegFromCode(reg_code);
5097 int smi_check_delta = DeltaBits::decode(payload32);
5098 DCHECK(smi_check_delta != 0);
5099 smi_check_ = inline_data->preceding(smi_check_delta);
5100 }
5101 }
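// Payload layout shared by the encode/decode paths above: the low five bits
// (RegisterBits) hold the register code 0-30 and the remaining bits
// (DeltaBits) hold the instruction count back to the smi check, so a payload
// of 0x45, for example, decodes as register x5 with a delta of 2 instructions.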
5108 } // namespace internal
5109 } // namespace v8
5111 #endif // V8_TARGET_ARCH_ARM64