// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "bootstrapper.h"
#include "cpu-profiler.h"
#include "isolate-inl.h"

// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.

MacroAssembler::MacroAssembler(Isolate* arg_isolate,
                               byte* buffer,
                               unsigned buffer_size)
    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
      allow_macro_instructions_(true),
      has_frame_(false),
      use_real_aborts_(true),
      sp_(jssp),
      tmp_list_(DefaultTmpList()),
      fptmp_list_(DefaultFPTmpList()) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


CPURegList MacroAssembler::DefaultTmpList() {
  return CPURegList(ip0, ip1);
}


CPURegList MacroAssembler::DefaultFPTmpList() {
  return CPURegList(fp_scratch1, fp_scratch2);
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(isolate())) {
    Register temp = temps.AcquireX();
    LoadRelocated(temp, operand);
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.SizeInBits();
    ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
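    // For example, Bic(x0, x1, Operand(0xf0)) reaches this point with
    // op == BIC, which carries the NOT flag in its encoding; the code below
    // rewrites it as a plain AND of x1 with ~0xf0.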
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
      if (rd.Is32Bits()) {
        immediate &= kWRegMask;
      }
    }
    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    }
    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, immediate);
      if (rd.Is(csp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, temp, op);
        Mov(csp, temp);
      } else {
        Logical(rd, rn, temp, op);
      }
    }
  } else if (operand.IsExtendedRegister()) {
    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    ASSERT(operand.shift_amount() <= 4);
    ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

  } else {
    // The operand can be encoded in the instruction.
    ASSERT(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  ASSERT(!rd.IsZero());

  // TODO(all) extend to support more immediates.
  //
  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move-keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half-words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.
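  //
  // For example (illustrative), Mov(x0, 0x0000123400005678) can be generated
  // as:
  //   movz x0, #0x5678
  //   movk x0, #0x1234, lsl #32
  // because the two remaining halfwords are zero and need no move-keep
  // operations.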
  unsigned reg_size = rd.SizeInBits();
  unsigned n, imm_s, imm_r;
  if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(rd, imm);
  } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move inverted instruction. Movn can't
    // write to the stack pointer.
    movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
  } else {
    // Generic immediate case. Imm will be represented by
    // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
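    // For example, 0xffffffff00001234 contains two 0xffff halfwords but only
    // one 0x0000 halfword, so movn plus one movk (two instructions) beats
    // movz plus two movks (three instructions).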
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    ASSERT((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, (~imm16) & 0xffffL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    ASSERT(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
    }
  }
}


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(isolate())) {
    LoadRelocated(dst, operand);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.immediate());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If csp is an operand, add #0 is emitted; otherwise, orr #0.
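    // For example, Mov(w0, w0) emits "mov w0, w0" to clear the top word of
    // x0, but emits nothing if kDiscardForSameWReg was passed.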
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    ASSERT(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  ASSERT(allow_macro_instructions_);

  if (operand.NeedsRelocation(isolate())) {
    LoadRelocated(rd, operand);
    mvn(rd, rd);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, ~operand.immediate());

  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, rd);

  } else {
    mvn(rd, operand);
  }
}


unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  ASSERT((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}


// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
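// In other words, at most one halfword may be non-zero; for a 64-bit value
// this is CountClearHalfWords(imm, 64) >= 3. For example, 0x0000123400000000
// has three clear halfwords and can be loaded with a single
// "movz x0, #0x1234, lsl #32". IsImmMovn below applies the same test to ~imm.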
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  ASSERT((cond != al) && (cond != nv));
  if (operand.NeedsRelocation(isolate())) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    LoadRelocated(temp, operand);
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);

  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.immediate()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);

  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
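    // (csinc rd, rn, zr, cond yields rn when cond passes and zr + 1 == 1
    // otherwise; csinv likewise yields ~zr == -1 in the failing case.)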
    int64_t imm = operand.immediate();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, operand.immediate());
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(isolate()) && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation(isolate())) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    LoadRelocated(temp, operand);
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSub(rd, rn, temp, S, op);
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(isolate())) {
    Register temp = temps.AcquireX();
    LoadRelocated(temp, operand);
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register).
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    ASSERT(operand.shift() != ROR);
    ASSERT(is_uintn(operand.shift_amount(),
                    rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                                       : kWRegSizeInBitsLog2));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes here.
    ASSERT(operand.shift_amount() <= 4);
    ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
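  // For example (illustrative), Ldr(x0, MemOperand(x1, 0x123456)) fits
  // neither the scaled-unsigned nor the unscaled immediate field, so it is
  // emitted as:
  //   Mov(temp, 0x123456);
  //   ldr x0, [x1, temp]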
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}


void MacroAssembler::Load(const Register& rt,
                          const MemOperand& addr,
                          Representation r) {
  ASSERT(!r.IsDouble());

  if (r.IsInteger8()) {
    Ldrsb(rt, addr);
  } else if (r.IsUInteger8()) {
    Ldrb(rt, addr);
  } else if (r.IsInteger16()) {
    Ldrsh(rt, addr);
  } else if (r.IsUInteger16()) {
    Ldrh(rt, addr);
  } else if (r.IsInteger32()) {
    Ldr(rt.W(), addr);
  } else {
    ASSERT(rt.Is64Bits());
    Ldr(rt, addr);
  }
}


void MacroAssembler::Store(const Register& rt,
                           const MemOperand& addr,
                           Representation r) {
  ASSERT(!r.IsDouble());

  if (r.IsInteger8() || r.IsUInteger8()) {
    Strb(rt, addr);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    Strh(rt, addr);
  } else if (r.IsInteger32()) {
    Str(rt.W(), addr);
  } else {
    ASSERT(rt.Is64Bits());
    Str(rt, addr);
  }
}


bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
    Label *label, ImmBranchType b_type) {
  bool need_longer_range = false;
  // There are two situations in which we care about the offset being out of
  // range:
  //  - The label is bound but too far away.
  //  - The label is not bound but linked, and the previous branch
  //    instruction in the chain is too far away.
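  // The reachable ranges are modest: tbz/tbnz can branch +/-32KB, cbz/cbnz
  // and conditional branches +/-1MB, and unconditional branches +/-128MB.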
  if (label->is_bound() || label->is_linked()) {
    need_longer_range =
        !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
  }
  if (!need_longer_range && !label->is_bound()) {
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    // Also maintain the next pool check.
    next_veneer_pool_check_ =
        Min(next_veneer_pool_check_,
            max_reachable_pc - kVeneerDistanceCheckMargin);
  }
  return need_longer_range;
}
void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());

  if (hint == kAdrNear) {
    adr(rd, label);
    return;
  }

  ASSERT(hint == kAdrFar);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX();
  ASSERT(!AreAliased(rd, scratch));

  if (label->is_bound()) {
    int label_offset = label->pos() - pc_offset();
    if (Instruction::IsValidPCRelOffset(label_offset)) {
      adr(rd, label);
    } else {
      ASSERT(label_offset <= 0);
      int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
      adr(rd, min_adr_offset);
      Add(rd, rd, label_offset - min_adr_offset);
    }
  } else {
    InstructionAccurateScope scope(
        this, PatchingAssembler::kAdrFarPatchableNInstrs);
    adr(rd, label);
    for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
      nop(ADR_FAR_NOP);
    }
    movz(scratch, 0);
    add(rd, rd, scratch);
  }
}
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
         (bit == -1 || type >= kBranchTypeFirstUsingBit));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always:        B(label);              break;
      case never:         break;
      case reg_zero:      Cbz(reg, label);       break;
      case reg_not_zero:  Cbnz(reg, label);      break;
      case reg_bit_clear: Tbz(reg, bit, label);  break;
      case reg_bit_set:   Tbnz(reg, bit, label); break;
      default:
        UNREACHABLE();
    }
  }
}


void MacroAssembler::B(Label* label, Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT((cond != al) && (cond != nv));

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);

  if (need_extra_instructions) {
    b(&done, InvertCondition(cond));
    B(label);
  } else {
    b(label, cond);
  }
  bind(&done);
}


void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  ASSERT(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    B(label);
  } else {
    tbnz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
  ASSERT(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    B(label);
  } else {
    tbz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  ASSERT(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbz(rt, &done);
    B(label);
  } else {
    cbnz(rt, label);
  }
  bind(&done);
}


void MacroAssembler::Cbz(const Register& rt, Label* label) {
  ASSERT(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbnz(rt, &done);
    B(label);
  } else {
    cbz(rt, label);
  }
  bind(&done);
}


// Pseudo-instructions.


void MacroAssembler::Abs(const Register& rd, const Register& rm,
                         Label* is_not_representable,
                         Label* is_representable) {
  ASSERT(allow_macro_instructions_);
  ASSERT(AreSameSizeAndType(rd, rm));

  Cmp(rm, 1);
  Cneg(rd, rm, lt);

  // If the comparison sets the v flag, the input was the smallest value
  // representable by rm, and the mathematical result of abs(rm) is not
  // representable using two's complement.
  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
    B(is_representable);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
  }
}


// Abstracted stack operations.


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));

  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  ASSERT(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PrepareForPop(count, size);
  PopHelper(count, size, dst0, dst1, dst2, dst3);

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}


void MacroAssembler::PushPopQueue::PushQueued() {
  if (queued_.empty()) return;

  masm_->PrepareForPush(size_);

  int count = queued_.size();
  int index = 0;
  while (index < count) {
    // PushHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}


void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

  masm_->PrepareForPop(size_);

  int count = queued_.size();
  int index = 0;
  while (index < count) {
    // PopHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPush(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in order
  // to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPop(registers.Count(), size);
  // Pop up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be popped in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}


void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PrepareForPush(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Label loop;
    __ Mov(temp, count / 2);
    __ Bind(&loop);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);
    __ B(ne, &loop);

    count %= 2;
  }

  // Push up to four registers at a time if possible because if the current
  // stack pointer is csp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for csp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  while (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  ASSERT(count == 0);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Label loop, done;

    Subs(temp, count, 1);
    B(mi, &done);

    // Push all registers individually, to save code size.
    Bind(&loop);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
    B(pl, &loop);

    Bind(&done);
  } else {
    Label loop, leftover2, leftover1, done;

    Subs(temp, count, 4);
    B(mi, &leftover2);

    // Push groups of four first.
    Bind(&loop);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    B(pl, &loop);

    // Push groups of two.
    Bind(&leftover2);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    // Push the last one (if required).
    Bind(&leftover1);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Bind(&done);
  }
}
void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  ASSERT(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
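  // For example, Push(x0, x1) stores the pair as
  //   stp x1, x0, [StackPointer(), #-16]!
  // leaving x0 at the higher address, exactly as two single pushes would.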
  switch (count) {
    case 1:
      ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      ASSERT(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      ASSERT(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using csp, whilst maintaining 16-byte alignment for csp
      // at all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  ASSERT(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      ASSERT(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      ASSERT(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using csp, whilst maintaining 16-byte alignment
      // for csp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PrepareForPush(Operand total_size) {
  // TODO(jbramley): This assertion generates too much code in some debug tests.
  // AssertStackConsistency();
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      ASSERT((total_size.immediate() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else {
    // Even if the current stack pointer is not the system stack pointer (csp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(total_size);
  }
}


void MacroAssembler::PrepareForPop(Operand total_size) {
  AssertStackConsistency();
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      ASSERT((total_size.immediate() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  }
}


void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    ASSERT(offset.immediate() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    ASSERT(offset.immediate() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PokePair(const CPURegister& src1,
                              const CPURegister& src2,
                              int offset) {
  ASSERT(AreSameSizeAndType(src1, src2));
  ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
  Stp(src1, src2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PeekPair(const CPURegister& dst1,
                              const CPURegister& dst2,
                              int offset) {
  ASSERT(AreSameSizeAndType(dst1, dst2));
  ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  ASSERT(csp.Is(StackPointer()));
  MemOperand tos(csp, -2 * kXRegSize, PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);  // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  ASSERT(csp.Is(StackPointer()));

  MemOperand tos(csp, 2 * kXRegSize, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);  // x28 = jssp
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}
void MacroAssembler::AssertStackConsistency() {
  if (emit_debug_code()) {
    if (csp.Is(StackPointer())) {
      // We can't check the alignment of csp without using a scratch register
      // (or clobbering the flags), but the processor (or simulator) will abort
      // if it is not properly aligned during a load.
      ldr(xzr, MemOperand(csp, 0));
    } else if (FLAG_enable_slow_asserts) {
      Label ok;
      // Check that csp <= StackPointer(), preserving all registers and NZCV.
      sub(StackPointer(), csp, StackPointer());
      cbz(StackPointer(), &ok);              // Ok if csp == StackPointer().
      tbnz(StackPointer(), kXSignBit, &ok);  // Ok if csp < StackPointer().

      Abort(kTheCurrentStackPointerIsBelowCsp);

      bind(&ok);
      // Restore StackPointer().
      sub(StackPointer(), csp, StackPointer());
    }
  }
}


void MacroAssembler::LoadRoot(CPURegister destination,
                              Heap::RootListIndex index) {
  // TODO(jbramley): Most root values are constants, and can be synthesized
  // without a load. Refer to the ARM back end for details.
  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  Str(source, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,
      MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
}


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Mov(result, Operand(cell));
    Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  } else {
    Mov(result, Operand(object));
  }
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Map::EnumLengthBits::kMask);
}


void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Smi::FromInt(Map::EnumLengthBits::kMask));
}


void MacroAssembler::CheckEnumCache(Register object,
                                    Register null_value,
                                    Register scratch0,
                                    Register scratch1,
                                    Register scratch2,
                                    Register scratch3,
                                    Label* call_runtime) {
  ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
                     scratch3));

  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;

  Mov(current_object, object);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  Register map = scratch2;
  Register enum_length = scratch3;
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  EnumLengthUntagged(enum_length, map);
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  B(&start);

  Bind(&next);
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

  Bind(&start);

  // Check that there are no elements. Register current_object contains the
  // current JS object we've reached through the prototype chain.
  Label no_elements;
  Ldr(current_object, FieldMemOperand(current_object,
                                      JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Bind(&no_elements);
  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
  Cmp(current_object, null_value);
  B(ne, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  Add(scratch1, receiver,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
  Cmp(scratch1, new_space_start);
  B(lt, no_memento_found);

  Mov(scratch2, new_space_allocation_top);
  Ldr(scratch2, MemOperand(scratch2));
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);

  Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
  Cmp(scratch1,
      Operand(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::JumpToHandlerEntry(Register exception,
                                        Register object,
                                        Register state,
                                        Register scratch1,
                                        Register scratch2) {
  // Handler expects argument in x0.
  ASSERT(exception.Is(x0));

  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
  Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
  Lsr(scratch2, state, StackHandler::kKindWidth);
  Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
  Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
  Br(scratch1);
}


void MacroAssembler::InNewSpace(Register object,
                                Condition cond,
                                Label* branch) {
  ASSERT(cond == eq || cond == ne);
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  And(temp, object, ExternalReference::new_space_mask(isolate()));
  Cmp(temp, ExternalReference::new_space_start(isolate()));
  B(cond, branch);
}


void MacroAssembler::Throw(Register value,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Register scratch4) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The handler expects the exception in x0.
  ASSERT(value.Is(x0));

  // Drop the stack pointer to the top of the top handler.
  ASSERT(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                          isolate())));
  Ldr(jssp, MemOperand(scratch1));
  // Restore the next handler.
  Pop(scratch2);
  Str(scratch2, MemOperand(scratch1));

  // Get the code object and state. Restore the context and frame pointer.
  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  Label not_js_frame;
  Cbz(cp, &not_js_frame);
  Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  Bind(&not_js_frame);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}


void MacroAssembler::ThrowUncatchable(Register value,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      Register scratch4) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The handler expects the exception in x0.
  ASSERT(value.Is(x0));

  // Drop the stack pointer to the top of the top stack handler.
  ASSERT(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                          isolate())));
  Ldr(jssp, MemOperand(scratch1));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  B(&check_kind);
  Bind(&fetch_next);
  Peek(jssp, StackHandlerConstants::kNextOffset);

  Bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  Peek(scratch2, StackHandlerConstants::kStateOffset);
  TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);

  // Set the top handler address to next handler past the top ENTRY handler.
  Pop(scratch2);
  Str(scratch2, MemOperand(scratch1));

  // Get the code object and state. Clear the context and frame pointer (0 was
  // saved in the handler).
  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}
void MacroAssembler::Throw(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  RecordComment("Throw message: ");
  RecordComment((msg != NULL) ? msg : "UNKNOWN");
#endif

  Mov(x0, Smi::FromInt(reason));
  Push(x0);

  // Disable stub call restrictions to always allow calls to throw.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kHiddenThrowMessage, 1);
  } else {
    CallRuntime(Runtime::kHiddenThrowMessage, 1);
  }
  // ThrowMessage should not return here.
  Unreachable();
}
void MacroAssembler::ThrowIf(Condition cond, BailoutReason reason) {
  Label ok;
  B(InvertCondition(cond), &ok);
  Throw(reason);
  Bind(&ok);
}


void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
  Label ok;
  JumpIfNotSmi(value, &ok);
  Throw(reason);
  Bind(&ok);
}


void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
  ASSERT(smi.Is64Bits());
  Abs(smi, smi, slow);
}


void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, reason);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAName);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, LAST_NAME_TYPE);
    Check(ls, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
    Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    Bind(&done_checking);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsASmiAndNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All arguments must be on the stack before this function is called.
  // x0 holds the return value after the call.

  // Check that the number of arguments matches what the function expects.
  // If f->nargs is -1, the function can accept a variable number of arguments.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    // Illegal operation: drop the stack arguments and return undefined.
    if (num_arguments > 0) {
      Drop(num_arguments);
    }
    LoadRoot(x0, Heap::kUndefinedValueRootIndex);
    return;
  }

  // Place the necessary arguments.
  Mov(x0, num_arguments);
  Mov(x1, ExternalReference(f, isolate()));

  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub);
}


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    int stack_space,
    int spill_offset,
    MemOperand return_value_operand,
    MemOperand* context_restore_operand) {
  ASM_LOCATION("CallApiFunctionAndReturn");
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);

  ASSERT(function_address.is(x1) || function_address.is(x2));

  Label profiler_disabled;
  Label end_profiler_check;
  Mov(x10, ExternalReference::is_profiling_address(isolate()));
  Ldrb(w10, MemOperand(x10));
  Cbz(w10, &profiler_disabled);
  Mov(x3, thunk_ref);
  B(&end_profiler_check);

  Bind(&profiler_disabled);
  Mov(x3, function_address);
  Bind(&end_profiler_check);

  // Save the callee-save registers we are going to use.
  // TODO(all): Is this necessary? ARM doesn't do it.
  STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
  Poke(x19, (spill_offset + 0) * kXRegSize);
  Poke(x20, (spill_offset + 1) * kXRegSize);
  Poke(x21, (spill_offset + 2) * kXRegSize);
  Poke(x22, (spill_offset + 3) * kXRegSize);

  // Allocate HandleScope in callee-save registers.
  // We will need to restore the HandleScope after the call to the API
  // function; by allocating it in callee-save registers, it will be preserved
  // by C code.
  Register handle_scope_base = x22;
  Register next_address_reg = x19;
  Register limit_reg = x20;
  Register level_reg = w21;

  Mov(handle_scope_base, next_address);
  Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  Add(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    Mov(x0, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate());
  stub.GenerateCall(this, x3);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    Mov(x0, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  Ldr(x0, return_value_operand);
  Bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  if (emit_debug_code()) {
    Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
    Cmp(w1, level_reg);
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  Sub(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
  Cmp(limit_reg, x1);
  B(ne, &delete_allocated_handles);

  Bind(&leave_exit_frame);
  // Restore callee-saved registers.
  Peek(x19, (spill_offset + 0) * kXRegSize);
  Peek(x20, (spill_offset + 1) * kXRegSize);
  Peek(x21, (spill_offset + 2) * kXRegSize);
  Peek(x22, (spill_offset + 3) * kXRegSize);

  // Check if the function scheduled an exception.
  Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
  Ldr(x5, MemOperand(x5));
  JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
  Bind(&exception_handled);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    Ldr(cp, *context_restore_operand);
  }

  LeaveExitFrame(false, x1, !restore_context);
  Drop(stack_space);
  Ret();

  Bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallExternalReference(
        ExternalReference(
            Runtime::kHiddenPromoteScheduledException, isolate()), 0);
  }
  B(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  Bind(&delete_allocated_handles);
  Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  // Save the return value in a callee-save register.
  Register saved_result = x19;
  Mov(saved_result, x0);
  Mov(x0, ExternalReference::isolate_address(isolate()));
  CallCFunction(
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
  Mov(x0, saved_result);
  B(&leave_exit_frame);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Mov(x0, num_arguments);
  Mov(x1, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  Mov(x1, builtin);
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  Ldr(target, GlobalObjectMemOperand());
  Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  Ldr(target, FieldMemOperand(target,
                              JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target,
                                     Register function,
                                     Builtins::JavaScript id) {
  ASSERT(!AreAliased(target, function));
  GetBuiltinFunction(function, id);
  // Load the code entry point from the builtins object.
  Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Get the builtin entry in x2 and set up the function object in x1.
  GetBuiltinEntry(x2, x1, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(x2));
    Call(x2);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(x2);
  }
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Mov(x0, num_arguments);
  JumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  ASSERT(!AreAliased(string, length, scratch1, scratch2));
  LoadRoot(scratch2, map_index);
  SmiTag(scratch1, length);
  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));

  Mov(scratch2, String::kEmptyHashField);
  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM64
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM64
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args) {
  CallCFunction(function, num_of_reg_args, 0);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);
  CallCFunction(temp, num_of_reg_args, num_of_double_args);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  ASSERT(has_frame());
  // We can pass 8 integer arguments in registers. If we need to pass more than
  // that, we'll need to implement support for passing them on the stack.
  ASSERT(num_of_reg_args <= 8);

  // If we're passing doubles, we're limited to the following prototypes
  // (defined by ExternalReference::Type):
  //   BUILTIN_COMPARE_CALL: int f(double, double)
  //   BUILTIN_FP_FP_CALL:   double f(double, double)
  //   BUILTIN_FP_CALL:      double f(double)
  //   BUILTIN_FP_INT_CALL:  double f(double, int)
  if (num_of_double_args > 0) {
    ASSERT(num_of_reg_args <= 1);
    ASSERT((num_of_double_args + num_of_reg_args) <= 2);
  }

  // If the stack pointer is not csp, we need to derive an aligned csp from the
  // current stack pointer.
  const Register old_stack_pointer = StackPointer();
  if (!csp.Is(old_stack_pointer)) {
    AssertStackConsistency();

    int sp_alignment = ActivationFrameAlignment();
    // The ABI mandates at least 16-byte alignment.
    ASSERT(sp_alignment >= 16);
    ASSERT(IsPowerOf2(sp_alignment));

    // The current stack pointer is a callee saved register, and is preserved
    // across the call.
    ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));

    // Align and synchronize the system stack pointer with jssp.
    Bic(csp, old_stack_pointer, sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Call directly. The function called cannot cause a GC, or allow preemption,
  // so the return address in the link register stays correct.
  Call(function);

  if (!csp.Is(old_stack_pointer)) {
    if (emit_debug_code()) {
      // Because the stack pointer must be aligned on a 16-byte boundary, the
      // aligned csp can be up to 12 bytes below the jssp. This is the case
      // where we only pushed one W register on top of an aligned jssp.
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireX();
      ASSERT(ActivationFrameAlignment() == 16);
      Sub(temp, csp, old_stack_pointer);
      // We want temp <= 0 && temp >= -12.
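      // (Cmp(temp, 0) checks the first half; if temp <= 0 holds (le), Ccmp
      // compares temp with -12, otherwise it forces the N flag so that the
      // Check(ge) below fails.)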
1969 Ccmp(temp, -12, NFlag, le);
1970 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1972 SetStackPointer(old_stack_pointer);
1977 void MacroAssembler::Jump(Register target) {
1978 Br(target);
1982 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
1983 UseScratchRegisterScope temps(this);
1984 Register temp = temps.AcquireX();
1985 Mov(temp, Operand(target, rmode));
1986 Br(temp);
1990 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
1991 ASSERT(!RelocInfo::IsCodeTarget(rmode));
1992 Jump(reinterpret_cast<intptr_t>(target), rmode);
1996 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
1997 ASSERT(RelocInfo::IsCodeTarget(rmode));
1998 AllowDeferredHandleDereference embedding_raw_address;
1999 Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
2003 void MacroAssembler::Call(Register target) {
2004 BlockPoolsScope scope(this);
2005 #ifdef DEBUG
2006 Label start_call;
2007 Bind(&start_call);
2008 #endif
2010 Blr(target);
2013 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
2018 void MacroAssembler::Call(Label* target) {
2019 BlockPoolsScope scope(this);
2020 #ifdef DEBUG
2021 Label start_call;
2022 Bind(&start_call);
2023 #endif
2025 Bl(target);
2028 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
2033 // MacroAssembler::CallSize is sensitive to changes in this function, as it
2034 // requires knowing how many instructions are used to branch to the target.
2035 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
2036 BlockPoolsScope scope(this);
2037 #ifdef DEBUG
2038 Label start_call;
2039 Bind(&start_call);
2040 #endif
2041 // Statement positions are expected to be recorded when the target
2042 // address is loaded.
2043 positions_recorder()->WriteRecordedPositions();
2045 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2046 ASSERT(rmode != RelocInfo::NONE32);
2048 UseScratchRegisterScope temps(this);
2049 Register temp = temps.AcquireX();
2051 if (rmode == RelocInfo::NONE64) {
2052 // Addresses are 48 bits so we never need to load the upper 16 bits.
2053 uint64_t imm = reinterpret_cast<uint64_t>(target);
2054 // If we don't use ARM tagged addresses, the upper 16 bits must be 0.
2055 ASSERT(((imm >> 48) & 0xffff) == 0);
2056 movz(temp, (imm >> 0) & 0xffff, 0);
2057 movk(temp, (imm >> 16) & 0xffff, 16);
2058 movk(temp, (imm >> 32) & 0xffff, 32);
2059 } else {
2060 LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
2061 }
2062 Blr(temp);
2063 #ifdef DEBUG
2064 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
2065 #endif
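// A sketch of the NONE64 path above: a 48-bit address such as
// 0x0000123456789abc (an assumed example value) is materialized one
// 16-bit half-word at a time:
//   movz(temp, 0x9abc, 0);   // temp = 0x0000000000009abc
//   movk(temp, 0x5678, 16);  // temp = 0x0000000056789abc
//   movk(temp, 0x1234, 32);  // temp = 0x0000123456789abc
// The assert guarantees the upper 16 bits are zero, so no fourth move is
// needed.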
2069 void MacroAssembler::Call(Handle<Code> code,
2070 RelocInfo::Mode rmode,
2071 TypeFeedbackId ast_id) {
2072 #ifdef DEBUG
2073 Label start_call;
2074 Bind(&start_call);
2075 #endif
2077 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
2078 SetRecordedAstId(ast_id);
2079 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2082 AllowDeferredHandleDereference embedding_raw_address;
2083 Call(reinterpret_cast<Address>(code.location()), rmode);
2085 #ifdef DEBUG
2086 // Check the size of the code generated.
2087 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
2088 #endif
2092 int MacroAssembler::CallSize(Register target) {
2094 return kInstructionSize;
2098 int MacroAssembler::CallSize(Label* target) {
2100 return kInstructionSize;
2104 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2107 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2108 ASSERT(rmode != RelocInfo::NONE32);
2110 if (rmode == RelocInfo::NONE64) {
2111 return kCallSizeWithoutRelocation;
2113 return kCallSizeWithRelocation;
2118 int MacroAssembler::CallSize(Handle<Code> code,
2119 RelocInfo::Mode rmode,
2120 TypeFeedbackId ast_id) {
2124 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2125 ASSERT(rmode != RelocInfo::NONE32);
2127 if (rmode == RelocInfo::NONE64) {
2128 return kCallSizeWithoutRelocation;
2130 return kCallSizeWithRelocation;
2138 void MacroAssembler::JumpForHeapNumber(Register object,
2139 Register heap_number_map,
2140 Label* on_heap_number,
2141 Label* on_not_heap_number) {
2142 ASSERT(on_heap_number || on_not_heap_number);
2143 AssertNotSmi(object);
2145 UseScratchRegisterScope temps(this);
2146 Register temp = temps.AcquireX();
2148 // Load the HeapNumber map if it is not passed.
2149 if (heap_number_map.Is(NoReg)) {
2150 heap_number_map = temps.AcquireX();
2151 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2152 } else {
2153 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2156 ASSERT(!AreAliased(temp, heap_number_map));
2158 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2159 Cmp(temp, heap_number_map);
2161 if (on_heap_number) {
2162 B(eq, on_heap_number);
2164 if (on_not_heap_number) {
2165 B(ne, on_not_heap_number);
2170 void MacroAssembler::JumpIfHeapNumber(Register object,
2171 Label* on_heap_number,
2172 Register heap_number_map) {
2173 JumpForHeapNumber(object,
2174 heap_number_map,
2175 on_heap_number,
2176 NULL);
2180 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2181 Label* on_not_heap_number,
2182 Register heap_number_map) {
2183 JumpForHeapNumber(object,
2184 heap_number_map,
2185 NULL,
2186 on_not_heap_number);
2190 void MacroAssembler::LookupNumberStringCache(Register object,
2191 Register result,
2192 Register scratch1,
2193 Register scratch2,
2194 Register scratch3,
2195 Label* not_found) {
2196 ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
2198 // Use of registers. Register result is used as a temporary.
2199 Register number_string_cache = result;
2200 Register mask = scratch3;
2202 // Load the number string cache.
2203 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2205 // Make the hash mask from the length of the number string cache. It
2206 // contains two elements (number and string) for each cache entry.
2207 Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
2208 FixedArray::kLengthOffset));
2209 Asr(mask, mask, 1); // Divide length by two.
2210 Sub(mask, mask, 1); // Make mask.
2212 // Calculate the entry in the number string cache. The hash value in the
2213 // number string cache for smis is just the smi value, and the hash for
2214 // doubles is the xor of the upper and lower words. See
2215 // Heap::GetNumberStringCache.
2216 Label is_smi;
2217 Label load_result_from_cache;
2219 JumpIfSmi(object, &is_smi);
2220 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
2221 DONT_DO_SMI_CHECK);
2223 STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
2224 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
2225 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
2226 Eor(scratch1, scratch1, scratch2);
2227 And(scratch1, scratch1, mask);
2229 // Calculate address of entry in string cache: each entry consists of two
2230 // pointer sized fields.
2231 Add(scratch1, number_string_cache,
2232 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
2234 Register probe = mask;
2235 Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2236 JumpIfSmi(probe, not_found);
2237 Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
2238 Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
2239 Fcmp(d0, d1);
2240 B(ne, not_found);
2241 B(&load_result_from_cache);
2243 Bind(&is_smi);
2244 Register scratch = scratch1;
2245 And(scratch, mask, Operand::UntagSmi(object));
2246 // Calculate address of entry in string cache: each entry consists
2247 // of two pointer sized fields.
2248 Add(scratch, number_string_cache,
2249 Operand(scratch, LSL, kPointerSizeLog2 + 1));
2251 // Check if the entry is the smi we are looking for.
2252 Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2253 Cmp(object, probe);
2254 B(ne, not_found);
2256 // Get the result from the cache.
2257 Bind(&load_result_from_cache);
2258 Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
2259 IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
2260 scratch1, scratch2);
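// To illustrate the indexing scheme above: each cache entry is a
// (number, string) pair, so for hash h and capacity mask m the entry
// address is
//   entry = cache + ((h & m) << (kPointerSizeLog2 + 1))
// i.e. the masked hash is scaled by two pointers (16 bytes per entry on a
// 64-bit target); the string lives one pointer after the number.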
2264 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2265 FPRegister value,
2266 FPRegister scratch_d,
2267 Label* on_successful_conversion,
2268 Label* on_failed_conversion) {
2269 // Convert to an int and back again, then compare with the original value.
2270 Fcvtzs(as_int, value);
2271 Scvtf(scratch_d, as_int);
2272 Fcmp(value, scratch_d);
2274 if (on_successful_conversion) {
2275 B(on_successful_conversion, eq);
2277 if (on_failed_conversion) {
2278 B(on_failed_conversion, ne);
2283 void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2284 UseScratchRegisterScope temps(this);
2285 Register temp = temps.AcquireX();
2286 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
2287 // cause overflow.
2288 Fmov(temp, input);
2289 Cmp(temp, 1);
2293 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2294 Label* on_negative_zero) {
2295 TestForMinusZero(input);
2296 B(vs, on_negative_zero);
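// A worked example of the check above: the IEEE-754 bit pattern of -0.0 is
// 0x8000000000000000, which is INT64_MIN when moved bit-for-bit into an X
// register. Cmp(temp, 1) then computes INT64_MIN - 1, which overflows and
// sets the V flag; no other double has this bit pattern, so B(vs, ...)
// fires exactly for -0.0.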
2300 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2301 // Clamp the value to [0..255].
2302 Cmp(input.W(), Operand(input.W(), UXTB));
2303 // If input < input & 0xff, it must be < 0, so saturate to 0.
2304 Csel(output.W(), wzr, input.W(), lt);
2305 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2306 Csel(output.W(), output.W(), 255, le);
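// Worked examples of the compare-once, select-twice pattern above
// (assumed sample values):
//   input = -5  : -5 < (-5 & 0xff)   -> lt       -> output = 0
//   input = 300 : 300 > (300 & 0xff) -> above le -> output = 255
//   input = 42  : 42 == (42 & 0xff)  -> eq       -> output = 42
// Both Csel instructions reuse the flags from the single Cmp.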
2310 void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2311 ClampInt32ToUint8(in_out, in_out);
2315 void MacroAssembler::ClampDoubleToUint8(Register output,
2316 DoubleRegister input,
2317 DoubleRegister dbl_scratch) {
2318 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2319 // - Inputs lower than 0 (including -infinity) produce 0.
2320 // - Inputs higher than 255 (including +infinity) produce 255.
2321 // Also, it seems that PIXEL types use round-to-nearest rather than
2322 // round-towards-zero.
2324 // Squash +infinity before the conversion, since Fcvtnu will normally
2325 // convert it to 0.
2326 Fmov(dbl_scratch, 255);
2327 Fmin(dbl_scratch, dbl_scratch, input);
2329 // Convert double to unsigned integer. Values less than zero become zero.
2330 // Values greater than 255 have already been clamped to 255.
2331 Fcvtnu(output, dbl_scratch);
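// For example (assumed inputs): 300.0 is clamped to 255.0 by the Fmin
// above; 1.5 passes through and Fcvtnu rounds it to 2 (round-to-nearest,
// ties to even); negative values and NaN produce 0 in the unsigned
// conversion.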
2335 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
2336 Register src,
2337 unsigned count,
2338 Register scratch1,
2339 Register scratch2,
2340 Register scratch3,
2341 Register scratch4,
2342 Register scratch5) {
2343 // Untag src and dst into scratch registers.
2344 // Copy src->dst in a tight loop.
2345 ASSERT(!AreAliased(dst, src,
2346 scratch1, scratch2, scratch3, scratch4, scratch5));
2349 const Register& remaining = scratch3;
2350 Mov(remaining, count / 2);
2352 const Register& dst_untagged = scratch1;
2353 const Register& src_untagged = scratch2;
2354 Sub(dst_untagged, dst, kHeapObjectTag);
2355 Sub(src_untagged, src, kHeapObjectTag);
2357 // Copy fields in pairs.
2358 Label loop;
2359 Bind(&loop);
2360 Ldp(scratch4, scratch5,
2361 MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2362 Stp(scratch4, scratch5,
2363 MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2364 Sub(remaining, remaining, 1);
2365 Cbnz(remaining, &loop);
2367 // Handle the leftovers.
2368 if (count & 1) {
2369 Ldr(scratch4, MemOperand(src_untagged));
2370 Str(scratch4, MemOperand(dst_untagged));
2375 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
2376 Register src,
2377 unsigned count,
2378 Register scratch1,
2379 Register scratch2,
2380 Register scratch3,
2381 Register scratch4) {
2382 // Untag src and dst into scratch registers.
2383 // Copy src->dst in an unrolled loop.
2384 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
2386 const Register& dst_untagged = scratch1;
2387 const Register& src_untagged = scratch2;
2388 Sub(dst_untagged, dst, kHeapObjectTag);
2389 Sub(src_untagged, src, kHeapObjectTag);
2391 // Copy fields in pairs.
2392 for (unsigned i = 0; i < count / 2; i++) {
2393 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2394 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2397 // Handle the leftovers.
2398 if (count & 1) {
2399 Ldr(scratch3, MemOperand(src_untagged));
2400 Str(scratch3, MemOperand(dst_untagged));
2405 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
2406 Register src,
2407 unsigned count,
2408 Register scratch1,
2409 Register scratch2,
2410 Register scratch3) {
2411 // Untag src and dst into scratch registers.
2412 // Copy src->dst in an unrolled loop.
2413 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
2415 const Register& dst_untagged = scratch1;
2416 const Register& src_untagged = scratch2;
2417 Sub(dst_untagged, dst, kHeapObjectTag);
2418 Sub(src_untagged, src, kHeapObjectTag);
2420 // Copy fields one by one.
2421 for (unsigned i = 0; i < count; i++) {
2422 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
2423 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
2428 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
2429 unsigned count) {
2430 // One of two methods is used:
2432 // For high 'count' values where many scratch registers are available:
2433 // Untag src and dst into scratch registers.
2434 // Copy src->dst in a tight loop.
2436 // For low 'count' values or where few scratch registers are available:
2437 // Untag src and dst into scratch registers.
2438 // Copy src->dst in an unrolled loop.
2440 // In both cases, fields are copied in pairs if possible, and left-overs are
2441 // handled separately.
2442 ASSERT(!AreAliased(dst, src));
2443 ASSERT(!temps.IncludesAliasOf(dst));
2444 ASSERT(!temps.IncludesAliasOf(src));
2445 ASSERT(!temps.IncludesAliasOf(xzr));
2447 if (emit_debug_code()) {
2448 Cmp(dst, src);
2449 Check(ne, kTheSourceAndDestinationAreTheSame);
2452 // The value of 'count' at which a loop will be generated (if there are
2453 // enough scratch registers).
2454 static const unsigned kLoopThreshold = 8;
2456 UseScratchRegisterScope masm_temps(this);
2457 if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
2458 CopyFieldsLoopPairsHelper(dst, src, count,
2459 Register(temps.PopLowestIndex()),
2460 Register(temps.PopLowestIndex()),
2461 Register(temps.PopLowestIndex()),
2462 masm_temps.AcquireX(),
2463 masm_temps.AcquireX());
2464 } else if (temps.Count() >= 2) {
2465 CopyFieldsUnrolledPairsHelper(dst, src, count,
2466 Register(temps.PopLowestIndex()),
2467 Register(temps.PopLowestIndex()),
2468 masm_temps.AcquireX(),
2469 masm_temps.AcquireX());
2470 } else if (temps.Count() == 1) {
2471 CopyFieldsUnrolledHelper(dst, src, count,
2472 Register(temps.PopLowestIndex()),
2473 masm_temps.AcquireX(),
2474 masm_temps.AcquireX());
2481 void MacroAssembler::CopyBytes(Register dst,
2482 Register src,
2483 Register length,
2484 Register scratch,
2485 CopyHint hint) {
2486 UseScratchRegisterScope temps(this);
2487 Register tmp1 = temps.AcquireX();
2488 Register tmp2 = temps.AcquireX();
2489 ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2490 ASSERT(!AreAliased(src, dst, csp));
2492 if (emit_debug_code()) {
2493 // Check copy length.
2494 Cmp(length, 0);
2495 Assert(ge, kUnexpectedNegativeValue);
2497 // Check src and dst buffers don't overlap.
2498 Add(scratch, src, length); // Calculate end of src buffer.
2499 Cmp(scratch, dst);
2500 Add(scratch, dst, length); // Calculate end of dst buffer.
2501 Ccmp(scratch, src, ZFlag, gt);
2502 Assert(le, kCopyBuffersOverlap);
2505 Label short_copy, short_loop, bulk_loop, done;
2507 if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
2508 Register bulk_length = scratch;
2509 int pair_size = 2 * kXRegSize;
2510 int pair_mask = pair_size - 1;
2512 Bic(bulk_length, length, pair_mask);
2513 Cbz(bulk_length, &short_copy);
2514 Bind(&bulk_loop);
2515 Sub(bulk_length, bulk_length, pair_size);
2516 Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
2517 Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
2518 Cbnz(bulk_length, &bulk_loop);
2520 And(length, length, pair_mask);
2523 Bind(&short_copy);
2524 Cbz(length, &done);
2525 Bind(&short_loop);
2526 Sub(length, length, 1);
2527 Ldrb(tmp1, MemOperand(src, 1, PostIndex));
2528 Strb(tmp1, MemOperand(dst, 1, PostIndex));
2529 Cbnz(length, &short_loop);
2531 Bind(&done);
2536 void MacroAssembler::FillFields(Register dst,
2537 Register field_count,
2538 Register filler) {
2539 ASSERT(!dst.Is(csp));
2540 UseScratchRegisterScope temps(this);
2541 Register field_ptr = temps.AcquireX();
2542 Register counter = temps.AcquireX();
2543 Label done;
2545 // Decrement count. If the result < zero, count was zero, and there's nothing
2546 // to do. If count was one, flags are set to fail the gt condition at the end
2547 // of the pairs loop.
2548 Subs(counter, field_count, 1);
2549 B(lt, &done);
2551 // There's at least one field to fill, so do this unconditionally.
2552 Str(filler, MemOperand(dst, kPointerSize, PostIndex));
2554 // If the bottom bit of counter is set, there are an even number of fields to
2555 // fill, so pull the start pointer back by one field, allowing the pairs loop
2556 // to overwrite the field that was stored above.
2557 And(field_ptr, counter, 1);
2558 Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
2560 // Store filler to memory in pairs.
2561 Label entry, loop;
2562 B(&entry);
2563 Bind(&loop);
2564 Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
2565 Subs(counter, counter, 2);
2566 Bind(&entry);
2567 B(gt, &loop);
2569 Bind(&done);
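// A worked example of the pairing trick above (field counts assumed):
//   field_count = 3: counter = 2, bottom bit clear, so field_ptr stays at
//     dst + 8 after the single Str; one Stp fills fields 1-2. Total: 3.
//   field_count = 4: counter = 3, bottom bit set, so field_ptr is pulled
//     back to dst; two Stp iterations fill fields 0-3, harmlessly
//     rewriting field 0. Total: 4.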
2573 void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
2574 Register first,
2575 Register second,
2576 Register scratch1,
2577 Register scratch2,
2578 Label* failure,
2579 SmiCheckType smi_check) {
2581 if (smi_check == DO_SMI_CHECK) {
2582 JumpIfEitherSmi(first, second, failure);
2583 } else if (emit_debug_code()) {
2584 ASSERT(smi_check == DONT_DO_SMI_CHECK);
2585 Label not_smi;
2586 JumpIfEitherSmi(first, second, NULL, &not_smi);
2588 // At least one input is a smi, but the flags indicated a smi check wasn't
2589 // needed.
2590 Abort(kUnexpectedSmi);
2592 Bind(&not_smi);
2595 // Test that both first and second are sequential ASCII strings.
2596 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2597 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2598 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2599 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2601 JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
2602 scratch2,
2603 scratch1,
2604 scratch2,
2605 failure);
2609 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
2610 Register first,
2611 Register second,
2612 Register scratch1,
2613 Register scratch2,
2614 Label* failure) {
2615 ASSERT(!AreAliased(scratch1, second));
2616 ASSERT(!AreAliased(scratch1, scratch2));
2617 static const int kFlatAsciiStringMask =
2618 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2619 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2620 And(scratch1, first, kFlatAsciiStringMask);
2621 And(scratch2, second, kFlatAsciiStringMask);
2622 Cmp(scratch1, kFlatAsciiStringTag);
2623 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
2624 B(ne, failure);
2628 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
2629 Register scratch,
2630 Label* failure) {
2631 const int kFlatAsciiStringMask =
2632 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2633 const int kFlatAsciiStringTag =
2634 kStringTag | kOneByteStringTag | kSeqStringTag;
2635 And(scratch, type, kFlatAsciiStringMask);
2636 Cmp(scratch, kFlatAsciiStringTag);
2637 B(ne, failure);
2641 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2642 Register first,
2643 Register second,
2644 Register scratch1,
2645 Register scratch2,
2646 Label* failure) {
2647 ASSERT(!AreAliased(first, second, scratch1, scratch2));
2648 const int kFlatAsciiStringMask =
2649 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2650 const int kFlatAsciiStringTag =
2651 kStringTag | kOneByteStringTag | kSeqStringTag;
2652 And(scratch1, first, kFlatAsciiStringMask);
2653 And(scratch2, second, kFlatAsciiStringMask);
2654 Cmp(scratch1, kFlatAsciiStringTag);
2655 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
2656 B(ne, failure);
2660 void MacroAssembler::JumpIfNotUniqueName(Register type,
2661 Label* not_unique_name) {
2662 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2663 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2664 //   continue
2665 // } else {
2666 //   goto not_unique_name
2667 // }
2668 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2669 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2670 B(ne, not_unique_name);
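// The Tst/Ccmp pair fuses both tests into one flag-setting sequence: Tst
// sets Z when the type is an internalized string; only if it is not (ne)
// does Ccmp actually compare type against SYMBOL_TYPE, otherwise it forces
// the flags to ZFlag ("equal"). The final B(ne, ...) therefore branches
// only when the type is neither an internalized string nor SYMBOL_TYPE.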
2674 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2675 const ParameterCount& actual,
2676 Handle<Code> code_constant,
2677 Register code_reg,
2678 Label* done,
2679 InvokeFlag flag,
2680 bool* definitely_mismatches,
2681 const CallWrapper& call_wrapper) {
2682 bool definitely_matches = false;
2683 *definitely_mismatches = false;
2684 Label regular_invoke;
2686 // Check whether the expected and actual arguments count match. If not,
2687 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
2688 // x0: actual arguments count.
2689 // x1: function (passed through to callee).
2690 // x2: expected arguments count.
2692 // The code below is made a lot easier because the calling code already sets
2693 // up actual and expected registers according to the contract if values are
2694 // passed in registers.
2695 ASSERT(actual.is_immediate() || actual.reg().is(x0));
2696 ASSERT(expected.is_immediate() || expected.reg().is(x2));
2697 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
2699 if (expected.is_immediate()) {
2700 ASSERT(actual.is_immediate());
2701 if (expected.immediate() == actual.immediate()) {
2702 definitely_matches = true;
2703 } else {
2705 Mov(x0, actual.immediate());
2706 if (expected.immediate() ==
2707 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2708 // Don't worry about adapting arguments for builtins that
2709 // don't want that done. Skip adaptation code by making it look
2710 // like we have a match between expected and actual number of
2711 // arguments.
2712 definitely_matches = true;
2713 } else {
2714 *definitely_mismatches = true;
2715 // Set up x2 for the argument adaptor.
2716 Mov(x2, expected.immediate());
2720 } else { // expected is a register.
2721 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2722 : Operand(actual.reg());
2723 // If actual == expected perform a regular invocation.
2724 Cmp(expected.reg(), actual_op);
2725 B(eq, &regular_invoke);
2726 // Otherwise set up x0 for the argument adaptor.
2727 Mov(x0, actual_op);
2730 // If the argument counts may mismatch, generate a call to the argument
2731 // adaptor.
2732 if (!definitely_matches) {
2733 if (!code_constant.is_null()) {
2734 Mov(x3, Operand(code_constant));
2735 Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
2738 Handle<Code> adaptor =
2739 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2740 if (flag == CALL_FUNCTION) {
2741 call_wrapper.BeforeCall(CallSize(adaptor));
2742 Call(adaptor);
2743 call_wrapper.AfterCall();
2744 if (!*definitely_mismatches) {
2745 // If the arg counts don't match, no extra code is emitted by
2746 // MAsm::InvokeCode and we can just fall through.
2747 B(done);
2748 }
2749 } else {
2750 Jump(adaptor, RelocInfo::CODE_TARGET);
2753 Bind(&regular_invoke);
2757 void MacroAssembler::InvokeCode(Register code,
2758 const ParameterCount& expected,
2759 const ParameterCount& actual,
2760 InvokeFlag flag,
2761 const CallWrapper& call_wrapper) {
2762 // You can't call a function without a valid frame.
2763 ASSERT(flag == JUMP_FUNCTION || has_frame());
2765 Label done;
2767 bool definitely_mismatches = false;
2768 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2769 &definitely_mismatches, call_wrapper);
2771 // If we are certain that actual != expected, then we know InvokePrologue will
2772 // have handled the call through the argument adaptor mechanism.
2773 // The called function expects the call kind in x5.
2774 if (!definitely_mismatches) {
2775 if (flag == CALL_FUNCTION) {
2776 call_wrapper.BeforeCall(CallSize(code));
2777 Call(code);
2778 call_wrapper.AfterCall();
2779 } else {
2780 ASSERT(flag == JUMP_FUNCTION);
2781 Jump(code);
2785 // Continue here if InvokePrologue does handle the invocation due to
2786 // mismatched parameter counts.
2787 Bind(&done);
2791 void MacroAssembler::InvokeFunction(Register function,
2792 const ParameterCount& actual,
2793 InvokeFlag flag,
2794 const CallWrapper& call_wrapper) {
2795 // You can't call a function without a valid frame.
2796 ASSERT(flag == JUMP_FUNCTION || has_frame());
2798 // Contract with called JS functions requires that function is passed in x1.
2799 // (See FullCodeGenerator::Generate().)
2800 ASSERT(function.is(x1));
2802 Register expected_reg = x2;
2803 Register code_reg = x3;
2805 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2806 // The number of arguments is stored as an int32_t, and -1 is a marker
2807 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2808 // extension to correctly handle it.
2809 Ldr(expected_reg, FieldMemOperand(function,
2810 JSFunction::kSharedFunctionInfoOffset));
2811 Ldrsw(expected_reg,
2812 FieldMemOperand(expected_reg,
2813 SharedFunctionInfo::kFormalParameterCountOffset));
2814 Ldr(code_reg,
2815 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2817 ParameterCount expected(expected_reg);
2818 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2822 void MacroAssembler::InvokeFunction(Register function,
2823 const ParameterCount& expected,
2824 const ParameterCount& actual,
2825 InvokeFlag flag,
2826 const CallWrapper& call_wrapper) {
2827 // You can't call a function without a valid frame.
2828 ASSERT(flag == JUMP_FUNCTION || has_frame());
2830 // Contract with called JS functions requires that function is passed in x1.
2831 // (See FullCodeGenerator::Generate().)
2832 ASSERT(function.Is(x1));
2834 Register code_reg = x3;
2836 // Set up the context.
2837 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2839 // We call indirectly through the code field in the function to
2840 // allow recompilation to take effect without changing any of the
2841 // call sites.
2842 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2843 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2847 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2848 const ParameterCount& expected,
2849 const ParameterCount& actual,
2850 InvokeFlag flag,
2851 const CallWrapper& call_wrapper) {
2852 // Contract with called JS functions requires that function is passed in x1.
2853 // (See FullCodeGenerator::Generate().)
2854 __ LoadObject(x1, function);
2855 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2859 void MacroAssembler::TryConvertDoubleToInt64(Register result,
2860 DoubleRegister double_input,
2861 Label* done) {
2862 // Try to convert with an FPU convert instruction. It's trivial to compute
2863 // the modulo operation on an integer register so we convert to a 64-bit
2864 // integer.
2866 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2867 // when the double is out of range. NaNs and infinities will be converted to 0
2868 // (as ECMA-262 requires).
2869 Fcvtzs(result.X(), double_input);
2871 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2872 // representable using a double, so if the result is one of those then we know
2873 // that saturation occurred, and we need to manually handle the conversion.
2875 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2876 // 1 will cause signed overflow.
2877 Cmp(result.X(), 1);
2878 Ccmp(result.X(), -1, VFlag, vc);
2880 B(vc, done);
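// Spelling out the overflow trick: Cmp computes result - 1, which sets V
// only for INT64_MIN; if that did not overflow (vc), Ccmp computes
// result + 1, which sets V only for INT64_MAX. B(vc, done) is therefore
// taken exactly when the Fcvtzs result is not one of the two saturation
// values.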
2884 void MacroAssembler::TruncateDoubleToI(Register result,
2885 DoubleRegister double_input) {
2887 ASSERT(jssp.Is(StackPointer()));
2888 Label done;
2889 // Try to convert the double to an int64. If successful, the bottom 32 bits
2890 // contain our truncated int32 result.
2891 TryConvertDoubleToInt64(result, double_input, &done);
2893 // If we fell through then inline version didn't succeed - call stub instead.
2895 Push(double_input); // Put input on stack.
2897 DoubleToIStub stub(isolate(),
2898 jssp,
2899 result,
2900 0,
2901 true, // is_truncating
2902 true); // skip_fastpath
2903 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2905 Drop(1, kDoubleSize); // Drop the double input on the stack.
2907 Bind(&done);
2912 void MacroAssembler::TruncateHeapNumberToI(Register result,
2913 Register object) {
2914 Label done;
2915 ASSERT(!result.is(object));
2916 ASSERT(jssp.Is(StackPointer()));
2918 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2920 // Try to convert the double to an int64. If successful, the bottom 32 bits
2921 // contain our truncated int32 result.
2922 TryConvertDoubleToInt64(result, fp_scratch, &done);
2924 // If we fell through then inline version didn't succeed - call stub instead.
2926 DoubleToIStub stub(isolate(),
2927 jssp,
2928 object,
2929 HeapNumber::kValueOffset - kHeapObjectTag,
2930 true, // is_truncating
2931 true); // skip_fastpath
2932 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2934 Bind(&done);
2939 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
2940 if (frame_mode == BUILD_STUB_FRAME) {
2941 ASSERT(StackPointer().Is(jssp));
2942 UseScratchRegisterScope temps(this);
2943 Register temp = temps.AcquireX();
2944 __ Mov(temp, Smi::FromInt(StackFrame::STUB));
2945 // Compiled stubs don't age, and so they don't need the predictable code
2946 // ageing sequence.
2947 __ Push(lr, fp, cp, temp);
2948 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
2949 } else {
2950 if (isolate()->IsCodePreAgingActive()) {
2951 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2952 __ EmitCodeAgeSequence(stub);
2953 } else {
2954 __ EmitFrameSetupForCodeAgePatching();
2960 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2961 ASSERT(jssp.Is(StackPointer()));
2962 UseScratchRegisterScope temps(this);
2963 Register type_reg = temps.AcquireX();
2964 Register code_reg = temps.AcquireX();
2966 Push(lr, fp);
2967 Mov(type_reg, Smi::FromInt(type));
2968 Mov(code_reg, Operand(CodeObject()));
2969 Push(type_reg, code_reg);
2974 // jssp[0] : code object
2976 // Adjust FP to point to saved FP.
2977 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
2981 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2982 ASSERT(jssp.Is(StackPointer()));
2983 // Drop the execution stack down to the frame pointer and restore
2984 // the caller frame pointer and return address.
2985 Mov(jssp, fp);
2986 AssertStackConsistency();
2987 Pop(fp, lr);
2991 void MacroAssembler::ExitFramePreserveFPRegs() {
2992 PushCPURegList(kCallerSavedFP);
2996 void MacroAssembler::ExitFrameRestoreFPRegs() {
2997 // Read the registers from the stack without popping them. The stack pointer
2998 // will be reset as part of the unwinding process.
2999 CPURegList saved_fp_regs = kCallerSavedFP;
3000 ASSERT(saved_fp_regs.Count() % 2 == 0);
3002 int offset = ExitFrameConstants::kLastExitFrameField;
3003 while (!saved_fp_regs.IsEmpty()) {
3004 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
3005 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
3006 offset -= 2 * kDRegSize;
3007 Ldp(dst1, dst0, MemOperand(fp, offset));
3012 void MacroAssembler::EnterExitFrame(bool save_doubles,
3013 const Register& scratch,
3015 ASSERT(jssp.Is(StackPointer()));
3017 // Set up the new stack frame.
3018 Mov(scratch, Operand(CodeObject()));
3019 Push(lr, fp);
3020 Mov(fp, StackPointer());
3021 Push(xzr, scratch);
3022 // fp[8]: CallerPC (lr)
3023 // fp -> fp[0]: CallerFP (old fp)
3024 // fp[-8]: Space reserved for SPOffset.
3025 // jssp -> fp[-16]: CodeObject()
3026 STATIC_ASSERT((2 * kPointerSize) ==
3027 ExitFrameConstants::kCallerSPDisplacement);
3028 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
3029 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
3030 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
3031 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
3033 // Save the frame pointer and context pointer in the top frame.
3034 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
3036 Str(fp, MemOperand(scratch));
3037 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3039 Str(cp, MemOperand(scratch));
3041 STATIC_ASSERT((-2 * kPointerSize) ==
3042 ExitFrameConstants::kLastExitFrameField);
3043 if (save_doubles) {
3044 ExitFramePreserveFPRegs();
3045 }
3047 // Reserve space for the return address and for user requested memory.
3048 // We do this before aligning to make sure that we end up correctly
3049 // aligned with the minimum of wasted space.
3050 Claim(extra_space + 1, kXRegSize);
3051 // fp[8]: CallerPC (lr)
3052 // fp -> fp[0]: CallerFP (old fp)
3053 // fp[-8]: Space reserved for SPOffset.
3054 // fp[-16]: CodeObject()
3055 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
3056 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
3057 // jssp -> jssp[0]: Space reserved for the return address.
3059 // Align and synchronize the system stack pointer with jssp.
3060 AlignAndSetCSPForFrame();
3061 ASSERT(csp.Is(StackPointer()));
3063 // fp[8]: CallerPC (lr)
3064 // fp -> fp[0]: CallerFP (old fp)
3065 // fp[-8]: Space reserved for SPOffset.
3066 // fp[-16]: CodeObject()
3067 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
3068 // csp[8]: Memory reserved for the caller if extra_space != 0.
3069 // Alignment padding, if necessary.
3070 // csp -> csp[0]: Space reserved for the return address.
3072 // ExitFrame::GetStateForFramePointer expects to find the return address at
3073 // the memory address immediately below the pointer stored in SPOffset.
3074 // It is not safe to derive much else from SPOffset, because the size of the
3075 // padding can vary.
3076 Add(scratch, csp, kXRegSize);
3077 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
3081 // Leave the current exit frame.
3082 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
3083 const Register& scratch,
3084 bool restore_context) {
3085 ASSERT(csp.Is(StackPointer()));
3087 if (restore_doubles) {
3088 ExitFrameRestoreFPRegs();
3091 // Restore the context pointer from the top frame.
3092 if (restore_context) {
3093 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3095 Ldr(cp, MemOperand(scratch));
3098 if (emit_debug_code()) {
3099 // Also emit debug code to clear the cp in the top frame.
3100 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3102 Str(xzr, MemOperand(scratch));
3104 // Clear the frame pointer from the top frame.
3105 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
3107 Str(xzr, MemOperand(scratch));
3109 // Pop the exit frame.
3110 // fp[8]: CallerPC (lr)
3111 // fp -> fp[0]: CallerFP (old fp)
3112 // fp[...]: The rest of the frame.
3113 Mov(jssp, fp);
3114 SetStackPointer(jssp);
3115 AssertStackConsistency();
3116 Pop(fp, lr);
3120 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
3121 Register scratch1, Register scratch2) {
3122 if (FLAG_native_code_counters && counter->Enabled()) {
3123 Mov(scratch1, value);
3124 Mov(scratch2, ExternalReference(counter));
3125 Str(scratch1, MemOperand(scratch2));
3130 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
3131 Register scratch1, Register scratch2) {
3133 if (FLAG_native_code_counters && counter->Enabled()) {
3134 Mov(scratch2, ExternalReference(counter));
3135 Ldr(scratch1, MemOperand(scratch2));
3136 Add(scratch1, scratch1, value);
3137 Str(scratch1, MemOperand(scratch2));
3142 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
3143 Register scratch1, Register scratch2) {
3144 IncrementCounter(counter, -value, scratch1, scratch2);
3148 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
3149 if (context_chain_length > 0) {
3150 // Move up the chain of contexts to the context containing the slot.
3151 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3152 for (int i = 1; i < context_chain_length; i++) {
3153 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3156 // Slot is in the current function context. Move it into the
3157 // destination register in case we store into it (the write barrier
3158 // cannot be allowed to destroy the context in cp).
3164 void MacroAssembler::DebugBreak() {
3165 Mov(x0, 0);
3166 Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
3167 CEntryStub ces(isolate(), 1);
3168 ASSERT(AllowThisStubCall(&ces));
3169 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3173 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3174 int handler_index) {
3175 ASSERT(jssp.Is(StackPointer()));
3176 // Adjust this code if the asserts don't hold.
3177 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3178 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3179 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3180 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3181 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3182 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3184 // For the JSEntry handler, we must preserve the live registers x0-x4.
3185 // (See JSEntryStub::GenerateBody().)
3187 unsigned state =
3188 StackHandler::IndexField::encode(handler_index) |
3189 StackHandler::KindField::encode(kind);
3191 // Set up the code object and the state for pushing.
3192 Mov(x10, Operand(CodeObject()));
3193 Mov(x11, state);
3195 // Push the frame pointer, context, state, and code object.
3196 if (kind == StackHandler::JS_ENTRY) {
3197 ASSERT(Smi::FromInt(0) == 0);
3198 Push(xzr, xzr, x11, x10);
3200 Push(fp, cp, x11, x10);
3203 // Link the current handler as the next handler.
3204 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3205 Ldr(x10, MemOperand(x11));
3206 Push(x10);
3207 // Set this new handler as the current one.
3208 Str(jssp, MemOperand(x11));
3212 void MacroAssembler::PopTryHandler() {
3213 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3214 Pop(x10);
3215 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3216 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
3217 Str(x10, MemOperand(x11));
3221 void MacroAssembler::Allocate(int object_size,
3222 Register result,
3223 Register scratch1,
3224 Register scratch2,
3225 Label* gc_required,
3226 AllocationFlags flags) {
3227 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
3228 if (!FLAG_inline_new) {
3229 if (emit_debug_code()) {
3230 // Trash the registers to simulate an allocation failure.
3231 // We apply salt to the original zap value to easily spot the values.
3232 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3233 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3234 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3236 B(gc_required);
3237 return;
3240 UseScratchRegisterScope temps(this);
3241 Register scratch3 = temps.AcquireX();
3243 ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
3244 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3246 // Make object size into bytes.
3247 if ((flags & SIZE_IN_WORDS) != 0) {
3248 object_size *= kPointerSize;
3250 ASSERT(0 == (object_size & kObjectAlignmentMask));
3252 // Check relative positions of allocation top and limit addresses.
3253 // The values must be adjacent in memory to allow the use of LDP.
3254 ExternalReference heap_allocation_top =
3255 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3256 ExternalReference heap_allocation_limit =
3257 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3258 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3259 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3260 ASSERT((limit - top) == kPointerSize);
3262 // Set up allocation top address and object size registers.
3263 Register top_address = scratch1;
3264 Register allocation_limit = scratch2;
3265 Mov(top_address, Operand(heap_allocation_top));
3267 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3268 // Load allocation top into result and the allocation limit.
3269 Ldp(result, allocation_limit, MemOperand(top_address));
3271 if (emit_debug_code()) {
3272 // Assert that result actually contains top on entry.
3273 Ldr(scratch3, MemOperand(top_address));
3274 Cmp(result, scratch3);
3275 Check(eq, kUnexpectedAllocationTop);
3277 // Load the allocation limit. 'result' already contains the allocation top.
3278 Ldr(allocation_limit, MemOperand(top_address, limit - top));
3281 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3282 // the same alignment on ARM64.
3283 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3285 // Calculate new top and bail out if new space is exhausted.
3286 Adds(scratch3, result, object_size);
3287 Ccmp(scratch3, allocation_limit, CFlag, cc);
3288 B(hi, gc_required);
3289 Str(scratch3, MemOperand(top_address));
3291 // Tag the object if requested.
3292 if ((flags & TAG_OBJECT) != 0) {
3293 ObjectTag(result, result);
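// A note on the bailout sequence above: Adds sets the carry flag if
// result + object_size wraps past 2^64. Ccmp compares the new top against
// the limit only when no wrap occurred (cc); otherwise it forces the flags
// to CFlag, which satisfies hi. Either way, B(hi, gc_required) fires
// whenever the allocation cannot fit.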
3298 void MacroAssembler::Allocate(Register object_size,
3299 Register result,
3300 Register scratch1,
3301 Register scratch2,
3302 Label* gc_required,
3303 AllocationFlags flags) {
3304 if (!FLAG_inline_new) {
3305 if (emit_debug_code()) {
3306 // Trash the registers to simulate an allocation failure.
3307 // We apply salt to the original zap value to easily spot the values.
3308 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3309 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3310 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3312 B(gc_required);
3313 return;
3316 UseScratchRegisterScope temps(this);
3317 Register scratch3 = temps.AcquireX();
3319 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
3320 ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
3321 scratch1.Is64Bits() && scratch2.Is64Bits());
3323 // Check relative positions of allocation top and limit addresses.
3324 // The values must be adjacent in memory to allow the use of LDP.
3325 ExternalReference heap_allocation_top =
3326 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3327 ExternalReference heap_allocation_limit =
3328 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3329 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3330 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3331 ASSERT((limit - top) == kPointerSize);
3333 // Set up allocation top address and object size registers.
3334 Register top_address = scratch1;
3335 Register allocation_limit = scratch2;
3336 Mov(top_address, heap_allocation_top);
3338 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3339 // Load allocation top into result and the allocation limit.
3340 Ldp(result, allocation_limit, MemOperand(top_address));
3342 if (emit_debug_code()) {
3343 // Assert that result actually contains top on entry.
3344 Ldr(scratch3, MemOperand(top_address));
3345 Cmp(result, scratch3);
3346 Check(eq, kUnexpectedAllocationTop);
3348 // Load the allocation limit. 'result' already contains the allocation top.
3349 Ldr(allocation_limit, MemOperand(top_address, limit - top));
3352 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3353 // the same alignment on ARM64.
3354 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3356 // Calculate new top and bail out if new space is exhausted.
3357 if ((flags & SIZE_IN_WORDS) != 0) {
3358 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
3360 Adds(scratch3, result, object_size);
3363 if (emit_debug_code()) {
3364 Tst(scratch3, kObjectAlignmentMask);
3365 Check(eq, kUnalignedAllocationInNewSpace);
3368 Ccmp(scratch3, allocation_limit, CFlag, cc);
3369 B(hi, gc_required);
3370 Str(scratch3, MemOperand(top_address));
3372 // Tag the object if requested.
3373 if ((flags & TAG_OBJECT) != 0) {
3374 ObjectTag(result, result);
3379 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3380 Register scratch) {
3381 ExternalReference new_space_allocation_top =
3382 ExternalReference::new_space_allocation_top_address(isolate());
3384 // Make sure the object has no tag before resetting top.
3385 Bic(object, object, kHeapObjectTagMask);
3387 // Check that the object un-allocated is below the current top.
3388 Mov(scratch, new_space_allocation_top);
3389 Ldr(scratch, MemOperand(scratch));
3390 Cmp(object, scratch);
3391 Check(lt, kUndoAllocationOfNonAllocatedMemory);
3393 // Write the address of the object to un-allocate as the current top.
3394 Mov(scratch, new_space_allocation_top);
3395 Str(object, MemOperand(scratch));
3399 void MacroAssembler::AllocateTwoByteString(Register result,
3400 Register length,
3401 Register scratch1,
3402 Register scratch2,
3403 Register scratch3,
3404 Label* gc_required) {
3405 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
3406 // Calculate the number of bytes needed for the characters in the string while
3407 // observing object alignment.
3408 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3409 Add(scratch1, length, length); // Length in bytes, not chars.
3410 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3411 Bic(scratch1, scratch1, kObjectAlignmentMask);
3413 // Allocate two-byte string in new space.
3414 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
3421 // Set the map, length and hash field.
3422 InitializeNewString(result,
3423 length,
3424 Heap::kStringMapRootIndex,
3425 scratch1,
3426 scratch2);
3430 void MacroAssembler::AllocateAsciiString(Register result,
3431 Register length,
3432 Register scratch1,
3433 Register scratch2,
3434 Register scratch3,
3435 Label* gc_required) {
3436 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
3437 // Calculate the number of bytes needed for the characters in the string while
3438 // observing object alignment.
3439 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3440 STATIC_ASSERT(kCharSize == 1);
3441 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3442 Bic(scratch1, scratch1, kObjectAlignmentMask);
3444 // Allocate ASCII string in new space.
3445 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
3452 // Set the map, length and hash field.
3453 InitializeNewString(result,
3454 length,
3455 Heap::kAsciiStringMapRootIndex,
3456 scratch1,
3457 scratch2);
3461 void MacroAssembler::AllocateTwoByteConsString(Register result,
3462 Register length,
3463 Register scratch1,
3464 Register scratch2,
3465 Label* gc_required) {
3466 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3467 TAG_OBJECT);
3469 InitializeNewString(result,
3470 length,
3471 Heap::kConsStringMapRootIndex,
3472 scratch1,
3473 scratch2);
3477 void MacroAssembler::AllocateAsciiConsString(Register result,
3478 Register length,
3479 Register scratch1,
3480 Register scratch2,
3481 Label* gc_required) {
3482 Label allocate_new_space, install_map;
3483 AllocationFlags flags = TAG_OBJECT;
3485 ExternalReference high_promotion_mode = ExternalReference::
3486 new_space_high_promotion_mode_active_address(isolate());
3487 Mov(scratch1, high_promotion_mode);
3488 Ldr(scratch1, MemOperand(scratch1));
3489 Cbz(scratch1, &allocate_new_space);
3491 Allocate(ConsString::kSize,
3496 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
3497 B(&install_map);
3500 Bind(&allocate_new_space);
3501 Allocate(ConsString::kSize,
3502 result,
3503 scratch1,
3504 scratch2,
3505 gc_required,
3506 flags);
3508 Bind(&install_map);
3510 InitializeNewString(result,
3511 length,
3512 Heap::kConsAsciiStringMapRootIndex,
3513 scratch1,
3514 scratch2);
3518 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3519 Register length,
3520 Register scratch1,
3521 Register scratch2,
3522 Label* gc_required) {
3523 ASSERT(!AreAliased(result, length, scratch1, scratch2));
3524 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3525 TAG_OBJECT);
3527 InitializeNewString(result,
3528 length,
3529 Heap::kSlicedStringMapRootIndex,
3530 scratch1,
3531 scratch2);
3535 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3536 Register length,
3537 Register scratch1,
3538 Register scratch2,
3539 Label* gc_required) {
3540 ASSERT(!AreAliased(result, length, scratch1, scratch2));
3541 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3542 TAG_OBJECT);
3544 InitializeNewString(result,
3545 length,
3546 Heap::kSlicedAsciiStringMapRootIndex,
3547 scratch1,
3548 scratch2);
3552 // Allocates a heap number or jumps to the need_gc label if the young space
3553 // is full and a scavenge is needed.
3554 void MacroAssembler::AllocateHeapNumber(Register result,
3555 Label* gc_required,
3556 Register scratch1,
3557 Register scratch2,
3558 CPURegister value,
3559 CPURegister heap_number_map) {
3560 ASSERT(!value.IsValid() || value.Is64Bits());
3561 UseScratchRegisterScope temps(this);
3563 // Allocate an object in the heap for the heap number and tag it as a heap
3564 // object.
3565 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3566 NO_ALLOCATION_FLAGS);
3568 // Prepare the heap number map.
3569 if (!heap_number_map.IsValid()) {
3570 // If we have a valid value register, use the same type of register to store
3571 // the map so we can use STP to store both in one instruction.
3572 if (value.IsValid() && value.IsFPRegister()) {
3573 heap_number_map = temps.AcquireD();
3575 heap_number_map = scratch1;
3577 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3579 if (emit_debug_code()) {
3580 Register map;
3581 if (heap_number_map.IsFPRegister()) {
3582 map = scratch1;
3583 Fmov(map, DoubleRegister(heap_number_map));
3584 } else {
3585 map = Register(heap_number_map);
3587 AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
3590 // Store the heap number map and the value in the allocated object.
3591 if (value.IsSameSizeAndType(heap_number_map)) {
3592 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3593 HeapNumber::kValueOffset);
3594 Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
3595 } else {
3596 Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3597 if (value.IsValid()) {
3598 Str(value, MemOperand(result, HeapNumber::kValueOffset));
3601 ObjectTag(result, result);
3605 void MacroAssembler::JumpIfObjectType(Register object,
3606 Register map,
3607 Register type_reg,
3608 InstanceType type,
3609 Label* if_cond_pass,
3610 Condition cond) {
3611 CompareObjectType(object, map, type_reg, type);
3612 B(cond, if_cond_pass);
3616 void MacroAssembler::JumpIfNotObjectType(Register object,
3617 Register map,
3618 Register type_reg,
3619 InstanceType type,
3620 Label* if_not_object) {
3621 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3625 // Sets condition flags based on comparison, and returns type in type_reg.
3626 void MacroAssembler::CompareObjectType(Register object,
3627 Register map,
3628 Register type_reg,
3629 InstanceType type) {
3630 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3631 CompareInstanceType(map, type_reg, type);
3635 // Sets condition flags based on comparison, and returns type in type_reg.
3636 void MacroAssembler::CompareInstanceType(Register map,
3637 Register type_reg,
3638 InstanceType type) {
3639 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3640 Cmp(type_reg, type);
3644 void MacroAssembler::CompareMap(Register obj,
3645 Register scratch,
3646 Handle<Map> map) {
3647 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3648 CompareMap(scratch, map);
3652 void MacroAssembler::CompareMap(Register obj_map,
3653 Handle<Map> map) {
3654 Cmp(obj_map, Operand(map));
3658 void MacroAssembler::CheckMap(Register obj,
3659 Register scratch,
3660 Handle<Map> map,
3661 Label* fail,
3662 SmiCheckType smi_check_type) {
3663 if (smi_check_type == DO_SMI_CHECK) {
3664 JumpIfSmi(obj, fail);
3667 CompareMap(obj, scratch, map);
3668 B(ne, fail);
3672 void MacroAssembler::CheckMap(Register obj,
3673 Register scratch,
3674 Heap::RootListIndex index,
3675 Label* fail,
3676 SmiCheckType smi_check_type) {
3677 if (smi_check_type == DO_SMI_CHECK) {
3678 JumpIfSmi(obj, fail);
3680 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3681 JumpIfNotRoot(scratch, index, fail);
3685 void MacroAssembler::CheckMap(Register obj_map,
3686 Handle<Map> map,
3687 Label* fail,
3688 SmiCheckType smi_check_type) {
3689 if (smi_check_type == DO_SMI_CHECK) {
3690 JumpIfSmi(obj_map, fail);
3693 CompareMap(obj_map, map);
3694 B(ne, fail);
3698 void MacroAssembler::DispatchMap(Register obj,
3699 Register scratch,
3700 Handle<Map> map,
3701 Handle<Code> success,
3702 SmiCheckType smi_check_type) {
3703 Label fail;
3704 if (smi_check_type == DO_SMI_CHECK) {
3705 JumpIfSmi(obj, &fail);
3707 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3708 Cmp(scratch, Operand(map));
3709 B(ne, &fail);
3710 Jump(success, RelocInfo::CODE_TARGET);
3712 Bind(&fail);
3715 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3716 UseScratchRegisterScope temps(this);
3717 Register temp = temps.AcquireX();
3718 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3719 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3720 Tst(temp, mask);
3724 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3725 // Load the map's "bit field 2".
3726 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3727 // Retrieve elements_kind from bit field 2.
3728 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
3732 void MacroAssembler::TryGetFunctionPrototype(Register function,
3733 Register result,
3734 Register scratch,
3735 Label* miss,
3736 BoundFunctionAction action) {
3737 ASSERT(!AreAliased(function, result, scratch));
3739 // Check that the receiver isn't a smi.
3740 JumpIfSmi(function, miss);
3742 // Check that the function really is a function. Load map into result reg.
3743 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
3745 if (action == kMissOnBoundFunction) {
3746 Register scratch_w = scratch.W();
3747 Ldr(scratch,
3748 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3749 // On 64-bit platforms, compiler hints field is not a smi. See definition of
3750 // kCompilerHintsOffset in src/objects.h.
3751 Ldr(scratch_w,
3752 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3753 Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
3756 // Make sure that the function has an instance prototype.
3757 Label non_instance;
3758 Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3759 Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
3761 // Get the prototype or initial map from the function.
3762 Ldr(result,
3763 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3765 // If the prototype or initial map is the hole, don't return it and simply
3766 // miss the cache instead. This will allow us to allocate a prototype object
3767 // on-demand in the runtime system.
3768 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3770 // If the function does not have an initial map, we're done.
3771 Label done;
3772 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3774 // Get the prototype from the initial map.
3775 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3778 // Non-instance prototype: fetch prototype from constructor field in initial
3779 // map.
3780 Bind(&non_instance);
3781 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3783 Bind(&done);
3788 void MacroAssembler::CompareRoot(const Register& obj,
3789 Heap::RootListIndex index) {
3790 UseScratchRegisterScope temps(this);
3791 Register temp = temps.AcquireX();
3792 ASSERT(!AreAliased(obj, temp));
3793 LoadRoot(temp, index);
3794 Cmp(obj, temp);
3798 void MacroAssembler::JumpIfRoot(const Register& obj,
3799 Heap::RootListIndex index,
3800 Label* if_equal) {
3801 CompareRoot(obj, index);
3802 B(eq, if_equal);
3806 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3807 Heap::RootListIndex index,
3808 Label* if_not_equal) {
3809 CompareRoot(obj, index);
3810 B(ne, if_not_equal);
3814 void MacroAssembler::CompareAndSplit(const Register& lhs,
3815 const Operand& rhs,
3816 Condition cond,
3817 Label* if_true,
3818 Label* if_false,
3819 Label* fall_through) {
3820 if ((if_true == if_false) && (if_false == fall_through)) {
3821 // Fall through.
3822 } else if (if_true == if_false) {
3823 B(if_true);
3824 } else if (if_false == fall_through) {
3825 CompareAndBranch(lhs, rhs, cond, if_true);
3826 } else if (if_true == fall_through) {
3827 CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
3828 } else {
3829 CompareAndBranch(lhs, rhs, cond, if_true);
3830 B(if_false);
3835 void MacroAssembler::TestAndSplit(const Register& reg,
3836 uint64_t bit_pattern,
3837 Label* if_all_clear,
3838 Label* if_any_set,
3839 Label* fall_through) {
3840 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3841 // Fall through.
3842 } else if (if_all_clear == if_any_set) {
3843 B(if_all_clear);
3844 } else if (if_all_clear == fall_through) {
3845 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3846 } else if (if_any_set == fall_through) {
3847 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3848 } else {
3849 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3850 B(if_all_clear);
3855 void MacroAssembler::CheckFastElements(Register map,
3856 Register scratch,
3857 Label* fail) {
3858 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3859 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3860 STATIC_ASSERT(FAST_ELEMENTS == 2);
3861 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3862 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3863 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3864 B(hi, fail);
3868 void MacroAssembler::CheckFastObjectElements(Register map,
3869 Register scratch,
3870 Label* fail) {
3871 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3872 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3873 STATIC_ASSERT(FAST_ELEMENTS == 2);
3874 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3875 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3876 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3877 // If cond==ls, set cond=hi, otherwise compare.
3878 Ccmp(scratch,
3879 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3880 B(hi, fail);
3884 // Note: The ARM version of this clobbers elements_reg, but this version does
3885 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3886 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3887 Register key_reg,
3888 Register elements_reg,
3889 Register scratch1,
3890 FPRegister fpscratch1,
3891 FPRegister fpscratch2,
3892 Label* fail,
3893 int elements_offset) {
3894 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3896 Label store_num;
3897 // Speculatively convert the smi to a double - all smis can be exactly
3898 // represented as a double.
3899 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3901 // If value_reg is a smi, we're done.
3902 JumpIfSmi(value_reg, &store_num);
3904 // Ensure that the object is a heap number.
3905 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
3906 fail, DONT_DO_SMI_CHECK);
3908 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3909 Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3911 // Check for NaN by comparing the number to itself: NaN comparison will
3912 // report unordered, indicated by the overflow flag being set.
3913 Fcmp(fpscratch1, fpscratch1);
3914 Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
3916 // Store the result.
3917 Bind(&store_num);
3918 Add(scratch1, elements_reg,
3919 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3920 Str(fpscratch1,
3921 FieldMemOperand(scratch1,
3922 FixedDoubleArray::kHeaderSize - elements_offset));
3926 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3927 return has_frame_ || !stub->SometimesSetsUpAFrame();
3931 void MacroAssembler::IndexFromHash(Register hash, Register index) {
3932 // If the hash field contains an array index pick it out. The assert checks
3933 // that the constants for the maximum number of digits for an array index
3934 // cached in the hash field and the number of bits reserved for it do not
3935 // conflict.
3936 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
3937 (1 << String::kArrayIndexValueBits));
3938 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
3939 // the low kHashShift bits.
3940 STATIC_ASSERT(kSmiTag == 0);
3941 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
3942 SmiTag(index, hash);
3946 void MacroAssembler::EmitSeqStringSetCharCheck(
3947 Register string,
3948 Register index,
3949 SeqStringSetCharCheckIndexType index_type,
3950 Register scratch,
3951 uint32_t encoding_mask) {
3952 ASSERT(!AreAliased(string, index, scratch));
3954 if (index_type == kIndexIsSmi) {
3955 AssertSmi(index);
3956 }
3958 // Check that string is an object.
3959 AssertNotSmi(string, kNonObject);
3961 // Check that string has an appropriate map.
3962 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3963 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3965 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3966 Cmp(scratch, encoding_mask);
3967 Check(eq, kUnexpectedStringType);
3969 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3970 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3971 Check(lt, kIndexIsTooLarge);
3973 ASSERT_EQ(0, Smi::FromInt(0));
3974 Cmp(index, 0);
3975 Check(ge, kIndexIsNegative);
3979 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3980 Register scratch1,
3981 Register scratch2,
3982 Label* miss) {
3983 ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
3984 Label same_contexts;
3986 // Load current lexical context from the stack frame.
3987 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
3988 // In debug mode, make sure the lexical context is set.
3989 #ifdef DEBUG
3990 Cmp(scratch1, 0);
3991 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
3992 #endif
3994 // Load the native context of the current context.
3995 const int offset =
3996 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3997 Ldr(scratch1, FieldMemOperand(scratch1, offset));
3998 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
4000 // Check the context is a native context.
4001 if (emit_debug_code()) {
4002 // Read the first word and compare to the global_context_map.
4003 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
4004 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
4005 Check(eq, kExpectedNativeContext);
4008 // Check if both contexts are the same.
4009 Ldr(scratch2, FieldMemOperand(holder_reg,
4010 JSGlobalProxy::kNativeContextOffset));
4011 Cmp(scratch1, scratch2);
4012 B(&same_contexts, eq);
4014 // Check the context is a native context.
4015 if (emit_debug_code()) {
4016 // We're short on scratch registers here, so use holder_reg as a scratch.
4018 Register scratch3 = holder_reg;
4020 CompareRoot(scratch2, Heap::kNullValueRootIndex);
4021 Check(ne, kExpectedNonNullContext);
4023 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
4024 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
4025 Check(eq, kExpectedNativeContext);
4029 // Check that the security token in the calling global object is
4030 // compatible with the security token in the receiving global
4032 int token_offset = Context::kHeaderSize +
4033 Context::SECURITY_TOKEN_INDEX * kPointerSize;
4035 Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
4036 Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
4037 Cmp(scratch1, scratch2);
4040 Bind(&same_contexts);
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register key, Register scratch) {
  ASSERT(!AreAliased(key, scratch));

  // Xor original key with a seed.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  Eor(key, key, Operand::UntagSmi(scratch));

  // The algorithm uses 32-bit integer values.
  key = key.W();
  scratch = scratch.W();

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  Mvn(scratch, key);
  Add(key, scratch, Operand(key, LSL, 15));
  // hash = hash ^ (hash >> 12);
  Eor(key, key, Operand(key, LSR, 12));
  // hash = hash + (hash << 2);
  Add(key, key, Operand(key, LSL, 2));
  // hash = hash ^ (hash >> 4);
  Eor(key, key, Operand(key, LSR, 4));
  // hash = hash * 2057;
  Mov(scratch, Operand(key, LSL, 11));
  Add(key, key, Operand(key, LSL, 3));
  Add(key, key, scratch);
  // hash = hash ^ (hash >> 16);
  Eor(key, key, Operand(key, LSR, 16));
}


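// For reference, the full sequence above computes the following scalar hash
// (mirroring ComputeIntegerHash in utils.h; 'seed' is the untagged hash seed
// loaded from the roots array):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // Emitted as hash + (hash << 3) + (hash << 11).
//   hash = hash ^ (hash >> 16);

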
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register scratch0,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3) {
  ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));

  Label done;

  SmiUntag(scratch0, key);
  GetNumberHash(scratch0, scratch1);

  // Compute the capacity mask.
  Ldrsw(scratch1,
        UntagSmiFieldMemOperand(elements,
                                SeededNumberDictionary::kCapacityOffset));
  Sub(scratch1, scratch1, 1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
    } else {
      Mov(scratch2, scratch0);
    }
    And(scratch2, scratch2, scratch1);

    // Scale the index by multiplying by the element size.
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
    Ldr(scratch3,
        FieldMemOperand(scratch2,
                        SeededNumberDictionary::kElementsStartOffset));
    Cmp(key, scratch3);
    if (i != (kNumberDictionaryProbes - 1)) {
      B(eq, &done);
    } else {
      B(ne, miss);
    }
  }

  Bind(&done);
  // Check that the value is a normal property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  Ldr(result, FieldMemOperand(scratch2, kValueOffset));
}


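// Sketch of the dictionary entry layout assumed above (three tagged words per
// entry, starting at kElementsStartOffset, hence the scale-by-3 in the loop):
//
//   entry[0] : key      // compared against 'key' in the probe loop.
//   entry[1] : value    // loaded via kValueOffset.
//   entry[2] : details  // a smi, checked via kDetailsOffset.

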
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch1,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  ASSERT(!AreAliased(object, address, scratch1));
  Label done, store_buffer_overflow;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, &ok);
    Abort(kRememberedSetPointerInNewSpace);
    Bind(&ok);
  }
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.AcquireX();

  // Load store buffer top.
  Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
  Ldr(scratch1, MemOperand(scratch2));
  // Store pointer to buffer and increment buffer top.
  Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
  // Write back new top of buffer.
  Str(scratch1, MemOperand(scratch2));
  // Call stub on end of buffer.
  // Check for end of buffer.
  ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
         (1 << (14 + kPointerSizeLog2)));
  if (and_then == kFallThroughAtEnd) {
    Tbz(scratch1, (14 + kPointerSizeLog2), &done);
  } else {
    ASSERT(and_then == kReturnAtEnd);
    Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
    Ret();
  }

  Bind(&store_buffer_overflow);
  Push(lr);
  StoreBufferOverflowStub store_buffer_overflow_stub =
      StoreBufferOverflowStub(isolate(), fp_mode);
  CallStub(&store_buffer_overflow_stub);
  Pop(lr);

  Bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  PopXRegList(kSafepointSavedRegisters);
  Drop(num_unsaved);
}


void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
  // adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  Claim(num_unsaved);
  PushXRegList(kSafepointSavedRegisters);
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                            FPRegister::kAllocatableFPRegisters));
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                           FPRegister::kAllocatableFPRegisters));
  PopSafepointRegisters();
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // Make sure the safepoint registers list is what we expect.
  ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);

  // Safepoint registers are stored contiguously on the stack, but not all the
  // registers are saved. The following registers are excluded:
  //  - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
  //    the macro assembler.
  //  - x28 (jssp) because JS stack pointer doesn't need to be included in
  //    safepoint registers.
  //  - x31 (csp) because the system stack pointer doesn't need to be included
  //    in safepoint registers.
  //
  // This function implements the mapping of register code to index into the
  // safepoint register slots.
  if ((reg_code >= 0) && (reg_code <= 15)) {
    return reg_code;
  } else if ((reg_code >= 18) && (reg_code <= 27)) {
    // Skip ip0 and ip1.
    return reg_code - 2;
  } else if ((reg_code == 29) || (reg_code == 30)) {
    // Also skip jssp.
    return reg_code - 3;
  } else {
    // This register has no safepoint register slot.
    UNREACHABLE();
    return -1;
  }
}


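// A few sample points of the mapping above: x15 keeps its own index (15),
// x18 lands in slot 16 (the first slot after the skipped ip0/ip1 pair), and
// fp (x29) lands in slot 26.

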
void MacroAssembler::CheckPageFlagSet(const Register& object,
                                      const Register& scratch,
                                      int mask,
                                      Label* if_any_set) {
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAnySet(scratch, mask, if_any_set);
}


void MacroAssembler::CheckPageFlagClear(const Register& object,
                                        const Register& scratch,
                                        int mask,
                                        Label* if_all_clear) {
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}


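// Both helpers exploit the fact that pages are Page::kPageSize-aligned:
// clearing the low bits of any interior pointer yields the MemoryChunk
// header. As a scalar sketch:
//
//   uintptr_t chunk = addr & ~Page::kPageAlignmentMask;
//   uintptr_t flags =
//       *reinterpret_cast<uintptr_t*>(chunk + MemoryChunk::kFlagsOffset);
//   if (flags & mask) ...  // CheckPageFlagSet branches here.

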
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register scratch,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  Add(scratch, object, offset - kHeapObjectTag);
  if (emit_debug_code()) {
    Label ok;
    Tst(scratch, (1 << kPointerSizeLog2) - 1);
    B(eq, &ok);
    Abort(kUnalignedCellInWriteBarrier);
    Bind(&ok);
  }

  RecordWrite(object,
              scratch,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK);

  Bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
    Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
  }
}


// Will clobber: object, address, value.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  ASM_LOCATION("MacroAssembler::RecordWrite");
  ASSERT(!AreAliased(object, value));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, MemOperand(address));
    Cmp(temp, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  // TODO(mstarzinger): Dynamic counter missing.

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  CheckPageFlagClear(value,
                     value,  // Used as scratch.
                     MemoryChunk::kPointersToHereAreInterestingMask,
                     &done);
  CheckPageFlagClear(object,
                     value,  // Used as scratch.
                     MemoryChunk::kPointersFromHereAreInterestingMask,
                     &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr);
  }

  Bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
    Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
  }
}


void MacroAssembler::AssertHasValidColor(const Register& reg) {
  if (emit_debug_code()) {
    // The bit sequence is backward. The first character in the string
    // represents the least significant bit.
    ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

    Label color_is_valid;
    Tbnz(reg, 0, &color_is_valid);
    Tbz(reg, 1, &color_is_valid);
    Abort(kUnexpectedColorFound);
    Bind(&color_is_valid);
  }
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register shift_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
  ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
  // addr_reg is divided into fields:
  // |63  page base  20|19  high  8|7  shift  3|2  0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
  Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
  Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
  // bitmap_reg:
  // |63  page base  20|19  zeros  15|14  high  3|2  0|
  Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
}


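// A worked example of the field extraction above, assuming 64-bit pointers
// (kPointerSizeLog2 == 3) and 32-bit mark-bit cells (kBitsPerCellLog2 == 5):
// for an object at page_base + 0x1a8, the word index is 0x1a8 >> 3 == 53, so
// its color bits live in cell 53 >> 5 == 1 (bits [19:8] of the address) at
// bit offset 53 & 31 == 21 (bits [7:3] of the address).

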
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register shift_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  // See mark-compact.h for color definitions.
  ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));

  GetMarkBits(object, bitmap_scratch, shift_scratch);
  Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  // Shift the bitmap down to get the color of the object in bits [1:0].
  Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);

  AssertHasValidColor(bitmap_scratch);

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);

  // Check for the color.
  if (first_bit == 0) {
    // Checking for white.
    ASSERT(second_bit == 0);
    // We only need to test the first bit.
    Tbz(bitmap_scratch, 0, has_color);
  } else {
    Label other_color;
    // Checking for grey or black.
    Tbz(bitmap_scratch, 0, &other_color);
    if (second_bit == 0) {
      Tbz(bitmap_scratch, 1, has_color);
    } else {
      Tbnz(bitmap_scratch, 1, has_color);
    }
    Bind(&other_color);
  }

  // Fall through if it does not have the right color.
}


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    Mov(scratch, Operand(map));
    Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
    TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
  }
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  ASSERT(!AreAliased(object, scratch0, scratch1));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Scratch contains elements pointer.
  Mov(current, object);

  // Loop based on the map going up the prototype chain.
  Bind(&loop_again);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
}


void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  ASSERT(!result.Is(ldr_location));
  const uint32_t kLdrLitOffset_lsb = 5;
  const uint32_t kLdrLitOffset_width = 19;
  Ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    And(result, result, LoadLiteralFMask);
    Cmp(result, LoadLiteralFixed);
    Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
    // The instruction was clobbered. Reload it.
    Ldr(result, MemOperand(ldr_location));
  }
  Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
  Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
}


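// Decoding sketch: an ldr-literal instruction keeps its signed word offset
// (imm19) in bits [23:5]. For example, a load whose literal sits 8 bytes
// after the instruction encodes imm19 == 2; the Sbfx/Add pair above
// sign-extends that field and scales it by the 4-byte word size to recover
// ldr_location + 8.

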
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register shift_scratch,
    Register load_scratch,
    Register length_scratch,
    Label* value_is_white_and_not_data) {
  ASSERT(!AreAliased(
      value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);

  GetMarkBits(value, bitmap_scratch, shift_scratch);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Lsr(load_scratch, load_scratch, shift_scratch);

  AssertHasValidColor(load_scratch);

  // If the value is black or grey we don't need to do anything.
  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  Label done;
  Tbnz(load_scratch, 0, &done);

  // Value is white. We check whether it is data that doesn't need scanning.
  Register map = load_scratch;  // Holds map while checking type.
  Label is_data_object;

  // Check for heap-number.
  Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  Mov(length_scratch, HeapNumber::kSize);
  JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);

  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  TestAndBranchIfAnySet(instance_type,
                        kIsIndirectStringMask | kIsNotStringMask,
                        value_is_white_and_not_data);

  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  Mov(length_scratch, ExternalString::kSize);
  TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);

  // Sequential string, either ASCII or UC16. The length is loaded untagged;
  // for ASCII (char-size of 1) it is already the size of the character data,
  // while for UC16 (char-size of 2) it is doubled by the shift below.
  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
                                                String::kLengthOffset));
  Tst(instance_type, kStringEncodingMask);
  Cset(load_scratch, eq);
  Lsl(length_scratch, length_scratch, load_scratch);
  Add(length_scratch,
      length_scratch,
      SeqString::kHeaderSize + kObjectAlignmentMask);
  Bic(length_scratch, length_scratch, kObjectAlignmentMask);

  Bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  Register mask = shift_scratch;
  Mov(load_scratch, 1);
  Lsl(mask, load_scratch, shift_scratch);

  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Orr(load_scratch, load_scratch, mask);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Add(load_scratch, load_scratch, length_scratch);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  Bind(&done);
}


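// Size computation example for the sequential-string path above: a two-byte
// string of length 3 loads an untagged length of 3; Cset yields 1 because
// the encoding bit is clear for UC16, so Lsl doubles it to 6 bytes of
// character data. Adding SeqString::kHeaderSize plus kObjectAlignmentMask
// and then clearing the mask bits rounds the total size up to the
// allocation alignment.

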
void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
  if (emit_debug_code()) {
    Check(cond, reason);
  }
}


void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
  if (emit_debug_code()) {
    CheckRegisterIsClear(reg, reason);
  }
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index,
                                          BailoutReason reason) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Label ok;
    Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
    JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    Bind(&ok);
  }
}


void MacroAssembler::AssertIsString(const Register& object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::Check(Condition cond, BailoutReason reason) {
  Label ok;
  B(cond, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
  Label ok;
  Cbz(reg, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  if (FLAG_trap_on_abort) {
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we have
  // some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    Mov(x0, Smi::FromInt(reason));
    Push(x0);

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      CallRuntime(Runtime::kAbort, 1);
    } else {
      CallRuntime(Runtime::kAbort, 1);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    {
      BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  Ldr(scratch1, GlobalObjectMemOperand());
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(scratch2, FieldMemOperand(scratch1, offset));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  Ldr(function, GlobalObjectMemOperand());
  // Load the native context from the global or builtins object.
  Ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  Ldr(function, ContextMemOperand(function, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    B(&ok);
    Bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    Bind(&ok);
  }
}


// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // Make sure that the macro assembler doesn't try to use any of our arguments
  // as scratch registers.
  ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
  ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));

  // We cannot print the stack pointer because it is typically used to preserve
  // caller-saved registers (using other Printf variants which depend on this
  // restriction).
  ASSERT(!AreAliased(arg0, StackPointer()));
  ASSERT(!AreAliased(arg1, StackPointer()));
  ASSERT(!AreAliased(arg2, StackPointer()));
  ASSERT(!AreAliased(arg3, StackPointer()));

  static const int kMaxArgCount = 4;
  // Assume that we have the maximum number of arguments until we know
  // otherwise.
  int arg_count = kMaxArgCount;

  // The provided arguments.
  CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};

  // The PCS registers where the arguments need to end up.
  CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};

  // Promote FP arguments to doubles, and integer arguments to X registers.
  // Note that FP and integer arguments cannot be mixed, but we'll check
  // AreSameSizeAndType once we've processed these promotions.
  for (int i = 0; i < kMaxArgCount; i++) {
    if (args[i].IsRegister()) {
      // Note that we use x1 onwards, because x0 will hold the format string.
      pcs[i] = Register::XRegFromCode(i + 1);
      // For simplicity, we handle all integer arguments as X registers. An X
      // register argument takes the same space as a W register argument in the
      // PCS anyway. The only limitation is that we must explicitly clear the
      // top word for W register arguments as the callee will expect it to be
      // clear.
      if (!args[i].Is64Bits()) {
        const Register& as_x = args[i].X();
        And(as_x, as_x, 0x00000000ffffffff);
        args[i] = as_x;
      }
    } else if (args[i].IsFPRegister()) {
      pcs[i] = FPRegister::DRegFromCode(i);
      // C and C++ varargs functions (such as printf) implicitly promote float
      // arguments to doubles.
      if (!args[i].Is64Bits()) {
        FPRegister s(args[i]);
        const FPRegister& as_d = args[i].D();
        Fcvt(as_d, s);
        args[i] = as_d;
      }
    } else {
      // This is the first empty (NoCPUReg) argument, so use it to set the
      // argument count and bail out.
      arg_count = i;
      break;
    }
  }
  ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
  // Check that every remaining argument is NoCPUReg.
  for (int i = arg_count; i < kMaxArgCount; i++) {
    ASSERT(args[i].IsNone());
  }
  ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
                                                args[2], args[3],
                                                pcs[0], pcs[1],
                                                pcs[2], pcs[3]));

  // Move the arguments into the appropriate PCS registers.
  //
  // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
  // surprisingly complicated.
  //
  //  * For even numbers of registers, we push the arguments and then pop them
  //    into their final registers. This maintains 16-byte stack alignment in
  //    case csp is the stack pointer, since we're only handling X or D
  //    registers at this point.
  //
  //  * For odd numbers of registers, we push and pop all but one register in
  //    the same way, but the left-over register is moved directly, since we
  //    can always safely move one register without clobbering any source.
  if (arg_count >= 4) {
    Push(args[3], args[2], args[1], args[0]);
  } else if (arg_count >= 2) {
    Push(args[1], args[0]);
  }

  if ((arg_count % 2) != 0) {
    // Move the left-over register directly.
    const CPURegister& leftover_arg = args[arg_count - 1];
    const CPURegister& leftover_pcs = pcs[arg_count - 1];
    if (leftover_arg.IsRegister()) {
      Mov(Register(leftover_pcs), Register(leftover_arg));
    } else {
      Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
    }
  }

  if (arg_count >= 4) {
    Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
  } else if (arg_count >= 2) {
    Pop(pcs[0], pcs[1]);
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(pcs[0].type());
}


void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
  // A call to printf needs special handling for the simulator, since the system
  // printf function will use a different instruction set and the procedure-call
  // standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(type);
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}


void MacroAssembler::Printf(const char * format,
                            const CPURegister& arg0,
                            const CPURegister& arg1,
                            const CPURegister& arg2,
                            const CPURegister& arg3) {
  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Preserve NZCV.
  { UseScratchRegisterScope temps(this);
    Register tmp = temps.AcquireX();
    Mrs(tmp, NZCV);
    Push(tmp, xzr);
  }

  PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

  { UseScratchRegisterScope temps(this);
    Register tmp = temps.AcquireX();
    Pop(xzr, tmp);
    Msr(NZCV, tmp);
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}


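// An illustrative (hypothetical) use from generated-code debugging; the
// format string and register are examples only:
//
//   Printf("counter: %ld\n", x10);
//
// Unlike PrintfNoPreserve, all registers, including NZCV, are saved and
// restored around the call.

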
void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
  ASSERT(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
  ASSERT(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}


#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
}


void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
  }
}


bool MacroAssembler::IsYoungSequence(byte* sequence) {
  // Generate a young sequence to compare with.
  const int length = kCodeAgeSequenceSize / kInstructionSize;
  static bool initialized = false;
  static byte young[kCodeAgeSequenceSize];
  if (!initialized) {
    PatchingAssembler patcher(young, length);
    // The young sequence is the frame setup code for FUNCTION code types. It is
    // generated by FullCodeGenerator::Generate.
    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
    initialized = true;
  }

  bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
  ASSERT(is_young || IsCodeAgeSequence(sequence));
  return is_young;
}


#ifdef DEBUG
bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
  // The old sequence varies depending on the code age. However, the code up
  // until kCodeAgeStubEntryOffset does not change, so we can check that part to
  // get a reasonable level of verification.
  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
  static bool initialized = false;
  static byte old[kCodeAgeStubEntryOffset];
  if (!initialized) {
    PatchingAssembler patcher(old, length);
    MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
    initialized = true;
  }
  return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
}
#endif


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  ASSERT(!AreAliased(result, dividend));
  ASSERT(result.Is32Bits() && dividend.Is32Bits());
  MultiplierAndShift ms(divisor);
  Mov(result, ms.multiplier());
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
  if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
  if (ms.shift() > 0) Asr(result, result, ms.shift());
  Add(result, result, Operand(dividend, LSR, 31));
}


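// Worked example of the multiply-and-shift division above, assuming
// MultiplierAndShift picks the usual magic constant for divisor == 3
// (multiplier 0x55555556, shift 0; '>>>' denotes an unsigned shift):
//
//   dividend ==  7: (7 * 0x55555556) >> 32 ==  2, plus (7 >>> 31) == 0, so  2.
//   dividend == -7: (-7 * 0x55555556) >> 32 == -3, plus (-7 >>> 31) == 1, so -2.
//
// That is, the result truncates towards zero, matching C integer division.

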
UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  ASSERT(!AreAliased(result, xzr, csp));
  return result;
}


CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  ASSERT(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}


#undef __
#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    ASSERT(smi_check->is_bound());
    ASSERT(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // 32-bit integer.
    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    ASSERT(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  ASSERT(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    ASSERT(is_uint32(payload));
    if (payload != 0) {
      int reg_code = RegisterBits::decode(payload);
      reg_ = Register::XRegFromCode(reg_code);
      uint64_t smi_check_delta = DeltaBits::decode(payload);
      ASSERT(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64