1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
32 #if V8_TARGET_ARCH_ARM
34 #include "bootstrapper.h"
36 #include "cpu-profiler.h"
38 #include "isolate-inl.h"
44 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45 : Assembler(arg_isolate, buffer, size),
46 generating_stub_(false),
48 if (isolate() != NULL) {
49 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
55 void MacroAssembler::Jump(Register target, Condition cond) {
60 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
62 ASSERT(RelocInfo::IsCodeTarget(rmode));
63 mov(pc, Operand(target, rmode), LeaveCC, cond);
67 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
69 ASSERT(!RelocInfo::IsCodeTarget(rmode));
70 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
74 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
76 ASSERT(RelocInfo::IsCodeTarget(rmode));
77 // 'code' is always generated ARM code, never THUMB code
78 AllowDeferredHandleDereference embedding_raw_address;
79 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
83 int MacroAssembler::CallSize(Register target, Condition cond) {
88 void MacroAssembler::Call(Register target, Condition cond) {
89 // Block constant pool for the call instruction sequence.
90 BlockConstPoolScope block_const_pool(this);
94 ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
98 int MacroAssembler::CallSize(
99 Address target, RelocInfo::Mode rmode, Condition cond) {
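// A call is at minimum a mov of the target into ip followed by blx ip; if the
// target immediate cannot be encoded as a single instruction, the extra
// instructions needed to materialize it are added below.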
100 int size = 2 * kInstrSize;
101 Instr mov_instr = cond | MOV | LeaveCC;
102 intptr_t immediate = reinterpret_cast<intptr_t>(target);
103 if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
110 int MacroAssembler::CallSizeNotPredictableCodeSize(
111 Address target, RelocInfo::Mode rmode, Condition cond) {
112 int size = 2 * kInstrSize;
113 Instr mov_instr = cond | MOV | LeaveCC;
114 intptr_t immediate = reinterpret_cast<intptr_t>(target);
115 if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
122 void MacroAssembler::Call(Address target,
123 RelocInfo::Mode rmode,
125 TargetAddressStorageMode mode) {
126 // Block constant pool for the call instruction sequence.
127 BlockConstPoolScope block_const_pool(this);
131 bool old_predictable_code_size = predictable_code_size();
132 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
133 set_predictable_code_size(true);
136   // Call sequence on V7 or later may be:
137 // movw ip, #... @ call address low 16
138 // movt ip, #... @ call address high 16
141 // Or for pre-V7 or values that may be back-patched
142 // to avoid ICache flushes:
143 // ldr ip, [pc, #...] @ call address
147 // Statement positions are expected to be recorded when the target
148   // address is loaded. The mov method automatically records positions
149   // when pc is the target; since that is not the case here, we have to
150   // do it explicitly.
151 positions_recorder()->WriteRecordedPositions();
153 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
156 ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
157 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
158 set_predictable_code_size(old_predictable_code_size);
163 int MacroAssembler::CallSize(Handle<Code> code,
164 RelocInfo::Mode rmode,
165 TypeFeedbackId ast_id,
167 AllowDeferredHandleDereference using_raw_address;
168 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
172 void MacroAssembler::Call(Handle<Code> code,
173 RelocInfo::Mode rmode,
174 TypeFeedbackId ast_id,
176 TargetAddressStorageMode mode) {
179 ASSERT(RelocInfo::IsCodeTarget(rmode));
180 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
181 SetRecordedAstId(ast_id);
182 rmode = RelocInfo::CODE_TARGET_WITH_ID;
184 // 'code' is always generated ARM code, never THUMB code
185 AllowDeferredHandleDereference embedding_raw_address;
186 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
190 void MacroAssembler::Ret(Condition cond) {
195 void MacroAssembler::Drop(int count, Condition cond) {
197 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
202 void MacroAssembler::Ret(int drop, Condition cond) {
208 void MacroAssembler::Swap(Register reg1,
212 if (scratch.is(no_reg)) {
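// No scratch register available: swap in place using the three-EOR trick.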
213 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
214 eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
215 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
217 mov(scratch, reg1, LeaveCC, cond);
218 mov(reg1, reg2, LeaveCC, cond);
219 mov(reg2, scratch, LeaveCC, cond);
224 void MacroAssembler::Call(Label* target) {
229 void MacroAssembler::Push(Handle<Object> handle) {
230 mov(ip, Operand(handle));
235 void MacroAssembler::Move(Register dst, Handle<Object> value) {
236 AllowDeferredHandleDereference smi_check;
237 if (value->IsSmi()) {
238 mov(dst, Operand(value));
240 ASSERT(value->IsHeapObject());
241 if (isolate()->heap()->InNewSpace(*value)) {
242 Handle<Cell> cell = isolate()->factory()->NewCell(value);
243 mov(dst, Operand(cell));
244 ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
246 mov(dst, Operand(value));
252 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
254 mov(dst, src, LeaveCC, cond);
259 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
266 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
268 if (!src2.is_reg() &&
269 !src2.must_output_reloc_info(this) &&
270 src2.immediate() == 0) {
271 mov(dst, Operand::Zero(), LeaveCC, cond);
272 } else if (!src2.is_single_instruction(this) &&
273 !src2.must_output_reloc_info(this) &&
274 CpuFeatures::IsSupported(ARMv7) &&
275 IsPowerOf2(src2.immediate() + 1)) {
277 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
279 and_(dst, src1, src2, LeaveCC, cond);
284 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
287 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
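// Emulate ubfx: mask off everything above bit (lsb + width - 1), then shift
// the field down so it starts at bit 0.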
288 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
289 and_(dst, src1, Operand(mask), LeaveCC, cond);
291 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
294 ubfx(dst, src1, lsb, width, cond);
299 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
302 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
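// Emulate sbfx: isolate the field, shift it up to the top of the register,
// then arithmetic-shift it back down so its top bit is sign-extended.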
303 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
304 and_(dst, src1, Operand(mask), LeaveCC, cond);
305 int shift_up = 32 - lsb - width;
306 int shift_down = lsb + shift_up;
308 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
310 if (shift_down != 0) {
311 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
314 sbfx(dst, src1, lsb, width, cond);
319 void MacroAssembler::Bfi(Register dst,
325 ASSERT(0 <= lsb && lsb < 32);
326 ASSERT(0 <= width && width < 32);
327 ASSERT(lsb + width < 32);
328 ASSERT(!scratch.is(dst));
329 if (width == 0) return;
330 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
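// Emulate bfi: clear the destination field, mask the source to 'width' bits,
// shift it into position and OR it in.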
331 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
332 bic(dst, dst, Operand(mask));
333 and_(scratch, src, Operand((1 << width) - 1));
334 mov(scratch, Operand(scratch, LSL, lsb));
335 orr(dst, dst, scratch);
337 bfi(dst, src, lsb, width, cond);
342 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
345 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
346 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
347 bic(dst, src, Operand(mask));
349 Move(dst, src, cond);
350 bfc(dst, lsb, width, cond);
355 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
357 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
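// Emulate usat in software: apply the operand shift, then clamp negative
// values to 0 and values above the saturation limit to satval.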
358 ASSERT(!dst.is(pc) && !src.rm().is(pc));
359 ASSERT((satpos >= 0) && (satpos <= 31));
361 // These asserts are required to ensure compatibility with the ARMv7
363 ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
364 ASSERT(src.rs().is(no_reg));
367 int satval = (1 << satpos) - 1;
370 b(NegateCondition(cond), &done); // Skip saturate if !condition.
372 if (!(src.is_reg() && dst.is(src.rm()))) {
375 tst(dst, Operand(~satval));
377 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
378 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
381 usat(dst, satpos, src, cond);
386 void MacroAssembler::Load(Register dst,
387 const MemOperand& src,
389 ASSERT(!r.IsDouble());
390 if (r.IsInteger8()) {
392 } else if (r.IsUInteger8()) {
394 } else if (r.IsInteger16()) {
396 } else if (r.IsUInteger16()) {
404 void MacroAssembler::Store(Register src,
405 const MemOperand& dst,
407 ASSERT(!r.IsDouble());
408 if (r.IsInteger8() || r.IsUInteger8()) {
410 } else if (r.IsInteger16() || r.IsUInteger16()) {
418 void MacroAssembler::LoadRoot(Register destination,
419 Heap::RootListIndex index,
421 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
422 isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
423 !predictable_code_size()) {
424 // The CPU supports fast immediate values, and this root will never
425 // change. We will load it as a relocatable immediate value.
426 Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
427 mov(destination, Operand(root), LeaveCC, cond);
430 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
434 void MacroAssembler::StoreRoot(Register source,
435 Heap::RootListIndex index,
437 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
441 void MacroAssembler::InNewSpace(Register object,
445 ASSERT(cond == eq || cond == ne);
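// An object is in new space iff (address & new_space_mask) == new_space_start.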
446 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
447 cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
452 void MacroAssembler::RecordWriteField(
457 LinkRegisterStatus lr_status,
458 SaveFPRegsMode save_fp,
459 RememberedSetAction remembered_set_action,
460 SmiCheck smi_check) {
461 // First, check if a write barrier is even needed. The tests below
462 // catch stores of Smis.
465 // Skip barrier if writing a smi.
466 if (smi_check == INLINE_SMI_CHECK) {
467 JumpIfSmi(value, &done);
470 // Although the object register is tagged, the offset is relative to the start
471   // of the object, so the offset must be a multiple of kPointerSize.
472 ASSERT(IsAligned(offset, kPointerSize));
474 add(dst, object, Operand(offset - kHeapObjectTag));
475 if (emit_debug_code()) {
477 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
479 stop("Unaligned cell in write barrier");
488 remembered_set_action,
493 // Clobber clobbered input registers when running with the debug-code flag
494 // turned on to provoke errors.
495 if (emit_debug_code()) {
496 mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
497 mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
502 // Will clobber 4 registers: object, address, scratch, ip. The
503 // register 'object' contains a heap object pointer. The heap object
504 // tag is shifted away.
505 void MacroAssembler::RecordWrite(Register object,
508 LinkRegisterStatus lr_status,
509 SaveFPRegsMode fp_mode,
510 RememberedSetAction remembered_set_action,
511 SmiCheck smi_check) {
512 ASSERT(!object.is(value));
513 if (emit_debug_code()) {
514 ldr(ip, MemOperand(address));
516 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
519 // Count number of write barriers in generated code.
520 isolate()->counters()->write_barriers_static()->Increment();
521 // TODO(mstarzinger): Dynamic counter missing.
523 // First, check if a write barrier is even needed. The tests below
524 // catch stores of smis and stores into the young generation.
527 if (smi_check == INLINE_SMI_CHECK) {
528 JumpIfSmi(value, &done);
532 value, // Used as scratch.
533 MemoryChunk::kPointersToHereAreInterestingMask,
536 CheckPageFlag(object,
537 value, // Used as scratch.
538 MemoryChunk::kPointersFromHereAreInterestingMask,
542 // Record the actual write.
543 if (lr_status == kLRHasNotBeenSaved) {
546 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
548 if (lr_status == kLRHasNotBeenSaved) {
554 // Clobber clobbered registers when running with the debug-code flag
555 // turned on to provoke errors.
556 if (emit_debug_code()) {
557 mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
558 mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
563 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
566 SaveFPRegsMode fp_mode,
567 RememberedSetFinalAction and_then) {
569 if (emit_debug_code()) {
571 JumpIfNotInNewSpace(object, scratch, &ok);
572 stop("Remembered set pointer is in new space");
575 // Load store buffer top.
576 ExternalReference store_buffer =
577 ExternalReference::store_buffer_top(isolate());
578 mov(ip, Operand(store_buffer));
579 ldr(scratch, MemOperand(ip));
580 // Store pointer to buffer and increment buffer top.
581 str(address, MemOperand(scratch, kPointerSize, PostIndex));
582 // Write back new top of buffer.
583 str(scratch, MemOperand(ip));
584 // Call stub on end of buffer.
585 // Check for end of buffer.
586 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
587 if (and_then == kFallThroughAtEnd) {
590 ASSERT(and_then == kReturnAtEnd);
594 StoreBufferOverflowStub store_buffer_overflow =
595 StoreBufferOverflowStub(fp_mode);
596 CallStub(&store_buffer_overflow);
599 if (and_then == kReturnAtEnd) {
605 void MacroAssembler::PushFixedFrame(Register marker_reg) {
606 ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
607 stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
609 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
615 void MacroAssembler::PopFixedFrame(Register marker_reg) {
616 ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
617 ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
619 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
625 // Push and pop all registers that can hold pointers.
626 void MacroAssembler::PushSafepointRegisters() {
627 // Safepoints expect a block of contiguous register values starting with r0:
628 ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
629 // Safepoints expect a block of kNumSafepointRegisters values on the
630 // stack, so adjust the stack for unsaved registers.
631 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
632 ASSERT(num_unsaved >= 0);
633 sub(sp, sp, Operand(num_unsaved * kPointerSize));
634 stm(db_w, sp, kSafepointSavedRegisters);
638 void MacroAssembler::PopSafepointRegisters() {
639 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
640 ldm(ia_w, sp, kSafepointSavedRegisters);
641 add(sp, sp, Operand(num_unsaved * kPointerSize));
645 void MacroAssembler::PushSafepointRegistersAndDoubles() {
646 // Number of d-regs not known at snapshot time.
647 ASSERT(!Serializer::enabled());
648 PushSafepointRegisters();
649 // Only save allocatable registers.
650 ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
651 ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
652 if (CpuFeatures::IsSupported(VFP32DREGS)) {
653 vstm(db_w, sp, d16, d31);
655 vstm(db_w, sp, d0, d13);
659 void MacroAssembler::PopSafepointRegistersAndDoubles() {
660 // Number of d-regs not known at snapshot time.
661 ASSERT(!Serializer::enabled());
662 // Only save allocatable registers.
663 ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
664 ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
665 vldm(ia_w, sp, d0, d13);
666 if (CpuFeatures::IsSupported(VFP32DREGS)) {
667 vldm(ia_w, sp, d16, d31);
669 PopSafepointRegisters();
672 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
674 str(src, SafepointRegistersAndDoublesSlot(dst));
678 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
679 str(src, SafepointRegisterSlot(dst));
683 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
684 ldr(dst, SafepointRegisterSlot(src));
688 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
689 // The registers are pushed starting with the highest encoding,
690 // which means that lowest encodings are closest to the stack pointer.
691 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
696 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
697 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
701 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
702 // Number of d-regs not known at snapshot time.
703 ASSERT(!Serializer::enabled());
704 // General purpose registers are pushed last on the stack.
705 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
706 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
707 return MemOperand(sp, doubles_size + register_offset);
711 void MacroAssembler::Ldrd(Register dst1, Register dst2,
712 const MemOperand& src, Condition cond) {
713 ASSERT(src.rm().is(no_reg));
714 ASSERT(!dst1.is(lr)); // r14.
716 // V8 does not use this addressing mode, so the fallback code
717 // below doesn't support it yet.
718 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
720 // Generate two ldr instructions if ldrd is not available.
721 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
722 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
723 CpuFeatureScope scope(this, ARMv7);
724 ldrd(dst1, dst2, src, cond);
726 if ((src.am() == Offset) || (src.am() == NegOffset)) {
727 MemOperand src2(src);
728 src2.set_offset(src2.offset() + 4);
729 if (dst1.is(src.rn())) {
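// dst1 aliases the base register, so load the second word first to avoid
// clobbering the base before it is used for the first load.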
730 ldr(dst2, src2, cond);
731 ldr(dst1, src, cond);
733 ldr(dst1, src, cond);
734 ldr(dst2, src2, cond);
736 } else { // PostIndex or NegPostIndex.
737 ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
738 if (dst1.is(src.rn())) {
739 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
740 ldr(dst1, src, cond);
742 MemOperand src2(src);
743 src2.set_offset(src2.offset() - 4);
744 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
745 ldr(dst2, src2, cond);
752 void MacroAssembler::Strd(Register src1, Register src2,
753 const MemOperand& dst, Condition cond) {
754 ASSERT(dst.rm().is(no_reg));
755 ASSERT(!src1.is(lr)); // r14.
757 // V8 does not use this addressing mode, so the fallback code
758 // below doesn't support it yet.
759 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
761 // Generate two str instructions if strd is not available.
762 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
763 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
764 CpuFeatureScope scope(this, ARMv7);
765 strd(src1, src2, dst, cond);
767 MemOperand dst2(dst);
768 if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
769 dst2.set_offset(dst2.offset() + 4);
770 str(src1, dst, cond);
771 str(src2, dst2, cond);
772 } else { // PostIndex or NegPostIndex.
773 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
774 dst2.set_offset(dst2.offset() - 4);
775 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
776 str(src2, dst2, cond);
782 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
783 // If needed, restore wanted bits of FPSCR.
786 tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
788 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
794 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
795 const DwVfpRegister src,
796 const Condition cond) {
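// With the default-NaN mode enabled (see VFPEnsureFPSCRState), subtracting
// zero leaves ordinary values unchanged but replaces any NaN input with the
// canonical default NaN.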
797 vsub(dst, src, kDoubleRegZero, cond);
801 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
802 const DwVfpRegister src2,
803 const Condition cond) {
804 // Compare and move FPSCR flags to the normal condition flags.
805 VFPCompareAndLoadFlags(src1, src2, pc, cond);
808 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
810 const Condition cond) {
811 // Compare and move FPSCR flags to the normal condition flags.
812 VFPCompareAndLoadFlags(src1, src2, pc, cond);
816 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
817 const DwVfpRegister src2,
818 const Register fpscr_flags,
819 const Condition cond) {
820 // Compare and load FPSCR.
821 vcmp(src1, src2, cond);
822 vmrs(fpscr_flags, cond);
825 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
827 const Register fpscr_flags,
828 const Condition cond) {
829 // Compare and load FPSCR.
830 vcmp(src1, src2, cond);
831 vmrs(fpscr_flags, cond);
834 void MacroAssembler::Vmov(const DwVfpRegister dst,
836 const Register scratch) {
837 static const DoubleRepresentation minus_zero(-0.0);
838 static const DoubleRepresentation zero(0.0);
839 DoubleRepresentation value_rep(imm);
840 // Handle special values first.
841 if (value_rep == zero) {
842 vmov(dst, kDoubleRegZero);
843 } else if (value_rep == minus_zero) {
844 vneg(dst, kDoubleRegZero);
846 vmov(dst, imm, scratch);
851 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
852 if (src.code() < 16) {
853 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
854 vmov(dst, loc.high());
856 vmov(dst, VmovIndexHi, src);
861 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
862 if (dst.code() < 16) {
863 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
864 vmov(loc.high(), src);
866 vmov(dst, VmovIndexHi, src);
871 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
872 if (src.code() < 16) {
873 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
874 vmov(dst, loc.low());
876 vmov(dst, VmovIndexLo, src);
881 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
882 if (dst.code() < 16) {
883 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
884 vmov(loc.low(), src);
886 vmov(dst, VmovIndexLo, src);
891 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
892 if (frame_mode == BUILD_STUB_FRAME) {
894 Push(Smi::FromInt(StackFrame::STUB));
895 // Adjust FP to point to saved FP.
896 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
898 PredictableCodeSizeScope predictible_code_size_scope(
899 this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
900 // The following three instructions must remain together and unmodified
901 // for code aging to work properly.
902 if (isolate()->IsCodePreAgingActive()) {
904 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
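// Emit the aged code sequence: load the start of this sequence into r0
// (pc reads as this instruction + 8, hence the -8) and jump to the pre-aged
// stub whose address is emitted immediately after the load.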
905 add(r0, pc, Operand(-8));
906 ldr(pc, MemOperand(pc, -4));
907 emit_code_stub_address(stub);
911 // Adjust FP to point to saved FP.
912 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
918 void MacroAssembler::LoadConstantPoolPointerRegister() {
919 if (FLAG_enable_ool_constant_pool) {
920 int constant_pool_offset =
921 Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - 8;
922 ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
923 ldr(pp, MemOperand(pc, constant_pool_offset));
928 void MacroAssembler::EnterFrame(StackFrame::Type type) {
931 mov(ip, Operand(Smi::FromInt(type)));
933 mov(ip, Operand(CodeObject()));
935 // Adjust FP to point to saved FP.
937 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
941 int MacroAssembler::LeaveFrame(StackFrame::Type type) {
946 // Drop the execution stack down to the frame pointer and restore
947 // the caller frame pointer, return address and constant pool pointer
948 // (if FLAG_enable_ool_constant_pool).
950 if (FLAG_enable_ool_constant_pool) {
951 add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
952 frame_ends = pc_offset();
953 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
956 frame_ends = pc_offset();
957 ldm(ia_w, sp, fp.bit() | lr.bit());
963 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
964 // Set up the frame structure on the stack.
965 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
966 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
967 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
969 mov(fp, Operand(sp)); // Set up new frame pointer.
970 // Reserve room for saved entry sp and code object.
971 sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
972 if (emit_debug_code()) {
973 mov(ip, Operand::Zero());
974 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
976 if (FLAG_enable_ool_constant_pool) {
977 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
979 mov(ip, Operand(CodeObject()));
980 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
982 // Save the frame pointer and the context in top.
983 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
984 str(fp, MemOperand(ip));
985 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
986 str(cp, MemOperand(ip));
988 // Optionally save all double registers.
991 // Note that d0 will be accessible at
992 // fp - ExitFrameConstants::kFrameSize -
993 // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
994 // since the sp slot, code slot and constant pool slot (if
995 // FLAG_enable_ool_constant_pool) were pushed after the fp.
998   // Reserve room for the return address and stack space, and align the frame
999   // in preparation for calling the runtime function.
1000 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1001 sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
1002 if (frame_alignment > 0) {
1003 ASSERT(IsPowerOf2(frame_alignment));
1004 and_(sp, sp, Operand(-frame_alignment));
1007 // Set the exit frame sp value to point just before the return address
1009 add(ip, sp, Operand(kPointerSize));
1010 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1014 void MacroAssembler::InitializeNewString(Register string,
1016 Heap::RootListIndex map_index,
1018 Register scratch2) {
1019 SmiTag(scratch1, length);
1020 LoadRoot(scratch2, map_index);
1021 str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1022 mov(scratch1, Operand(String::kEmptyHashField));
1023 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1024 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
1028 int MacroAssembler::ActivationFrameAlignment() {
1029 #if V8_HOST_ARCH_ARM
1030 // Running on the real platform. Use the alignment as mandated by the local
1032 // Note: This will break if we ever start generating snapshots on one ARM
1033 // platform for another ARM platform with a different alignment.
1034 return OS::ActivationFrameAlignment();
1035 #else // V8_HOST_ARCH_ARM
1036 // If we are using the simulator then we should always align to the expected
1037 // alignment. As the simulator is used to generate snapshots we do not know
1038 // if the target platform will need alignment, so this is controlled from a
1040 return FLAG_sim_stack_alignment;
1041 #endif // V8_HOST_ARCH_ARM
1045 void MacroAssembler::LeaveExitFrame(bool save_doubles,
1046 Register argument_count,
1047 bool restore_context) {
1048 // Optionally restore all double registers.
1050 // Calculate the stack location of the saved doubles and restore them.
1051 const int offset = ExitFrameConstants::kFrameSize;
1053 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1054 RestoreFPRegs(r3, ip);
1058 mov(r3, Operand::Zero());
1059 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1060 str(r3, MemOperand(ip));
1063 // Restore current context from top and clear it in debug mode.
1064 if (restore_context) {
1065 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1066 ldr(cp, MemOperand(ip));
1069 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1070 str(r3, MemOperand(ip));
1073 // Tear down the exit frame, pop the arguments, and return.
1074 if (FLAG_enable_ool_constant_pool) {
1075 ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1077 mov(sp, Operand(fp));
1078 ldm(ia_w, sp, fp.bit() | lr.bit());
1079 if (argument_count.is_valid()) {
1080 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1085 void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1086 if (use_eabi_hardfloat()) {
1094 // On ARM this is just a synonym to make the purpose clear.
1095 void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1096 MovFromFloatResult(dst);
1100 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1101 const ParameterCount& actual,
1102 Handle<Code> code_constant,
1105 bool* definitely_mismatches,
1107 const CallWrapper& call_wrapper) {
1108 bool definitely_matches = false;
1109 *definitely_mismatches = false;
1110 Label regular_invoke;
1112   // Check whether the expected and actual argument counts match. If not,
1113   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
1114 // r0: actual arguments count
1115 // r1: function (passed through to callee)
1116 // r2: expected arguments count
1118 // The code below is made a lot easier because the calling code already sets
1119 // up actual and expected registers according to the contract if values are
1120 // passed in registers.
1121 ASSERT(actual.is_immediate() || actual.reg().is(r0));
1122 ASSERT(expected.is_immediate() || expected.reg().is(r2));
1123 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
1125 if (expected.is_immediate()) {
1126 ASSERT(actual.is_immediate());
1127 if (expected.immediate() == actual.immediate()) {
1128 definitely_matches = true;
1130 mov(r0, Operand(actual.immediate()));
1131 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1132 if (expected.immediate() == sentinel) {
1133 // Don't worry about adapting arguments for builtins that
1134     // don't want that done. Skip adaptation code by making it look
1135 // like we have a match between expected and actual number of
1137 definitely_matches = true;
1139 *definitely_mismatches = true;
1140 mov(r2, Operand(expected.immediate()));
1144 if (actual.is_immediate()) {
1145 cmp(expected.reg(), Operand(actual.immediate()));
1146       b(eq, &regular_invoke);
1147 mov(r0, Operand(actual.immediate()));
1149 cmp(expected.reg(), Operand(actual.reg()));
1150       b(eq, &regular_invoke);
1154 if (!definitely_matches) {
1155 if (!code_constant.is_null()) {
1156 mov(r3, Operand(code_constant));
1157 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1160 Handle<Code> adaptor =
1161 isolate()->builtins()->ArgumentsAdaptorTrampoline();
1162 if (flag == CALL_FUNCTION) {
1163 call_wrapper.BeforeCall(CallSize(adaptor));
1165 call_wrapper.AfterCall();
1166 if (!*definitely_mismatches) {
1170 Jump(adaptor, RelocInfo::CODE_TARGET);
1172     bind(&regular_invoke);
1177 void MacroAssembler::InvokeCode(Register code,
1178 const ParameterCount& expected,
1179 const ParameterCount& actual,
1181 const CallWrapper& call_wrapper) {
1182 // You can't call a function without a valid frame.
1183 ASSERT(flag == JUMP_FUNCTION || has_frame());
1186 bool definitely_mismatches = false;
1187 InvokePrologue(expected, actual, Handle<Code>::null(), code,
1188 &done, &definitely_mismatches, flag,
1190 if (!definitely_mismatches) {
1191 if (flag == CALL_FUNCTION) {
1192 call_wrapper.BeforeCall(CallSize(code));
1194 call_wrapper.AfterCall();
1196 ASSERT(flag == JUMP_FUNCTION);
1200   // Continue here if InvokePrologue handled the invocation itself (via the
1201   // arguments adaptor) because the parameter counts mismatched.
1207 void MacroAssembler::InvokeFunction(Register fun,
1208 const ParameterCount& actual,
1210 const CallWrapper& call_wrapper) {
1211 // You can't call a function without a valid frame.
1212 ASSERT(flag == JUMP_FUNCTION || has_frame());
1214 // Contract with called JS functions requires that function is passed in r1.
1217 Register expected_reg = r2;
1218 Register code_reg = r3;
1220 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1221 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1223 FieldMemOperand(code_reg,
1224 SharedFunctionInfo::kFormalParameterCountOffset));
1225 SmiUntag(expected_reg);
1227 FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1229 ParameterCount expected(expected_reg);
1230 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
1234 void MacroAssembler::InvokeFunction(Register function,
1235 const ParameterCount& expected,
1236 const ParameterCount& actual,
1238 const CallWrapper& call_wrapper) {
1239 // You can't call a function without a valid frame.
1240 ASSERT(flag == JUMP_FUNCTION || has_frame());
1242 // Contract with called JS functions requires that function is passed in r1.
1243 ASSERT(function.is(r1));
1245 // Get the function and setup the context.
1246 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1248 // We call indirectly through the code field in the function to
1249 // allow recompilation to take effect without changing any of the
1251 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1252 InvokeCode(r3, expected, actual, flag, call_wrapper);
1256 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1257 const ParameterCount& expected,
1258 const ParameterCount& actual,
1260 const CallWrapper& call_wrapper) {
1262 InvokeFunction(r1, expected, actual, flag, call_wrapper);
1266 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1270 ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1271 IsInstanceJSObjectType(map, scratch, fail);
1275 void MacroAssembler::IsInstanceJSObjectType(Register map,
1278 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1279 cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1281 cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1286 void MacroAssembler::IsObjectJSStringType(Register object,
1289 ASSERT(kNotStringTag != 0);
1291 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1292 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1293 tst(scratch, Operand(kIsNotStringMask));
1298 void MacroAssembler::IsObjectNameType(Register object,
1301 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1302 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1303 cmp(scratch, Operand(LAST_NAME_TYPE));
1308 #ifdef ENABLE_DEBUGGER_SUPPORT
1309 void MacroAssembler::DebugBreak() {
1310 mov(r0, Operand::Zero());
1311 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1313 ASSERT(AllowThisStubCall(&ces));
1314 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
1319 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1320 int handler_index) {
1321 // Adjust this code if not the case.
1322 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1323 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1324 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1325 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1326 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1327 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1329   // For the JSEntry handler, we must preserve r0-r4; r5-r6 are available.
1330 // We will build up the handler from the bottom by pushing on the stack.
1331 // Set up the code object (r5) and the state (r6) for pushing.
1333 StackHandler::IndexField::encode(handler_index) |
1334 StackHandler::KindField::encode(kind);
1335 mov(r5, Operand(CodeObject()));
1336 mov(r6, Operand(state));
1338 // Push the frame pointer, context, state, and code object.
1339 if (kind == StackHandler::JS_ENTRY) {
1340 mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
1341 mov(ip, Operand::Zero()); // NULL frame pointer.
1342 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
1344 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1347 // Link the current handler as the next handler.
1348 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1349 ldr(r5, MemOperand(r6));
1351 // Set this new handler as the current one.
1352 str(sp, MemOperand(r6));
1356 void MacroAssembler::PopTryHandler() {
1357 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1359 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1360 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1361 str(r1, MemOperand(ip));
1365 void MacroAssembler::JumpToHandlerEntry() {
1366 // Compute the handler entry address and jump to it. The handler table is
1367 // a fixed array of (smi-tagged) code offsets.
1368 // r0 = exception, r1 = code object, r2 = state.
1369 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
1370 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1371 mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
1372 ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
1373 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1374 add(pc, r1, Operand::SmiUntag(r2)); // Jump
1378 void MacroAssembler::Throw(Register value) {
1379 // Adjust this code if not the case.
1380 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1381 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1382 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1383 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1384 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1385 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1387 // The exception is expected in r0.
1388 if (!value.is(r0)) {
1391 // Drop the stack pointer to the top of the top handler.
1392 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1393 ldr(sp, MemOperand(r3));
1394 // Restore the next handler.
1396 str(r2, MemOperand(r3));
1398 // Get the code object (r1) and state (r2). Restore the context and frame
1400 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1402 // If the handler is a JS frame, restore the context to the frame.
1403 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1406 str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1408 JumpToHandlerEntry();
1412 void MacroAssembler::ThrowUncatchable(Register value) {
1413 // Adjust this code if not the case.
1414 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1415 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1416 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1417 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1418 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1419 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1421 // The exception is expected in r0.
1422 if (!value.is(r0)) {
1425 // Drop the stack pointer to the top of the top stack handler.
1426 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1427 ldr(sp, MemOperand(r3));
1429 // Unwind the handlers until the ENTRY handler is found.
1430 Label fetch_next, check_kind;
1433 ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1436 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1437 ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1438 tst(r2, Operand(StackHandler::KindField::kMask));
1441 // Set the top handler address to next handler past the top ENTRY handler.
1443 str(r2, MemOperand(r3));
1444 // Get the code object (r1) and state (r2). Clear the context and frame
1445 // pointer (0 was saved in the handler).
1446 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1448 JumpToHandlerEntry();
1452 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1455 Label same_contexts;
1457 ASSERT(!holder_reg.is(scratch));
1458 ASSERT(!holder_reg.is(ip));
1459 ASSERT(!scratch.is(ip));
1461 // Load current lexical context from the stack frame.
1462 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1463 // In debug mode, make sure the lexical context is set.
1465 cmp(scratch, Operand::Zero());
1466 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1469 // Load the native context of the current context.
1471 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1472 ldr(scratch, FieldMemOperand(scratch, offset));
1473 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1475 // Check the context is a native context.
1476 if (emit_debug_code()) {
1477     // Cannot use ip as a temporary in this verification code, because ip is
1478     // clobbered as part of cmp with an object Operand.
1479 push(holder_reg); // Temporarily save holder on the stack.
1480 // Read the first word and compare to the native_context_map.
1481 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1482 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1483 cmp(holder_reg, ip);
1484 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1485 pop(holder_reg); // Restore holder.
1488 // Check if both contexts are the same.
1489 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1490 cmp(scratch, Operand(ip));
1491 b(eq, &same_contexts);
1493 // Check the context is a native context.
1494 if (emit_debug_code()) {
1495     // Cannot use ip as a temporary in this verification code, because ip is
1496     // clobbered as part of cmp with an object Operand.
1497 push(holder_reg); // Temporarily save holder on the stack.
1498 mov(holder_reg, ip); // Move ip to its holding place.
1499 LoadRoot(ip, Heap::kNullValueRootIndex);
1500 cmp(holder_reg, ip);
1501 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1503 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1504 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1505 cmp(holder_reg, ip);
1506 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1507     // Restoring ip is not needed; it is reloaded below.
1508 pop(holder_reg); // Restore holder.
1509 // Restore ip to holder's context.
1510 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1513 // Check that the security token in the calling global object is
1514 // compatible with the security token in the receiving global
1516 int token_offset = Context::kHeaderSize +
1517 Context::SECURITY_TOKEN_INDEX * kPointerSize;
1519 ldr(scratch, FieldMemOperand(scratch, token_offset));
1520 ldr(ip, FieldMemOperand(ip, token_offset));
1521 cmp(scratch, Operand(ip));
1524 bind(&same_contexts);
1528 // Compute the hash code from the untagged key. This must be kept in sync with
1529 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
1530 // code-stub-hydrogen.cc
1531 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1532 // First of all we assign the hash seed to scratch.
1533 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1536 // Xor original key with a seed.
1537 eor(t0, t0, Operand(scratch));
1539 // Compute the hash code from the untagged key. This must be kept in sync
1540 // with ComputeIntegerHash in utils.h.
1542 // hash = ~hash + (hash << 15);
1543 mvn(scratch, Operand(t0));
1544 add(t0, scratch, Operand(t0, LSL, 15));
1545 // hash = hash ^ (hash >> 12);
1546 eor(t0, t0, Operand(t0, LSR, 12));
1547 // hash = hash + (hash << 2);
1548 add(t0, t0, Operand(t0, LSL, 2));
1549 // hash = hash ^ (hash >> 4);
1550 eor(t0, t0, Operand(t0, LSR, 4));
1551 // hash = hash * 2057;
1552 mov(scratch, Operand(t0, LSL, 11));
1553 add(t0, t0, Operand(t0, LSL, 3));
1554 add(t0, t0, scratch);
1555 // hash = hash ^ (hash >> 16);
1556 eor(t0, t0, Operand(t0, LSR, 16));
1560 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1569 // elements - holds the slow-case elements of the receiver on entry.
1570 // Unchanged unless 'result' is the same register.
1572 // key - holds the smi key on entry.
1573 // Unchanged unless 'result' is the same register.
1575 // result - holds the result on exit if the load succeeded.
1576   //            Allowed to be the same as 'key' or 'elements'.
1577   //            Unchanged on bailout so 'key' or 'elements' can be used
1578 // in further computation.
1580 // Scratch registers:
1582 // t0 - holds the untagged key on entry and holds the hash once computed.
1584 // t1 - used to hold the capacity mask of the dictionary
1586 // t2 - used for the index into the dictionary.
1589 GetNumberHash(t0, t1);
1591 // Compute the capacity mask.
1592 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1594 sub(t1, t1, Operand(1));
1596 // Generate an unrolled loop that performs a few probes before giving up.
1597 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1598 // Use t2 for index calculations and keep the hash intact in t0.
1600 // Compute the masked index: (hash + i + i * i) & mask.
1602 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1604 and_(t2, t2, Operand(t1));
1606 // Scale the index by multiplying by the element size.
1607 ASSERT(SeededNumberDictionary::kEntrySize == 3);
1608 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
1610 // Check if the key is identical to the name.
1611 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1612 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1613 cmp(key, Operand(ip));
1614 if (i != kNumberDictionaryProbes - 1) {
1622 // Check that the value is a normal property.
1623 // t2: elements + (index * kPointerSize)
1624 const int kDetailsOffset =
1625 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1626 ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1627 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1630 // Get the value at the masked, scaled index and return.
1631 const int kValueOffset =
1632 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1633 ldr(result, FieldMemOperand(t2, kValueOffset));
1637 void MacroAssembler::Allocate(int object_size,
1642 AllocationFlags flags) {
1643 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
1644 if (!FLAG_inline_new) {
1645 if (emit_debug_code()) {
1646 // Trash the registers to simulate an allocation failure.
1647 mov(result, Operand(0x7091));
1648 mov(scratch1, Operand(0x7191));
1649 mov(scratch2, Operand(0x7291));
1655 ASSERT(!result.is(scratch1));
1656 ASSERT(!result.is(scratch2));
1657 ASSERT(!scratch1.is(scratch2));
1658 ASSERT(!scratch1.is(ip));
1659 ASSERT(!scratch2.is(ip));
1661 // Make object size into bytes.
1662 if ((flags & SIZE_IN_WORDS) != 0) {
1663 object_size *= kPointerSize;
1665 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1667 // Check relative positions of allocation top and limit addresses.
1668 // The values must be adjacent in memory to allow the use of LDM.
1669 // Also, assert that the registers are numbered such that the values
1670 // are loaded in the correct order.
1671 ExternalReference allocation_top =
1672 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1673 ExternalReference allocation_limit =
1674 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1677 reinterpret_cast<intptr_t>(allocation_top.address());
1679 reinterpret_cast<intptr_t>(allocation_limit.address());
1680 ASSERT((limit - top) == kPointerSize);
1681 ASSERT(result.code() < ip.code());
1683 // Set up allocation top address register.
1684 Register topaddr = scratch1;
1685 mov(topaddr, Operand(allocation_top));
1687 // This code stores a temporary value in ip. This is OK, as the code below
1688 // does not need ip for implicit literal generation.
1689 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1690 // Load allocation top into result and allocation limit into ip.
1691 ldm(ia, topaddr, result.bit() | ip.bit());
1693 if (emit_debug_code()) {
1694 // Assert that result actually contains top on entry. ip is used
1695       // immediately below, so this use of ip does not cause a difference in
1696       // register content between debug and release mode.
1697 ldr(ip, MemOperand(topaddr));
1699 Check(eq, kUnexpectedAllocationTop);
1701 // Load allocation limit into ip. Result already contains allocation top.
1702 ldr(ip, MemOperand(topaddr, limit - top));
1705 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1706 // Align the next allocation. Storing the filler map without checking top is
1707 // safe in new-space because the limit of the heap is aligned there.
1708 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1709 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1710 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1713 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1714 cmp(result, Operand(ip));
1717 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1718 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1722 // Calculate new top and bail out if new space is exhausted. Use result
1723 // to calculate the new top. We must preserve the ip register at this
1724 // point, so we cannot just use add().
1725 ASSERT(object_size > 0);
1726 Register source = result;
1727 Condition cond = al;
1729 while (object_size != 0) {
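// Add the size in chunks that each fit a single-instruction immediate, so ip
// (still holding the allocation limit) is never needed as a scratch register.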
1730 if (((object_size >> shift) & 0x03) == 0) {
1733 int bits = object_size & (0xff << shift);
1734 object_size -= bits;
1736 Operand bits_operand(bits);
1737 ASSERT(bits_operand.is_single_instruction(this));
1738 add(scratch2, source, bits_operand, SetCC, cond);
1744 cmp(scratch2, Operand(ip));
1746 str(scratch2, MemOperand(topaddr));
1748 // Tag object if requested.
1749 if ((flags & TAG_OBJECT) != 0) {
1750 add(result, result, Operand(kHeapObjectTag));
1755 void MacroAssembler::Allocate(Register object_size,
1760 AllocationFlags flags) {
1761 if (!FLAG_inline_new) {
1762 if (emit_debug_code()) {
1763 // Trash the registers to simulate an allocation failure.
1764 mov(result, Operand(0x7091));
1765 mov(scratch1, Operand(0x7191));
1766 mov(scratch2, Operand(0x7291));
1772 // Assert that the register arguments are different and that none of
1773 // them are ip. ip is used explicitly in the code generated below.
1774 ASSERT(!result.is(scratch1));
1775 ASSERT(!result.is(scratch2));
1776 ASSERT(!scratch1.is(scratch2));
1777 ASSERT(!object_size.is(ip));
1778 ASSERT(!result.is(ip));
1779 ASSERT(!scratch1.is(ip));
1780 ASSERT(!scratch2.is(ip));
1782 // Check relative positions of allocation top and limit addresses.
1783 // The values must be adjacent in memory to allow the use of LDM.
1784 // Also, assert that the registers are numbered such that the values
1785 // are loaded in the correct order.
1786 ExternalReference allocation_top =
1787 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1788 ExternalReference allocation_limit =
1789 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1791 reinterpret_cast<intptr_t>(allocation_top.address());
1793 reinterpret_cast<intptr_t>(allocation_limit.address());
1794 ASSERT((limit - top) == kPointerSize);
1795 ASSERT(result.code() < ip.code());
1797 // Set up allocation top address.
1798 Register topaddr = scratch1;
1799 mov(topaddr, Operand(allocation_top));
1801 // This code stores a temporary value in ip. This is OK, as the code below
1802 // does not need ip for implicit literal generation.
1803 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1804 // Load allocation top into result and allocation limit into ip.
1805 ldm(ia, topaddr, result.bit() | ip.bit());
1807 if (emit_debug_code()) {
1808 // Assert that result actually contains top on entry. ip is used
1809       // immediately below, so this use of ip does not cause a difference in
1810       // register content between debug and release mode.
1811 ldr(ip, MemOperand(topaddr));
1813 Check(eq, kUnexpectedAllocationTop);
1815 // Load allocation limit into ip. Result already contains allocation top.
1816 ldr(ip, MemOperand(topaddr, limit - top));
1819 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1820 // Align the next allocation. Storing the filler map without checking top is
1821 // safe in new-space because the limit of the heap is aligned there.
1822 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1823 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1824 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1827 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1828 cmp(result, Operand(ip));
1831 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1832 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1836 // Calculate new top and bail out if new space is exhausted. Use result
1837 // to calculate the new top. Object size may be in words so a shift is
1838 // required to get the number of bytes.
1839 if ((flags & SIZE_IN_WORDS) != 0) {
1840 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1842 add(scratch2, result, Operand(object_size), SetCC);
1845 cmp(scratch2, Operand(ip));
1848 // Update allocation top. result temporarily holds the new top.
1849 if (emit_debug_code()) {
1850 tst(scratch2, Operand(kObjectAlignmentMask));
1851 Check(eq, kUnalignedAllocationInNewSpace);
1853 str(scratch2, MemOperand(topaddr));
1855 // Tag object if requested.
1856 if ((flags & TAG_OBJECT) != 0) {
1857 add(result, result, Operand(kHeapObjectTag));
1862 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1864 ExternalReference new_space_allocation_top =
1865 ExternalReference::new_space_allocation_top_address(isolate());
1867 // Make sure the object has no tag before resetting top.
1868 and_(object, object, Operand(~kHeapObjectTagMask));
1870   // Check that the object being un-allocated is below the current top.
1871 mov(scratch, Operand(new_space_allocation_top));
1872 ldr(scratch, MemOperand(scratch));
1873 cmp(object, scratch);
1874 Check(lt, kUndoAllocationOfNonAllocatedMemory);
1876 // Write the address of the object to un-allocate as the current top.
1877 mov(scratch, Operand(new_space_allocation_top));
1878 str(object, MemOperand(scratch));
1882 void MacroAssembler::AllocateTwoByteString(Register result,
1887 Label* gc_required) {
1888 // Calculate the number of bytes needed for the characters in the string while
1889 // observing object alignment.
1890 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1891 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1892 add(scratch1, scratch1,
1893 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1894 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1896 // Allocate two-byte string in new space.
1904 // Set the map, length and hash field.
1905 InitializeNewString(result,
1907 Heap::kStringMapRootIndex,
1913 void MacroAssembler::AllocateAsciiString(Register result,
1918 Label* gc_required) {
1919 // Calculate the number of bytes needed for the characters in the string while
1920 // observing object alignment.
1921 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1922 ASSERT(kCharSize == 1);
1923 add(scratch1, length,
1924 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1925 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1927 // Allocate ASCII string in new space.
1935 // Set the map, length and hash field.
1936 InitializeNewString(result,
1938 Heap::kAsciiStringMapRootIndex,
1944 void MacroAssembler::AllocateTwoByteConsString(Register result,
1948 Label* gc_required) {
1949 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1952 InitializeNewString(result,
1954 Heap::kConsStringMapRootIndex,
1960 void MacroAssembler::AllocateAsciiConsString(Register result,
1964 Label* gc_required) {
1965 Label allocate_new_space, install_map;
1966 AllocationFlags flags = TAG_OBJECT;
1968 ExternalReference high_promotion_mode = ExternalReference::
1969 new_space_high_promotion_mode_active_address(isolate());
1970 mov(scratch1, Operand(high_promotion_mode));
1971 ldr(scratch1, MemOperand(scratch1, 0));
1972 cmp(scratch1, Operand::Zero());
1973 b(eq, &allocate_new_space);
1975 Allocate(ConsString::kSize,
1980 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
1984 bind(&allocate_new_space);
1985 Allocate(ConsString::kSize,
1994 InitializeNewString(result,
1996 Heap::kConsAsciiStringMapRootIndex,
2002 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2006 Label* gc_required) {
2007 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2010 InitializeNewString(result,
2012 Heap::kSlicedStringMapRootIndex,
2018 void MacroAssembler::AllocateAsciiSlicedString(Register result,
2022 Label* gc_required) {
2023 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2026 InitializeNewString(result,
2028 Heap::kSlicedAsciiStringMapRootIndex,
2034 void MacroAssembler::CompareObjectType(Register object,
2037 InstanceType type) {
2038 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2040 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2041 CompareInstanceType(map, temp, type);
2045 void MacroAssembler::CheckObjectTypeRange(Register object,
2047 InstanceType min_type,
2048 InstanceType max_type,
2049 Label* false_label) {
2050 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2051 STATIC_ASSERT(LAST_TYPE < 256);
2052 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2053 ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
2054 sub(ip, ip, Operand(min_type));
2055 cmp(ip, Operand(max_type - min_type));
2060 void MacroAssembler::CompareInstanceType(Register map,
2062 InstanceType type) {
2063 // Registers map and type_reg can be ip. These two lines assert
2064 // that ip can be used with the two instructions (the constants
2065 // will never need ip).
2066 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2067 STATIC_ASSERT(LAST_TYPE < 256);
2068 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2069 cmp(type_reg, Operand(type));
2073 void MacroAssembler::CompareRoot(Register obj,
2074 Heap::RootListIndex index) {
2075 ASSERT(!obj.is(ip));
2076 LoadRoot(ip, index);
2081 void MacroAssembler::CheckFastElements(Register map,
2084 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2085 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2086 STATIC_ASSERT(FAST_ELEMENTS == 2);
2087 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2088 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2089 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2094 void MacroAssembler::CheckFastObjectElements(Register map,
2097 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2098 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2099 STATIC_ASSERT(FAST_ELEMENTS == 2);
2100 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2101 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2102 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2104 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2109 void MacroAssembler::CheckFastSmiElements(Register map,
2112 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2113 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2114 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2115 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2120 void MacroAssembler::StoreNumberToDoubleElements(
2123 Register elements_reg,
2125 LowDwVfpRegister double_scratch,
2127 int elements_offset) {
2128 Label smi_value, store;
2130 // Handle smi values specially.
2131 JumpIfSmi(value_reg, &smi_value);
2133 // Ensure that the object is a heap number
2136 isolate()->factory()->heap_number_map(),
2140 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2141 // Force a canonical NaN.
2142 if (emit_debug_code()) {
2144 tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2145 Assert(ne, kDefaultNaNModeNotSet);
2147 VFPCanonicalizeNaN(double_scratch);
2151 SmiToDouble(double_scratch, value_reg);
2154 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2155 vstr(double_scratch,
2156 FieldMemOperand(scratch1,
2157 FixedDoubleArray::kHeaderSize - elements_offset));
2161 void MacroAssembler::CompareMap(Register obj,
2164 Label* early_success) {
2165 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2166 CompareMap(scratch, map, early_success);
2170 void MacroAssembler::CompareMap(Register obj_map,
2172 Label* early_success) {
2173 cmp(obj_map, Operand(map));
2177 void MacroAssembler::CheckMap(Register obj,
2181 SmiCheckType smi_check_type) {
2182 if (smi_check_type == DO_SMI_CHECK) {
2183 JumpIfSmi(obj, fail);
2187 CompareMap(obj, scratch, map, &success);
2193 void MacroAssembler::CheckMap(Register obj,
2195 Heap::RootListIndex index,
2197 SmiCheckType smi_check_type) {
2198 if (smi_check_type == DO_SMI_CHECK) {
2199 JumpIfSmi(obj, fail);
2201 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2202 LoadRoot(ip, index);
2208 void MacroAssembler::DispatchMap(Register obj,
2211 Handle<Code> success,
2212 SmiCheckType smi_check_type) {
2214 if (smi_check_type == DO_SMI_CHECK) {
2215 JumpIfSmi(obj, &fail);
2217 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2218 mov(ip, Operand(map));
2220 Jump(success, RelocInfo::CODE_TARGET, eq);
2225 void MacroAssembler::TryGetFunctionPrototype(Register function,
2229 bool miss_on_bound_function) {
2230 // Check that the receiver isn't a smi.
2231 JumpIfSmi(function, miss);
2233 // Check that the function really is a function. Load map into result reg.
2234 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2237 if (miss_on_bound_function) {
2239 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2241 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2243 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2247 // Make sure that the function has an instance prototype.
2249 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2250 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2251 b(ne, &non_instance);
2253 // Get the prototype or initial map from the function.
2255 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2257 // If the prototype or initial map is the hole, don't return it and
2258 // simply miss the cache instead. This will allow us to allocate a
2259 // prototype object on-demand in the runtime system.
2260 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2264 // If the function does not have an initial map, we're done.
2266 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2269 // Get the prototype from the initial map.
2270 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2273 // Non-instance prototype: Fetch prototype from constructor field
2275 bind(&non_instance);
2276 ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2283 void MacroAssembler::CallStub(CodeStub* stub,
2284 TypeFeedbackId ast_id,
2286 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2287 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
2291 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2292 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
2296 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2297 return ref0.address() - ref1.address();
2301 void MacroAssembler::CallApiFunctionAndReturn(
2302 Register function_address,
2303 ExternalReference thunk_ref,
2305 MemOperand return_value_operand,
2306 MemOperand* context_restore_operand) {
2307 ExternalReference next_address =
2308 ExternalReference::handle_scope_next_address(isolate());
2309 const int kNextOffset = 0;
2310 const int kLimitOffset = AddressOffset(
2311 ExternalReference::handle_scope_limit_address(isolate()),
2313 const int kLevelOffset = AddressOffset(
2314 ExternalReference::handle_scope_level_address(isolate()),
2317 ASSERT(function_address.is(r1) || function_address.is(r2));
2319 Label profiler_disabled;
2320 Label end_profiler_check;
2321 bool* is_profiling_flag =
2322 isolate()->cpu_profiler()->is_profiling_address();
2323 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
2324 mov(r9, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
2325 ldrb(r9, MemOperand(r9, 0));
2326 cmp(r9, Operand(0));
2327 b(eq, &profiler_disabled);
2329 // Additional parameter is the address of the actual callback.
2330 mov(r3, Operand(thunk_ref));
2331 jmp(&end_profiler_check);
2333 bind(&profiler_disabled);
2334 Move(r3, function_address);
2335 bind(&end_profiler_check);
2337 // Allocate HandleScope in callee-save registers.
2338 mov(r9, Operand(next_address));
2339 ldr(r4, MemOperand(r9, kNextOffset));
2340 ldr(r5, MemOperand(r9, kLimitOffset));
2341 ldr(r6, MemOperand(r9, kLevelOffset));
2342 add(r6, r6, Operand(1));
2343 str(r6, MemOperand(r9, kLevelOffset));
2345 if (FLAG_log_timer_events) {
2346 FrameScope frame(this, StackFrame::MANUAL);
2347 PushSafepointRegisters();
2348 PrepareCallCFunction(1, r0);
2349 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2350 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2351 PopSafepointRegisters();
2354 // Native call returns to the DirectCEntry stub which redirects to the
2355 // return address pushed on stack (could have moved after GC).
2356 // DirectCEntry stub itself is generated early and never moves.
2357 DirectCEntryStub stub;
2358 stub.GenerateCall(this, r3);
2360 if (FLAG_log_timer_events) {
2361 FrameScope frame(this, StackFrame::MANUAL);
2362 PushSafepointRegisters();
2363 PrepareCallCFunction(1, r0);
2364 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2365 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2366 PopSafepointRegisters();
2369 Label promote_scheduled_exception;
2370 Label exception_handled;
2371 Label delete_allocated_handles;
2372 Label leave_exit_frame;
2373 Label return_value_loaded;
2375 // load value from ReturnValue
2376 ldr(r0, return_value_operand);
2377 bind(&return_value_loaded);
2378 // No more valid handles (the result handle was the last one). Restore
2379 // previous handle scope.
2380 str(r4, MemOperand(r9, kNextOffset));
2381 if (emit_debug_code()) {
2382 ldr(r1, MemOperand(r9, kLevelOffset));
2384 Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2386 sub(r6, r6, Operand(1));
2387 str(r6, MemOperand(r9, kLevelOffset));
2388 ldr(ip, MemOperand(r9, kLimitOffset));
2390 b(ne, &delete_allocated_handles);
2392 // Check if the function scheduled an exception.
2393 bind(&leave_exit_frame);
2394 LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2395 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2396 ldr(r5, MemOperand(ip));
2398 b(ne, &promote_scheduled_exception);
2399 bind(&exception_handled);
2401 bool restore_context = context_restore_operand != NULL;
2402 if (restore_context) {
2403 ldr(cp, *context_restore_operand);
2405 // LeaveExitFrame expects unwind space to be in a register.
2406 mov(r4, Operand(stack_space));
2407 LeaveExitFrame(false, r4, !restore_context);
2410 bind(&promote_scheduled_exception);
2412 FrameScope frame(this, StackFrame::INTERNAL);
2413 CallExternalReference(
2414 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2417 jmp(&exception_handled);
2419 // HandleScope limit has changed. Delete allocated extensions.
2420 bind(&delete_allocated_handles);
2421 str(r5, MemOperand(r9, kLimitOffset));
2423 PrepareCallCFunction(1, r5);
2424 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2426 ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2428 jmp(&leave_exit_frame);
2432 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2433 return has_frame_ || !stub->SometimesSetsUpAFrame();
2437 void MacroAssembler::IllegalOperation(int num_arguments) {
2438 if (num_arguments > 0) {
2439 add(sp, sp, Operand(num_arguments * kPointerSize));
2441 LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2445 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2446 // If the hash field contains an array index pick it out. The assert checks
2447 // that the constants for the maximum number of digits for an array index
2448 // cached in the hash field and the number of bits reserved for it do not conflict.
2450 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2451 (1 << String::kArrayIndexValueBits));
2452 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
2453 // the low kHashShift bits.
2454 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2455 SmiTag(index, hash);
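// In other words: Ubfx pulls the cached array index out of the hash field
// (kArrayIndexValueBits bits starting at String::kHashShift), and SmiTag then
// produces index == value << 1, i.e. the smi-tagged integer (kSmiTag is 0 and
// kSmiTagSize is 1 on this 32-bit target).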
2459 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2460 if (CpuFeatures::IsSupported(VFP3)) {
2461 vmov(value.low(), smi);
2462 vcvt_f64_s32(value, 1);
2465 vmov(value.low(), ip);
2466 vcvt_f64_s32(value, value.low());
2471 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2472 LowDwVfpRegister double_scratch) {
2473 ASSERT(!double_input.is(double_scratch));
2474 vcvt_s32_f64(double_scratch.low(), double_input);
2475 vcvt_f64_s32(double_scratch, double_scratch.low());
2476 VFPCompareAndSetFlags(double_input, double_scratch);
2480 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2481 DwVfpRegister double_input,
2482 LowDwVfpRegister double_scratch) {
2483 ASSERT(!double_input.is(double_scratch));
2484 vcvt_s32_f64(double_scratch.low(), double_input);
2485 vmov(result, double_scratch.low());
2486 vcvt_f64_s32(double_scratch, double_scratch.low());
2487 VFPCompareAndSetFlags(double_input, double_scratch);
2491 void MacroAssembler::TryInt32Floor(Register result,
2492 DwVfpRegister double_input,
2493 Register input_high,
2494 LowDwVfpRegister double_scratch,
2497 ASSERT(!result.is(input_high));
2498 ASSERT(!double_input.is(double_scratch));
2499 Label negative, exception;
2501 VmovHigh(input_high, double_input);
2503 // Test for NaN and infinities.
2504 Sbfx(result, input_high,
2505 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2506 cmp(result, Operand(-1));
2508 // Test for values that can be exactly represented as a
2509 // signed 32-bit integer.
2510 TryDoubleToInt32Exact(result, double_input, double_scratch);
2511 // If exact, return (result already fetched).
2513 cmp(input_high, Operand::Zero());
2516 // Input is in ]+0, +inf[.
2517 // If result equals 0x7fffffff input was out of range or
2518 // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2519 // could fit into an int32; that means we always treat such input as
2520 // out of range and always go to the exception path.
2521 // If result < 0x7fffffff, go to done, result fetched.
2522 cmn(result, Operand(1));
2526 // Input is in ]-inf, -0[.
2527 // If x is a non integer negative number,
2528 // floor(x) <=> round_to_zero(x) - 1.
2530 sub(result, result, Operand(1), SetCC);
2531 // If result is still negative, go to done, result fetched.
2532 // Else, we had an overflow and we fall through to the exception path.
2537 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2538 DwVfpRegister double_input,
2540 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2541 vcvt_s32_f64(double_scratch.low(), double_input);
2542 vmov(result, double_scratch.low());
2544 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2545 sub(ip, result, Operand(1));
2546 cmp(ip, Operand(0x7ffffffe));
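// The sub/cmp pair works because vcvt_s32_f64 saturates out-of-range inputs
// to 0x7fffffff or 0x80000000. Subtracting 1 maps those two values to
// 0x7ffffffe and 0x7fffffff respectively, while every other result maps to
// something signed-less-than 0x7ffffffe, so a single signed comparison
// separates the saturated cases from the ones that are already done.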
2551 void MacroAssembler::TruncateDoubleToI(Register result,
2552 DwVfpRegister double_input) {
2555 TryInlineTruncateDoubleToI(result, double_input, &done);
2557 // If we fell through, the inline version didn't succeed, so call the stub instead.
2559 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2560 vstr(double_input, MemOperand(sp, 0));
2562 DoubleToIStub stub(sp, result, 0, true, true);
2565 add(sp, sp, Operand(kDoubleSize));
2572 void MacroAssembler::TruncateHeapNumberToI(Register result,
2575 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2576 ASSERT(!result.is(object));
2578 vldr(double_scratch,
2579 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2580 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2582 // If we fell through, the inline version didn't succeed, so call the stub instead.
2584 DoubleToIStub stub(object,
2586 HeapNumber::kValueOffset - kHeapObjectTag,
2596 void MacroAssembler::TruncateNumberToI(Register object,
2598 Register heap_number_map,
2600 Label* not_number) {
2602 ASSERT(!result.is(object));
2604 UntagAndJumpIfSmi(result, object, &done);
2605 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2606 TruncateHeapNumberToI(result, object);
2612 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2614 int num_least_bits) {
2615 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2616 ubfx(dst, src, kSmiTagSize, num_least_bits);
2619 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2624 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2626 int num_least_bits) {
2627 and_(dst, src, Operand((1 << num_least_bits) - 1));
2631 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2633 SaveFPRegsMode save_doubles) {
2634 // All parameters are on the stack. r0 has the return value after call.
2636 // If the expected number of arguments of the runtime function is
2637 // constant, we check that the actual number of arguments matches the expectation.
2639 if (f->nargs >= 0 && f->nargs != num_arguments) {
2640 IllegalOperation(num_arguments);
2644 // TODO(1236192): Most runtime routines don't need the number of
2645 // arguments passed in because it is constant. At some point we
2646 // should remove this need and make the runtime routine entry code smarter.
2648 mov(r0, Operand(num_arguments));
2649 mov(r1, Operand(ExternalReference(f, isolate())));
2650 CEntryStub stub(1, save_doubles);
2655 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2656 int num_arguments) {
2657 mov(r0, Operand(num_arguments));
2658 mov(r1, Operand(ext));
2665 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2668 // TODO(1236192): Most runtime routines don't need the number of
2669 // arguments passed in because it is constant. At some point we
2670 // should remove this need and make the runtime routine entry code smarter.
2672 mov(r0, Operand(num_arguments));
2673 JumpToExternalReference(ext);
2677 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2680 TailCallExternalReference(ExternalReference(fid, isolate()),
2686 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2687 #if defined(__thumb__)
2688 // Thumb mode builtin.
2689 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2691 mov(r1, Operand(builtin));
2693 Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
2697 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2699 const CallWrapper& call_wrapper) {
2700 // You can't call a builtin without a valid frame.
2701 ASSERT(flag == JUMP_FUNCTION || has_frame());
2703 GetBuiltinEntry(r2, id);
2704 if (flag == CALL_FUNCTION) {
2705 call_wrapper.BeforeCall(CallSize(r2));
2707 call_wrapper.AfterCall();
2709 ASSERT(flag == JUMP_FUNCTION);
2715 void MacroAssembler::GetBuiltinFunction(Register target,
2716 Builtins::JavaScript id) {
2717 // Load the builtins object into target register.
2719 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2720 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2721 // Load the JavaScript builtin function from the builtins object.
2722 ldr(target, FieldMemOperand(target,
2723 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2727 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2728 ASSERT(!target.is(r1));
2729 GetBuiltinFunction(r1, id);
2730 // Load the code entry point from the builtins object.
2731 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2735 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2736 Register scratch1, Register scratch2) {
2737 if (FLAG_native_code_counters && counter->Enabled()) {
2738 mov(scratch1, Operand(value));
2739 mov(scratch2, Operand(ExternalReference(counter)));
2740 str(scratch1, MemOperand(scratch2));
2745 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2746 Register scratch1, Register scratch2) {
2748 if (FLAG_native_code_counters && counter->Enabled()) {
2749 mov(scratch2, Operand(ExternalReference(counter)));
2750 ldr(scratch1, MemOperand(scratch2));
2751 add(scratch1, scratch1, Operand(value));
2752 str(scratch1, MemOperand(scratch2));
2757 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2758 Register scratch1, Register scratch2) {
2760 if (FLAG_native_code_counters && counter->Enabled()) {
2761 mov(scratch2, Operand(ExternalReference(counter)));
2762 ldr(scratch1, MemOperand(scratch2));
2763 sub(scratch1, scratch1, Operand(value));
2764 str(scratch1, MemOperand(scratch2));
2769 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2770 if (emit_debug_code())
2771 Check(cond, reason);
2775 void MacroAssembler::AssertFastElements(Register elements) {
2776 if (emit_debug_code()) {
2777 ASSERT(!elements.is(ip));
2780 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2781 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2784 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2787 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2790 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2797 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2801 // will not return here
2806 void MacroAssembler::Abort(BailoutReason reason) {
2809 // We want to pass the msg string like a smi to avoid GC
2810 // problems; however, msg is not guaranteed to be aligned
2811 // properly. Instead, we pass an aligned pointer that is
2812 // a proper v8 smi, but also pass the alignment difference
2813 // from the real pointer as a smi.
2814 const char* msg = GetBailoutReason(reason);
2815 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2816 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2817 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
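// For illustration: with kSmiTag == 0 and kSmiTagMask == 1, p0 is simply p1
// with its low bit cleared, which makes it look like a valid smi. E.g. if msg
// happened to live at 0x5431, p0 == 0x5430 is passed as the "pointer" and
// Smi::FromInt(1) as the alignment difference, letting the runtime rebuild
// the real char* without the GC ever seeing an unaligned value.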
2820 RecordComment("Abort message: ");
2824 if (FLAG_trap_on_abort) {
2830 mov(r0, Operand(p0));
2832 mov(r0, Operand(Smi::FromInt(p1 - p0)));
2834 // Disable stub call restrictions to always allow calls to abort.
2836 // We don't actually want to generate a pile of code for this, so just
2837 // claim there is a stack frame, without generating one.
2838 FrameScope scope(this, StackFrame::NONE);
2839 CallRuntime(Runtime::kAbort, 2);
2841 CallRuntime(Runtime::kAbort, 2);
2843 // will not return here
2844 if (is_const_pool_blocked()) {
2845 // If the calling code cares about the exact number of
2846 // instructions generated, we insert padding here to keep the size
2847 // of the Abort macro constant.
2848 static const int kExpectedAbortInstructions = 10;
2849 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2850 ASSERT(abort_instructions <= kExpectedAbortInstructions);
2851 while (abort_instructions++ < kExpectedAbortInstructions) {
2858 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2859 if (context_chain_length > 0) {
2860 // Move up the chain of contexts to the context containing the slot.
2861 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2862 for (int i = 1; i < context_chain_length; i++) {
2863 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2866 // Slot is in the current function context. Move it into the
2867 // destination register in case we store into it (the write barrier
2868 // cannot be allowed to destroy the context in cp).
2874 void MacroAssembler::LoadTransitionedArrayMapConditional(
2875 ElementsKind expected_kind,
2876 ElementsKind transitioned_kind,
2877 Register map_in_out,
2879 Label* no_map_match) {
2880 // Load the global or builtins object from the current context.
2882 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2883 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2885 // Check that the function's map is the same as the expected cached map.
2888 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2889 size_t offset = expected_kind * kPointerSize +
2890 FixedArrayBase::kHeaderSize;
2891 ldr(ip, FieldMemOperand(scratch, offset));
2892 cmp(map_in_out, ip);
2893 b(ne, no_map_match);
2895 // Use the transitioned cached map.
2896 offset = transitioned_kind * kPointerSize +
2897 FixedArrayBase::kHeaderSize;
2898 ldr(map_in_out, FieldMemOperand(scratch, offset));
2902 void MacroAssembler::LoadInitialArrayMap(
2903 Register function_in, Register scratch,
2904 Register map_out, bool can_have_holes) {
2905 ASSERT(!function_in.is(map_out));
2907 ldr(map_out, FieldMemOperand(function_in,
2908 JSFunction::kPrototypeOrInitialMapOffset));
2909 if (!FLAG_smi_only_arrays) {
2910 ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
2911 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2916 } else if (can_have_holes) {
2917 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2918 FAST_HOLEY_SMI_ELEMENTS,
2927 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2928 // Load the global or builtins object from the current context.
2930 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2931 // Load the native context from the global or builtins object.
2932 ldr(function, FieldMemOperand(function,
2933 GlobalObject::kNativeContextOffset));
2934 // Load the function from the native context.
2935 ldr(function, MemOperand(function, Context::SlotOffset(index)));
2939 void MacroAssembler::LoadArrayFunction(Register function) {
2940 // Load the global or builtins object from the current context.
2942 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2943 // Load the global context from the global or builtins object.
2945 FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
2946 // Load the array function from the native context.
2948 MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
2952 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2955 // Load the initial map. The global functions all have initial maps.
2956 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2957 if (emit_debug_code()) {
2959 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2962 Abort(kGlobalFunctionsMustHaveInitialMap);
2968 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2971 Label* not_power_of_two_or_zero) {
2972 sub(scratch, reg, Operand(1), SetCC);
2973 b(mi, not_power_of_two_or_zero);
2975 b(ne, not_power_of_two_or_zero);
2979 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2982 Label* zero_and_neg,
2983 Label* not_power_of_two) {
2984 sub(scratch, reg, Operand(1), SetCC);
2985 b(mi, zero_and_neg);
2987 b(ne, not_power_of_two);
2991 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2993 Label* on_not_both_smi) {
2994 STATIC_ASSERT(kSmiTag == 0);
2995 tst(reg1, Operand(kSmiTagMask));
2996 tst(reg2, Operand(kSmiTagMask), eq);
2997 b(ne, on_not_both_smi);
3001 void MacroAssembler::UntagAndJumpIfSmi(
3002 Register dst, Register src, Label* smi_case) {
3003 STATIC_ASSERT(kSmiTag == 0);
3004 SmiUntag(dst, src, SetCC);
3005 b(cc, smi_case); // Shifter carry is not set for a smi.
3009 void MacroAssembler::UntagAndJumpIfNotSmi(
3010 Register dst, Register src, Label* non_smi_case) {
3011 STATIC_ASSERT(kSmiTag == 0);
3012 SmiUntag(dst, src, SetCC);
3013 b(cs, non_smi_case); // Shifter carry is set for a non-smi.
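// Why the carry flag works in both helpers above: SmiUntag with SetCC is a
// flag-setting shift right by one, so the bit shifted out (the tag bit) ends
// up in the carry flag. Smis have tag 0, giving carry-clear (cc); heap object
// pointers have tag 1, giving carry-set (cs).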
3017 void MacroAssembler::JumpIfEitherSmi(Register reg1,
3019 Label* on_either_smi) {
3020 STATIC_ASSERT(kSmiTag == 0);
3021 tst(reg1, Operand(kSmiTagMask));
3022 tst(reg2, Operand(kSmiTagMask), ne);
3023 b(eq, on_either_smi);
3027 void MacroAssembler::AssertNotSmi(Register object) {
3028 if (emit_debug_code()) {
3029 STATIC_ASSERT(kSmiTag == 0);
3030 tst(object, Operand(kSmiTagMask));
3031 Check(ne, kOperandIsASmi);
3036 void MacroAssembler::AssertSmi(Register object) {
3037 if (emit_debug_code()) {
3038 STATIC_ASSERT(kSmiTag == 0);
3039 tst(object, Operand(kSmiTagMask));
3040 Check(eq, kOperandIsNotSmi);
3045 void MacroAssembler::AssertString(Register object) {
3046 if (emit_debug_code()) {
3047 STATIC_ASSERT(kSmiTag == 0);
3048 tst(object, Operand(kSmiTagMask));
3049 Check(ne, kOperandIsASmiAndNotAString);
3051 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3052 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3054 Check(lo, kOperandIsNotAString);
3059 void MacroAssembler::AssertName(Register object) {
3060 if (emit_debug_code()) {
3061 STATIC_ASSERT(kSmiTag == 0);
3062 tst(object, Operand(kSmiTagMask));
3063 Check(ne, kOperandIsASmiAndNotAName);
3065 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3066 CompareInstanceType(object, object, LAST_NAME_TYPE);
3068 Check(le, kOperandIsNotAName);
3074 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3075 if (emit_debug_code()) {
3076 CompareRoot(reg, index);
3077 Check(eq, kHeapNumberMapRegisterClobbered);
3082 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3083 Register heap_number_map,
3085 Label* on_not_heap_number) {
3086 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3087 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3088 cmp(scratch, heap_number_map);
3089 b(ne, on_not_heap_number);
3093 void MacroAssembler::LookupNumberStringCache(Register object,
3099 // Use of registers. Register result is used as a temporary.
3100 Register number_string_cache = result;
3101 Register mask = scratch3;
3103 // Load the number string cache.
3104 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3106 // Make the hash mask from the length of the number string cache. It
3107 // contains two elements (number and string) for each cache entry.
3108 ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
3109 // Divide length by two (length is a smi).
3110 mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
3111 sub(mask, mask, Operand(1)); // Make mask.
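// Illustrative numbers: for a cache with, say, 64 entries the backing
// FixedArray has length 128, stored as the smi 256 on this 32-bit target.
// Shifting right by kSmiTagSize + 1 == 2 yields 64 (untag and divide by two
// in one go), and subtracting 1 gives the mask 0x3f.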
3113 // Calculate the entry in the number string cache. The hash value in the
3114 // number string cache for smis is just the smi value, and the hash for
3115 // doubles is the xor of the upper and lower words. See
3116 // Heap::GetNumberStringCache.
3118 Label load_result_from_cache;
3119 JumpIfSmi(object, &is_smi);
3122 Heap::kHeapNumberMapRootIndex,
3126 STATIC_ASSERT(8 == kDoubleSize);
3129 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
3130 ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
3131 eor(scratch1, scratch1, Operand(scratch2));
3132 and_(scratch1, scratch1, Operand(mask));
3134 // Calculate address of entry in string cache: each entry consists
3135 // of two pointer sized fields.
3137 number_string_cache,
3138 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
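// The ldm/eor/and_ sequence above implements the double hash described in the
// comment: the two 32-bit halves of the HeapNumber payload are xor'ed and
// masked. The entry address then uses LSL #(kPointerSizeLog2 + 1), i.e.
// hash * 8, because each cache entry is a (number, string) pair of two
// 4-byte pointers.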
3140 Register probe = mask;
3141 ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3142 JumpIfSmi(probe, not_found);
3143 sub(scratch2, object, Operand(kHeapObjectTag));
3144 vldr(d0, scratch2, HeapNumber::kValueOffset);
3145 sub(probe, probe, Operand(kHeapObjectTag));
3146 vldr(d1, probe, HeapNumber::kValueOffset);
3147 VFPCompareAndSetFlags(d0, d1);
3148 b(ne, not_found); // The cache did not contain this value.
3149 b(&load_result_from_cache);
3152 Register scratch = scratch1;
3153 and_(scratch, mask, Operand(object, ASR, 1));
3154 // Calculate address of entry in string cache: each entry consists
3155 // of two pointer sized fields.
3157 number_string_cache,
3158 Operand(scratch, LSL, kPointerSizeLog2 + 1));
3160 // Check if the entry is the smi we are looking for.
3161 ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3165 // Get the result from the cache.
3166 bind(&load_result_from_cache);
3167 ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
3168 IncrementCounter(isolate()->counters()->number_to_string_native(),
3175 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3181 // Test that both first and second are sequential ASCII strings.
3182 // Assume that they are non-smis.
3183 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3184 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3185 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3186 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3188 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3195 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3200 // Check that neither is a smi.
3201 and_(scratch1, first, Operand(second));
3202 JumpIfSmi(scratch1, failure);
3203 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3211 void MacroAssembler::JumpIfNotUniqueName(Register reg,
3212 Label* not_unique_name) {
3213 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3215 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3217 cmp(reg, Operand(SYMBOL_TYPE));
3218 b(ne, not_unique_name);
3224 // Allocates a heap number or jumps to the gc_required label if the young space
3225 // is full and a scavenge is needed.
3226 void MacroAssembler::AllocateHeapNumber(Register result,
3229 Register heap_number_map,
3231 TaggingMode tagging_mode) {
3232 // Allocate an object in the heap for the heap number and tag it as a heap object.
3234 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3235 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3237 // Store heap number map in the allocated object.
3238 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3239 if (tagging_mode == TAG_RESULT) {
3240 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3242 str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3247 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3248 DwVfpRegister value,
3251 Register heap_number_map,
3252 Label* gc_required) {
3253 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3254 sub(scratch1, result, Operand(kHeapObjectTag));
3255 vstr(value, scratch1, HeapNumber::kValueOffset);
3259 // Allocates a simd128 object or jumps to the gc_required label if the young space
3260 // is full and a scavenge is needed.
3261 void MacroAssembler::AllocateSIMDHeapObject(int size,
3267 TaggingMode tagging_mode) {
3268 Allocate(size, result, scratch1, scratch2, gc_required,
3269 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3271 if (tagging_mode == TAG_RESULT) {
3272 str(map, FieldMemOperand(result, HeapObject::kMapOffset));
3274 str(map, MemOperand(result, HeapObject::kMapOffset));
3279 // Copies a fixed number of fields of heap objects from src to dst.
3280 void MacroAssembler::CopyFields(Register dst,
3282 LowDwVfpRegister double_scratch,
3284 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
3285 for (int i = 0; i < double_count; i++) {
3286 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
3287 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
3290 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
3291 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
3293 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
3295 vldr(double_scratch.low(),
3296 FieldMemOperand(src, (field_count - 1) * kPointerSize));
3297 vstr(double_scratch.low(),
3298 FieldMemOperand(dst, (field_count - 1) * kPointerSize));
3303 void MacroAssembler::CopyBytes(Register src,
3307 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3309 // Align src before copying in word size chunks.
3310 cmp(length, Operand(kPointerSize));
3313 bind(&align_loop_1);
3314 tst(src, Operand(kPointerSize - 1));
3316 ldrb(scratch, MemOperand(src, 1, PostIndex));
3317 strb(scratch, MemOperand(dst, 1, PostIndex));
3318 sub(length, length, Operand(1), SetCC);
3320 // Copy bytes in word size chunks.
3322 if (emit_debug_code()) {
3323 tst(src, Operand(kPointerSize - 1));
3324 Assert(eq, kExpectingAlignmentForCopyBytes);
3326 cmp(length, Operand(kPointerSize));
3328 ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3329 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3330 str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3332 strb(scratch, MemOperand(dst, 1, PostIndex));
3333 mov(scratch, Operand(scratch, LSR, 8));
3334 strb(scratch, MemOperand(dst, 1, PostIndex));
3335 mov(scratch, Operand(scratch, LSR, 8));
3336 strb(scratch, MemOperand(dst, 1, PostIndex));
3337 mov(scratch, Operand(scratch, LSR, 8));
3338 strb(scratch, MemOperand(dst, 1, PostIndex));
3340 sub(length, length, Operand(kPointerSize));
3343 // Copy the last bytes, if any are left.
3345 cmp(length, Operand::Zero());
3348 ldrb(scratch, MemOperand(src, 1, PostIndex));
3349 strb(scratch, MemOperand(dst, 1, PostIndex));
3350 sub(length, length, Operand(1), SetCC);
3351 b(ne, &byte_loop_1);
3356 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3357 Register end_offset,
3362 str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3364 cmp(start_offset, end_offset);
3369 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3370 mov(scratch, Operand(ExternalReference::cpu_features()));
3371 ldr(scratch, MemOperand(scratch));
3372 tst(scratch, Operand(1u << VFP32DREGS));
3376 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3377 CheckFor32DRegs(scratch);
3378 vstm(db_w, location, d16, d31, ne);
3379 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3380 vstm(db_w, location, d0, d15);
3384 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3385 CheckFor32DRegs(scratch);
3386 vldm(ia_w, location, d0, d15);
3387 vldm(ia_w, location, d16, d31, ne);
3388 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3392 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3398 const int kFlatAsciiStringMask =
3399 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3400 const int kFlatAsciiStringTag =
3401 kStringTag | kOneByteStringTag | kSeqStringTag;
3402 and_(scratch1, first, Operand(kFlatAsciiStringMask));
3403 and_(scratch2, second, Operand(kFlatAsciiStringMask));
3404 cmp(scratch1, Operand(kFlatAsciiStringTag));
3405 // Ignore second test if first test failed.
3406 cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
3411 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3414 const int kFlatAsciiStringMask =
3415 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3416 const int kFlatAsciiStringTag =
3417 kStringTag | kOneByteStringTag | kSeqStringTag;
3418 and_(scratch, type, Operand(kFlatAsciiStringMask));
3419 cmp(scratch, Operand(kFlatAsciiStringTag));
3423 static const int kRegisterPassedArguments = 4;
3426 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3427 int num_double_arguments) {
3428 int stack_passed_words = 0;
3429 if (use_eabi_hardfloat()) {
3430 // In the hard floating point calling convention, we can use
3431 // all double registers to pass doubles.
3432 if (num_double_arguments > DoubleRegister::NumRegisters()) {
3433 stack_passed_words +=
3434 2 * (num_double_arguments - DoubleRegister::NumRegisters());
3437 // In the soft floating point calling convention, every double
3438 // argument is passed using two registers.
3439 num_reg_arguments += 2 * num_double_arguments;
3441 // Up to four simple arguments are passed in registers r0..r3.
3442 if (num_reg_arguments > kRegisterPassedArguments) {
3443 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3445 return stack_passed_words;
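// Purely illustrative example: a call with 2 core arguments and 3 double
// arguments needs 0 stack words under the hard-float path here
// (3 <= DoubleRegister::NumRegisters() and 2 <= 4), but under soft-float it
// becomes 2 + 2 * 3 == 8 register-style arguments, of which 8 - 4 == 4 spill
// to the stack.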
3449 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3452 uint32_t encoding_mask) {
3455 Check(ne, kNonObject);
3457 ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3458 ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3460 and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3461 cmp(ip, Operand(encoding_mask));
3462 Check(eq, kUnexpectedStringType);
3464 // The index is assumed to be untagged coming in; tag it to compare with the
3465 // string length without using a temp register. It is restored at the end of this function.
3467 Label index_tag_ok, index_tag_bad;
3468 TrySmiTag(index, index, &index_tag_bad);
3470 bind(&index_tag_bad);
3471 Abort(kIndexIsTooLarge);
3472 bind(&index_tag_ok);
3474 ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3476 Check(lt, kIndexIsTooLarge);
3478 cmp(index, Operand(Smi::FromInt(0)));
3479 Check(ge, kIndexIsNegative);
3481 SmiUntag(index, index);
3485 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3486 int num_double_arguments,
3488 int frame_alignment = ActivationFrameAlignment();
3489 int stack_passed_arguments = CalculateStackPassedWords(
3490 num_reg_arguments, num_double_arguments);
3491 if (frame_alignment > kPointerSize) {
3492 // Make stack end at alignment and make room for num_arguments - 4 words
3493 // and the original value of sp.
3495 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3496 ASSERT(IsPowerOf2(frame_alignment));
3497 and_(sp, sp, Operand(-frame_alignment));
3498 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3500 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3505 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3507 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3511 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
3513 if (!use_eabi_hardfloat()) {
3519 // On ARM this is just a synonym to make the purpose clear.
3520 void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
3521 MovToFloatParameter(src);
3525 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
3526 DwVfpRegister src2) {
3527 ASSERT(src1.is(d0));
3528 ASSERT(src2.is(d1));
3529 if (!use_eabi_hardfloat()) {
3536 void MacroAssembler::CallCFunction(ExternalReference function,
3537 int num_reg_arguments,
3538 int num_double_arguments) {
3539 mov(ip, Operand(function));
3540 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3544 void MacroAssembler::CallCFunction(Register function,
3545 int num_reg_arguments,
3546 int num_double_arguments) {
3547 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3551 void MacroAssembler::CallCFunction(ExternalReference function,
3552 int num_arguments) {
3553 CallCFunction(function, num_arguments, 0);
3557 void MacroAssembler::CallCFunction(Register function,
3558 int num_arguments) {
3559 CallCFunction(function, num_arguments, 0);
3563 void MacroAssembler::CallCFunctionHelper(Register function,
3564 int num_reg_arguments,
3565 int num_double_arguments) {
3566 ASSERT(has_frame());
3567 // Make sure that the stack is aligned before calling a C function unless
3568 // running in the simulator. The simulator has its own alignment check which
3569 // provides more information.
3570 #if V8_HOST_ARCH_ARM
3571 if (emit_debug_code()) {
3572 int frame_alignment = OS::ActivationFrameAlignment();
3573 int frame_alignment_mask = frame_alignment - 1;
3574 if (frame_alignment > kPointerSize) {
3575 ASSERT(IsPowerOf2(frame_alignment));
3576 Label alignment_as_expected;
3577 tst(sp, Operand(frame_alignment_mask));
3578 b(eq, &alignment_as_expected);
3579 // Don't use Check here, as it will call Runtime_Abort possibly
3580 // re-entering here.
3581 stop("Unexpected alignment");
3582 bind(&alignment_as_expected);
3587 // Just call directly. The function called cannot cause a GC, or
3588 // allow preemption, so the return address in the link register stays correct.
3591 int stack_passed_arguments = CalculateStackPassedWords(
3592 num_reg_arguments, num_double_arguments);
3593 if (ActivationFrameAlignment() > kPointerSize) {
3594 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3596 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
3601 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3603 const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3604 const int32_t kPCRegOffset = 2 * kPointerSize;
3605 ldr(result, MemOperand(ldr_location));
3606 if (emit_debug_code()) {
3607 // Check that the instruction is a ldr reg, [pc + offset].
3608 and_(result, result, Operand(kLdrPCPattern));
3609 cmp(result, Operand(kLdrPCPattern));
3610 Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
3611 // Result was clobbered. Restore it.
3612 ldr(result, MemOperand(ldr_location));
3614 // Get the address of the constant.
3615 and_(result, result, Operand(kLdrOffsetMask));
3616 add(result, ldr_location, Operand(result));
3617 add(result, result, Operand(kPCRegOffset));
3621 void MacroAssembler::CheckPageFlag(
3626 Label* condition_met) {
3627 Bfc(scratch, object, 0, kPageSizeBits);
3628 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3629 tst(scratch, Operand(mask));
3630 b(cc, condition_met);
3634 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
3636 Label* if_deprecated) {
3637 if (map->CanBeDeprecated()) {
3638 mov(scratch, Operand(map));
3639 ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3640 tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
3641 b(ne, if_deprecated);
3646 void MacroAssembler::JumpIfBlack(Register object,
3650 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3651 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3655 void MacroAssembler::HasColor(Register object,
3656 Register bitmap_scratch,
3657 Register mask_scratch,
3661 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3663 GetMarkBits(object, bitmap_scratch, mask_scratch);
3665 Label other_color, word_boundary;
3666 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3667 tst(ip, Operand(mask_scratch));
3668 b(first_bit == 1 ? eq : ne, &other_color);
3669 // Shift left 1 by adding.
3670 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3671 b(eq, &word_boundary);
3672 tst(ip, Operand(mask_scratch));
3673 b(second_bit == 1 ? ne : eq, has_color);
3676 bind(&word_boundary);
3677 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3678 tst(ip, Operand(1));
3679 b(second_bit == 1 ? ne : eq, has_color);
3684 // Detect some, but not all, common pointer-free objects. This is used by the
3685 // incremental write barrier which doesn't care about oddballs (they are always
3686 // marked black immediately so this code is not hit).
3687 void MacroAssembler::JumpIfDataObject(Register value,
3689 Label* not_data_object) {
3690 Label is_data_object;
3691 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3692 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3693 b(eq, &is_data_object);
3694 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3695 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3696 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
3698 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3699 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3700 b(ne, not_data_object);
3701 bind(&is_data_object);
3705 void MacroAssembler::GetMarkBits(Register addr_reg,
3706 Register bitmap_reg,
3707 Register mask_reg) {
3708 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3709 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3710 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3711 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3712 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3713 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3714 mov(ip, Operand(1));
3715 mov(mask_reg, Operand(ip, LSL, mask_reg));
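// Sketch of the arithmetic above, assuming one mark bit per pointer-sized
// word and 32 bits per bitmap cell on this 32-bit build: bitmap_reg first
// becomes the start of the page containing addr_reg; mask_reg picks up the
// bit index within a cell (bits [kPointerSizeLog2, kPointerSizeLog2 +
// kBitsPerCellLog2) of the address); ip becomes the cell index within the
// page, which, scaled by kPointerSize, selects the cell; finally mask_reg is
// turned into the single-bit mask 1 << bit_index.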
3719 void MacroAssembler::EnsureNotWhite(
3721 Register bitmap_scratch,
3722 Register mask_scratch,
3723 Register load_scratch,
3724 Label* value_is_white_and_not_data) {
3725 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3726 GetMarkBits(value, bitmap_scratch, mask_scratch);
3728 // If the value is black or grey we don't need to do anything.
3729 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3730 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3731 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3732 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3736 // Since both black and grey have a 1 in the first position and white does
3737 // not have a 1 there, we only need to check one bit.
3738 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3739 tst(mask_scratch, load_scratch);
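// Per the bit patterns asserted above, this tst checks only the first mark
// bit: it is set for black ("10") and grey ("11") objects and clear for white
// ("00") ones, so a single bit test is enough to skip the non-white cases.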
3742 if (emit_debug_code()) {
3743 // Check for impossible bit pattern.
3745 // LSL may overflow, making the check conservative.
3746 tst(load_scratch, Operand(mask_scratch, LSL, 1));
3748 stop("Impossible marking bit pattern");
3752 // Value is white. We check whether it is data that doesn't need scanning.
3753 // Currently only checks for HeapNumber and non-cons strings.
3754 Register map = load_scratch; // Holds map while checking type.
3755 Register length = load_scratch; // Holds length of object after testing type.
3756 Label is_data_object;
3758 // Check for heap-number
3759 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3760 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3761 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3762 b(eq, &is_data_object);
3764 // Check for strings.
3765 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3766 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3767 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
3769 Register instance_type = load_scratch;
3770 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3771 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3772 b(ne, value_is_white_and_not_data);
3773 // It's a non-indirect (non-cons and non-slice) string.
3774 // If it's external, the length is just ExternalString::kSize.
3775 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3776 // External strings are the only ones with the kExternalStringTag bit set.
3778 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
3779 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
3780 tst(instance_type, Operand(kExternalStringTag));
3781 mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3782 b(ne, &is_data_object);
3784 // Sequential string, either ASCII or UC16.
3785 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
3786 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3787 // getting the length multiplied by 2.
3788 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3789 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3790 ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3791 tst(instance_type, Operand(kStringEncodingMask));
3792 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3793 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3794 and_(length, length, Operand(~kObjectAlignmentMask));
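// Concretely (32-bit smis): the length field holds 2 * character_count. For a
// two-byte string that already equals the payload size in bytes, so the smi
// is kept as-is; for a one-byte string the LSR #1 halves it back to
// character_count == byte count. The add/and_ pair then rounds the total
// (header plus payload) up to the 8-byte object alignment.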
3796 bind(&is_data_object);
3797 // Value is a data object, and it is white. Mark it black. Since we know
3798 // that the object is white we can make it black by flipping one bit.
3799 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3800 orr(ip, ip, Operand(mask_scratch));
3801 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3803 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3804 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3805 add(ip, ip, Operand(length));
3806 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3812 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3813 Usat(output_reg, 8, Operand(input_reg));
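// USAT with a saturation width of 8 clamps the signed input into [0, 255]:
// for example -7 becomes 0, 300 becomes 255, and 128 passes through
// unchanged, which is exactly the uint8 clamping this helper advertises.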
3817 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3818 DwVfpRegister input_reg,
3819 LowDwVfpRegister double_scratch) {
3824 VFPCompareAndSetFlags(input_reg, 0.0);
3827 // Double value is less than zero, NaN or Inf, return 0.
3828 mov(result_reg, Operand::Zero());
3831 // Double value is >= 255, return 255.
3833 Vmov(double_scratch, 255.0, result_reg);
3834 VFPCompareAndSetFlags(input_reg, double_scratch);
3836 mov(result_reg, Operand(255));
3839 // In 0-255 range, round and truncate.
3843 // Set rounding mode to round to the nearest integer by clearing bits[23:22].
3844 bic(result_reg, ip, Operand(kVFPRoundingModeMask));
3846 vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3847 vmov(result_reg, double_scratch.low());
3854 void MacroAssembler::Throw(BailoutReason reason) {
3858 const char* msg = GetBailoutReason(reason);
3860 RecordComment("Throw message: ");
3865 mov(r0, Operand(Smi::FromInt(reason)));
3867 // Disable stub call restrictions to always allow calls to throw.
3869 // We don't actually want to generate a pile of code for this, so just
3870 // claim there is a stack frame, without generating one.
3871 FrameScope scope(this, StackFrame::NONE);
3872 CallRuntime(Runtime::kThrowMessage, 1);
3874 CallRuntime(Runtime::kThrowMessage, 1);
3876 // will not return here
3877 if (is_const_pool_blocked()) {
3878 // If the calling code cares about the exact number of
3879 // instructions generated, we insert padding here to keep the size
3880 // of the ThrowMessage macro constant.
3881 static const int kExpectedThrowMessageInstructions = 10;
3882 int throw_instructions = InstructionsGeneratedSince(&throw_start);
3883 ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
3884 while (throw_instructions++ < kExpectedThrowMessageInstructions) {
3891 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
3893 b(NegateCondition(cc), &L);
3895 // will not return here
3900 void MacroAssembler::LoadInstanceDescriptors(Register map,
3901 Register descriptors) {
3902 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3906 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3907 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3908 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3912 void MacroAssembler::EnumLength(Register dst, Register map) {
3913 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3914 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3915 and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
3919 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3920 Register empty_fixed_array_value = r6;
3921 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3925 // Check if the enum length field is properly initialized, indicating that
3926 // there is an enum cache.
3927 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3930 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3931 b(eq, call_runtime);
3936 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3938 // For all objects but the receiver, check that the cache is empty.
3940 cmp(r3, Operand(Smi::FromInt(0)));
3941 b(ne, call_runtime);
3945 // Check that there are no elements. Register r2 contains the current JS
3946 // object we've reached through the prototype chain.
3948 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3949 cmp(r2, empty_fixed_array_value);
3950 b(eq, &no_elements);
3952 // Second chance, the object may be using the empty slow element dictionary.
3953 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3954 b(ne, call_runtime);
3957 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3958 cmp(r2, null_value);
3963 void MacroAssembler::TestJSArrayForAllocationMemento(
3964 Register receiver_reg,
3965 Register scratch_reg,
3966 Label* no_memento_found) {
3967 ExternalReference new_space_start =
3968 ExternalReference::new_space_start(isolate());
3969 ExternalReference new_space_allocation_top =
3970 ExternalReference::new_space_allocation_top_address(isolate());
3971 add(scratch_reg, receiver_reg,
3972 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3973 cmp(scratch_reg, Operand(new_space_start));
3974 b(lt, no_memento_found);
3975 mov(ip, Operand(new_space_allocation_top));
3976 ldr(ip, MemOperand(ip));
3977 cmp(scratch_reg, ip);
3978 b(gt, no_memento_found);
3979 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3981 Operand(isolate()->factory()->allocation_memento_map()));
3985 Register GetRegisterThatIsNotOneOf(Register reg1,
3992 if (reg1.is_valid()) regs |= reg1.bit();
3993 if (reg2.is_valid()) regs |= reg2.bit();
3994 if (reg3.is_valid()) regs |= reg3.bit();
3995 if (reg4.is_valid()) regs |= reg4.bit();
3996 if (reg5.is_valid()) regs |= reg5.bit();
3997 if (reg6.is_valid()) regs |= reg6.bit();
3999 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
4000 Register candidate = Register::FromAllocationIndex(i);
4001 if (regs & candidate.bit()) continue;
4009 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4014 ASSERT(!scratch1.is(scratch0));
4015 Factory* factory = isolate()->factory();
4016 Register current = scratch0;
4019 // scratch contained elements pointer.
4020 mov(current, object);
4022 // Loop based on the map going up the prototype chain.
4024 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4025 ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4026 Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
4027 cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
4029 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4030 cmp(current, Operand(factory->null_value()));
4036 bool AreAliased(Register reg1,
4042 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4043 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
4046 if (reg1.is_valid()) regs |= reg1.bit();
4047 if (reg2.is_valid()) regs |= reg2.bit();
4048 if (reg3.is_valid()) regs |= reg3.bit();
4049 if (reg4.is_valid()) regs |= reg4.bit();
4050 if (reg5.is_valid()) regs |= reg5.bit();
4051 if (reg6.is_valid()) regs |= reg6.bit();
4052 int n_of_non_aliasing_regs = NumRegs(regs);
4054 return n_of_valid_regs != n_of_non_aliasing_regs;
4059 CodePatcher::CodePatcher(byte* address,
4061 FlushICache flush_cache)
4062 : address_(address),
4063 size_(instructions * Assembler::kInstrSize),
4064 masm_(NULL, address, size_ + Assembler::kGap),
4065 flush_cache_(flush_cache) {
4066 // Create a new macro assembler pointing to the address of the code to patch.
4067 // The size is adjusted with kGap in order for the assembler to generate size
4068 // bytes of instructions without failing with buffer size constraints.
4069 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4073 CodePatcher::~CodePatcher() {
4074 // Indicate that code has changed.
4075 if (flush_cache_ == FLUSH) {
4076 CPU::FlushICache(address_, size_);
4079 // Check that the code was patched as expected.
4080 ASSERT(masm_.pc_ == address_ + size_);
4081 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4085 void CodePatcher::Emit(Instr instr) {
4086 masm()->emit(instr);
4090 void CodePatcher::Emit(Address addr) {
4091 masm()->emit(reinterpret_cast<Instr>(addr));
4095 void CodePatcher::EmitCondition(Condition cond) {
4096 Instr instr = Assembler::instr_at(masm_.pc_);
4097 instr = (instr & ~kCondMask) | cond;
4102 } } // namespace v8::internal
4104 #endif // V8_TARGET_ARCH_ARM