1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
32 #if V8_TARGET_ARCH_ARM
34 #include "bootstrapper.h"
36 #include "cpu-profiler.h"
38 #include "isolate-inl.h"
44 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45 : Assembler(arg_isolate, buffer, size),
46 generating_stub_(false),
48 if (isolate() != NULL) {
49 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
55 void MacroAssembler::Jump(Register target, Condition cond) {
60 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
62 ASSERT(RelocInfo::IsCodeTarget(rmode));
63 mov(pc, Operand(target, rmode), LeaveCC, cond);
67 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
69 ASSERT(!RelocInfo::IsCodeTarget(rmode));
70 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
74 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
76 ASSERT(RelocInfo::IsCodeTarget(rmode));
77 // 'code' is always generated ARM code, never THUMB code
78 AllowDeferredHandleDereference embedding_raw_address;
79 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
83 int MacroAssembler::CallSize(Register target, Condition cond) {
88 void MacroAssembler::Call(Register target, Condition cond) {
89 // Block constant pool for the call instruction sequence.
90 BlockConstPoolScope block_const_pool(this);
94 ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
98 int MacroAssembler::CallSize(
99 Address target, RelocInfo::Mode rmode, Condition cond) {
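// A call is a load of the target address plus a blx: two instructions, with
// more added below if the load cannot be encoded as a single instruction.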
100 int size = 2 * kInstrSize;
101 Instr mov_instr = cond | MOV | LeaveCC;
102 intptr_t immediate = reinterpret_cast<intptr_t>(target);
103 if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
110 int MacroAssembler::CallSizeNotPredictableCodeSize(
111 Address target, RelocInfo::Mode rmode, Condition cond) {
112 int size = 2 * kInstrSize;
113 Instr mov_instr = cond | MOV | LeaveCC;
114 intptr_t immediate = reinterpret_cast<intptr_t>(target);
115 if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
122 void MacroAssembler::Call(Address target,
123 RelocInfo::Mode rmode,
125 TargetAddressStorageMode mode) {
126 // Block constant pool for the call instruction sequence.
127 BlockConstPoolScope block_const_pool(this);
131 bool old_predictable_code_size = predictable_code_size();
132 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
133 set_predictable_code_size(true);
136 // Call sequence on V7 or later may be:
137 // movw ip, #... @ call address low 16
138 // movt ip, #... @ call address high 16
141 // Or for pre-V7 or values that may be back-patched
142 // to avoid ICache flushes:
143 // ldr ip, [pc, #...] @ call address
147 // Statement positions are expected to be recorded when the target
148 // address is loaded. The mov method will automatically record
149 // positions when pc is the target. Since that is not the case here,
150 // we have to do it explicitly.
151 positions_recorder()->WriteRecordedPositions();
153 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
156 ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
157 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
158 set_predictable_code_size(old_predictable_code_size);
163 int MacroAssembler::CallSize(Handle<Code> code,
164 RelocInfo::Mode rmode,
165 TypeFeedbackId ast_id,
167 AllowDeferredHandleDereference using_raw_address;
168 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
172 void MacroAssembler::Call(Handle<Code> code,
173 RelocInfo::Mode rmode,
174 TypeFeedbackId ast_id,
176 TargetAddressStorageMode mode) {
179 ASSERT(RelocInfo::IsCodeTarget(rmode));
180 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
181 SetRecordedAstId(ast_id);
182 rmode = RelocInfo::CODE_TARGET_WITH_ID;
184 // 'code' is always generated ARM code, never THUMB code
185 AllowDeferredHandleDereference embedding_raw_address;
186 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
190 void MacroAssembler::Ret(Condition cond) {
195 void MacroAssembler::Drop(int count, Condition cond) {
197 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
202 void MacroAssembler::Ret(int drop, Condition cond) {
208 void MacroAssembler::Swap(Register reg1,
212 if (scratch.is(no_reg)) {
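// No scratch register available: swap the two registers in place with three exclusive-ORs.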
213 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
214 eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
215 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
217 mov(scratch, reg1, LeaveCC, cond);
218 mov(reg1, reg2, LeaveCC, cond);
219 mov(reg2, scratch, LeaveCC, cond);
224 void MacroAssembler::Call(Label* target) {
229 void MacroAssembler::Push(Handle<Object> handle) {
230 mov(ip, Operand(handle));
235 void MacroAssembler::Move(Register dst, Handle<Object> value) {
236 AllowDeferredHandleDereference smi_check;
237 if (value->IsSmi()) {
238 mov(dst, Operand(value));
240 ASSERT(value->IsHeapObject());
241 if (isolate()->heap()->InNewSpace(*value)) {
242 Handle<Cell> cell = isolate()->factory()->NewCell(value);
243 mov(dst, Operand(cell));
244 ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
246 mov(dst, Operand(value));
252 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
254 mov(dst, src, LeaveCC, cond);
259 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
266 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
268 if (!src2.is_reg() &&
269 !src2.must_output_reloc_info(this) &&
270 src2.immediate() == 0) {
271 mov(dst, Operand::Zero(), LeaveCC, cond);
272 } else if (!src2.is_single_instruction(this) &&
273 !src2.must_output_reloc_info(this) &&
274 CpuFeatures::IsSupported(ARMv7) &&
275 IsPowerOf2(src2.immediate() + 1)) {
277 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
279 and_(dst, src1, src2, LeaveCC, cond);
284 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
287 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
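// ubfx is unavailable (or the code size must stay predictable): emulate it by
// masking out the field and shifting it down to bit 0.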
288 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
289 and_(dst, src1, Operand(mask), LeaveCC, cond);
291 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
294 ubfx(dst, src1, lsb, width, cond);
299 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
302 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
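// Emulate sbfx: isolate the field, shift it up to the top of the word, then
// arithmetic-shift it back down so the result is sign-extended.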
303 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
304 and_(dst, src1, Operand(mask), LeaveCC, cond);
305 int shift_up = 32 - lsb - width;
306 int shift_down = lsb + shift_up;
308 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
310 if (shift_down != 0) {
311 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
314 sbfx(dst, src1, lsb, width, cond);
319 void MacroAssembler::Bfi(Register dst,
325 ASSERT(0 <= lsb && lsb < 32);
326 ASSERT(0 <= width && width < 32);
327 ASSERT(lsb + width < 32);
328 ASSERT(!scratch.is(dst));
329 if (width == 0) return;
330 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
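// Emulate bfi: clear the destination field, mask the source down to 'width'
// bits, shift it into position and OR it into the destination.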
331 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
332 bic(dst, dst, Operand(mask));
333 and_(scratch, src, Operand((1 << width) - 1));
334 mov(scratch, Operand(scratch, LSL, lsb));
335 orr(dst, dst, scratch);
337 bfi(dst, src, lsb, width, cond);
342 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
345 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
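// Emulate bfc by clearing the field with a BIC of the positioned mask.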
346 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
347 bic(dst, src, Operand(mask));
349 Move(dst, src, cond);
350 bfc(dst, lsb, width, cond);
355 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
357 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
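// usat is unavailable: clamp the value to the range [0, 2^satpos - 1] manually.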
358 ASSERT(!dst.is(pc) && !src.rm().is(pc));
359 ASSERT((satpos >= 0) && (satpos <= 31));
361 // These asserts are required to ensure compatibility with the ARMv7
363 ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
364 ASSERT(src.rs().is(no_reg));
367 int satval = (1 << satpos) - 1;
370 b(NegateCondition(cond), &done); // Skip saturate if !condition.
372 if (!(src.is_reg() && dst.is(src.rm()))) {
375 tst(dst, Operand(~satval));
377 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
378 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
381 usat(dst, satpos, src, cond);
386 void MacroAssembler::Load(Register dst,
387 const MemOperand& src,
389 ASSERT(!r.IsDouble());
390 if (r.IsInteger8()) {
392 } else if (r.IsUInteger8()) {
394 } else if (r.IsInteger16()) {
396 } else if (r.IsUInteger16()) {
404 void MacroAssembler::Store(Register src,
405 const MemOperand& dst,
407 ASSERT(!r.IsDouble());
408 if (r.IsInteger8() || r.IsUInteger8()) {
410 } else if (r.IsInteger16() || r.IsUInteger16()) {
418 void MacroAssembler::LoadRoot(Register destination,
419 Heap::RootListIndex index,
421 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
422 isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
423 !predictable_code_size()) {
424 // The CPU supports fast immediate values, and this root will never
425 // change. We will load it as a relocatable immediate value.
426 Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
427 mov(destination, Operand(root), LeaveCC, cond);
430 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
434 void MacroAssembler::StoreRoot(Register source,
435 Heap::RootListIndex index,
437 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
441 void MacroAssembler::InNewSpace(Register object,
445 ASSERT(cond == eq || cond == ne);
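// Mask the address down to its space base and compare it against the new-space start.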
446 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
447 cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
452 void MacroAssembler::RecordWriteField(
457 LinkRegisterStatus lr_status,
458 SaveFPRegsMode save_fp,
459 RememberedSetAction remembered_set_action,
460 SmiCheck smi_check) {
461 // First, check if a write barrier is even needed. The tests below
462 // catch stores of Smis.
465 // Skip barrier if writing a smi.
466 if (smi_check == INLINE_SMI_CHECK) {
467 JumpIfSmi(value, &done);
470 // Although the object register is tagged, the offset is relative to the start
471 // of the object, so the offset must be a multiple of kPointerSize.
472 ASSERT(IsAligned(offset, kPointerSize));
474 add(dst, object, Operand(offset - kHeapObjectTag));
475 if (emit_debug_code()) {
477 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
479 stop("Unaligned cell in write barrier");
488 remembered_set_action,
493 // Clobber clobbered input registers when running with the debug-code flag
494 // turned on to provoke errors.
495 if (emit_debug_code()) {
496 mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
497 mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
502 // Will clobber 4 registers: object, address, scratch, ip. The
503 // register 'object' contains a heap object pointer. The heap object
504 // tag is shifted away.
505 void MacroAssembler::RecordWrite(Register object,
508 LinkRegisterStatus lr_status,
509 SaveFPRegsMode fp_mode,
510 RememberedSetAction remembered_set_action,
511 SmiCheck smi_check) {
512 if (emit_debug_code()) {
513 ldr(ip, MemOperand(address));
515 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
518 // Count number of write barriers in generated code.
519 isolate()->counters()->write_barriers_static()->Increment();
520 // TODO(mstarzinger): Dynamic counter missing.
522 // First, check if a write barrier is even needed. The tests below
523 // catch stores of smis and stores into the young generation.
526 if (smi_check == INLINE_SMI_CHECK) {
527 JumpIfSmi(value, &done);
531 value, // Used as scratch.
532 MemoryChunk::kPointersToHereAreInterestingMask,
535 CheckPageFlag(object,
536 value, // Used as scratch.
537 MemoryChunk::kPointersFromHereAreInterestingMask,
541 // Record the actual write.
542 if (lr_status == kLRHasNotBeenSaved) {
545 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
547 if (lr_status == kLRHasNotBeenSaved) {
553 // Clobber clobbered registers when running with the debug-code flag
554 // turned on to provoke errors.
555 if (emit_debug_code()) {
556 mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
557 mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
562 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
565 SaveFPRegsMode fp_mode,
566 RememberedSetFinalAction and_then) {
568 if (emit_debug_code()) {
570 JumpIfNotInNewSpace(object, scratch, &ok);
571 stop("Remembered set pointer is in new space");
574 // Load store buffer top.
575 ExternalReference store_buffer =
576 ExternalReference::store_buffer_top(isolate());
577 mov(ip, Operand(store_buffer));
578 ldr(scratch, MemOperand(ip));
579 // Store pointer to buffer and increment buffer top.
580 str(address, MemOperand(scratch, kPointerSize, PostIndex));
581 // Write back new top of buffer.
582 str(scratch, MemOperand(ip));
583 // Call stub on end of buffer.
584 // Check for end of buffer.
585 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
586 if (and_then == kFallThroughAtEnd) {
589 ASSERT(and_then == kReturnAtEnd);
593 StoreBufferOverflowStub store_buffer_overflow =
594 StoreBufferOverflowStub(fp_mode);
595 CallStub(&store_buffer_overflow);
598 if (and_then == kReturnAtEnd) {
604 void MacroAssembler::PushFixedFrame(Register marker_reg) {
605 ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
606 stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
608 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
614 void MacroAssembler::PopFixedFrame(Register marker_reg) {
615 ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
616 ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
618 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
624 // Push and pop all registers that can hold pointers.
625 void MacroAssembler::PushSafepointRegisters() {
626 // Safepoints expect a block of contiguous register values starting with r0:
627 ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
628 // Safepoints expect a block of kNumSafepointRegisters values on the
629 // stack, so adjust the stack for unsaved registers.
630 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
631 ASSERT(num_unsaved >= 0);
632 sub(sp, sp, Operand(num_unsaved * kPointerSize));
633 stm(db_w, sp, kSafepointSavedRegisters);
637 void MacroAssembler::PopSafepointRegisters() {
638 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
639 ldm(ia_w, sp, kSafepointSavedRegisters);
640 add(sp, sp, Operand(num_unsaved * kPointerSize));
644 void MacroAssembler::PushSafepointRegistersAndDoubles() {
645 // Number of d-regs not known at snapshot time.
646 ASSERT(!Serializer::enabled());
647 PushSafepointRegisters();
648 // Only save allocatable registers.
649 ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
650 ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
651 if (CpuFeatures::IsSupported(VFP32DREGS)) {
652 vstm(db_w, sp, d16, d31);
654 vstm(db_w, sp, d0, d13);
658 void MacroAssembler::PopSafepointRegistersAndDoubles() {
659 // Number of d-regs not known at snapshot time.
660 ASSERT(!Serializer::enabled());
661 // Only save allocatable registers.
662 ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
663 ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
664 vldm(ia_w, sp, d0, d13);
665 if (CpuFeatures::IsSupported(VFP32DREGS)) {
666 vldm(ia_w, sp, d16, d31);
668 PopSafepointRegisters();
671 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
673 str(src, SafepointRegistersAndDoublesSlot(dst));
677 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
678 str(src, SafepointRegisterSlot(dst));
682 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
683 ldr(dst, SafepointRegisterSlot(src));
687 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
688 // The registers are pushed starting with the highest encoding,
689 // which means that lowest encodings are closest to the stack pointer.
690 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
695 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
696 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
700 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
701 // Number of d-regs not known at snapshot time.
702 ASSERT(!Serializer::enabled());
703 // General purpose registers are pushed last on the stack.
704 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
705 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
706 return MemOperand(sp, doubles_size + register_offset);
710 void MacroAssembler::Ldrd(Register dst1, Register dst2,
711 const MemOperand& src, Condition cond) {
712 ASSERT(src.rm().is(no_reg));
713 ASSERT(!dst1.is(lr)); // r14.
715 // V8 does not use this addressing mode, so the fallback code
716 // below doesn't support it yet.
717 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
719 // Generate two ldr instructions if ldrd is not available.
720 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
721 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
722 CpuFeatureScope scope(this, ARMv7);
723 ldrd(dst1, dst2, src, cond);
725 if ((src.am() == Offset) || (src.am() == NegOffset)) {
726 MemOperand src2(src);
727 src2.set_offset(src2.offset() + 4);
728 if (dst1.is(src.rn())) {
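// dst1 is also the base register: load dst2 first so the base is still valid
// when dst1 is overwritten.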
729 ldr(dst2, src2, cond);
730 ldr(dst1, src, cond);
732 ldr(dst1, src, cond);
733 ldr(dst2, src2, cond);
735 } else { // PostIndex or NegPostIndex.
736 ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
737 if (dst1.is(src.rn())) {
738 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
739 ldr(dst1, src, cond);
741 MemOperand src2(src);
742 src2.set_offset(src2.offset() - 4);
743 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
744 ldr(dst2, src2, cond);
751 void MacroAssembler::Strd(Register src1, Register src2,
752 const MemOperand& dst, Condition cond) {
753 ASSERT(dst.rm().is(no_reg));
754 ASSERT(!src1.is(lr)); // r14.
756 // V8 does not use this addressing mode, so the fallback code
757 // below doesn't support it yet.
758 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
760 // Generate two str instructions if strd is not available.
761 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
762 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
763 CpuFeatureScope scope(this, ARMv7);
764 strd(src1, src2, dst, cond);
766 MemOperand dst2(dst);
767 if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
768 dst2.set_offset(dst2.offset() + 4);
769 str(src1, dst, cond);
770 str(src2, dst2, cond);
771 } else { // PostIndex or NegPostIndex.
772 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
773 dst2.set_offset(dst2.offset() - 4);
774 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
775 str(src2, dst2, cond);
781 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
782 // If needed, restore wanted bits of FPSCR.
785 tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
787 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
793 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
794 const DwVfpRegister src,
795 const Condition cond) {
796 vsub(dst, src, kDoubleRegZero, cond);
800 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
801 const DwVfpRegister src2,
802 const Condition cond) {
803 // Compare and move FPSCR flags to the normal condition flags.
804 VFPCompareAndLoadFlags(src1, src2, pc, cond);
807 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
809 const Condition cond) {
810 // Compare and move FPSCR flags to the normal condition flags.
811 VFPCompareAndLoadFlags(src1, src2, pc, cond);
815 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
816 const DwVfpRegister src2,
817 const Register fpscr_flags,
818 const Condition cond) {
819 // Compare and load FPSCR.
820 vcmp(src1, src2, cond);
821 vmrs(fpscr_flags, cond);
824 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
826 const Register fpscr_flags,
827 const Condition cond) {
828 // Compare and load FPSCR.
829 vcmp(src1, src2, cond);
830 vmrs(fpscr_flags, cond);
833 void MacroAssembler::Vmov(const DwVfpRegister dst,
835 const Register scratch) {
836 static const DoubleRepresentation minus_zero(-0.0);
837 static const DoubleRepresentation zero(0.0);
838 DoubleRepresentation value_rep(imm);
839 // Handle special values first.
840 if (value_rep == zero) {
841 vmov(dst, kDoubleRegZero);
842 } else if (value_rep == minus_zero) {
843 vneg(dst, kDoubleRegZero);
845 vmov(dst, imm, scratch);
850 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
851 if (src.code() < 16) {
852 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
853 vmov(dst, loc.high());
855 vmov(dst, VmovIndexHi, src);
860 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
861 if (dst.code() < 16) {
862 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
863 vmov(loc.high(), src);
865 vmov(dst, VmovIndexHi, src);
870 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
871 if (src.code() < 16) {
872 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
873 vmov(dst, loc.low());
875 vmov(dst, VmovIndexLo, src);
880 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
881 if (dst.code() < 16) {
882 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
883 vmov(loc.low(), src);
885 vmov(dst, VmovIndexLo, src);
890 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
891 if (frame_mode == BUILD_STUB_FRAME) {
893 Push(Smi::FromInt(StackFrame::STUB));
894 // Adjust FP to point to saved FP.
895 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
897 PredictableCodeSizeScope predictable_code_size_scope(
898 this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
899 // The following three instructions must remain together and unmodified
900 // for code aging to work properly.
901 if (isolate()->IsCodePreAgingActive()) {
903 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
904 add(r0, pc, Operand(-8));
905 ldr(pc, MemOperand(pc, -4));
906 emit_code_stub_address(stub);
910 // Adjust FP to point to saved FP.
911 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
917 void MacroAssembler::LoadConstantPoolPointerRegister() {
918 if (FLAG_enable_ool_constant_pool) {
919 int constant_pool_offset =
920 Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - 8;
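// When pc is read as an operand it yields the address of the current
// instruction plus 8, hence the -8 correction.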
921 ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
922 ldr(pp, MemOperand(pc, constant_pool_offset));
927 void MacroAssembler::EnterFrame(StackFrame::Type type) {
930 mov(ip, Operand(Smi::FromInt(type)));
932 mov(ip, Operand(CodeObject()));
934 // Adjust FP to point to saved FP.
936 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
940 int MacroAssembler::LeaveFrame(StackFrame::Type type) {
945 // Drop the execution stack down to the frame pointer and restore
946 // the caller frame pointer, return address and constant pool pointer
947 // (if FLAG_enable_ool_constant_pool).
949 if (FLAG_enable_ool_constant_pool) {
950 add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
951 frame_ends = pc_offset();
952 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
955 frame_ends = pc_offset();
956 ldm(ia_w, sp, fp.bit() | lr.bit());
962 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
963 // Set up the frame structure on the stack.
964 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
965 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
966 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
968 mov(fp, Operand(sp)); // Set up new frame pointer.
969 // Reserve room for saved entry sp and code object.
970 sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
971 if (emit_debug_code()) {
972 mov(ip, Operand::Zero());
973 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
975 if (FLAG_enable_ool_constant_pool) {
976 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
978 mov(ip, Operand(CodeObject()));
979 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
981 // Save the frame pointer and the context in top.
982 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
983 str(fp, MemOperand(ip));
984 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
985 str(cp, MemOperand(ip));
987 // Optionally save all double registers.
990 // Note that d0 will be accessible at
991 // fp - ExitFrameConstants::kFrameSize -
992 // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
993 // since the sp slot, code slot and constant pool slot (if
994 // FLAG_enable_ool_constant_pool) were pushed after the fp.
997 // Reserve room for the return address and stack space, and align the frame
998 // in preparation for calling the runtime function.
999 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1000 sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
1001 if (frame_alignment > 0) {
1002 ASSERT(IsPowerOf2(frame_alignment));
1003 and_(sp, sp, Operand(-frame_alignment));
1006 // Set the exit frame sp value to point just before the return address
1008 add(ip, sp, Operand(kPointerSize));
1009 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1013 void MacroAssembler::InitializeNewString(Register string,
1015 Heap::RootListIndex map_index,
1017 Register scratch2) {
1018 SmiTag(scratch1, length);
1019 LoadRoot(scratch2, map_index);
1020 str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1021 mov(scratch1, Operand(String::kEmptyHashField));
1022 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1023 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
1027 int MacroAssembler::ActivationFrameAlignment() {
1028 #if V8_HOST_ARCH_ARM
1029 // Running on the real platform. Use the alignment as mandated by the local
1031 // Note: This will break if we ever start generating snapshots on one ARM
1032 // platform for another ARM platform with a different alignment.
1033 return OS::ActivationFrameAlignment();
1034 #else // V8_HOST_ARCH_ARM
1035 // If we are using the simulator then we should always align to the expected
1036 // alignment. As the simulator is used to generate snapshots we do not know
1037 // if the target platform will need alignment, so this is controlled from a
1039 return FLAG_sim_stack_alignment;
1040 #endif // V8_HOST_ARCH_ARM
1044 void MacroAssembler::LeaveExitFrame(bool save_doubles,
1045 Register argument_count,
1046 bool restore_context) {
1047 // Optionally restore all double registers.
1049 // Calculate the stack location of the saved doubles and restore them.
1050 const int offset = ExitFrameConstants::kFrameSize;
1052 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1053 RestoreFPRegs(r3, ip);
1057 mov(r3, Operand::Zero());
1058 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1059 str(r3, MemOperand(ip));
1062 // Restore current context from top and clear it in debug mode.
1063 if (restore_context) {
1064 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1065 ldr(cp, MemOperand(ip));
1068 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1069 str(r3, MemOperand(ip));
1072 // Tear down the exit frame, pop the arguments, and return.
1073 if (FLAG_enable_ool_constant_pool) {
1074 ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1076 mov(sp, Operand(fp));
1077 ldm(ia_w, sp, fp.bit() | lr.bit());
1078 if (argument_count.is_valid()) {
1079 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1084 void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1085 if (use_eabi_hardfloat()) {
1093 // On ARM this is just a synonym to make the purpose clear.
1094 void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1095 MovFromFloatResult(dst);
1099 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1100 const ParameterCount& actual,
1101 Handle<Code> code_constant,
1104 bool* definitely_mismatches,
1106 const CallWrapper& call_wrapper) {
1107 bool definitely_matches = false;
1108 *definitely_mismatches = false;
1109 Label regular_invoke;
1111 // Check whether the expected and actual argument counts match. If not,
1112 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
1113 // r0: actual arguments count
1114 // r1: function (passed through to callee)
1115 // r2: expected arguments count
1117 // The code below is made a lot easier because the calling code already sets
1118 // up actual and expected registers according to the contract if values are
1119 // passed in registers.
1120 ASSERT(actual.is_immediate() || actual.reg().is(r0));
1121 ASSERT(expected.is_immediate() || expected.reg().is(r2));
1122 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
1124 if (expected.is_immediate()) {
1125 ASSERT(actual.is_immediate());
1126 if (expected.immediate() == actual.immediate()) {
1127 definitely_matches = true;
1129 mov(r0, Operand(actual.immediate()));
1130 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1131 if (expected.immediate() == sentinel) {
1132 // Don't worry about adapting arguments for builtins that
1133 // don't want that done. Skip the adaptation code by making it look
1134 // like we have a match between expected and actual number of
1136 definitely_matches = true;
1138 *definitely_mismatches = true;
1139 mov(r2, Operand(expected.immediate()));
1143 if (actual.is_immediate()) {
1144 cmp(expected.reg(), Operand(actual.immediate()));
1145 b(eq, &regular_invoke);
1146 mov(r0, Operand(actual.immediate()));
1148 cmp(expected.reg(), Operand(actual.reg()));
1149 b(eq, &regular_invoke);
1153 if (!definitely_matches) {
1154 if (!code_constant.is_null()) {
1155 mov(r3, Operand(code_constant));
1156 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1159 Handle<Code> adaptor =
1160 isolate()->builtins()->ArgumentsAdaptorTrampoline();
1161 if (flag == CALL_FUNCTION) {
1162 call_wrapper.BeforeCall(CallSize(adaptor));
1164 call_wrapper.AfterCall();
1165 if (!*definitely_mismatches) {
1169 Jump(adaptor, RelocInfo::CODE_TARGET);
1171 bind(&regular_invoke);
1176 void MacroAssembler::InvokeCode(Register code,
1177 const ParameterCount& expected,
1178 const ParameterCount& actual,
1180 const CallWrapper& call_wrapper) {
1181 // You can't call a function without a valid frame.
1182 ASSERT(flag == JUMP_FUNCTION || has_frame());
1185 bool definitely_mismatches = false;
1186 InvokePrologue(expected, actual, Handle<Code>::null(), code,
1187 &done, &definitely_mismatches, flag,
1189 if (!definitely_mismatches) {
1190 if (flag == CALL_FUNCTION) {
1191 call_wrapper.BeforeCall(CallSize(code));
1193 call_wrapper.AfterCall();
1195 ASSERT(flag == JUMP_FUNCTION);
1199 // Continue here if InvokePrologue does handle the invocation due to
1200 // mismatched parameter counts.
1206 void MacroAssembler::InvokeFunction(Register fun,
1207 const ParameterCount& actual,
1209 const CallWrapper& call_wrapper) {
1210 // You can't call a function without a valid frame.
1211 ASSERT(flag == JUMP_FUNCTION || has_frame());
1213 // Contract with called JS functions requires that function is passed in r1.
1216 Register expected_reg = r2;
1217 Register code_reg = r3;
1219 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1220 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1222 FieldMemOperand(code_reg,
1223 SharedFunctionInfo::kFormalParameterCountOffset));
1224 SmiUntag(expected_reg);
1226 FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1228 ParameterCount expected(expected_reg);
1229 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
1233 void MacroAssembler::InvokeFunction(Register function,
1234 const ParameterCount& expected,
1235 const ParameterCount& actual,
1237 const CallWrapper& call_wrapper) {
1238 // You can't call a function without a valid frame.
1239 ASSERT(flag == JUMP_FUNCTION || has_frame());
1241 // Contract with called JS functions requires that function is passed in r1.
1242 ASSERT(function.is(r1));
1244 // Get the function and setup the context.
1245 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1247 // We call indirectly through the code field in the function to
1248 // allow recompilation to take effect without changing any of the
1250 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1251 InvokeCode(r3, expected, actual, flag, call_wrapper);
1255 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1256 const ParameterCount& expected,
1257 const ParameterCount& actual,
1259 const CallWrapper& call_wrapper) {
1261 InvokeFunction(r1, expected, actual, flag, call_wrapper);
1265 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1269 ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1270 IsInstanceJSObjectType(map, scratch, fail);
1274 void MacroAssembler::IsInstanceJSObjectType(Register map,
1277 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1278 cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1280 cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1285 void MacroAssembler::IsObjectJSStringType(Register object,
1288 ASSERT(kNotStringTag != 0);
1290 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1291 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1292 tst(scratch, Operand(kIsNotStringMask));
1297 void MacroAssembler::IsObjectNameType(Register object,
1300 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1301 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1302 cmp(scratch, Operand(LAST_NAME_TYPE));
1307 #ifdef ENABLE_DEBUGGER_SUPPORT
1308 void MacroAssembler::DebugBreak() {
1309 mov(r0, Operand::Zero());
1310 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1312 ASSERT(AllowThisStubCall(&ces));
1313 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
1318 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1319 int handler_index) {
1320 // Adjust this code if not the case.
1321 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1322 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1323 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1324 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1325 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1326 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1328 // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
1329 // We will build up the handler from the bottom by pushing on the stack.
1330 // Set up the code object (r5) and the state (r6) for pushing.
1332 StackHandler::IndexField::encode(handler_index) |
1333 StackHandler::KindField::encode(kind);
1334 mov(r5, Operand(CodeObject()));
1335 mov(r6, Operand(state));
1337 // Push the frame pointer, context, state, and code object.
1338 if (kind == StackHandler::JS_ENTRY) {
1339 mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
1340 mov(ip, Operand::Zero()); // NULL frame pointer.
1341 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
1343 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1346 // Link the current handler as the next handler.
1347 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1348 ldr(r5, MemOperand(r6));
1350 // Set this new handler as the current one.
1351 str(sp, MemOperand(r6));
1355 void MacroAssembler::PopTryHandler() {
1356 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1358 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1359 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1360 str(r1, MemOperand(ip));
1364 void MacroAssembler::JumpToHandlerEntry() {
1365 // Compute the handler entry address and jump to it. The handler table is
1366 // a fixed array of (smi-tagged) code offsets.
1367 // r0 = exception, r1 = code object, r2 = state.
1368 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
1369 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1370 mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
1371 ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
1372 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1373 add(pc, r1, Operand::SmiUntag(r2)); // Jump
1377 void MacroAssembler::Throw(Register value) {
1378 // Adjust this code if not the case.
1379 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1380 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1381 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1382 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1383 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1384 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1386 // The exception is expected in r0.
1387 if (!value.is(r0)) {
1390 // Drop the stack pointer to the top of the top handler.
1391 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1392 ldr(sp, MemOperand(r3));
1393 // Restore the next handler.
1395 str(r2, MemOperand(r3));
1397 // Get the code object (r1) and state (r2). Restore the context and frame
1399 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1401 // If the handler is a JS frame, restore the context to the frame.
1402 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1405 str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1407 JumpToHandlerEntry();
1411 void MacroAssembler::ThrowUncatchable(Register value) {
1412 // Adjust this code if not the case.
1413 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1414 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1415 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1416 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1417 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1418 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1420 // The exception is expected in r0.
1421 if (!value.is(r0)) {
1424 // Drop the stack pointer to the top of the top stack handler.
1425 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1426 ldr(sp, MemOperand(r3));
1428 // Unwind the handlers until the ENTRY handler is found.
1429 Label fetch_next, check_kind;
1432 ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1435 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1436 ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1437 tst(r2, Operand(StackHandler::KindField::kMask));
1440 // Set the top handler address to next handler past the top ENTRY handler.
1442 str(r2, MemOperand(r3));
1443 // Get the code object (r1) and state (r2). Clear the context and frame
1444 // pointer (0 was saved in the handler).
1445 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1447 JumpToHandlerEntry();
1451 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1454 Label same_contexts;
1456 ASSERT(!holder_reg.is(scratch));
1457 ASSERT(!holder_reg.is(ip));
1458 ASSERT(!scratch.is(ip));
1460 // Load current lexical context from the stack frame.
1461 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1462 // In debug mode, make sure the lexical context is set.
1464 cmp(scratch, Operand::Zero());
1465 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1468 // Load the native context of the current context.
1470 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1471 ldr(scratch, FieldMemOperand(scratch, offset));
1472 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1474 // Check the context is a native context.
1475 if (emit_debug_code()) {
1476 // Cannot use ip as a temporary in this verification code, because
1477 // ip is clobbered as part of cmp with an object Operand.
1478 push(holder_reg); // Temporarily save holder on the stack.
1479 // Read the first word and compare to the native_context_map.
1480 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1481 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1482 cmp(holder_reg, ip);
1483 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1484 pop(holder_reg); // Restore holder.
1487 // Check if both contexts are the same.
1488 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1489 cmp(scratch, Operand(ip));
1490 b(eq, &same_contexts);
1492 // Check the context is a native context.
1493 if (emit_debug_code()) {
1494 // Cannot use ip as a temporary in this verification code, because
1495 // ip is clobbered as part of cmp with an object Operand.
1496 push(holder_reg); // Temporarily save holder on the stack.
1497 mov(holder_reg, ip); // Move ip to its holding place.
1498 LoadRoot(ip, Heap::kNullValueRootIndex);
1499 cmp(holder_reg, ip);
1500 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1502 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1503 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1504 cmp(holder_reg, ip);
1505 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1506 // Restoring ip is not needed; ip is reloaded below.
1507 pop(holder_reg); // Restore holder.
1508 // Restore ip to holder's context.
1509 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1512 // Check that the security token in the calling global object is
1513 // compatible with the security token in the receiving global
1515 int token_offset = Context::kHeaderSize +
1516 Context::SECURITY_TOKEN_INDEX * kPointerSize;
1518 ldr(scratch, FieldMemOperand(scratch, token_offset));
1519 ldr(ip, FieldMemOperand(ip, token_offset));
1520 cmp(scratch, Operand(ip));
1523 bind(&same_contexts);
1527 // Compute the hash code from the untagged key. This must be kept in sync with
1528 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
1529 // code-stubs-hydrogen.cc
1530 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1531 // First of all we assign the hash seed to scratch.
1532 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1535 // Xor original key with a seed.
1536 eor(t0, t0, Operand(scratch));
1538 // Compute the hash code from the untagged key. This must be kept in sync
1539 // with ComputeIntegerHash in utils.h.
1541 // hash = ~hash + (hash << 15);
1542 mvn(scratch, Operand(t0));
1543 add(t0, scratch, Operand(t0, LSL, 15));
1544 // hash = hash ^ (hash >> 12);
1545 eor(t0, t0, Operand(t0, LSR, 12));
1546 // hash = hash + (hash << 2);
1547 add(t0, t0, Operand(t0, LSL, 2));
1548 // hash = hash ^ (hash >> 4);
1549 eor(t0, t0, Operand(t0, LSR, 4));
1550 // hash = hash * 2057;
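// 2057 = 2048 + 8 + 1, so the multiply can be done with two shifts and two adds.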
1551 mov(scratch, Operand(t0, LSL, 11));
1552 add(t0, t0, Operand(t0, LSL, 3));
1553 add(t0, t0, scratch);
1554 // hash = hash ^ (hash >> 16);
1555 eor(t0, t0, Operand(t0, LSR, 16));
1559 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1568 // elements - holds the slow-case elements of the receiver on entry.
1569 // Unchanged unless 'result' is the same register.
1571 // key - holds the smi key on entry.
1572 // Unchanged unless 'result' is the same register.
1574 // result - holds the result on exit if the load succeeded.
1575 // Allowed to be the same as 'key' or 'result'.
1576 // Unchanged on bailout so 'key' or 'result' can be used
1577 // in further computation.
1579 // Scratch registers:
1581 // t0 - holds the untagged key on entry and holds the hash once computed.
1583 // t1 - used to hold the capacity mask of the dictionary
1585 // t2 - used for the index into the dictionary.
1588 GetNumberHash(t0, t1);
1590 // Compute the capacity mask.
1591 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1593 sub(t1, t1, Operand(1));
1595 // Generate an unrolled loop that performs a few probes before giving up.
1596 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1597 // Use t2 for index calculations and keep the hash intact in t0.
1599 // Compute the masked index: (hash + i + i * i) & mask.
1601 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1603 and_(t2, t2, Operand(t1));
1605 // Scale the index by multiplying by the element size.
1606 ASSERT(SeededNumberDictionary::kEntrySize == 3);
1607 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
1609 // Check if the key is identical to the name.
1610 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1611 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1612 cmp(key, Operand(ip));
1613 if (i != kNumberDictionaryProbes - 1) {
1621 // Check that the value is a normal property.
1622 // t2: elements + (index * kPointerSize)
1623 const int kDetailsOffset =
1624 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1625 ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1626 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1629 // Get the value at the masked, scaled index and return.
1630 const int kValueOffset =
1631 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1632 ldr(result, FieldMemOperand(t2, kValueOffset));
1636 void MacroAssembler::Allocate(int object_size,
1641 AllocationFlags flags) {
1642 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
1643 if (!FLAG_inline_new) {
1644 if (emit_debug_code()) {
1645 // Trash the registers to simulate an allocation failure.
1646 mov(result, Operand(0x7091));
1647 mov(scratch1, Operand(0x7191));
1648 mov(scratch2, Operand(0x7291));
1654 ASSERT(!result.is(scratch1));
1655 ASSERT(!result.is(scratch2));
1656 ASSERT(!scratch1.is(scratch2));
1657 ASSERT(!scratch1.is(ip));
1658 ASSERT(!scratch2.is(ip));
1660 // Make object size into bytes.
1661 if ((flags & SIZE_IN_WORDS) != 0) {
1662 object_size *= kPointerSize;
1664 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1666 // Check relative positions of allocation top and limit addresses.
1667 // The values must be adjacent in memory to allow the use of LDM.
1668 // Also, assert that the registers are numbered such that the values
1669 // are loaded in the correct order.
1670 ExternalReference allocation_top =
1671 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1672 ExternalReference allocation_limit =
1673 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1676 reinterpret_cast<intptr_t>(allocation_top.address());
1678 reinterpret_cast<intptr_t>(allocation_limit.address());
1679 ASSERT((limit - top) == kPointerSize);
1680 ASSERT(result.code() < ip.code());
1682 // Set up allocation top address register.
1683 Register topaddr = scratch1;
1684 mov(topaddr, Operand(allocation_top));
1686 // This code stores a temporary value in ip. This is OK, as the code below
1687 // does not need ip for implicit literal generation.
1688 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1689 // Load allocation top into result and allocation limit into ip.
1690 ldm(ia, topaddr, result.bit() | ip.bit());
1692 if (emit_debug_code()) {
1693 // Assert that result actually contains top on entry. ip is used
1694 // immediately below so this use of ip does not cause difference with
1695 // respect to register content between debug and release mode.
1696 ldr(ip, MemOperand(topaddr));
1698 Check(eq, kUnexpectedAllocationTop);
1700 // Load allocation limit into ip. Result already contains allocation top.
1701 ldr(ip, MemOperand(topaddr, limit - top));
1704 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1705 // Align the next allocation. Storing the filler map without checking top is
1706 // safe in new-space because the limit of the heap is aligned there.
1707 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1708 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1709 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1712 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1713 cmp(result, Operand(ip));
1716 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1717 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1721 // Calculate new top and bail out if new space is exhausted. Use result
1722 // to calculate the new top. We must preserve the ip register at this
1723 // point, so we cannot just use add().
1724 ASSERT(object_size > 0);
1725 Register source = result;
1726 Condition cond = al;
1728 while (object_size != 0) {
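// Peel off the size in chunks of at most eight contiguous bits so each add
// uses an immediate that fits a single-instruction encoding.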
1729 if (((object_size >> shift) & 0x03) == 0) {
1732 int bits = object_size & (0xff << shift);
1733 object_size -= bits;
1735 Operand bits_operand(bits);
1736 ASSERT(bits_operand.is_single_instruction(this));
1737 add(scratch2, source, bits_operand, SetCC, cond);
1743 cmp(scratch2, Operand(ip));
1745 str(scratch2, MemOperand(topaddr));
1747 // Tag object if requested.
1748 if ((flags & TAG_OBJECT) != 0) {
1749 add(result, result, Operand(kHeapObjectTag));
1754 void MacroAssembler::Allocate(Register object_size,
1759 AllocationFlags flags) {
1760 if (!FLAG_inline_new) {
1761 if (emit_debug_code()) {
1762 // Trash the registers to simulate an allocation failure.
1763 mov(result, Operand(0x7091));
1764 mov(scratch1, Operand(0x7191));
1765 mov(scratch2, Operand(0x7291));
1771 // Assert that the register arguments are different and that none of
1772 // them are ip. ip is used explicitly in the code generated below.
1773 ASSERT(!result.is(scratch1));
1774 ASSERT(!result.is(scratch2));
1775 ASSERT(!scratch1.is(scratch2));
1776 ASSERT(!object_size.is(ip));
1777 ASSERT(!result.is(ip));
1778 ASSERT(!scratch1.is(ip));
1779 ASSERT(!scratch2.is(ip));
1781 // Check relative positions of allocation top and limit addresses.
1782 // The values must be adjacent in memory to allow the use of LDM.
1783 // Also, assert that the registers are numbered such that the values
1784 // are loaded in the correct order.
1785 ExternalReference allocation_top =
1786 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1787 ExternalReference allocation_limit =
1788 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1790 reinterpret_cast<intptr_t>(allocation_top.address());
1792 reinterpret_cast<intptr_t>(allocation_limit.address());
1793 ASSERT((limit - top) == kPointerSize);
1794 ASSERT(result.code() < ip.code());
1796 // Set up allocation top address.
1797 Register topaddr = scratch1;
1798 mov(topaddr, Operand(allocation_top));
1800 // This code stores a temporary value in ip. This is OK, as the code below
1801 // does not need ip for implicit literal generation.
1802 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1803 // Load allocation top into result and allocation limit into ip.
1804 ldm(ia, topaddr, result.bit() | ip.bit());
1806 if (emit_debug_code()) {
1807 // Assert that result actually contains top on entry. ip is used
1808 // immediately below so this use of ip does not cause difference with
1809 // respect to register content between debug and release mode.
1810 ldr(ip, MemOperand(topaddr));
1812 Check(eq, kUnexpectedAllocationTop);
1814 // Load allocation limit into ip. Result already contains allocation top.
1815 ldr(ip, MemOperand(topaddr, limit - top));
1818 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1819 // Align the next allocation. Storing the filler map without checking top is
1820 // safe in new-space because the limit of the heap is aligned there.
1821 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1822 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1823 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1826 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1827 cmp(result, Operand(ip));
1830 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1831 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1835 // Calculate new top and bail out if new space is exhausted. Use result
1836 // to calculate the new top. Object size may be in words so a shift is
1837 // required to get the number of bytes.
1838 if ((flags & SIZE_IN_WORDS) != 0) {
1839 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1841 add(scratch2, result, Operand(object_size), SetCC);
1844 cmp(scratch2, Operand(ip));
1847 // Update allocation top. result temporarily holds the new top.
1848 if (emit_debug_code()) {
1849 tst(scratch2, Operand(kObjectAlignmentMask));
1850 Check(eq, kUnalignedAllocationInNewSpace);
1852 str(scratch2, MemOperand(topaddr));
1854 // Tag object if requested.
1855 if ((flags & TAG_OBJECT) != 0) {
1856 add(result, result, Operand(kHeapObjectTag));
1861 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1863 ExternalReference new_space_allocation_top =
1864 ExternalReference::new_space_allocation_top_address(isolate());
1866 // Make sure the object has no tag before resetting top.
1867 and_(object, object, Operand(~kHeapObjectTagMask));
1869 // Check that the object being un-allocated is below the current top.
1870 mov(scratch, Operand(new_space_allocation_top));
1871 ldr(scratch, MemOperand(scratch));
1872 cmp(object, scratch);
1873 Check(lt, kUndoAllocationOfNonAllocatedMemory);
1875 // Write the address of the object to un-allocate as the current top.
1876 mov(scratch, Operand(new_space_allocation_top));
1877 str(object, MemOperand(scratch));
1881 void MacroAssembler::AllocateTwoByteString(Register result,
1886 Label* gc_required) {
1887 // Calculate the number of bytes needed for the characters in the string while
1888 // observing object alignment.
1889 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1890 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1891 add(scratch1, scratch1,
1892 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1893 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1895 // Allocate two-byte string in new space.
1903 // Set the map, length and hash field.
1904 InitializeNewString(result,
1906 Heap::kStringMapRootIndex,
1912 void MacroAssembler::AllocateAsciiString(Register result,
1917 Label* gc_required) {
1918 // Calculate the number of bytes needed for the characters in the string while
1919 // observing object alignment.
1920 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1921 ASSERT(kCharSize == 1);
1922 add(scratch1, length,
1923 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1924 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1926 // Allocate ASCII string in new space.
1934 // Set the map, length and hash field.
1935 InitializeNewString(result,
1937 Heap::kAsciiStringMapRootIndex,
1943 void MacroAssembler::AllocateTwoByteConsString(Register result,
1947 Label* gc_required) {
1948 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1951 InitializeNewString(result,
1953 Heap::kConsStringMapRootIndex,
1959 void MacroAssembler::AllocateAsciiConsString(Register result,
1963 Label* gc_required) {
1964 Label allocate_new_space, install_map;
1965 AllocationFlags flags = TAG_OBJECT;
1967 ExternalReference high_promotion_mode = ExternalReference::
1968 new_space_high_promotion_mode_active_address(isolate());
1969 mov(scratch1, Operand(high_promotion_mode));
1970 ldr(scratch1, MemOperand(scratch1, 0));
1971 cmp(scratch1, Operand::Zero());
1972 b(eq, &allocate_new_space);
1974 Allocate(ConsString::kSize,
1979 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
1983 bind(&allocate_new_space);
1984 Allocate(ConsString::kSize,
1993 InitializeNewString(result,
1995 Heap::kConsAsciiStringMapRootIndex,
2001 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2005 Label* gc_required) {
2006 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2009 InitializeNewString(result,
2011 Heap::kSlicedStringMapRootIndex,
2017 void MacroAssembler::AllocateAsciiSlicedString(Register result,
2021 Label* gc_required) {
2022 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2025 InitializeNewString(result,
2027 Heap::kSlicedAsciiStringMapRootIndex,
2033 void MacroAssembler::CompareObjectType(Register object,
2036 InstanceType type) {
2037 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2039 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2040 CompareInstanceType(map, temp, type);
2044 void MacroAssembler::CheckObjectTypeRange(Register object,
2046 InstanceType min_type,
2047 InstanceType max_type,
2048 Label* false_label) {
2049 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2050 STATIC_ASSERT(LAST_TYPE < 256);
2051 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2052 ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
2053 sub(ip, ip, Operand(min_type));
2054 cmp(ip, Operand(max_type - min_type));
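// This is the usual unsigned range-check trick: after subtracting min_type,
// any instance type below min_type wraps around to a large unsigned value,
// so a single unsigned comparison against max_type - min_type rejects
// everything outside [min_type, max_type] before branching to false_label.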
2059 void MacroAssembler::CompareInstanceType(Register map,
2061 InstanceType type) {
2062 // Registers map and type_reg can be ip. These two lines assert
2063 // that ip can be used with the two instructions (the constants
2064 // will never need ip).
2065 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2066 STATIC_ASSERT(LAST_TYPE < 256);
2067 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2068 cmp(type_reg, Operand(type));
2072 void MacroAssembler::CompareRoot(Register obj,
2073 Heap::RootListIndex index) {
2074 ASSERT(!obj.is(ip));
2075 LoadRoot(ip, index);
2080 void MacroAssembler::CheckFastElements(Register map,
2083 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2084 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2085 STATIC_ASSERT(FAST_ELEMENTS == 2);
2086 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2087 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2088 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2093 void MacroAssembler::CheckFastObjectElements(Register map,
2096 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2097 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2098 STATIC_ASSERT(FAST_ELEMENTS == 2);
2099 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2100 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2101 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2103 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
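// Taken together, the two comparisons accept only bit-field values in the
// window (kMaximumBitField2FastHoleySmiElementValue,
// kMaximumBitField2FastHoleyElementValue], i.e. FAST_ELEMENTS and
// FAST_HOLEY_ELEMENTS, but neither smi-only nor slow element kinds.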
2108 void MacroAssembler::CheckFastSmiElements(Register map,
2111 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2112 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2113 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2114 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2119 void MacroAssembler::StoreNumberToDoubleElements(
2122 Register elements_reg,
2124 LowDwVfpRegister double_scratch,
2126 int elements_offset) {
2127 Label smi_value, store;
2129 // Handle smi values specially.
2130 JumpIfSmi(value_reg, &smi_value);
2132 // Ensure that the object is a heap number
2135 isolate()->factory()->heap_number_map(),
2139 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2140 // Force a canonical NaN.
2141 if (emit_debug_code()) {
2143 tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2144 Assert(ne, kDefaultNaNModeNotSet);
2146 VFPCanonicalizeNaN(double_scratch);
2150 SmiToDouble(double_scratch, value_reg);
2153 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2154 vstr(double_scratch,
2155 FieldMemOperand(scratch1,
2156 FixedDoubleArray::kHeaderSize - elements_offset));
2160 void MacroAssembler::CompareMap(Register obj,
2163 Label* early_success) {
2164 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2165 CompareMap(scratch, map, early_success);
2169 void MacroAssembler::CompareMap(Register obj_map,
2171 Label* early_success) {
2172 cmp(obj_map, Operand(map));
2176 void MacroAssembler::CheckMap(Register obj,
2180 SmiCheckType smi_check_type) {
2181 if (smi_check_type == DO_SMI_CHECK) {
2182 JumpIfSmi(obj, fail);
2186 CompareMap(obj, scratch, map, &success);
2192 void MacroAssembler::CheckMap(Register obj,
2194 Heap::RootListIndex index,
2196 SmiCheckType smi_check_type) {
2197 if (smi_check_type == DO_SMI_CHECK) {
2198 JumpIfSmi(obj, fail);
2200 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2201 LoadRoot(ip, index);
2207 void MacroAssembler::DispatchMap(Register obj,
2210 Handle<Code> success,
2211 SmiCheckType smi_check_type) {
2213 if (smi_check_type == DO_SMI_CHECK) {
2214 JumpIfSmi(obj, &fail);
2216 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2217 mov(ip, Operand(map));
2219 Jump(success, RelocInfo::CODE_TARGET, eq);
2224 void MacroAssembler::TryGetFunctionPrototype(Register function,
2228 bool miss_on_bound_function) {
2229 // Check that the receiver isn't a smi.
2230 JumpIfSmi(function, miss);
2232 // Check that the function really is a function. Load map into result reg.
2233 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2236 if (miss_on_bound_function) {
2238 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2240 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2242 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2246 // Make sure that the function has an instance prototype.
2248 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2249 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2250 b(ne, &non_instance);
2252 // Get the prototype or initial map from the function.
2254 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2256 // If the prototype or initial map is the hole, don't return it and
2257 // simply miss the cache instead. This will allow us to allocate a
2258 // prototype object on-demand in the runtime system.
2259 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2263 // If the function does not have an initial map, we're done.
2265 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2268 // Get the prototype from the initial map.
2269 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2272 // Non-instance prototype: Fetch prototype from constructor field
2274 bind(&non_instance);
2275 ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2282 void MacroAssembler::CallStub(CodeStub* stub,
2283 TypeFeedbackId ast_id,
2285 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2286 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
2290 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2291 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
2295 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2296 return ref0.address() - ref1.address();
2300 void MacroAssembler::CallApiFunctionAndReturn(
2301 ExternalReference function,
2302 Address function_address,
2303 ExternalReference thunk_ref,
2304 Register thunk_last_arg,
2306 MemOperand return_value_operand,
2307 MemOperand* context_restore_operand) {
2308 ExternalReference next_address =
2309 ExternalReference::handle_scope_next_address(isolate());
2310 const int kNextOffset = 0;
2311 const int kLimitOffset = AddressOffset(
2312 ExternalReference::handle_scope_limit_address(isolate()),
2314 const int kLevelOffset = AddressOffset(
2315 ExternalReference::handle_scope_level_address(isolate()),
2318 ASSERT(!thunk_last_arg.is(r3));
2320 // Allocate HandleScope in callee-save registers.
2321 mov(r9, Operand(next_address));
2322 ldr(r4, MemOperand(r9, kNextOffset));
2323 ldr(r5, MemOperand(r9, kLimitOffset));
2324 ldr(r6, MemOperand(r9, kLevelOffset));
2325 add(r6, r6, Operand(1));
2326 str(r6, MemOperand(r9, kLevelOffset));
2328 if (FLAG_log_timer_events) {
2329 FrameScope frame(this, StackFrame::MANUAL);
2330 PushSafepointRegisters();
2331 PrepareCallCFunction(1, r0);
2332 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2333 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2334 PopSafepointRegisters();
2337 Label profiler_disabled;
2338 Label end_profiler_check;
2339 bool* is_profiling_flag =
2340 isolate()->cpu_profiler()->is_profiling_address();
2341 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
2342 mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
2343 ldrb(r3, MemOperand(r3, 0));
2344 cmp(r3, Operand(0));
2345 b(eq, &profiler_disabled);
2347 // Additional parameter is the address of the actual callback.
2348 mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
2349 mov(r3, Operand(thunk_ref));
2350 jmp(&end_profiler_check);
2352 bind(&profiler_disabled);
2353 mov(r3, Operand(function));
2354 bind(&end_profiler_check);
2356 // Native call returns to the DirectCEntry stub which redirects to the
2357 // return address pushed on stack (could have moved after GC).
2358 // DirectCEntry stub itself is generated early and never moves.
2359 DirectCEntryStub stub;
2360 stub.GenerateCall(this, r3);
2362 if (FLAG_log_timer_events) {
2363 FrameScope frame(this, StackFrame::MANUAL);
2364 PushSafepointRegisters();
2365 PrepareCallCFunction(1, r0);
2366 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2367 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2368 PopSafepointRegisters();
2371 Label promote_scheduled_exception;
2372 Label exception_handled;
2373 Label delete_allocated_handles;
2374 Label leave_exit_frame;
2375 Label return_value_loaded;
2377 // Load value from ReturnValue.
2378 ldr(r0, return_value_operand);
2379 bind(&return_value_loaded);
2380 // No more valid handles (the result handle was the last one). Restore
2381 // previous handle scope.
2382 str(r4, MemOperand(r9, kNextOffset));
2383 if (emit_debug_code()) {
2384 ldr(r1, MemOperand(r9, kLevelOffset));
2386 Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2388 sub(r6, r6, Operand(1));
2389 str(r6, MemOperand(r9, kLevelOffset));
2390 ldr(ip, MemOperand(r9, kLimitOffset));
2392 b(ne, &delete_allocated_handles);
2394 // Check if the function scheduled an exception.
2395 bind(&leave_exit_frame);
2396 LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2397 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2398 ldr(r5, MemOperand(ip));
2400 b(ne, &promote_scheduled_exception);
2401 bind(&exception_handled);
2403 bool restore_context = context_restore_operand != NULL;
2404 if (restore_context) {
2405 ldr(cp, *context_restore_operand);
2407 // LeaveExitFrame expects unwind space to be in a register.
2408 mov(r4, Operand(stack_space));
2409 LeaveExitFrame(false, r4, !restore_context);
2412 bind(&promote_scheduled_exception);
2414 FrameScope frame(this, StackFrame::INTERNAL);
2415 CallExternalReference(
2416 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2419 jmp(&exception_handled);
2421 // HandleScope limit has changed. Delete allocated extensions.
2422 bind(&delete_allocated_handles);
2423 str(r5, MemOperand(r9, kLimitOffset));
2425 PrepareCallCFunction(1, r5);
2426 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2428 ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2430 jmp(&leave_exit_frame);
2434 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2435 return has_frame_ || !stub->SometimesSetsUpAFrame();
2439 void MacroAssembler::IllegalOperation(int num_arguments) {
2440 if (num_arguments > 0) {
2441 add(sp, sp, Operand(num_arguments * kPointerSize));
2443 LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2447 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2448 // If the hash field contains an array index pick it out. The assert checks
2449 // that the constants for the maximum number of digits for an array index
2450 // cached in the hash field and the number of bits reserved for it do not conflict.
2452 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2453 (1 << String::kArrayIndexValueBits));
2454 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
2455 // the low kHashShift bits.
2456 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2457 SmiTag(index, hash);
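// Example (illustrative field layout, with kSmiTagSize == 1 on 32-bit ARM):
// if the array index 42 is cached in the hash field starting at bit
// kHashShift, Ubfx extracts the raw value 42 into hash and SmiTag then
// leaves the smi 84 (42 << 1) in index.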
2461 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2462 if (CpuFeatures::IsSupported(VFP3)) {
2463 vmov(value.low(), smi);
2464 vcvt_f64_s32(value, 1);
2467 vmov(value.low(), ip);
2468 vcvt_f64_s32(value, value.low());
2473 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2474 LowDwVfpRegister double_scratch) {
2475 ASSERT(!double_input.is(double_scratch));
2476 vcvt_s32_f64(double_scratch.low(), double_input);
2477 vcvt_f64_s32(double_scratch, double_scratch.low());
2478 VFPCompareAndSetFlags(double_input, double_scratch);
2482 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2483 DwVfpRegister double_input,
2484 LowDwVfpRegister double_scratch) {
2485 ASSERT(!double_input.is(double_scratch));
2486 vcvt_s32_f64(double_scratch.low(), double_input);
2487 vmov(result, double_scratch.low());
2488 vcvt_f64_s32(double_scratch, double_scratch.low());
2489 VFPCompareAndSetFlags(double_input, double_scratch);
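// The round trip double -> int32 -> double detects inexact conversions:
// e.g. 3.5 truncates to 3, converts back to 3.0, and the VFP compare then
// reports "not equal", so the caller knows the value was not an exact int32.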
2493 void MacroAssembler::TryInt32Floor(Register result,
2494 DwVfpRegister double_input,
2495 Register input_high,
2496 LowDwVfpRegister double_scratch,
2499 ASSERT(!result.is(input_high));
2500 ASSERT(!double_input.is(double_scratch));
2501 Label negative, exception;
2503 VmovHigh(input_high, double_input);
2505 // Test for NaN and infinities.
2506 Sbfx(result, input_high,
2507 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2508 cmp(result, Operand(-1));
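// Sbfx sign-extends the 11 exponent bits; an all-ones exponent (NaN or an
// infinity) therefore reads back as -1, which is what the comparison above
// detects before taking the exception path.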
2510 // Test for values that can be exactly represented as a
2511 // signed 32-bit integer.
2512 TryDoubleToInt32Exact(result, double_input, double_scratch);
2513 // If exact, return (result already fetched).
2515 cmp(input_high, Operand::Zero());
2518 // Input is in ]+0, +inf[.
2519 // If result equals 0x7fffffff input was out of range or
2520 // in ]0x7fffffff, 0x80000000[. We ignore this last case which
2521 // could fit into an int32; that means we always treat the input as
2522 // out of range and always go to exception.
2523 // If result < 0x7fffffff, go to done, result fetched.
2524 cmn(result, Operand(1));
2528 // Input is in ]-inf, -0[.
2529 // If x is a non-integer negative number,
2530 // floor(x) <=> round_to_zero(x) - 1.
2532 sub(result, result, Operand(1), SetCC);
2533 // If result is still negative, go to done, result fetched.
2534 // Else, we had an overflow and we fall through exception.
2539 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2540 DwVfpRegister double_input,
2542 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2543 vcvt_s32_f64(double_scratch.low(), double_input);
2544 vmov(result, double_scratch.low());
2546 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2547 sub(ip, result, Operand(1));
2548 cmp(ip, Operand(0x7ffffffe));
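// vcvt saturates out-of-range and NaN inputs to 0x7fffffff or 0x80000000.
// For both of those values result - 1 is >= 0x7ffffffe in a signed compare,
// while every other conversion result compares less, so only saturated
// results (plus the rare exact INT_MIN/INT_MAX results, which are
// conservatively treated the same way) fall through to the slow path.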
2553 void MacroAssembler::TruncateDoubleToI(Register result,
2554 DwVfpRegister double_input) {
2557 TryInlineTruncateDoubleToI(result, double_input, &done);
2559 // If we fell through, the inline version didn't succeed; call the stub instead.
2561 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2562 vstr(double_input, MemOperand(sp, 0));
2564 DoubleToIStub stub(sp, result, 0, true, true);
2567 add(sp, sp, Operand(kDoubleSize));
2574 void MacroAssembler::TruncateHeapNumberToI(Register result,
2577 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2578 ASSERT(!result.is(object));
2580 vldr(double_scratch,
2581 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2582 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2584 // If we fell through, the inline version didn't succeed; call the stub instead.
2586 DoubleToIStub stub(object,
2588 HeapNumber::kValueOffset - kHeapObjectTag,
2598 void MacroAssembler::TruncateNumberToI(Register object,
2600 Register heap_number_map,
2602 Label* not_number) {
2604 ASSERT(!result.is(object));
2606 UntagAndJumpIfSmi(result, object, &done);
2607 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2608 TruncateHeapNumberToI(result, object);
2614 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2616 int num_least_bits) {
2617 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2618 ubfx(dst, src, kSmiTagSize, num_least_bits);
2621 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2626 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2628 int num_least_bits) {
2629 and_(dst, src, Operand((1 << num_least_bits) - 1));
2633 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2635 SaveFPRegsMode save_doubles) {
2636 // All parameters are on the stack. r0 has the return value after call.
2638 // If the expected number of arguments of the runtime function is
2639 // constant, we check that the actual number of arguments matches the expectation.
2641 if (f->nargs >= 0 && f->nargs != num_arguments) {
2642 IllegalOperation(num_arguments);
2646 // TODO(1236192): Most runtime routines don't need the number of
2647 // arguments passed in because it is constant. At some point we
2648 // should remove this need and make the runtime routine entry code smarter.
2650 mov(r0, Operand(num_arguments));
2651 mov(r1, Operand(ExternalReference(f, isolate())));
2652 CEntryStub stub(1, save_doubles);
2657 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2658 int num_arguments) {
2659 mov(r0, Operand(num_arguments));
2660 mov(r1, Operand(ext));
2667 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2670 // TODO(1236192): Most runtime routines don't need the number of
2671 // arguments passed in because it is constant. At some point we
2672 // should remove this need and make the runtime routine entry code smarter.
2674 mov(r0, Operand(num_arguments));
2675 JumpToExternalReference(ext);
2679 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2682 TailCallExternalReference(ExternalReference(fid, isolate()),
2688 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2689 #if defined(__thumb__)
2690 // Thumb mode builtin.
2691 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2693 mov(r1, Operand(builtin));
2695 Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
2699 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2701 const CallWrapper& call_wrapper) {
2702 // You can't call a builtin without a valid frame.
2703 ASSERT(flag == JUMP_FUNCTION || has_frame());
2705 GetBuiltinEntry(r2, id);
2706 if (flag == CALL_FUNCTION) {
2707 call_wrapper.BeforeCall(CallSize(r2));
2709 call_wrapper.AfterCall();
2711 ASSERT(flag == JUMP_FUNCTION);
2717 void MacroAssembler::GetBuiltinFunction(Register target,
2718 Builtins::JavaScript id) {
2719 // Load the builtins object into target register.
2721 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2722 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2723 // Load the JavaScript builtin function from the builtins object.
2724 ldr(target, FieldMemOperand(target,
2725 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2729 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2730 ASSERT(!target.is(r1));
2731 GetBuiltinFunction(r1, id);
2732 // Load the code entry point from the builtins object.
2733 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2737 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2738 Register scratch1, Register scratch2) {
2739 if (FLAG_native_code_counters && counter->Enabled()) {
2740 mov(scratch1, Operand(value));
2741 mov(scratch2, Operand(ExternalReference(counter)));
2742 str(scratch1, MemOperand(scratch2));
2747 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2748 Register scratch1, Register scratch2) {
2750 if (FLAG_native_code_counters && counter->Enabled()) {
2751 mov(scratch2, Operand(ExternalReference(counter)));
2752 ldr(scratch1, MemOperand(scratch2));
2753 add(scratch1, scratch1, Operand(value));
2754 str(scratch1, MemOperand(scratch2));
2759 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2760 Register scratch1, Register scratch2) {
2762 if (FLAG_native_code_counters && counter->Enabled()) {
2763 mov(scratch2, Operand(ExternalReference(counter)));
2764 ldr(scratch1, MemOperand(scratch2));
2765 sub(scratch1, scratch1, Operand(value));
2766 str(scratch1, MemOperand(scratch2));
2771 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2772 if (emit_debug_code())
2773 Check(cond, reason);
2777 void MacroAssembler::AssertFastElements(Register elements) {
2778 if (emit_debug_code()) {
2779 ASSERT(!elements.is(ip));
2782 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2783 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2786 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2789 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2792 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2799 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2803 // will not return here
2808 void MacroAssembler::Abort(BailoutReason reason) {
2811 // We want to pass the msg string like a smi to avoid GC
2812 // problems; however, msg is not guaranteed to be aligned
2813 // properly. Instead, we pass an aligned pointer that is
2814 // a proper v8 smi, but also pass the alignment difference
2815 // from the real pointer as a smi.
2816 const char* msg = GetBailoutReason(reason);
2817 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2818 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2819 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
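// Sketch of the encoding (kSmiTag == 0, kSmiTagMask == 1 on 32-bit ARM):
// p0 is the message pointer with its low bit cleared, so it looks like a
// smi, and the difference p1 - p0 (0 or 1) is passed separately as a smi so
// the runtime can reconstruct the real pointer as p0 + (p1 - p0).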
2822 RecordComment("Abort message: ");
2826 if (FLAG_trap_on_abort) {
2832 mov(r0, Operand(p0));
2834 mov(r0, Operand(Smi::FromInt(p1 - p0)));
2836 // Disable stub call restrictions to always allow calls to abort.
2838 // We don't actually want to generate a pile of code for this, so just
2839 // claim there is a stack frame, without generating one.
2840 FrameScope scope(this, StackFrame::NONE);
2841 CallRuntime(Runtime::kAbort, 2);
2843 CallRuntime(Runtime::kAbort, 2);
2845 // will not return here
2846 if (is_const_pool_blocked()) {
2847 // If the calling code cares about the exact number of
2848 // instructions generated, we insert padding here to keep the size
2849 // of the Abort macro constant.
2850 static const int kExpectedAbortInstructions = 10;
2851 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2852 ASSERT(abort_instructions <= kExpectedAbortInstructions);
2853 while (abort_instructions++ < kExpectedAbortInstructions) {
2860 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2861 if (context_chain_length > 0) {
2862 // Move up the chain of contexts to the context containing the slot.
2863 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2864 for (int i = 1; i < context_chain_length; i++) {
2865 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2868 // Slot is in the current function context. Move it into the
2869 // destination register in case we store into it (the write barrier
2870 // cannot be allowed to destroy the context in cp).
2876 void MacroAssembler::LoadTransitionedArrayMapConditional(
2877 ElementsKind expected_kind,
2878 ElementsKind transitioned_kind,
2879 Register map_in_out,
2881 Label* no_map_match) {
2882 // Load the global or builtins object from the current context.
2884 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2885 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2887 // Check that the function's map is the same as the expected cached map.
2890 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2891 size_t offset = expected_kind * kPointerSize +
2892 FixedArrayBase::kHeaderSize;
2893 ldr(ip, FieldMemOperand(scratch, offset));
2894 cmp(map_in_out, ip);
2895 b(ne, no_map_match);
2897 // Use the transitioned cached map.
2898 offset = transitioned_kind * kPointerSize +
2899 FixedArrayBase::kHeaderSize;
2900 ldr(map_in_out, FieldMemOperand(scratch, offset));
2904 void MacroAssembler::LoadInitialArrayMap(
2905 Register function_in, Register scratch,
2906 Register map_out, bool can_have_holes) {
2907 ASSERT(!function_in.is(map_out));
2909 ldr(map_out, FieldMemOperand(function_in,
2910 JSFunction::kPrototypeOrInitialMapOffset));
2911 if (!FLAG_smi_only_arrays) {
2912 ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
2913 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2918 } else if (can_have_holes) {
2919 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2920 FAST_HOLEY_SMI_ELEMENTS,
2929 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2930 // Load the global or builtins object from the current context.
2932 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2933 // Load the native context from the global or builtins object.
2934 ldr(function, FieldMemOperand(function,
2935 GlobalObject::kNativeContextOffset));
2936 // Load the function from the native context.
2937 ldr(function, MemOperand(function, Context::SlotOffset(index)));
2941 void MacroAssembler::LoadArrayFunction(Register function) {
2942 // Load the global or builtins object from the current context.
2944 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2945 // Load the global context from the global or builtins object.
2947 FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
2948 // Load the array function from the native context.
2950 MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
2954 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2957 // Load the initial map. The global functions all have initial maps.
2958 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2959 if (emit_debug_code()) {
2961 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2964 Abort(kGlobalFunctionsMustHaveInitialMap);
2970 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2973 Label* not_power_of_two_or_zero) {
2974 sub(scratch, reg, Operand(1), SetCC);
2975 b(mi, not_power_of_two_or_zero);
2977 b(ne, not_power_of_two_or_zero);
2981 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2984 Label* zero_and_neg,
2985 Label* not_power_of_two) {
2986 sub(scratch, reg, Operand(1), SetCC);
2987 b(mi, zero_and_neg);
2989 b(ne, not_power_of_two);
2993 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2995 Label* on_not_both_smi) {
2996 STATIC_ASSERT(kSmiTag == 0);
2997 tst(reg1, Operand(kSmiTagMask));
2998 tst(reg2, Operand(kSmiTagMask), eq);
2999 b(ne, on_not_both_smi);
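// The second tst is conditional on eq, i.e. it only executes when reg1 is a
// smi; the flags therefore end up ne if either register has its tag bit set,
// which is exactly the "not both smis" case branched on above.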
3003 void MacroAssembler::UntagAndJumpIfSmi(
3004 Register dst, Register src, Label* smi_case) {
3005 STATIC_ASSERT(kSmiTag == 0);
3006 SmiUntag(dst, src, SetCC);
3007 b(cc, smi_case); // Shifter carry is not set for a smi.
3011 void MacroAssembler::UntagAndJumpIfNotSmi(
3012 Register dst, Register src, Label* non_smi_case) {
3013 STATIC_ASSERT(kSmiTag == 0);
3014 SmiUntag(dst, src, SetCC);
3015 b(cs, non_smi_case); // Shifter carry is set for a non-smi.
3019 void MacroAssembler::JumpIfEitherSmi(Register reg1,
3021 Label* on_either_smi) {
3022 STATIC_ASSERT(kSmiTag == 0);
3023 tst(reg1, Operand(kSmiTagMask));
3024 tst(reg2, Operand(kSmiTagMask), ne);
3025 b(eq, on_either_smi);
3029 void MacroAssembler::AssertNotSmi(Register object) {
3030 if (emit_debug_code()) {
3031 STATIC_ASSERT(kSmiTag == 0);
3032 tst(object, Operand(kSmiTagMask));
3033 Check(ne, kOperandIsASmi);
3038 void MacroAssembler::AssertSmi(Register object) {
3039 if (emit_debug_code()) {
3040 STATIC_ASSERT(kSmiTag == 0);
3041 tst(object, Operand(kSmiTagMask));
3042 Check(eq, kOperandIsNotSmi);
3047 void MacroAssembler::AssertString(Register object) {
3048 if (emit_debug_code()) {
3049 STATIC_ASSERT(kSmiTag == 0);
3050 tst(object, Operand(kSmiTagMask));
3051 Check(ne, kOperandIsASmiAndNotAString);
3053 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3054 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3056 Check(lo, kOperandIsNotAString);
3061 void MacroAssembler::AssertName(Register object) {
3062 if (emit_debug_code()) {
3063 STATIC_ASSERT(kSmiTag == 0);
3064 tst(object, Operand(kSmiTagMask));
3065 Check(ne, kOperandIsASmiAndNotAName);
3067 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3068 CompareInstanceType(object, object, LAST_NAME_TYPE);
3070 Check(le, kOperandIsNotAName);
3076 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3077 if (emit_debug_code()) {
3078 CompareRoot(reg, index);
3079 Check(eq, kHeapNumberMapRegisterClobbered);
3084 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3085 Register heap_number_map,
3087 Label* on_not_heap_number) {
3088 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3089 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3090 cmp(scratch, heap_number_map);
3091 b(ne, on_not_heap_number);
3095 void MacroAssembler::LookupNumberStringCache(Register object,
3101 // Use of registers. Register result is used as a temporary.
3102 Register number_string_cache = result;
3103 Register mask = scratch3;
3105 // Load the number string cache.
3106 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3108 // Make the hash mask from the length of the number string cache. It
3109 // contains two elements (number and string) for each cache entry.
3110 ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
3111 // Divide length by two (length is a smi).
3112 mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
3113 sub(mask, mask, Operand(1)); // Make mask.
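// Worked example: a cache FixedArray of length 128 holds 64 (number, string)
// pairs. The length field holds the smi for 128; shifting it right by
// kSmiTagSize + 1 both untags it and divides by two, giving 64 entries, and
// subtracting 1 yields the hash mask 63 (0x3f).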
3115 // Calculate the entry in the number string cache. The hash value in the
3116 // number string cache for smis is just the smi value, and the hash for
3117 // doubles is the xor of the upper and lower words. See
3118 // Heap::GetNumberStringCache.
3120 Label load_result_from_cache;
3121 JumpIfSmi(object, &is_smi);
3124 Heap::kHeapNumberMapRootIndex,
3128 STATIC_ASSERT(8 == kDoubleSize);
3131 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
3132 ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
3133 eor(scratch1, scratch1, Operand(scratch2));
3134 and_(scratch1, scratch1, Operand(mask));
3136 // Calculate address of entry in string cache: each entry consists
3137 // of two pointer sized fields.
3139 number_string_cache,
3140 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
3142 Register probe = mask;
3143 ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3144 JumpIfSmi(probe, not_found);
3145 sub(scratch2, object, Operand(kHeapObjectTag));
3146 vldr(d0, scratch2, HeapNumber::kValueOffset);
3147 sub(probe, probe, Operand(kHeapObjectTag));
3148 vldr(d1, probe, HeapNumber::kValueOffset);
3149 VFPCompareAndSetFlags(d0, d1);
3150 b(ne, not_found); // The cache did not contain this value.
3151 b(&load_result_from_cache);
3154 Register scratch = scratch1;
3155 and_(scratch, mask, Operand(object, ASR, 1));
3156 // Calculate address of entry in string cache: each entry consists
3157 // of two pointer sized fields.
3159 number_string_cache,
3160 Operand(scratch, LSL, kPointerSizeLog2 + 1));
3162 // Check if the entry is the smi we are looking for.
3163 ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3167 // Get the result from the cache.
3168 bind(&load_result_from_cache);
3169 ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
3170 IncrementCounter(isolate()->counters()->number_to_string_native(),
3177 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3183 // Test that both first and second are sequential ASCII strings.
3184 // Assume that they are non-smis.
3185 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3186 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3187 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3188 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3190 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3197 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3202 // Check that neither is a smi.
3203 and_(scratch1, first, Operand(second));
3204 JumpIfSmi(scratch1, failure);
3205 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3213 void MacroAssembler::JumpIfNotUniqueName(Register reg,
3214 Label* not_unique_name) {
3215 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3217 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3219 cmp(reg, Operand(SYMBOL_TYPE));
3220 b(ne, not_unique_name);
3226 // Allocates a heap number or jumps to the gc_required label if the young space
3227 // is full and a scavenge is needed.
3228 void MacroAssembler::AllocateHeapNumber(Register result,
3231 Register heap_number_map,
3233 TaggingMode tagging_mode) {
3234 // Allocate an object in the heap for the heap number and tag it as a heap object.
3236 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3237 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3239 // Store heap number map in the allocated object.
3240 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3241 if (tagging_mode == TAG_RESULT) {
3242 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3244 str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3249 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3250 DwVfpRegister value,
3253 Register heap_number_map,
3254 Label* gc_required) {
3255 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3256 sub(scratch1, result, Operand(kHeapObjectTag));
3257 vstr(value, scratch1, HeapNumber::kValueOffset);
3261 // Copies a fixed number of fields of heap objects from src to dst.
3262 void MacroAssembler::CopyFields(Register dst,
3264 LowDwVfpRegister double_scratch,
3266 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
3267 for (int i = 0; i < double_count; i++) {
3268 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
3269 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
3272 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
3273 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
3275 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
3277 vldr(double_scratch.low(),
3278 FieldMemOperand(src, (field_count - 1) * kPointerSize));
3279 vstr(double_scratch.low(),
3280 FieldMemOperand(dst, (field_count - 1) * kPointerSize));
3285 void MacroAssembler::CopyBytes(Register src,
3289 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3291 // Align src before copying in word size chunks.
3292 cmp(length, Operand(kPointerSize));
3295 bind(&align_loop_1);
3296 tst(src, Operand(kPointerSize - 1));
3298 ldrb(scratch, MemOperand(src, 1, PostIndex));
3299 strb(scratch, MemOperand(dst, 1, PostIndex));
3300 sub(length, length, Operand(1), SetCC);
3302 // Copy bytes in word size chunks.
3304 if (emit_debug_code()) {
3305 tst(src, Operand(kPointerSize - 1));
3306 Assert(eq, kExpectingAlignmentForCopyBytes);
3308 cmp(length, Operand(kPointerSize));
3310 ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3311 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3312 str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3314 strb(scratch, MemOperand(dst, 1, PostIndex));
3315 mov(scratch, Operand(scratch, LSR, 8));
3316 strb(scratch, MemOperand(dst, 1, PostIndex));
3317 mov(scratch, Operand(scratch, LSR, 8));
3318 strb(scratch, MemOperand(dst, 1, PostIndex));
3319 mov(scratch, Operand(scratch, LSR, 8));
3320 strb(scratch, MemOperand(dst, 1, PostIndex));
3322 sub(length, length, Operand(kPointerSize));
3325 // Copy the last bytes, if any are left.
3327 cmp(length, Operand::Zero());
3330 ldrb(scratch, MemOperand(src, 1, PostIndex));
3331 strb(scratch, MemOperand(dst, 1, PostIndex));
3332 sub(length, length, Operand(1), SetCC);
3333 b(ne, &byte_loop_1);
3338 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3339 Register end_offset,
3344 str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3346 cmp(start_offset, end_offset);
3351 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3352 mov(scratch, Operand(ExternalReference::cpu_features()));
3353 ldr(scratch, MemOperand(scratch));
3354 tst(scratch, Operand(1u << VFP32DREGS));
3358 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3359 CheckFor32DRegs(scratch);
3360 vstm(db_w, location, d16, d31, ne);
3361 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3362 vstm(db_w, location, d0, d15);
3366 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3367 CheckFor32DRegs(scratch);
3368 vldm(ia_w, location, d0, d15);
3369 vldm(ia_w, location, d16, d31, ne);
3370 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3374 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3380 const int kFlatAsciiStringMask =
3381 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3382 const int kFlatAsciiStringTag =
3383 kStringTag | kOneByteStringTag | kSeqStringTag;
3384 and_(scratch1, first, Operand(kFlatAsciiStringMask));
3385 and_(scratch2, second, Operand(kFlatAsciiStringMask));
3386 cmp(scratch1, Operand(kFlatAsciiStringTag));
3387 // Ignore second test if first test failed.
3388 cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
3393 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3396 const int kFlatAsciiStringMask =
3397 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3398 const int kFlatAsciiStringTag =
3399 kStringTag | kOneByteStringTag | kSeqStringTag;
3400 and_(scratch, type, Operand(kFlatAsciiStringMask));
3401 cmp(scratch, Operand(kFlatAsciiStringTag));
3405 static const int kRegisterPassedArguments = 4;
3408 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3409 int num_double_arguments) {
3410 int stack_passed_words = 0;
3411 if (use_eabi_hardfloat()) {
3412 // In the hard floating point calling convention, we can use
3413 // all double registers to pass doubles.
3414 if (num_double_arguments > DoubleRegister::NumRegisters()) {
3415 stack_passed_words +=
3416 2 * (num_double_arguments - DoubleRegister::NumRegisters());
3419 // In the soft floating point calling convention, every double
3420 // argument is passed using two registers.
3421 num_reg_arguments += 2 * num_double_arguments;
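// For illustration: with soft-float, 2 integer arguments and 2 doubles
// become 2 + 4 = 6 register-class arguments, so 2 words spill to the stack;
// with hard-float the same call needs no stack words because the doubles
// travel in d0..d7.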
3423 // Up to four simple arguments are passed in registers r0..r3.
3424 if (num_reg_arguments > kRegisterPassedArguments) {
3425 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3427 return stack_passed_words;
3431 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3434 uint32_t encoding_mask) {
3437 ThrowIf(eq, kNonObject);
3439 ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3440 ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3442 and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3443 cmp(ip, Operand(encoding_mask));
3444 ThrowIf(ne, kUnexpectedStringType);
3446 // The index is assumed to be untagged coming in; tag it to compare with the
3447 // string length without using a temp register. It is restored at the end of this function.
3449 Label index_tag_ok, index_tag_bad;
3450 TrySmiTag(index, index, &index_tag_bad);
3452 bind(&index_tag_bad);
3453 Throw(kIndexIsTooLarge);
3454 bind(&index_tag_ok);
3456 ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3458 ThrowIf(ge, kIndexIsTooLarge);
3460 cmp(index, Operand(Smi::FromInt(0)));
3461 ThrowIf(lt, kIndexIsNegative);
3463 SmiUntag(index, index);
3467 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3468 int num_double_arguments,
3470 int frame_alignment = ActivationFrameAlignment();
3471 int stack_passed_arguments = CalculateStackPassedWords(
3472 num_reg_arguments, num_double_arguments);
3473 if (frame_alignment > kPointerSize) {
3474 // Make stack end at alignment and make room for num_arguments - 4 words
3475 // and the original value of sp.
3477 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3478 ASSERT(IsPowerOf2(frame_alignment));
3479 and_(sp, sp, Operand(-frame_alignment));
3480 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3482 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3487 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3489 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3493 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
3495 if (!use_eabi_hardfloat()) {
3501 // On ARM this is just a synonym to make the purpose clear.
3502 void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
3503 MovToFloatParameter(src);
3507 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
3508 DwVfpRegister src2) {
3509 ASSERT(src1.is(d0));
3510 ASSERT(src2.is(d1));
3511 if (!use_eabi_hardfloat()) {
3518 void MacroAssembler::CallCFunction(ExternalReference function,
3519 int num_reg_arguments,
3520 int num_double_arguments) {
3521 mov(ip, Operand(function));
3522 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3526 void MacroAssembler::CallCFunction(Register function,
3527 int num_reg_arguments,
3528 int num_double_arguments) {
3529 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3533 void MacroAssembler::CallCFunction(ExternalReference function,
3534 int num_arguments) {
3535 CallCFunction(function, num_arguments, 0);
3539 void MacroAssembler::CallCFunction(Register function,
3540 int num_arguments) {
3541 CallCFunction(function, num_arguments, 0);
3545 void MacroAssembler::CallCFunctionHelper(Register function,
3546 int num_reg_arguments,
3547 int num_double_arguments) {
3548 ASSERT(has_frame());
3549 // Make sure that the stack is aligned before calling a C function unless
3550 // running in the simulator. The simulator has its own alignment check which
3551 // provides more information.
3552 #if V8_HOST_ARCH_ARM
3553 if (emit_debug_code()) {
3554 int frame_alignment = OS::ActivationFrameAlignment();
3555 int frame_alignment_mask = frame_alignment - 1;
3556 if (frame_alignment > kPointerSize) {
3557 ASSERT(IsPowerOf2(frame_alignment));
3558 Label alignment_as_expected;
3559 tst(sp, Operand(frame_alignment_mask));
3560 b(eq, &alignment_as_expected);
3561 // Don't use Check here, as it will call Runtime_Abort possibly
3562 // re-entering here.
3563 stop("Unexpected alignment");
3564 bind(&alignment_as_expected);
3569 // Just call directly. The function called cannot cause a GC, or
3570 // allow preemption, so the return address in the link register stays correct.
3573 int stack_passed_arguments = CalculateStackPassedWords(
3574 num_reg_arguments, num_double_arguments);
3575 if (ActivationFrameAlignment() > kPointerSize) {
3576 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3578 add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3583 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3585 const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3586 const int32_t kPCRegOffset = 2 * kPointerSize;
3587 ldr(result, MemOperand(ldr_location));
3588 if (emit_debug_code()) {
3589 // Check that the instruction is a ldr reg, [pc + offset].
3590 and_(result, result, Operand(kLdrPCPattern));
3591 cmp(result, Operand(kLdrPCPattern));
3592 Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
3593 // Result was clobbered. Restore it.
3594 ldr(result, MemOperand(ldr_location));
3596 // Get the address of the constant.
3597 and_(result, result, Operand(kLdrOffsetMask));
3598 add(result, ldr_location, Operand(result));
3599 add(result, result, Operand(kPCRegOffset));
3603 void MacroAssembler::CheckPageFlag(
3608 Label* condition_met) {
3609 Bfc(scratch, object, 0, kPageSizeBits);
3610 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3611 tst(scratch, Operand(mask));
3612 b(cc, condition_met);
3616 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
3618 Label* if_deprecated) {
3619 if (map->CanBeDeprecated()) {
3620 mov(scratch, Operand(map));
3621 ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3622 tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
3623 b(ne, if_deprecated);
3628 void MacroAssembler::JumpIfBlack(Register object,
3632 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3633 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3637 void MacroAssembler::HasColor(Register object,
3638 Register bitmap_scratch,
3639 Register mask_scratch,
3643 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3645 GetMarkBits(object, bitmap_scratch, mask_scratch);
3647 Label other_color, word_boundary;
3648 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3649 tst(ip, Operand(mask_scratch));
3650 b(first_bit == 1 ? eq : ne, &other_color);
3651 // Shift left 1 by adding.
3652 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3653 b(eq, &word_boundary);
3654 tst(ip, Operand(mask_scratch));
3655 b(second_bit == 1 ? ne : eq, has_color);
3658 bind(&word_boundary);
3659 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3660 tst(ip, Operand(1));
3661 b(second_bit == 1 ? ne : eq, has_color);
3666 // Detect some, but not all, common pointer-free objects. This is used by the
3667 // incremental write barrier which doesn't care about oddballs (they are always
3668 // marked black immediately so this code is not hit).
3669 void MacroAssembler::JumpIfDataObject(Register value,
3671 Label* not_data_object) {
3672 Label is_data_object;
3673 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3674 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3675 b(eq, &is_data_object);
3676 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3677 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3678 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
3680 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3681 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3682 b(ne, not_data_object);
3683 bind(&is_data_object);
3687 void MacroAssembler::GetMarkBits(Register addr_reg,
3688 Register bitmap_reg,
3689 Register mask_reg) {
3690 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3691 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3692 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3693 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3694 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3695 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3696 mov(ip, Operand(1));
3697 mov(mask_reg, Operand(ip, LSL, mask_reg));
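// Worked example (assuming 4-byte pointers and 32 mark bits per bitmap
// cell): an object at page offset 0x1234 is word index 0x48d; its mark bit
// lives in cell 0x24 (0x48d >> 5) at bit 13 (0x48d & 31), so bitmap_reg ends
// up pointing at that cell and mask_reg holds 1 << 13.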
3701 void MacroAssembler::EnsureNotWhite(
3703 Register bitmap_scratch,
3704 Register mask_scratch,
3705 Register load_scratch,
3706 Label* value_is_white_and_not_data) {
3707 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3708 GetMarkBits(value, bitmap_scratch, mask_scratch);
3710 // If the value is black or grey we don't need to do anything.
3711 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3712 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3713 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3714 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3718 // Since both black and grey have a 1 in the first position and white does
3719 // not have a 1 there we only need to check one bit.
3720 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3721 tst(mask_scratch, load_scratch);
3724 if (emit_debug_code()) {
3725 // Check for impossible bit pattern.
3727 // LSL may overflow, making the check conservative.
3728 tst(load_scratch, Operand(mask_scratch, LSL, 1));
3730 stop("Impossible marking bit pattern");
3734 // Value is white. We check whether it is data that doesn't need scanning.
3735 // Currently only checks for HeapNumber and non-cons strings.
3736 Register map = load_scratch; // Holds map while checking type.
3737 Register length = load_scratch; // Holds length of object after testing type.
3738 Label is_data_object;
3740 // Check for heap-number
3741 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3742 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3743 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3744 b(eq, &is_data_object);
3746 // Check for strings.
3747 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3748 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3749 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
3751 Register instance_type = load_scratch;
3752 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3753 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3754 b(ne, value_is_white_and_not_data);
3755 // It's a non-indirect (non-cons and non-slice) string.
3756 // If it's external, the length is just ExternalString::kSize.
3757 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3758 // External strings are the only ones with the kExternalStringTag bit set.
3760 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
3761 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
3762 tst(instance_type, Operand(kExternalStringTag));
3763 mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3764 b(ne, &is_data_object);
3766 // Sequential string, either ASCII or UC16.
3767 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
3768 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3769 // getting the length multiplied by 2.
3770 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3771 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3772 ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3773 tst(instance_type, Operand(kStringEncodingMask));
3774 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3775 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3776 and_(length, length, Operand(~kObjectAlignmentMask));
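// Example (assuming a SeqString header of, say, 12 bytes and 8-byte object
// alignment): a 5-character ASCII string has smi length 10; the LSR above
// halves it to 5 payload bytes, and 5 + 12 + kObjectAlignmentMask rounded
// down to a multiple of 8 gives an object size of 24 bytes.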
3778 bind(&is_data_object);
3779 // Value is a data object, and it is white. Mark it black. Since we know
3780 // that the object is white we can make it black by flipping one bit.
3781 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3782 orr(ip, ip, Operand(mask_scratch));
3783 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3785 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3786 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3787 add(ip, ip, Operand(length));
3788 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3794 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3795 Usat(output_reg, 8, Operand(input_reg));
3799 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3800 DwVfpRegister input_reg,
3801 LowDwVfpRegister double_scratch) {
3806 VFPCompareAndSetFlags(input_reg, 0.0);
3809 // Double value is less than zero, NaN or Inf, return 0.
3810 mov(result_reg, Operand::Zero());
3813 // Double value is >= 255, return 255.
3815 Vmov(double_scratch, 255.0, result_reg);
3816 VFPCompareAndSetFlags(input_reg, double_scratch);
3818 mov(result_reg, Operand(255));
3821 // In 0-255 range, round and truncate.
3825 // Set rounding mode to round to the nearest integer by clearing bits[23:22].
3826 bic(result_reg, ip, Operand(kVFPRoundingModeMask));
3828 vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3829 vmov(result_reg, double_scratch.low());
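// Behaviour sketch: -3.2 and NaN clamp to 0, 300.7 clamps to 255, and values
// already in range are rounded to the nearest integer (ties to even under
// the rounding mode set above), e.g. 127.5 becomes 128.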
3836 void MacroAssembler::Throw(BailoutReason reason) {
3840 const char* msg = GetBailoutReason(reason);
3842 RecordComment("Throw message: ");
3847 mov(r0, Operand(Smi::FromInt(reason)));
3849 // Disable stub call restrictions to always allow calls to throw.
3851 // We don't actually want to generate a pile of code for this, so just
3852 // claim there is a stack frame, without generating one.
3853 FrameScope scope(this, StackFrame::NONE);
3854 CallRuntime(Runtime::kThrowMessage, 1);
3856 CallRuntime(Runtime::kThrowMessage, 1);
3858 // will not return here
3859 if (is_const_pool_blocked()) {
3860 // If the calling code cares about the exact number of
3861 // instructions generated, we insert padding here to keep the size
3862 // of the ThrowMessage macro constant.
3863 static const int kExpectedThrowMessageInstructions = 10;
3864 int throw_instructions = InstructionsGeneratedSince(&throw_start);
3865 ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
3866 while (throw_instructions++ < kExpectedThrowMessageInstructions) {
3873 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
3875 b(NegateCondition(cc), &L);
3877 // will not return here
3882 void MacroAssembler::LoadInstanceDescriptors(Register map,
3883 Register descriptors) {
3884 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3888 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3889 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3890 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3894 void MacroAssembler::EnumLength(Register dst, Register map) {
3895 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3896 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3897 and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
3901 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3902 Register empty_fixed_array_value = r6;
3903 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3907 // Check if the enum length field is properly initialized, indicating that
3908 // there is an enum cache.
3909 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3912 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3913 b(eq, call_runtime);
3918 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3920 // For all objects but the receiver, check that the cache is empty.
3922 cmp(r3, Operand(Smi::FromInt(0)));
3923 b(ne, call_runtime);
3927 // Check that there are no elements. Register r2 contains the current JS
3928 // object we've reached through the prototype chain.
3930 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3931 cmp(r2, empty_fixed_array_value);
3932 b(eq, &no_elements);
3934 // Second chance, the object may be using the empty slow element dictionary.
3935 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3936 b(ne, call_runtime);
3939 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3940 cmp(r2, null_value);
3945 void MacroAssembler::TestJSArrayForAllocationMemento(
3946 Register receiver_reg,
3947 Register scratch_reg,
3948 Label* no_memento_found) {
3949 ExternalReference new_space_start =
3950 ExternalReference::new_space_start(isolate());
3951 ExternalReference new_space_allocation_top =
3952 ExternalReference::new_space_allocation_top_address(isolate());
3953 add(scratch_reg, receiver_reg,
3954 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
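// scratch_reg now points just past where an AllocationMemento would end if
// one directly follows the JSArray. A memento can only exist if that address
// lies inside new space, i.e. at or above new_space_start and not above the
// current allocation top, which the two bounds checks below verify before
// the memento map itself is compared.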
3955 cmp(scratch_reg, Operand(new_space_start));
3956 b(lt, no_memento_found);
3957 mov(ip, Operand(new_space_allocation_top));
3958 ldr(ip, MemOperand(ip));
3959 cmp(scratch_reg, ip);
3960 b(gt, no_memento_found);
3961 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3963 Operand(isolate()->factory()->allocation_memento_map()));
3967 Register GetRegisterThatIsNotOneOf(Register reg1,
3974 if (reg1.is_valid()) regs |= reg1.bit();
3975 if (reg2.is_valid()) regs |= reg2.bit();
3976 if (reg3.is_valid()) regs |= reg3.bit();
3977 if (reg4.is_valid()) regs |= reg4.bit();
3978 if (reg5.is_valid()) regs |= reg5.bit();
3979 if (reg6.is_valid()) regs |= reg6.bit();
3981 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
3982 Register candidate = Register::FromAllocationIndex(i);
3983 if (regs & candidate.bit()) continue;
3991 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3996 ASSERT(!scratch1.is(scratch0));
3997 Factory* factory = isolate()->factory();
3998 Register current = scratch0;
4002 // Start from the object itself; current walks up the prototype chain.
4002 mov(current, object);
4004 // Loop based on the map going up the prototype chain.
4006 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4007 ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4008 Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
4009 cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
4011 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4012 cmp(current, Operand(factory->null_value()));
4018 bool AreAliased(Register reg1,
4024 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4025 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
4028 if (reg1.is_valid()) regs |= reg1.bit();
4029 if (reg2.is_valid()) regs |= reg2.bit();
4030 if (reg3.is_valid()) regs |= reg3.bit();
4031 if (reg4.is_valid()) regs |= reg4.bit();
4032 if (reg5.is_valid()) regs |= reg5.bit();
4033 if (reg6.is_valid()) regs |= reg6.bit();
4034 int n_of_non_aliasing_regs = NumRegs(regs);
4036 return n_of_valid_regs != n_of_non_aliasing_regs;
4041 CodePatcher::CodePatcher(byte* address,
4043 FlushICache flush_cache)
4044 : address_(address),
4045 size_(instructions * Assembler::kInstrSize),
4046 masm_(NULL, address, size_ + Assembler::kGap),
4047 flush_cache_(flush_cache) {
4048 // Create a new macro assembler pointing to the address of the code to patch.
4049 // The size is adjusted with kGap in order for the assembler to generate size
4050 // bytes of instructions without failing with buffer size constraints.
4051 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4055 CodePatcher::~CodePatcher() {
4056 // Indicate that code has changed.
4057 if (flush_cache_ == FLUSH) {
4058 CPU::FlushICache(address_, size_);
4061 // Check that the code was patched as expected.
4062 ASSERT(masm_.pc_ == address_ + size_);
4063 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4067 void CodePatcher::Emit(Instr instr) {
4068 masm()->emit(instr);
4072 void CodePatcher::Emit(Address addr) {
4073 masm()->emit(reinterpret_cast<Instr>(addr));
4077 void CodePatcher::EmitCondition(Condition cond) {
4078 Instr instr = Assembler::instr_at(masm_.pc_);
4079 instr = (instr & ~kCondMask) | cond;
4084 } } // namespace v8::internal
4086 #endif // V8_TARGET_ARCH_ARM