1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
32 #if V8_TARGET_ARCH_ARM
34 #include "bootstrapper.h"
36 #include "cpu-profiler.h"
43 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
44 : Assembler(arg_isolate, buffer, size),
45 generating_stub_(false),
46 allow_stub_calls_(true),
48 if (isolate() != NULL) {
49 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
55 void MacroAssembler::Jump(Register target, Condition cond) {
60 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
62 mov(ip, Operand(target, rmode));
67 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
69 ASSERT(!RelocInfo::IsCodeTarget(rmode));
70 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
74 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
76 ASSERT(RelocInfo::IsCodeTarget(rmode));
77 // 'code' is always generated ARM code, never THUMB code
78 AllowDeferredHandleDereference embedding_raw_address;
79 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
83 int MacroAssembler::CallSize(Register target, Condition cond) {
88 void MacroAssembler::Call(Register target, Condition cond) {
89 // Block constant pool for the call instruction sequence.
90 BlockConstPoolScope block_const_pool(this);
94 ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
98 int MacroAssembler::CallSize(
99 Address target, RelocInfo::Mode rmode, Condition cond) {
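// The generated call sequence is a load of the target address into ip
// followed by blx, i.e. two instructions; when the target operand cannot be
// encoded in a single mov, the longer immediate load is accounted for below.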
100 int size = 2 * kInstrSize;
101 Instr mov_instr = cond | MOV | LeaveCC;
102 intptr_t immediate = reinterpret_cast<intptr_t>(target);
103 if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
110 int MacroAssembler::CallSizeNotPredictableCodeSize(
111 Address target, RelocInfo::Mode rmode, Condition cond) {
112 int size = 2 * kInstrSize;
113 Instr mov_instr = cond | MOV | LeaveCC;
114 intptr_t immediate = reinterpret_cast<intptr_t>(target);
115 if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
122 void MacroAssembler::Call(Address target,
123 RelocInfo::Mode rmode,
125 TargetAddressStorageMode mode) {
126 // Block constant pool for the call instruction sequence.
127 BlockConstPoolScope block_const_pool(this);
131 bool old_predictable_code_size = predictable_code_size();
132 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
133 set_predictable_code_size(true);
136 // Call sequence on V7 or later may be:
137 // movw ip, #... @ call address low 16
138 // movt ip, #... @ call address high 16
141 // Or for pre-V7 or values that may be back-patched
142 // to avoid ICache flushes:
143 // ldr ip, [pc, #...] @ call address
147 // Statement positions are expected to be recorded when the target
148 // address is loaded. The mov method will automatically record
149 // positions when pc is the target; since that is not the case here,
150 // we have to do it explicitly.
151 positions_recorder()->WriteRecordedPositions();
153 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
156 ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
157 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
158 set_predictable_code_size(old_predictable_code_size);
163 int MacroAssembler::CallSize(Handle<Code> code,
164 RelocInfo::Mode rmode,
165 TypeFeedbackId ast_id,
167 AllowDeferredHandleDereference using_raw_address;
168 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
172 void MacroAssembler::Call(Handle<Code> code,
173 RelocInfo::Mode rmode,
174 TypeFeedbackId ast_id,
176 TargetAddressStorageMode mode) {
179 ASSERT(RelocInfo::IsCodeTarget(rmode));
180 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
181 SetRecordedAstId(ast_id);
182 rmode = RelocInfo::CODE_TARGET_WITH_ID;
184 // 'code' is always generated ARM code, never THUMB code
185 AllowDeferredHandleDereference embedding_raw_address;
186 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
190 void MacroAssembler::Ret(Condition cond) {
195 void MacroAssembler::Drop(int count, Condition cond) {
197 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
202 void MacroAssembler::Ret(int drop, Condition cond) {
208 void MacroAssembler::Swap(Register reg1,
212 if (scratch.is(no_reg)) {
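// No scratch register is available, so swap in place with the classic
// three-EOR trick (a ^= b; b ^= a; a ^= b).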
213 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
214 eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
215 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
217 mov(scratch, reg1, LeaveCC, cond);
218 mov(reg1, reg2, LeaveCC, cond);
219 mov(reg2, scratch, LeaveCC, cond);
224 void MacroAssembler::Call(Label* target) {
229 void MacroAssembler::Push(Handle<Object> handle) {
230 mov(ip, Operand(handle));
235 void MacroAssembler::Move(Register dst, Handle<Object> value) {
236 mov(dst, Operand(value));
240 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
242 mov(dst, src, LeaveCC, cond);
247 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
254 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
256 if (!src2.is_reg() &&
257 !src2.must_output_reloc_info(this) &&
258 src2.immediate() == 0) {
259 mov(dst, Operand::Zero(), LeaveCC, cond);
260 } else if (!src2.is_single_instruction(this) &&
261 !src2.must_output_reloc_info(this) &&
262 CpuFeatures::IsSupported(ARMv7) &&
263 IsPowerOf2(src2.immediate() + 1)) {
265 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
267 and_(dst, src1, src2, LeaveCC, cond);
272 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
275 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
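// Emulate ubfx without ARMv7: keep only bits [lsb, lsb + width) with an AND,
// then shift the field down to bit 0.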
276 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
277 and_(dst, src1, Operand(mask), LeaveCC, cond);
279 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
282 ubfx(dst, src1, lsb, width, cond);
287 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
290 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
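// Emulate sbfx: isolate bits [lsb, lsb + width), shift the field up to the
// top of the word, then arithmetic-shift it back down so its top bit is
// sign-extended.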
291 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
292 and_(dst, src1, Operand(mask), LeaveCC, cond);
293 int shift_up = 32 - lsb - width;
294 int shift_down = lsb + shift_up;
296 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
298 if (shift_down != 0) {
299 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
302 sbfx(dst, src1, lsb, width, cond);
307 void MacroAssembler::Bfi(Register dst,
313 ASSERT(0 <= lsb && lsb < 32);
314 ASSERT(0 <= width && width < 32);
315 ASSERT(lsb + width < 32);
316 ASSERT(!scratch.is(dst));
317 if (width == 0) return;
318 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
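// Emulate bfi: clear the destination field with bic, mask the source down to
// 'width' bits, shift it into position and OR it into dst.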
319 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
320 bic(dst, dst, Operand(mask));
321 and_(scratch, src, Operand((1 << width) - 1));
322 mov(scratch, Operand(scratch, LSL, lsb));
323 orr(dst, dst, scratch);
325 bfi(dst, src, lsb, width, cond);
330 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
333 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
334 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
335 bic(dst, src, Operand(mask));
337 Move(dst, src, cond);
338 bfc(dst, lsb, width, cond);
343 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
345 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
346 ASSERT(!dst.is(pc) && !src.rm().is(pc));
347 ASSERT((satpos >= 0) && (satpos <= 31));
349 // These asserts are required to ensure compatibility with the ARMv7
351 ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
352 ASSERT(src.rs().is(no_reg));
355 int satval = (1 << satpos) - 1;
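// satval is the largest value representable in 'satpos' bits; the result is
// clamped to the range [0, satval] below.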
358 b(NegateCondition(cond), &done); // Skip saturate if !condition.
360 if (!(src.is_reg() && dst.is(src.rm()))) {
363 tst(dst, Operand(~satval));
365 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
366 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
369 usat(dst, satpos, src, cond);
374 void MacroAssembler::LoadRoot(Register destination,
375 Heap::RootListIndex index,
377 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
378 isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
379 !predictable_code_size()) {
380 // The CPU supports fast immediate values, and this root will never
381 // change. We will load it as a relocatable immediate value.
382 Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
383 mov(destination, Operand(root), LeaveCC, cond);
386 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
390 void MacroAssembler::StoreRoot(Register source,
391 Heap::RootListIndex index,
393 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
397 void MacroAssembler::LoadHeapObject(Register result,
398 Handle<HeapObject> object) {
399 AllowDeferredHandleDereference using_raw_address;
400 if (isolate()->heap()->InNewSpace(*object)) {
401 Handle<Cell> cell = isolate()->factory()->NewCell(object);
402 mov(result, Operand(cell));
403 ldr(result, FieldMemOperand(result, Cell::kValueOffset));
405 mov(result, Operand(object));
410 void MacroAssembler::InNewSpace(Register object,
414 ASSERT(cond == eq || cond == ne);
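// An object lies in new space iff (object & new_space_mask) == new_space_start,
// so mask the address and compare it against the space's start.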
415 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
416 cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
421 void MacroAssembler::RecordWriteField(
426 LinkRegisterStatus lr_status,
427 SaveFPRegsMode save_fp,
428 RememberedSetAction remembered_set_action,
429 SmiCheck smi_check) {
430 // First, check if a write barrier is even needed. The tests below
431 // catch stores of Smis.
434 // Skip barrier if writing a smi.
435 if (smi_check == INLINE_SMI_CHECK) {
436 JumpIfSmi(value, &done);
439 // Although the object register is tagged, the offset is relative to the start
440 // of the object, so the offset must be a multiple of kPointerSize.
441 ASSERT(IsAligned(offset, kPointerSize));
443 add(dst, object, Operand(offset - kHeapObjectTag));
444 if (emit_debug_code()) {
446 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
448 stop("Unaligned cell in write barrier");
457 remembered_set_action,
462 // Clobber clobbered input registers when running with the debug-code flag
463 // turned on to provoke errors.
464 if (emit_debug_code()) {
465 mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
466 mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
471 // Will clobber 4 registers: object, address, scratch, ip. The
472 // register 'object' contains a heap object pointer. The heap object
473 // tag is shifted away.
474 void MacroAssembler::RecordWrite(Register object,
477 LinkRegisterStatus lr_status,
478 SaveFPRegsMode fp_mode,
479 RememberedSetAction remembered_set_action,
480 SmiCheck smi_check) {
481 // The compiled code assumes that record write doesn't change the
482 // context register, so we check that none of the clobbered registers is cp.
484 ASSERT(!address.is(cp) && !value.is(cp));
486 if (emit_debug_code()) {
487 ldr(ip, MemOperand(address));
489 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
494 if (smi_check == INLINE_SMI_CHECK) {
495 JumpIfSmi(value, &done);
499 value, // Used as scratch.
500 MemoryChunk::kPointersToHereAreInterestingMask,
503 CheckPageFlag(object,
504 value, // Used as scratch.
505 MemoryChunk::kPointersFromHereAreInterestingMask,
509 // Record the actual write.
510 if (lr_status == kLRHasNotBeenSaved) {
513 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
515 if (lr_status == kLRHasNotBeenSaved) {
521 // Clobber clobbered registers when running with the debug-code flag
522 // turned on to provoke errors.
523 if (emit_debug_code()) {
524 mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
525 mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
530 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
533 SaveFPRegsMode fp_mode,
534 RememberedSetFinalAction and_then) {
536 if (emit_debug_code()) {
538 JumpIfNotInNewSpace(object, scratch, &ok);
539 stop("Remembered set pointer is in new space");
542 // Load store buffer top.
543 ExternalReference store_buffer =
544 ExternalReference::store_buffer_top(isolate());
545 mov(ip, Operand(store_buffer));
546 ldr(scratch, MemOperand(ip));
547 // Store pointer to buffer and increment buffer top.
548 str(address, MemOperand(scratch, kPointerSize, PostIndex));
549 // Write back new top of buffer.
550 str(scratch, MemOperand(ip));
551 // Call stub on end of buffer.
552 // Check for end of buffer.
553 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
554 if (and_then == kFallThroughAtEnd) {
557 ASSERT(and_then == kReturnAtEnd);
561 StoreBufferOverflowStub store_buffer_overflow =
562 StoreBufferOverflowStub(fp_mode);
563 CallStub(&store_buffer_overflow);
566 if (and_then == kReturnAtEnd) {
572 // Push and pop all registers that can hold pointers.
573 void MacroAssembler::PushSafepointRegisters() {
574 // Safepoints expect a block of contiguous register values starting with r0:
575 ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
576 // Safepoints expect a block of kNumSafepointRegisters values on the
577 // stack, so adjust the stack for unsaved registers.
578 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
579 ASSERT(num_unsaved >= 0);
580 sub(sp, sp, Operand(num_unsaved * kPointerSize));
581 stm(db_w, sp, kSafepointSavedRegisters);
585 void MacroAssembler::PopSafepointRegisters() {
586 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
587 ldm(ia_w, sp, kSafepointSavedRegisters);
588 add(sp, sp, Operand(num_unsaved * kPointerSize));
592 void MacroAssembler::PushSafepointRegistersAndDoubles() {
593 // Number of d-regs not known at snapshot time.
594 ASSERT(!Serializer::enabled());
595 PushSafepointRegisters();
596 sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
598 for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
599 vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
604 void MacroAssembler::PopSafepointRegistersAndDoubles() {
605 // Number of d-regs not known at snapshot time.
606 ASSERT(!Serializer::enabled());
607 for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
608 vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
610 add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
612 PopSafepointRegisters();
615 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
617 str(src, SafepointRegistersAndDoublesSlot(dst));
621 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
622 str(src, SafepointRegisterSlot(dst));
626 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
627 ldr(dst, SafepointRegisterSlot(src));
631 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
632 // The registers are pushed starting with the highest encoding,
633 // which means that lowest encodings are closest to the stack pointer.
634 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
639 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
640 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
644 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
645 // Number of d-regs not known at snapshot time.
646 ASSERT(!Serializer::enabled());
647 // General purpose registers are pushed last on the stack.
648 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
649 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
650 return MemOperand(sp, doubles_size + register_offset);
654 void MacroAssembler::Ldrd(Register dst1, Register dst2,
655 const MemOperand& src, Condition cond) {
656 ASSERT(src.rm().is(no_reg));
657 ASSERT(!dst1.is(lr)); // r14.
659 // V8 does not use this addressing mode, so the fallback code
660 // below doesn't support it yet.
661 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
663 // Generate two ldr instructions if ldrd is not available.
664 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
665 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
666 CpuFeatureScope scope(this, ARMv7);
667 ldrd(dst1, dst2, src, cond);
669 if ((src.am() == Offset) || (src.am() == NegOffset)) {
670 MemOperand src2(src);
671 src2.set_offset(src2.offset() + 4);
672 if (dst1.is(src.rn())) {
673 ldr(dst2, src2, cond);
674 ldr(dst1, src, cond);
676 ldr(dst1, src, cond);
677 ldr(dst2, src2, cond);
679 } else { // PostIndex or NegPostIndex.
680 ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
681 if (dst1.is(src.rn())) {
682 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
683 ldr(dst1, src, cond);
685 MemOperand src2(src);
686 src2.set_offset(src2.offset() - 4);
687 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
688 ldr(dst2, src2, cond);
695 void MacroAssembler::Strd(Register src1, Register src2,
696 const MemOperand& dst, Condition cond) {
697 ASSERT(dst.rm().is(no_reg));
698 ASSERT(!src1.is(lr)); // r14.
700 // V8 does not use this addressing mode, so the fallback code
701 // below doesn't support it yet.
702 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
704 // Generate two str instructions if strd is not available.
705 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
706 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
707 CpuFeatureScope scope(this, ARMv7);
708 strd(src1, src2, dst, cond);
710 MemOperand dst2(dst);
711 if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
712 dst2.set_offset(dst2.offset() + 4);
713 str(src1, dst, cond);
714 str(src2, dst2, cond);
715 } else { // PostIndex or NegPostIndex.
716 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
717 dst2.set_offset(dst2.offset() - 4);
718 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
719 str(src2, dst2, cond);
725 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
726 // If needed, restore wanted bits of FPSCR.
729 tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
731 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
736 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
737 const Condition cond) {
738 vsub(value, value, kDoubleRegZero, cond);
742 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
743 const DwVfpRegister src2,
744 const Condition cond) {
745 // Compare and move FPSCR flags to the normal condition flags.
746 VFPCompareAndLoadFlags(src1, src2, pc, cond);
749 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
751 const Condition cond) {
752 // Compare and move FPSCR flags to the normal condition flags.
753 VFPCompareAndLoadFlags(src1, src2, pc, cond);
757 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
758 const DwVfpRegister src2,
759 const Register fpscr_flags,
760 const Condition cond) {
761 // Compare and load FPSCR.
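// Passing pc as fpscr_flags selects APSR_nzcv in vmrs, copying the FPSCR
// condition flags straight into the CPSR.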
762 vcmp(src1, src2, cond);
763 vmrs(fpscr_flags, cond);
766 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
768 const Register fpscr_flags,
769 const Condition cond) {
770 // Compare and load FPSCR.
771 vcmp(src1, src2, cond);
772 vmrs(fpscr_flags, cond);
775 void MacroAssembler::Vmov(const DwVfpRegister dst,
777 const Register scratch) {
778 static const DoubleRepresentation minus_zero(-0.0);
779 static const DoubleRepresentation zero(0.0);
780 DoubleRepresentation value(imm);
781 // Handle special values first.
782 if (value.bits == zero.bits) {
783 vmov(dst, kDoubleRegZero);
784 } else if (value.bits == minus_zero.bits) {
785 vneg(dst, kDoubleRegZero);
787 vmov(dst, imm, scratch);
792 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
793 if (src.code() < 16) {
794 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
795 vmov(dst, loc.high());
797 vmov(dst, VmovIndexHi, src);
802 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
803 if (dst.code() < 16) {
804 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
805 vmov(loc.high(), src);
807 vmov(dst, VmovIndexHi, src);
812 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
813 if (src.code() < 16) {
814 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
815 vmov(dst, loc.low());
817 vmov(dst, VmovIndexLo, src);
822 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
823 if (dst.code() < 16) {
824 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
825 vmov(loc.low(), src);
827 vmov(dst, VmovIndexLo, src);
832 void MacroAssembler::ConvertNumberToInt32(Register object,
834 Register heap_number_map,
838 DwVfpRegister double_scratch1,
839 LowDwVfpRegister double_scratch2,
842 UntagAndJumpIfSmi(dst, object, &done);
843 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
844 vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset));
845 ECMAToInt32(dst, double_scratch1,
846 scratch1, scratch2, scratch3, double_scratch2);
852 void MacroAssembler::LoadNumber(Register object,
853 LowDwVfpRegister dst,
854 Register heap_number_map,
859 UntagAndJumpIfSmi(scratch, object, &is_smi);
860 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
862 vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
865 // Handle loading a double from a smi.
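// The untagged integer is moved into the high half of 'dst' only as a staging
// slot; vcvt_f64_s32 then converts it and writes the resulting double over the
// whole register.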
867 vmov(dst.high(), scratch);
868 vcvt_f64_s32(dst, dst.high());
874 void MacroAssembler::LoadNumberAsInt32Double(Register object,
875 DwVfpRegister double_dst,
876 Register heap_number_map,
878 LowDwVfpRegister double_scratch,
880 ASSERT(!scratch.is(object));
881 ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
883 Label done, obj_is_not_smi;
885 UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi);
886 vmov(double_scratch.low(), scratch);
887 vcvt_f64_s32(double_dst, double_scratch.low());
890 bind(&obj_is_not_smi);
891 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32);
894 // Load the double value.
895 vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
897 TestDoubleIsInt32(double_dst, double_scratch);
898 // Jump to not_int32 if the operation did not succeed.
905 void MacroAssembler::LoadNumberAsInt32(Register object,
907 Register heap_number_map,
909 DwVfpRegister double_scratch0,
910 LowDwVfpRegister double_scratch1,
912 ASSERT(!dst.is(object));
913 ASSERT(!scratch.is(object));
915 Label done, maybe_undefined;
917 UntagAndJumpIfSmi(dst, object, &done);
919 JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined);
921 // Object is a heap number.
922 // Convert the floating point value to a 32-bit integer.
923 // Load the double value.
924 vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
926 TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
927 // Jump to not_int32 if the operation did not succeed.
931 bind(&maybe_undefined);
932 CompareRoot(object, Heap::kUndefinedValueRootIndex);
934 // |undefined| is truncated to 0.
935 mov(dst, Operand(Smi::FromInt(0)));
942 void MacroAssembler::EnterFrame(StackFrame::Type type) {
944 stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
945 mov(ip, Operand(Smi::FromInt(type)));
947 mov(ip, Operand(CodeObject()));
949 add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
953 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
958 // Drop the execution stack down to the frame pointer and restore
959 // the caller frame pointer and return address.
961 ldm(ia_w, sp, fp.bit() | lr.bit());
965 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
966 // Set up the frame structure on the stack.
967 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
968 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
969 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
971 mov(fp, Operand(sp)); // Set up new frame pointer.
972 // Reserve room for saved entry sp and code object.
973 sub(sp, sp, Operand(2 * kPointerSize));
974 if (emit_debug_code()) {
975 mov(ip, Operand::Zero());
976 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
978 mov(ip, Operand(CodeObject()));
979 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
981 // Save the frame pointer and the context in top.
982 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
983 str(fp, MemOperand(ip));
984 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
985 str(cp, MemOperand(ip));
987 // Optionally save all double registers.
990 // Note that d0 will be accessible at
991 // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
992 // since the sp slot and code slot were pushed after the fp.
995 // Reserve room for the return address and the requested stack space, and
996 // align the frame in preparation for calling the runtime function.
997 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
998 sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
999 if (frame_alignment > 0) {
1000 ASSERT(IsPowerOf2(frame_alignment));
1001 and_(sp, sp, Operand(-frame_alignment));
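// ANDing with -frame_alignment (a power of two) rounds sp down to the
// required alignment.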
1004 // Set the exit frame sp value to point just before the return address
1006 add(ip, sp, Operand(kPointerSize));
1007 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1011 void MacroAssembler::InitializeNewString(Register string,
1013 Heap::RootListIndex map_index,
1015 Register scratch2) {
1016 SmiTag(scratch1, length);
1017 LoadRoot(scratch2, map_index);
1018 str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1019 mov(scratch1, Operand(String::kEmptyHashField));
1020 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1021 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
1025 int MacroAssembler::ActivationFrameAlignment() {
1026 #if V8_HOST_ARCH_ARM
1027 // Running on the real platform. Use the alignment as mandated by the local environment.
1029 // Note: This will break if we ever start generating snapshots on one ARM
1030 // platform for another ARM platform with a different alignment.
1031 return OS::ActivationFrameAlignment();
1032 #else // V8_HOST_ARCH_ARM
1033 // If we are using the simulator then we should always align to the expected
1034 // alignment. As the simulator is used to generate snapshots we do not know
1035 // if the target platform will need alignment, so this is controlled from a flag.
1037 return FLAG_sim_stack_alignment;
1038 #endif // V8_HOST_ARCH_ARM
1042 void MacroAssembler::LeaveExitFrame(bool save_doubles,
1043 Register argument_count) {
1044 // Optionally restore all double registers.
1046 // Calculate the stack location of the saved doubles and restore them.
1047 const int offset = 2 * kPointerSize;
1049 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1050 RestoreFPRegs(r3, ip);
1054 mov(r3, Operand::Zero());
1055 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1056 str(r3, MemOperand(ip));
1058 // Restore current context from top and clear it in debug mode.
1059 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1060 ldr(cp, MemOperand(ip));
1062 str(r3, MemOperand(ip));
1065 // Tear down the exit frame, pop the arguments, and return.
1066 mov(sp, Operand(fp));
1067 ldm(ia_w, sp, fp.bit() | lr.bit());
1068 if (argument_count.is_valid()) {
1069 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1074 void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
1075 if (use_eabi_hardfloat()) {
1083 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
1084 // This macro takes the dst register to make the code more readable
1085 // at the call sites. However, the dst register has to be r5 to
1086 // follow the calling convention which requires the call type to be placed in r5.
1089 if (call_kind == CALL_AS_FUNCTION) {
1090 mov(dst, Operand(Smi::FromInt(1)));
1092 mov(dst, Operand(Smi::FromInt(0)));
1097 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1098 const ParameterCount& actual,
1099 Handle<Code> code_constant,
1102 bool* definitely_mismatches,
1104 const CallWrapper& call_wrapper,
1105 CallKind call_kind) {
1106 bool definitely_matches = false;
1107 *definitely_mismatches = false;
1108 Label regular_invoke;
1110 // Check whether the expected and actual arguments count match. If not,
1111 // setup registers according to contract with ArgumentsAdaptorTrampoline:
1112 // r0: actual arguments count
1113 // r1: function (passed through to callee)
1114 // r2: expected arguments count
1115 // r3: callee code entry
1117 // The code below is made a lot easier because the calling code already sets
1118 // up actual and expected registers according to the contract if values are
1119 // passed in registers.
1120 ASSERT(actual.is_immediate() || actual.reg().is(r0));
1121 ASSERT(expected.is_immediate() || expected.reg().is(r2));
1122 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
1124 if (expected.is_immediate()) {
1125 ASSERT(actual.is_immediate());
1126 if (expected.immediate() == actual.immediate()) {
1127 definitely_matches = true;
1129 mov(r0, Operand(actual.immediate()));
1130 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1131 if (expected.immediate() == sentinel) {
1132 // Don't worry about adapting arguments for builtins that
1133 // don't want that done. Skip adaptation code by making it look
1134 // like we have a match between expected and actual number of arguments.
1136 definitely_matches = true;
1138 *definitely_mismatches = true;
1139 mov(r2, Operand(expected.immediate()));
1143 if (actual.is_immediate()) {
1144 cmp(expected.reg(), Operand(actual.immediate()));
1145 b(eq, &regular_invoke);
1146 mov(r0, Operand(actual.immediate()));
1148 cmp(expected.reg(), Operand(actual.reg()));
1149 b(eq, &regular_invoke);
1153 if (!definitely_matches) {
1154 if (!code_constant.is_null()) {
1155 mov(r3, Operand(code_constant));
1156 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1159 Handle<Code> adaptor =
1160 isolate()->builtins()->ArgumentsAdaptorTrampoline();
1161 if (flag == CALL_FUNCTION) {
1162 call_wrapper.BeforeCall(CallSize(adaptor));
1163 SetCallKind(r5, call_kind);
1165 call_wrapper.AfterCall();
1166 if (!*definitely_mismatches) {
1170 SetCallKind(r5, call_kind);
1171 Jump(adaptor, RelocInfo::CODE_TARGET);
1173 bind(&regular_invoke);
1178 void MacroAssembler::InvokeCode(Register code,
1179 const ParameterCount& expected,
1180 const ParameterCount& actual,
1182 const CallWrapper& call_wrapper,
1183 CallKind call_kind) {
1184 // You can't call a function without a valid frame.
1185 ASSERT(flag == JUMP_FUNCTION || has_frame());
1188 bool definitely_mismatches = false;
1189 InvokePrologue(expected, actual, Handle<Code>::null(), code,
1190 &done, &definitely_mismatches, flag,
1191 call_wrapper, call_kind);
1192 if (!definitely_mismatches) {
1193 if (flag == CALL_FUNCTION) {
1194 call_wrapper.BeforeCall(CallSize(code));
1195 SetCallKind(r5, call_kind);
1197 call_wrapper.AfterCall();
1199 ASSERT(flag == JUMP_FUNCTION);
1200 SetCallKind(r5, call_kind);
1204 // Continue here if InvokePrologue does handle the invocation due to
1205 // mismatched parameter counts.
1211 void MacroAssembler::InvokeCode(Handle<Code> code,
1212 const ParameterCount& expected,
1213 const ParameterCount& actual,
1214 RelocInfo::Mode rmode,
1216 CallKind call_kind) {
1217 // You can't call a function without a valid frame.
1218 ASSERT(flag == JUMP_FUNCTION || has_frame());
1221 bool definitely_mismatches = false;
1222 InvokePrologue(expected, actual, code, no_reg,
1223 &done, &definitely_mismatches, flag,
1224 NullCallWrapper(), call_kind);
1225 if (!definitely_mismatches) {
1226 if (flag == CALL_FUNCTION) {
1227 SetCallKind(r5, call_kind);
1230 SetCallKind(r5, call_kind);
1234 // Continue here if InvokePrologue does handle the invocation due to
1235 // mismatched parameter counts.
1241 void MacroAssembler::InvokeFunction(Register fun,
1242 const ParameterCount& actual,
1244 const CallWrapper& call_wrapper,
1245 CallKind call_kind) {
1246 // You can't call a function without a valid frame.
1247 ASSERT(flag == JUMP_FUNCTION || has_frame());
1249 // Contract with called JS functions requires that function is passed in r1.
1252 Register expected_reg = r2;
1253 Register code_reg = r3;
1255 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1256 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1258 FieldMemOperand(code_reg,
1259 SharedFunctionInfo::kFormalParameterCountOffset));
1260 SmiUntag(expected_reg);
1262 FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1264 ParameterCount expected(expected_reg);
1265 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
1269 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1270 const ParameterCount& expected,
1271 const ParameterCount& actual,
1273 const CallWrapper& call_wrapper,
1274 CallKind call_kind) {
1275 // You can't call a function without a valid frame.
1276 ASSERT(flag == JUMP_FUNCTION || has_frame());
1278 // Get the function and setup the context.
1279 LoadHeapObject(r1, function);
1280 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1282 // We call indirectly through the code field in the function to
1283 // allow recompilation to take effect without changing any of the call sites.
1285 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1286 InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
1290 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1294 ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1295 IsInstanceJSObjectType(map, scratch, fail);
1299 void MacroAssembler::IsInstanceJSObjectType(Register map,
1302 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1303 cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1305 cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1310 void MacroAssembler::IsObjectJSStringType(Register object,
1313 ASSERT(kNotStringTag != 0);
1315 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1316 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1317 tst(scratch, Operand(kIsNotStringMask));
1322 void MacroAssembler::IsObjectNameType(Register object,
1325 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1326 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1327 cmp(scratch, Operand(LAST_NAME_TYPE));
1332 #ifdef ENABLE_DEBUGGER_SUPPORT
1333 void MacroAssembler::DebugBreak() {
1334 mov(r0, Operand::Zero());
1335 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1337 ASSERT(AllowThisStubCall(&ces));
1338 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
1343 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1344 int handler_index) {
1345 // Adjust this code if not the case.
1346 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1347 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1348 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1349 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1350 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1351 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1353 // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
1354 // We will build up the handler from the bottom by pushing on the stack.
1355 // Set up the code object (r5) and the state (r6) for pushing.
1357 StackHandler::IndexField::encode(handler_index) |
1358 StackHandler::KindField::encode(kind);
1359 mov(r5, Operand(CodeObject()));
1360 mov(r6, Operand(state));
1362 // Push the frame pointer, context, state, and code object.
1363 if (kind == StackHandler::JS_ENTRY) {
1364 mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
1365 mov(ip, Operand::Zero()); // NULL frame pointer.
1366 stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
1368 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1371 // Link the current handler as the next handler.
1372 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1373 ldr(r5, MemOperand(r6));
1375 // Set this new handler as the current one.
1376 str(sp, MemOperand(r6));
1380 void MacroAssembler::PopTryHandler() {
1381 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1383 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1384 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1385 str(r1, MemOperand(ip));
1389 void MacroAssembler::JumpToHandlerEntry() {
1390 // Compute the handler entry address and jump to it. The handler table is
1391 // a fixed array of (smi-tagged) code offsets.
1392 // r0 = exception, r1 = code object, r2 = state.
1393 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
1394 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1395 mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
1396 ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
1397 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1398 add(pc, r1, Operand::SmiUntag(r2)); // Jump
1402 void MacroAssembler::Throw(Register value) {
1403 // Adjust this code if not the case.
1404 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1405 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1406 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1407 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1408 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1409 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1411 // The exception is expected in r0.
1412 if (!value.is(r0)) {
1415 // Drop the stack pointer to the top of the top handler.
1416 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1417 ldr(sp, MemOperand(r3));
1418 // Restore the next handler.
1420 str(r2, MemOperand(r3));
1422 // Get the code object (r1) and state (r2). Restore the context and frame
1424 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1426 // If the handler is a JS frame, restore the context to the frame.
1427 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp or cp.
1430 str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1432 JumpToHandlerEntry();
1436 void MacroAssembler::ThrowUncatchable(Register value) {
1437 // Adjust this code if not the case.
1438 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1439 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1440 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1441 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1442 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1443 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1445 // The exception is expected in r0.
1446 if (!value.is(r0)) {
1449 // Drop the stack pointer to the top of the top stack handler.
1450 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1451 ldr(sp, MemOperand(r3));
1453 // Unwind the handlers until the ENTRY handler is found.
1454 Label fetch_next, check_kind;
1457 ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1460 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1461 ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1462 tst(r2, Operand(StackHandler::KindField::kMask));
1465 // Set the top handler address to next handler past the top ENTRY handler.
1467 str(r2, MemOperand(r3));
1468 // Get the code object (r1) and state (r2). Clear the context and frame
1469 // pointer (0 was saved in the handler).
1470 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1472 JumpToHandlerEntry();
1476 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1479 Label same_contexts;
1481 ASSERT(!holder_reg.is(scratch));
1482 ASSERT(!holder_reg.is(ip));
1483 ASSERT(!scratch.is(ip));
1485 // Load current lexical context from the stack frame.
1486 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1487 // In debug mode, make sure the lexical context is set.
1489 cmp(scratch, Operand::Zero());
1490 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1493 // Load the native context of the current context.
1495 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1496 ldr(scratch, FieldMemOperand(scratch, offset));
1497 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1499 // Check the context is a native context.
1500 if (emit_debug_code()) {
1501 // Cannot use ip as a temporary in this verification code, because ip is
1502 // clobbered as part of cmp with an object Operand.
1503 push(holder_reg); // Temporarily save holder on the stack.
1504 // Read the first word and compare to the native_context_map.
1505 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1506 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1507 cmp(holder_reg, ip);
1508 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1509 pop(holder_reg); // Restore holder.
1512 // Check if both contexts are the same.
1513 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1514 cmp(scratch, Operand(ip));
1515 b(eq, &same_contexts);
1517 // Check the context is a native context.
1518 if (emit_debug_code()) {
1519 // Cannot use ip as a temporary in this verification code, because ip is
1520 // clobbered as part of cmp with an object Operand.
1521 push(holder_reg); // Temporarily save holder on the stack.
1522 mov(holder_reg, ip); // Move ip to its holding place.
1523 LoadRoot(ip, Heap::kNullValueRootIndex);
1524 cmp(holder_reg, ip);
1525 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1527 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1528 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1529 cmp(holder_reg, ip);
1530 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1531 // Restoring ip is not needed; it is reloaded below.
1532 pop(holder_reg); // Restore holder.
1533 // Restore ip to holder's context.
1534 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1537 // Check that the security token in the calling global object is
1538 // compatible with the security token in the receiving global object.
1540 int token_offset = Context::kHeaderSize +
1541 Context::SECURITY_TOKEN_INDEX * kPointerSize;
1543 ldr(scratch, FieldMemOperand(scratch, token_offset));
1544 ldr(ip, FieldMemOperand(ip, token_offset));
1545 cmp(scratch, Operand(ip));
1548 bind(&same_contexts);
1552 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1553 // First of all we assign the hash seed to scratch.
1554 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1557 // Xor original key with a seed.
1558 eor(t0, t0, Operand(scratch));
1560 // Compute the hash code from the untagged key. This must be kept in sync
1561 // with ComputeIntegerHash in utils.h.
1563 // hash = ~hash + (hash << 15);
1564 mvn(scratch, Operand(t0));
1565 add(t0, scratch, Operand(t0, LSL, 15));
1566 // hash = hash ^ (hash >> 12);
1567 eor(t0, t0, Operand(t0, LSR, 12));
1568 // hash = hash + (hash << 2);
1569 add(t0, t0, Operand(t0, LSL, 2));
1570 // hash = hash ^ (hash >> 4);
1571 eor(t0, t0, Operand(t0, LSR, 4));
1572 // hash = hash * 2057;
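// 2057 = 2048 + 8 + 1, so the multiplication is expressed as shifts and adds.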
1573 mov(scratch, Operand(t0, LSL, 11));
1574 add(t0, t0, Operand(t0, LSL, 3));
1575 add(t0, t0, scratch);
1576 // hash = hash ^ (hash >> 16);
1577 eor(t0, t0, Operand(t0, LSR, 16));
1581 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1590 // elements - holds the slow-case elements of the receiver on entry.
1591 // Unchanged unless 'result' is the same register.
1593 // key - holds the smi key on entry.
1594 // Unchanged unless 'result' is the same register.
1596 // result - holds the result on exit if the load succeeded.
1597 // Allowed to be the same as 'key' or 'result'.
1598 // Unchanged on bailout so 'key' or 'result' can be used
1599 // in further computation.
1601 // Scratch registers:
1603 // t0 - holds the untagged key on entry and holds the hash once computed.
1605 // t1 - used to hold the capacity mask of the dictionary
1607 // t2 - used for the index into the dictionary.
1610 GetNumberHash(t0, t1);
1612 // Compute the capacity mask.
1613 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1615 sub(t1, t1, Operand(1));
1617 // Generate an unrolled loop that performs a few probes before giving up.
1618 static const int kProbes = 4;
1619 for (int i = 0; i < kProbes; i++) {
1620 // Use t2 for index calculations and keep the hash intact in t0.
1622 // Compute the masked index: (hash + i + i * i) & mask.
1624 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1626 and_(t2, t2, Operand(t1));
1628 // Scale the index by multiplying by the element size.
1629 ASSERT(SeededNumberDictionary::kEntrySize == 3);
1630 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
1632 // Check if the key is identical to the name.
1633 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1634 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1635 cmp(key, Operand(ip));
1636 if (i != kProbes - 1) {
1644 // Check that the value is a normal property.
1645 // t2: elements + (index * kPointerSize)
1646 const int kDetailsOffset =
1647 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1648 ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1649 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1652 // Get the value at the masked, scaled index and return.
1653 const int kValueOffset =
1654 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1655 ldr(result, FieldMemOperand(t2, kValueOffset));
1659 void MacroAssembler::Allocate(int object_size,
1664 AllocationFlags flags) {
1665 ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
1666 if (!FLAG_inline_new) {
1667 if (emit_debug_code()) {
1668 // Trash the registers to simulate an allocation failure.
1669 mov(result, Operand(0x7091));
1670 mov(scratch1, Operand(0x7191));
1671 mov(scratch2, Operand(0x7291));
1677 ASSERT(!result.is(scratch1));
1678 ASSERT(!result.is(scratch2));
1679 ASSERT(!scratch1.is(scratch2));
1680 ASSERT(!scratch1.is(ip));
1681 ASSERT(!scratch2.is(ip));
1683 // Make object size into bytes.
1684 if ((flags & SIZE_IN_WORDS) != 0) {
1685 object_size *= kPointerSize;
1687 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1689 // Check relative positions of allocation top and limit addresses.
1690 // The values must be adjacent in memory to allow the use of LDM.
1691 // Also, assert that the registers are numbered such that the values
1692 // are loaded in the correct order.
1693 ExternalReference allocation_top =
1694 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1695 ExternalReference allocation_limit =
1696 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1699 reinterpret_cast<intptr_t>(allocation_top.address());
1701 reinterpret_cast<intptr_t>(allocation_limit.address());
1702 ASSERT((limit - top) == kPointerSize);
1703 ASSERT(result.code() < ip.code());
1705 // Set up allocation top address and object size registers.
1706 Register topaddr = scratch1;
1707 Register obj_size_reg = scratch2;
1708 mov(topaddr, Operand(allocation_top));
1709 Operand obj_size_operand = Operand(object_size);
1710 if (!obj_size_operand.is_single_instruction(this)) {
1711 // We are about to steal IP, so we need to load this value first
1712 mov(obj_size_reg, obj_size_operand);
1715 // This code stores a temporary value in ip. This is OK, as the code below
1716 // does not need ip for implicit literal generation.
1717 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1718 // Load allocation top into result and allocation limit into ip.
1719 ldm(ia, topaddr, result.bit() | ip.bit());
1721 if (emit_debug_code()) {
1722 // Assert that result actually contains top on entry. ip is used
1723 // immediately below so this use of ip does not cause difference with
1724 // respect to register content between debug and release mode.
1725 ldr(ip, MemOperand(topaddr));
1727 Check(eq, kUnexpectedAllocationTop);
1729 // Load allocation limit into ip. Result already contains allocation top.
1730 ldr(ip, MemOperand(topaddr, limit - top));
1733 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1734 // Align the next allocation. Storing the filler map without checking top is
1735 // always safe because the limit of the heap is always aligned.
1736 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1737 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1738 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
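// When the current top is only word aligned, a one-pointer filler is stored
// below so that the object itself starts on a double-word boundary.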
1741 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1742 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1746 // Calculate new top and bail out if new space is exhausted. Use result
1747 // to calculate the new top.
1748 if (obj_size_operand.is_single_instruction(this)) {
1749 // We can add the size as an immediate
1750 add(scratch2, result, obj_size_operand, SetCC);
1752 // Doesn't fit in an immediate, we have to use the register
1753 add(scratch2, result, obj_size_reg, SetCC);
1756 cmp(scratch2, Operand(ip));
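// ip still holds the allocation limit loaded above; the allocation fails if
// the new top would exceed it.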
1758 str(scratch2, MemOperand(topaddr));
1760 // Tag object if requested.
1761 if ((flags & TAG_OBJECT) != 0) {
1762 add(result, result, Operand(kHeapObjectTag));
1767 void MacroAssembler::Allocate(Register object_size,
1772 AllocationFlags flags) {
1773 if (!FLAG_inline_new) {
1774 if (emit_debug_code()) {
1775 // Trash the registers to simulate an allocation failure.
1776 mov(result, Operand(0x7091));
1777 mov(scratch1, Operand(0x7191));
1778 mov(scratch2, Operand(0x7291));
1784 // Assert that the register arguments are different and that none of
1785 // them are ip. ip is used explicitly in the code generated below.
1786 ASSERT(!result.is(scratch1));
1787 ASSERT(!result.is(scratch2));
1788 ASSERT(!scratch1.is(scratch2));
1789 ASSERT(!object_size.is(ip));
1790 ASSERT(!result.is(ip));
1791 ASSERT(!scratch1.is(ip));
1792 ASSERT(!scratch2.is(ip));
1794 // Check relative positions of allocation top and limit addresses.
1795 // The values must be adjacent in memory to allow the use of LDM.
1796 // Also, assert that the registers are numbered such that the values
1797 // are loaded in the correct order.
1798 ExternalReference allocation_top =
1799 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1800 ExternalReference allocation_limit =
1801 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1803 reinterpret_cast<intptr_t>(allocation_top.address());
1805 reinterpret_cast<intptr_t>(allocation_limit.address());
1806 ASSERT((limit - top) == kPointerSize);
1807 ASSERT(result.code() < ip.code());
1809 // Set up allocation top address.
1810 Register topaddr = scratch1;
1811 mov(topaddr, Operand(allocation_top));
1813 // This code stores a temporary value in ip. This is OK, as the code below
1814 // does not need ip for implicit literal generation.
1815 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1816 // Load allocation top into result and allocation limit into ip.
1817 ldm(ia, topaddr, result.bit() | ip.bit());
1819 if (emit_debug_code()) {
1820 // Assert that result actually contains top on entry. ip is used
1821 // immediately below so this use of ip does not cause difference with
1822 // respect to register content between debug and release mode.
1823 ldr(ip, MemOperand(topaddr));
1825 Check(eq, kUnexpectedAllocationTop);
1827 // Load allocation limit into ip. Result already contains allocation top.
1828 ldr(ip, MemOperand(topaddr, limit - top));
1831 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1832 // Align the next allocation. Storing the filler map without checking top is
1833 // always safe because the limit of the heap is always aligned.
1834 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1835 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1836 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1839 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1840 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1844 // Calculate new top and bail out if new space is exhausted. Use result
1845 // to calculate the new top. Object size may be in words so a shift is
1846 // required to get the number of bytes.
1847 if ((flags & SIZE_IN_WORDS) != 0) {
1848 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1850 add(scratch2, result, Operand(object_size), SetCC);
1853 cmp(scratch2, Operand(ip));
1856 // Update allocation top. result temporarily holds the new top.
1857 if (emit_debug_code()) {
1858 tst(scratch2, Operand(kObjectAlignmentMask));
1859 Check(eq, kUnalignedAllocationInNewSpace);
1861 str(scratch2, MemOperand(topaddr));
1863 // Tag object if requested.
1864 if ((flags & TAG_OBJECT) != 0) {
1865 add(result, result, Operand(kHeapObjectTag));
1870 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1872 ExternalReference new_space_allocation_top =
1873 ExternalReference::new_space_allocation_top_address(isolate());
1875 // Make sure the object has no tag before resetting top.
1876 and_(object, object, Operand(~kHeapObjectTagMask));
1878 // Check that the object un-allocated is below the current top.
1879 mov(scratch, Operand(new_space_allocation_top));
1880 ldr(scratch, MemOperand(scratch));
1881 cmp(object, scratch);
1882 Check(lt, kUndoAllocationOfNonAllocatedMemory);
1884 // Write the address of the object to un-allocate as the current top.
1885 mov(scratch, Operand(new_space_allocation_top));
1886 str(object, MemOperand(scratch));
1890 void MacroAssembler::AllocateTwoByteString(Register result,
1895 Label* gc_required) {
1896 // Calculate the number of bytes needed for the characters in the string while
1897 // observing object alignment.
1898 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1899 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1900 add(scratch1, scratch1,
1901 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1902 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
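// scratch1 now holds length * 2 + SeqTwoByteString::kHeaderSize rounded up to
// the next object-alignment boundary.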
1904 // Allocate two-byte string in new space.
1912 // Set the map, length and hash field.
1913 InitializeNewString(result,
1915 Heap::kStringMapRootIndex,
1921 void MacroAssembler::AllocateAsciiString(Register result,
1926 Label* gc_required) {
1927 // Calculate the number of bytes needed for the characters in the string while
1928 // observing object alignment.
1929 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1930 ASSERT(kCharSize == 1);
1931 add(scratch1, length,
1932 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1933 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1935 // Allocate ASCII string in new space.
1943 // Set the map, length and hash field.
1944 InitializeNewString(result,
1946 Heap::kAsciiStringMapRootIndex,
1952 void MacroAssembler::AllocateTwoByteConsString(Register result,
1956 Label* gc_required) {
1957 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1960 InitializeNewString(result,
1962 Heap::kConsStringMapRootIndex,
1968 void MacroAssembler::AllocateAsciiConsString(Register result,
1972 Label* gc_required) {
1973 Label allocate_new_space, install_map;
1974 AllocationFlags flags = TAG_OBJECT;
1976 ExternalReference high_promotion_mode = ExternalReference::
1977 new_space_high_promotion_mode_active_address(isolate());
1978 mov(scratch1, Operand(high_promotion_mode));
1979 ldr(scratch1, MemOperand(scratch1, 0));
1980 cmp(scratch1, Operand::Zero());
1981 b(eq, &allocate_new_space);
1983 Allocate(ConsString::kSize,
1988 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
1992 bind(&allocate_new_space);
1993 Allocate(ConsString::kSize,
2002 InitializeNewString(result,
2004 Heap::kConsAsciiStringMapRootIndex,
2010 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2014 Label* gc_required) {
2015 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2018 InitializeNewString(result,
2020 Heap::kSlicedStringMapRootIndex,
2026 void MacroAssembler::AllocateAsciiSlicedString(Register result,
2030 Label* gc_required) {
2031 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2034 InitializeNewString(result,
2036 Heap::kSlicedAsciiStringMapRootIndex,
2042 void MacroAssembler::CompareObjectType(Register object,
2045 InstanceType type) {
2046 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2047 CompareInstanceType(map, type_reg, type);
2051 void MacroAssembler::CompareInstanceType(Register map,
2053 InstanceType type) {
2054 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2055 cmp(type_reg, Operand(type));
2059 void MacroAssembler::CompareRoot(Register obj,
2060 Heap::RootListIndex index) {
2061 ASSERT(!obj.is(ip));
2062 LoadRoot(ip, index);
2067 void MacroAssembler::CheckFastElements(Register map,
2070 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2071 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2072 STATIC_ASSERT(FAST_ELEMENTS == 2);
2073 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2074 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2075 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
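// Because the fast elements kinds occupy the consecutive values 0..3 (see the
// STATIC_ASSERTs above), a single unsigned comparison of the bit field 2 byte
// against the maximum fast-holey value is enough to test "any fast kind".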
2080 void MacroAssembler::CheckFastObjectElements(Register map,
2083 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2084 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2085 STATIC_ASSERT(FAST_ELEMENTS == 2);
2086 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2087 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2088 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2090 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2095 void MacroAssembler::CheckFastSmiElements(Register map,
2098 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2099 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2100 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2101 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2106 void MacroAssembler::StoreNumberToDoubleElements(
2109 Register elements_reg,
2111 LowDwVfpRegister double_scratch,
2113 int elements_offset) {
2114 Label smi_value, store;
2116 // Handle smi values specially.
2117 JumpIfSmi(value_reg, &smi_value);
2119 // Ensure that the object is a heap number
2122 isolate()->factory()->heap_number_map(),
2126 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2127 // Force a canonical NaN.
2128 if (emit_debug_code()) {
2130 tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2131 Assert(ne, kDefaultNaNModeNotSet);
2133 VFPCanonicalizeNaN(double_scratch);
2137 SmiToDouble(double_scratch, value_reg);
2140 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2141 vstr(double_scratch,
2142 FieldMemOperand(scratch1,
2143 FixedDoubleArray::kHeaderSize - elements_offset));
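// Canonicalizing NaNs before the store matters because FixedDoubleArray
// reserves one particular NaN bit pattern to represent the hole; funnelling
// every other NaN payload to the canonical quiet NaN keeps real values from
// ever being mistaken for holes.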
2147 void MacroAssembler::CompareMap(Register obj,
2150 Label* early_success) {
2151 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2152 CompareMap(scratch, map, early_success);
2156 void MacroAssembler::CompareMap(Register obj_map,
2158 Label* early_success) {
2159 cmp(obj_map, Operand(map));
2163 void MacroAssembler::CheckMap(Register obj,
2167 SmiCheckType smi_check_type) {
2168 if (smi_check_type == DO_SMI_CHECK) {
2169 JumpIfSmi(obj, fail);
2173 CompareMap(obj, scratch, map, &success);
2179 void MacroAssembler::CheckMap(Register obj,
2181 Heap::RootListIndex index,
2183 SmiCheckType smi_check_type) {
2184 if (smi_check_type == DO_SMI_CHECK) {
2185 JumpIfSmi(obj, fail);
2187 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2188 LoadRoot(ip, index);
2194 void MacroAssembler::DispatchMap(Register obj,
2197 Handle<Code> success,
2198 SmiCheckType smi_check_type) {
2200 if (smi_check_type == DO_SMI_CHECK) {
2201 JumpIfSmi(obj, &fail);
2203 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2204 mov(ip, Operand(map));
2206 Jump(success, RelocInfo::CODE_TARGET, eq);
2211 void MacroAssembler::TryGetFunctionPrototype(Register function,
2215 bool miss_on_bound_function) {
2216 // Check that the receiver isn't a smi.
2217 JumpIfSmi(function, miss);
2219 // Check that the function really is a function. Load map into result reg.
2220 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2223 if (miss_on_bound_function) {
2225 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2227 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2229 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2233 // Make sure that the function has an instance prototype.
2235 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2236 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2237 b(ne, &non_instance);
2239 // Get the prototype or initial map from the function.
2241 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2243 // If the prototype or initial map is the hole, don't return it and
2244 // simply miss the cache instead. This will allow us to allocate a
2245 // prototype object on-demand in the runtime system.
2246 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2250 // If the function does not have an initial map, we're done.
2252 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2255 // Get the prototype from the initial map.
2256 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2259 // Non-instance prototype: Fetch prototype from constructor field
2261 bind(&non_instance);
2262 ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2269 void MacroAssembler::CallStub(CodeStub* stub,
2270 TypeFeedbackId ast_id,
2272 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2273 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
2277 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2278 ASSERT(allow_stub_calls_ ||
2279 stub->CompilingCallsToThisStubIsGCSafe(isolate()));
2280 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
2284 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2285 return ref0.address() - ref1.address();
2289 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
2290 Address function_address,
2291 ExternalReference thunk_ref,
2292 Register thunk_last_arg,
2294 bool returns_handle,
2295 int return_value_offset) {
2296 ExternalReference next_address =
2297 ExternalReference::handle_scope_next_address(isolate());
2298 const int kNextOffset = 0;
2299 const int kLimitOffset = AddressOffset(
2300 ExternalReference::handle_scope_limit_address(isolate()),
2302 const int kLevelOffset = AddressOffset(
2303 ExternalReference::handle_scope_level_address(isolate()),
2306 // Allocate HandleScope in callee-save registers.
2307 mov(r7, Operand(next_address));
2308 ldr(r4, MemOperand(r7, kNextOffset));
2309 ldr(r5, MemOperand(r7, kLimitOffset));
2310 ldr(r6, MemOperand(r7, kLevelOffset));
2311 add(r6, r6, Operand(1));
2312 str(r6, MemOperand(r7, kLevelOffset));
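// At this point r7 holds the base address of the handle scope data, r4 caches
// the saved "next" field, r5 the saved "limit" field, and r6 the incremented
// nesting level, so the scope can be unwound after the API call returns.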
2314 if (FLAG_log_timer_events) {
2315 FrameScope frame(this, StackFrame::MANUAL);
2316 PushSafepointRegisters();
2317 PrepareCallCFunction(1, r0);
2318 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2319 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2320 PopSafepointRegisters();
2323 ASSERT(!thunk_last_arg.is(r3));
2324 Label profiler_disabled;
2325 Label end_profiler_check;
2326 bool* is_profiling_flag =
2327 isolate()->cpu_profiler()->is_profiling_address();
2328 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
2329 mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
2330 ldrb(r3, MemOperand(r3, 0));
2331 cmp(r3, Operand(0));
2332 b(eq, &profiler_disabled);
2334 // Additional parameter is the address of the actual callback.
2335 mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
2336 mov(r3, Operand(thunk_ref));
2337 jmp(&end_profiler_check);
2339 bind(&profiler_disabled);
2340 mov(r3, Operand(function));
2341 bind(&end_profiler_check);
2343 // Native call returns to the DirectCEntry stub which redirects to the
2344 // return address pushed on stack (could have moved after GC).
2345 // DirectCEntry stub itself is generated early and never moves.
2346 DirectCEntryStub stub;
2347 stub.GenerateCall(this, r3);
2349 if (FLAG_log_timer_events) {
2350 FrameScope frame(this, StackFrame::MANUAL);
2351 PushSafepointRegisters();
2352 PrepareCallCFunction(1, r0);
2353 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2354 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2355 PopSafepointRegisters();
2358 Label promote_scheduled_exception;
2359 Label delete_allocated_handles;
2360 Label leave_exit_frame;
2361 Label return_value_loaded;
2363 if (returns_handle) {
2364 Label load_return_value;
2365 cmp(r0, Operand::Zero());
2366 b(eq, &load_return_value);
2367 // Dereference returned value.
2368 ldr(r0, MemOperand(r0));
2369 b(&return_value_loaded);
2370 bind(&load_return_value);
2372 // Load value from ReturnValue.
2373 ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
2374 bind(&return_value_loaded);
2375 // No more valid handles (the result handle was the last one). Restore
2376 // previous handle scope.
2377 str(r4, MemOperand(r7, kNextOffset));
2378 if (emit_debug_code()) {
2379 ldr(r1, MemOperand(r7, kLevelOffset));
2381 Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2383 sub(r6, r6, Operand(1));
2384 str(r6, MemOperand(r7, kLevelOffset));
2385 ldr(ip, MemOperand(r7, kLimitOffset));
2387 b(ne, &delete_allocated_handles);
2389 // Check if the function scheduled an exception.
2390 bind(&leave_exit_frame);
2391 LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2392 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2393 ldr(r5, MemOperand(ip));
2395 b(ne, &promote_scheduled_exception);
2397 // LeaveExitFrame expects unwind space to be in a register.
2398 mov(r4, Operand(stack_space));
2399 LeaveExitFrame(false, r4);
2402 bind(&promote_scheduled_exception);
2403 TailCallExternalReference(
2404 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2408 // HandleScope limit has changed. Delete allocated extensions.
2409 bind(&delete_allocated_handles);
2410 str(r5, MemOperand(r7, kLimitOffset));
2412 PrepareCallCFunction(1, r5);
2413 mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2415 ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2417 jmp(&leave_exit_frame);
2421 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2422 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
2423 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
2427 void MacroAssembler::IllegalOperation(int num_arguments) {
2428 if (num_arguments > 0) {
2429 add(sp, sp, Operand(num_arguments * kPointerSize));
2431 LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2435 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2436 // If the hash field contains an array index pick it out. The assert checks
2437 // that the constants for the maximum number of digits for an array index
2438 // cached in the hash field and the number of bits reserved for it do not
2439 // conflict.
2440 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2441 (1 << String::kArrayIndexValueBits));
2442 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
2443 // the low kHashShift bits.
2444 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2445 SmiTag(index, hash);
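// Example: for a hash field caching the array index 7, the Ubfx above yields
// 7 and SmiTag shifts it left by kSmiTagSize (1), so index receives the smi
// whose raw bits are 14.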
2449 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2450 if (CpuFeatures::IsSupported(VFP3)) {
2451 vmov(value.low(), smi);
2452 vcvt_f64_s32(value, 1);
2455 vmov(value.low(), ip);
2456 vcvt_f64_s32(value, value.low());
2461 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2462 LowDwVfpRegister double_scratch) {
2463 ASSERT(!double_input.is(double_scratch));
2464 vcvt_s32_f64(double_scratch.low(), double_input);
2465 vcvt_f64_s32(double_scratch, double_scratch.low());
2466 VFPCompareAndSetFlags(double_input, double_scratch);
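// The double survives the vcvt_s32_f64/vcvt_f64_s32 round trip unchanged only
// if it is already an exact int32, so callers branch on eq: 3.0 compares equal
// to itself afterwards, while 3.5 or 2^31 (which saturates to 0x7fffffff) do
// not.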
2470 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2471 DwVfpRegister double_input,
2472 LowDwVfpRegister double_scratch) {
2473 ASSERT(!double_input.is(double_scratch));
2474 vcvt_s32_f64(double_scratch.low(), double_input);
2475 vmov(result, double_scratch.low());
2476 vcvt_f64_s32(double_scratch, double_scratch.low());
2477 VFPCompareAndSetFlags(double_input, double_scratch);
2481 void MacroAssembler::TryInt32Floor(Register result,
2482 DwVfpRegister double_input,
2483 Register input_high,
2484 LowDwVfpRegister double_scratch,
2487 ASSERT(!result.is(input_high));
2488 ASSERT(!double_input.is(double_scratch));
2489 Label negative, exception;
2491 VmovHigh(input_high, double_input);
2493 // Test for NaN and infinities.
2494 Sbfx(result, input_high,
2495 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2496 cmp(result, Operand(-1));
2498 // Test for values that can be exactly represented as a
2499 // signed 32-bit integer.
2500 TryDoubleToInt32Exact(result, double_input, double_scratch);
2501 // If exact, return (result already fetched).
2503 cmp(input_high, Operand::Zero());
2506 // Input is in ]+0, +inf[.
2507 // If result equals 0x7fffffff, the input was out of range or
2508 // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2509 // could fit into an int32; that means we always treat the input as
2510 // out of range and always branch to the exception label.
2511 // If result < 0x7fffffff, go to done, result fetched.
2512 cmn(result, Operand(1));
2516 // Input is in ]-inf, -0[.
2517 // If x is a non integer negative number,
2518 // floor(x) <=> round_to_zero(x) - 1.
2520 sub(result, result, Operand(1), SetCC);
2521 // If result is still negative, go to done, result fetched.
2522 // Else, we had an overflow and we fall through exception.
2528 void MacroAssembler::ECMAToInt32(Register result,
2529 DwVfpRegister double_input,
2531 Register scratch_high,
2532 Register scratch_low,
2533 LowDwVfpRegister double_scratch) {
2534 ASSERT(!scratch_high.is(result));
2535 ASSERT(!scratch_low.is(result));
2536 ASSERT(!scratch_low.is(scratch_high));
2537 ASSERT(!scratch.is(result) &&
2538 !scratch.is(scratch_high) &&
2539 !scratch.is(scratch_low));
2540 ASSERT(!double_input.is(double_scratch));
2542 Label out_of_range, only_low, negate, done;
2544 vcvt_s32_f64(double_scratch.low(), double_input);
2545 vmov(result, double_scratch.low());
2547 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2548 sub(scratch, result, Operand(1));
2549 cmp(scratch, Operand(0x7ffffffe));
2552 vmov(scratch_low, scratch_high, double_input);
2553 Ubfx(scratch, scratch_high,
2554 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2555 // Load scratch with exponent - 1. This is faster than loading
2556 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
2557 sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
2558 // If the exponent is greater than or equal to 84, the 32 least significant
2559 // bits are 0s (all 52 mantissa bits plus the implicit 1 sit at or above bit 32),
2561 // Compare exponent with 84 (compare exponent - 1 with 83).
2562 cmp(scratch, Operand(83));
2563 b(ge, &out_of_range);
2565 // If we reach this code, 31 <= exponent <= 83.
2566 // So, we don't have to handle cases where 0 <= exponent <= 20 for
2567 // which we would need to shift right the high part of the mantissa.
2568 // Scratch contains exponent - 1.
2569 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
2570 rsb(scratch, scratch, Operand(51), SetCC);
2572 // 21 <= exponent <= 51, shift scratch_low and scratch_high
2573 // to generate the result.
2574 mov(scratch_low, Operand(scratch_low, LSR, scratch));
2575 // Scratch contains: 52 - exponent.
2576 // We need: exponent - 20.
2577 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
2578 rsb(scratch, scratch, Operand(32));
2579 Ubfx(result, scratch_high,
2580 0, HeapNumber::kMantissaBitsInTopWord);
2581 // Set the implicit 1 (bit 20 of result) above the mantissa bits extracted from scratch_high.
2582 orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2583 orr(result, scratch_low, Operand(result, LSL, scratch));
2586 bind(&out_of_range);
2587 mov(result, Operand::Zero());
2591 // 52 <= exponent <= 83, shift only scratch_low.
2592 // On entry, scratch contains: 52 - exponent.
2593 rsb(scratch, scratch, Operand::Zero());
2594 mov(result, Operand(scratch_low, LSL, scratch));
2597 // If input was positive, scratch_high ASR 31 equals 0 and
2598 // scratch_high LSR 31 equals zero.
2599 // New result = (result eor 0) + 0 = result.
2600 // If the input was negative, we have to negate the result.
2601 // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
2602 // New result = (result eor 0xffffffff) + 1 = 0 - result.
2603 eor(result, result, Operand(scratch_high, ASR, 31));
2604 add(result, result, Operand(scratch_high, LSR, 31));
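// Worked example: ToInt32(2^32 + 5) must yield 5. The exponent is 32, so
// scratch ends up as 52 - 32 == 20; the low mantissa word 0x00500000 shifted
// right by 20 gives 5, while the implicit 1 and the top mantissa bits are
// shifted left by 32 - 20 == 12 and fall off the 32-bit result, and the
// eor/add sign fix-up leaves exactly 5 (truncation modulo 2^32).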
2610 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2612 int num_least_bits) {
2613 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2614 ubfx(dst, src, kSmiTagSize, num_least_bits);
2617 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2622 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2624 int num_least_bits) {
2625 and_(dst, src, Operand((1 << num_least_bits) - 1));
2629 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2630 int num_arguments) {
2631 // All parameters are on the stack. r0 has the return value after call.
2633 // If the expected number of arguments of the runtime function is
2634 // constant, we check that the actual number of arguments matches the
2636 if (f->nargs >= 0 && f->nargs != num_arguments) {
2637 IllegalOperation(num_arguments);
2641 // TODO(1236192): Most runtime routines don't need the number of
2642 // arguments passed in because it is constant. At some point we
2643 // should remove this need and make the runtime routine entry code
2645 mov(r0, Operand(num_arguments));
2646 mov(r1, Operand(ExternalReference(f, isolate())));
2652 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
2653 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
2657 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
2658 const Runtime::Function* function = Runtime::FunctionForId(id);
2659 mov(r0, Operand(function->nargs));
2660 mov(r1, Operand(ExternalReference(function, isolate())));
2661 CEntryStub stub(1, kSaveFPRegs);
2666 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2667 int num_arguments) {
2668 mov(r0, Operand(num_arguments));
2669 mov(r1, Operand(ext));
2676 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2679 // TODO(1236192): Most runtime routines don't need the number of
2680 // arguments passed in because it is constant. At some point we
2681 // should remove this need and make the runtime routine entry code
2683 mov(r0, Operand(num_arguments));
2684 JumpToExternalReference(ext);
2688 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2691 TailCallExternalReference(ExternalReference(fid, isolate()),
2697 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2698 #if defined(__thumb__)
2699 // Thumb mode builtin.
2700 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2702 mov(r1, Operand(builtin));
2704 Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
2708 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2710 const CallWrapper& call_wrapper) {
2711 // You can't call a builtin without a valid frame.
2712 ASSERT(flag == JUMP_FUNCTION || has_frame());
2714 GetBuiltinEntry(r2, id);
2715 if (flag == CALL_FUNCTION) {
2716 call_wrapper.BeforeCall(CallSize(r2));
2717 SetCallKind(r5, CALL_AS_METHOD);
2719 call_wrapper.AfterCall();
2721 ASSERT(flag == JUMP_FUNCTION);
2722 SetCallKind(r5, CALL_AS_METHOD);
2728 void MacroAssembler::GetBuiltinFunction(Register target,
2729 Builtins::JavaScript id) {
2730 // Load the builtins object into target register.
2732 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2733 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2734 // Load the JavaScript builtin function from the builtins object.
2735 ldr(target, FieldMemOperand(target,
2736 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2740 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2741 ASSERT(!target.is(r1));
2742 GetBuiltinFunction(r1, id);
2743 // Load the code entry point from the builtins object.
2744 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2748 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2749 Register scratch1, Register scratch2) {
2750 if (FLAG_native_code_counters && counter->Enabled()) {
2751 mov(scratch1, Operand(value));
2752 mov(scratch2, Operand(ExternalReference(counter)));
2753 str(scratch1, MemOperand(scratch2));
2758 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2759 Register scratch1, Register scratch2) {
2761 if (FLAG_native_code_counters && counter->Enabled()) {
2762 mov(scratch2, Operand(ExternalReference(counter)));
2763 ldr(scratch1, MemOperand(scratch2));
2764 add(scratch1, scratch1, Operand(value));
2765 str(scratch1, MemOperand(scratch2));
2770 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2771 Register scratch1, Register scratch2) {
2773 if (FLAG_native_code_counters && counter->Enabled()) {
2774 mov(scratch2, Operand(ExternalReference(counter)));
2775 ldr(scratch1, MemOperand(scratch2));
2776 sub(scratch1, scratch1, Operand(value));
2777 str(scratch1, MemOperand(scratch2));
2782 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2783 if (emit_debug_code())
2784 Check(cond, reason);
2788 void MacroAssembler::AssertFastElements(Register elements) {
2789 if (emit_debug_code()) {
2790 ASSERT(!elements.is(ip));
2793 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2794 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2797 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2800 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2803 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2810 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2814 // will not return here
2819 void MacroAssembler::Abort(BailoutReason reason) {
2822 // We want to pass the msg string like a smi to avoid GC
2823 // problems; however, msg is not guaranteed to be aligned
2824 // properly. Instead, we pass an aligned pointer that is
2825 // a proper v8 smi, but also pass the alignment difference
2826 // from the real pointer as a smi.
2827 const char* msg = GetBailoutReason(reason);
2828 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2829 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2830 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2833 RecordComment("Abort message: ");
2838 mov(r0, Operand(p0));
2840 mov(r0, Operand(Smi::FromInt(p1 - p0)));
2842 // Disable stub call restrictions to always allow calls to abort.
2844 // We don't actually want to generate a pile of code for this, so just
2845 // claim there is a stack frame, without generating one.
2846 FrameScope scope(this, StackFrame::NONE);
2847 CallRuntime(Runtime::kAbort, 2);
2849 CallRuntime(Runtime::kAbort, 2);
2851 // will not return here
2852 if (is_const_pool_blocked()) {
2853 // If the calling code cares about the exact number of
2854 // instructions generated, we insert padding here to keep the size
2855 // of the Abort macro constant.
2856 static const int kExpectedAbortInstructions = 10;
2857 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2858 ASSERT(abort_instructions <= kExpectedAbortInstructions);
2859 while (abort_instructions++ < kExpectedAbortInstructions) {
2866 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2867 if (context_chain_length > 0) {
2868 // Move up the chain of contexts to the context containing the slot.
2869 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2870 for (int i = 1; i < context_chain_length; i++) {
2871 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2874 // Slot is in the current function context. Move it into the
2875 // destination register in case we store into it (the write barrier
2876 // cannot be allowed to destroy the context in cp).
2882 void MacroAssembler::LoadTransitionedArrayMapConditional(
2883 ElementsKind expected_kind,
2884 ElementsKind transitioned_kind,
2885 Register map_in_out,
2887 Label* no_map_match) {
2888 // Load the global or builtins object from the current context.
2890 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2891 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2893 // Check that the function's map is the same as the expected cached map.
2896 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2897 size_t offset = expected_kind * kPointerSize +
2898 FixedArrayBase::kHeaderSize;
2899 ldr(ip, FieldMemOperand(scratch, offset));
2900 cmp(map_in_out, ip);
2901 b(ne, no_map_match);
2903 // Use the transitioned cached map.
2904 offset = transitioned_kind * kPointerSize +
2905 FixedArrayBase::kHeaderSize;
2906 ldr(map_in_out, FieldMemOperand(scratch, offset));
2910 void MacroAssembler::LoadInitialArrayMap(
2911 Register function_in, Register scratch,
2912 Register map_out, bool can_have_holes) {
2913 ASSERT(!function_in.is(map_out));
2915 ldr(map_out, FieldMemOperand(function_in,
2916 JSFunction::kPrototypeOrInitialMapOffset));
2917 if (!FLAG_smi_only_arrays) {
2918 ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
2919 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2924 } else if (can_have_holes) {
2925 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2926 FAST_HOLEY_SMI_ELEMENTS,
2935 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2936 // Load the global or builtins object from the current context.
2938 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2939 // Load the native context from the global or builtins object.
2940 ldr(function, FieldMemOperand(function,
2941 GlobalObject::kNativeContextOffset));
2942 // Load the function from the native context.
2943 ldr(function, MemOperand(function, Context::SlotOffset(index)));
2947 void MacroAssembler::LoadArrayFunction(Register function) {
2948 // Load the global or builtins object from the current context.
2950 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2951 // Load the global context from the global or builtins object.
2953 FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
2954 // Load the array function from the native context.
2956 MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
2960 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2963 // Load the initial map. The global functions all have initial maps.
2964 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2965 if (emit_debug_code()) {
2967 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2970 Abort(kGlobalFunctionsMustHaveInitialMap);
2976 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2979 Label* not_power_of_two_or_zero) {
2980 sub(scratch, reg, Operand(1), SetCC);
2981 b(mi, not_power_of_two_or_zero);
2983 b(ne, not_power_of_two_or_zero);
2987 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2990 Label* zero_and_neg,
2991 Label* not_power_of_two) {
2992 sub(scratch, reg, Operand(1), SetCC);
2993 b(mi, zero_and_neg);
2995 b(ne, not_power_of_two);
2999 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
3001 Label* on_not_both_smi) {
3002 STATIC_ASSERT(kSmiTag == 0);
3003 tst(reg1, Operand(kSmiTagMask));
3004 tst(reg2, Operand(kSmiTagMask), eq);
3005 b(ne, on_not_both_smi);
3009 void MacroAssembler::UntagAndJumpIfSmi(
3010 Register dst, Register src, Label* smi_case) {
3011 STATIC_ASSERT(kSmiTag == 0);
3012 SmiUntag(dst, src, SetCC);
3013 b(cc, smi_case); // Shifter carry is not set for a smi.
3017 void MacroAssembler::UntagAndJumpIfNotSmi(
3018 Register dst, Register src, Label* non_smi_case) {
3019 STATIC_ASSERT(kSmiTag == 0);
3020 SmiUntag(dst, src, SetCC);
3021 b(cs, non_smi_case); // Shifter carry is set for a non-smi.
3025 void MacroAssembler::JumpIfEitherSmi(Register reg1,
3027 Label* on_either_smi) {
3028 STATIC_ASSERT(kSmiTag == 0);
3029 tst(reg1, Operand(kSmiTagMask));
3030 tst(reg2, Operand(kSmiTagMask), ne);
3031 b(eq, on_either_smi);
3035 void MacroAssembler::AssertNotSmi(Register object) {
3036 if (emit_debug_code()) {
3037 STATIC_ASSERT(kSmiTag == 0);
3038 tst(object, Operand(kSmiTagMask));
3039 Check(ne, kOperandIsASmi);
3044 void MacroAssembler::AssertSmi(Register object) {
3045 if (emit_debug_code()) {
3046 STATIC_ASSERT(kSmiTag == 0);
3047 tst(object, Operand(kSmiTagMask));
3048 Check(eq, kOperandIsNotSmi);
3053 void MacroAssembler::AssertString(Register object) {
3054 if (emit_debug_code()) {
3055 STATIC_ASSERT(kSmiTag == 0);
3056 tst(object, Operand(kSmiTagMask));
3057 Check(ne, kOperandIsASmiAndNotAString);
3059 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3060 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3062 Check(lo, kOperandIsNotAString);
3067 void MacroAssembler::AssertName(Register object) {
3068 if (emit_debug_code()) {
3069 STATIC_ASSERT(kSmiTag == 0);
3070 tst(object, Operand(kSmiTagMask));
3071 Check(ne, kOperandIsASmiAndNotAName);
3073 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3074 CompareInstanceType(object, object, LAST_NAME_TYPE);
3076 Check(le, kOperandIsNotAName);
3082 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3083 if (emit_debug_code()) {
3084 CompareRoot(reg, index);
3085 Check(eq, kHeapNumberMapRegisterClobbered);
3090 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3091 Register heap_number_map,
3093 Label* on_not_heap_number) {
3094 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3095 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3096 cmp(scratch, heap_number_map);
3097 b(ne, on_not_heap_number);
3101 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3107 // Test that both first and second are sequential ASCII strings.
3108 // Assume that they are non-smis.
3109 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3110 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3111 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3112 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3114 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3121 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3126 // Check that neither is a smi.
3127 and_(scratch1, first, Operand(second));
3128 JumpIfSmi(scratch1, failure);
3129 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3137 void MacroAssembler::JumpIfNotUniqueName(Register reg,
3138 Label* not_unique_name) {
3139 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3141 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3143 cmp(reg, Operand(SYMBOL_TYPE));
3144 b(ne, not_unique_name);
3150 // Allocates a heap number or jumps to the gc_required label if the young
3151 // space is full and a scavenge is needed.
3152 void MacroAssembler::AllocateHeapNumber(Register result,
3155 Register heap_number_map,
3157 TaggingMode tagging_mode) {
3158 // Allocate an object in the heap for the heap number and tag it as a heap
3160 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3161 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3163 // Store heap number map in the allocated object.
3164 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3165 if (tagging_mode == TAG_RESULT) {
3166 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3168 str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3173 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3174 DwVfpRegister value,
3177 Register heap_number_map,
3178 Label* gc_required) {
3179 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3180 sub(scratch1, result, Operand(kHeapObjectTag));
3181 vstr(value, scratch1, HeapNumber::kValueOffset);
3185 // Copies a fixed number of fields of heap objects from src to dst.
3186 void MacroAssembler::CopyFields(Register dst,
3188 LowDwVfpRegister double_scratch,
3190 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
3191 for (int i = 0; i < double_count; i++) {
3192 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
3193 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
3196 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
3197 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
3199 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
3201 vldr(double_scratch.low(),
3202 FieldMemOperand(src, (field_count - 1) * kPointerSize));
3203 vstr(double_scratch.low(),
3204 FieldMemOperand(dst, (field_count - 1) * kPointerSize));
3209 void MacroAssembler::CopyBytes(Register src,
3213 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3215 // Align src before copying in word size chunks.
3217 cmp(length, Operand::Zero());
3219 bind(&align_loop_1);
3220 tst(src, Operand(kPointerSize - 1));
3222 ldrb(scratch, MemOperand(src, 1, PostIndex));
3223 strb(scratch, MemOperand(dst, 1, PostIndex));
3224 sub(length, length, Operand(1), SetCC);
3225 b(ne, &byte_loop_1);
3227 // Copy bytes in word size chunks.
3229 if (emit_debug_code()) {
3230 tst(src, Operand(kPointerSize - 1));
3231 Assert(eq, kExpectingAlignmentForCopyBytes);
3233 cmp(length, Operand(kPointerSize));
3235 ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3236 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3237 str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3239 strb(scratch, MemOperand(dst, 1, PostIndex));
3240 mov(scratch, Operand(scratch, LSR, 8));
3241 strb(scratch, MemOperand(dst, 1, PostIndex));
3242 mov(scratch, Operand(scratch, LSR, 8));
3243 strb(scratch, MemOperand(dst, 1, PostIndex));
3244 mov(scratch, Operand(scratch, LSR, 8));
3245 strb(scratch, MemOperand(dst, 1, PostIndex));
3247 sub(length, length, Operand(kPointerSize));
3250 // Copy the last bytes if any left.
3252 cmp(length, Operand::Zero());
3255 ldrb(scratch, MemOperand(src, 1, PostIndex));
3256 strb(scratch, MemOperand(dst, 1, PostIndex));
3257 sub(length, length, Operand(1), SetCC);
3258 b(ne, &byte_loop_1);
3263 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3264 Register end_offset,
3269 str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3271 cmp(start_offset, end_offset);
3276 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3277 mov(scratch, Operand(ExternalReference::cpu_features()));
3278 ldr(scratch, MemOperand(scratch));
3279 tst(scratch, Operand(1u << VFP32DREGS));
3283 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3284 CheckFor32DRegs(scratch);
3285 vstm(db_w, location, d16, d31, ne);
3286 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3287 vstm(db_w, location, d0, d15);
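// The ne/eq conditions come from CheckFor32DRegs above: when VFP32DREGS is
// present the extra vstm for d16-d31 executes (ne); otherwise the sub skips
// the same 16 * kDoubleSize (eq), so the reserved frame space is identical on
// both kinds of hardware.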
3291 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3292 CheckFor32DRegs(scratch);
3293 vldm(ia_w, location, d0, d15);
3294 vldm(ia_w, location, d16, d31, ne);
3295 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3299 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3305 const int kFlatAsciiStringMask =
3306 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3307 const int kFlatAsciiStringTag =
3308 kStringTag | kOneByteStringTag | kSeqStringTag;
3309 and_(scratch1, first, Operand(kFlatAsciiStringMask));
3310 and_(scratch2, second, Operand(kFlatAsciiStringMask));
3311 cmp(scratch1, Operand(kFlatAsciiStringTag));
3312 // Ignore second test if first test failed.
3313 cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
3318 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3321 const int kFlatAsciiStringMask =
3322 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3323 const int kFlatAsciiStringTag =
3324 kStringTag | kOneByteStringTag | kSeqStringTag;
3325 and_(scratch, type, Operand(kFlatAsciiStringMask));
3326 cmp(scratch, Operand(kFlatAsciiStringTag));
3330 static const int kRegisterPassedArguments = 4;
3333 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3334 int num_double_arguments) {
3335 int stack_passed_words = 0;
3336 if (use_eabi_hardfloat()) {
3337 // In the hard floating point calling convention, we can use
3338 // all double registers to pass doubles.
3339 if (num_double_arguments > DoubleRegister::NumRegisters()) {
3340 stack_passed_words +=
3341 2 * (num_double_arguments - DoubleRegister::NumRegisters());
3344 // In the soft floating point calling convention, every double
3345 // argument is passed using two registers.
3346 num_reg_arguments += 2 * num_double_arguments;
3348 // Up to four simple arguments are passed in registers r0..r3.
3349 if (num_reg_arguments > kRegisterPassedArguments) {
3350 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3352 return stack_passed_words;
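// Example: a soft-float call with 2 integer arguments and 3 doubles passes
// each double in a core register pair, so num_reg_arguments becomes
// 2 + 2 * 3 == 8 and 8 - 4 == 4 words spill to the stack; the same call under
// hard-float needs no stack words because d0-d2 carry the doubles.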
3356 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3357 int num_double_arguments,
3359 int frame_alignment = ActivationFrameAlignment();
3360 int stack_passed_arguments = CalculateStackPassedWords(
3361 num_reg_arguments, num_double_arguments);
3362 if (frame_alignment > kPointerSize) {
3363 // Make stack end at alignment and make room for num_arguments - 4 words
3364 // and the original value of sp.
3366 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3367 ASSERT(IsPowerOf2(frame_alignment));
3368 and_(sp, sp, Operand(-frame_alignment));
3369 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3371 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3376 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3378 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3382 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
3383 if (use_eabi_hardfloat()) {
3391 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
3392 DwVfpRegister dreg2) {
3393 if (use_eabi_hardfloat()) {
3395 ASSERT(!dreg1.is(d1));
3403 vmov(r0, r1, dreg1);
3404 vmov(r2, r3, dreg2);
3409 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
3411 if (use_eabi_hardfloat()) {
3421 void MacroAssembler::CallCFunction(ExternalReference function,
3422 int num_reg_arguments,
3423 int num_double_arguments) {
3424 mov(ip, Operand(function));
3425 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3429 void MacroAssembler::CallCFunction(Register function,
3430 int num_reg_arguments,
3431 int num_double_arguments) {
3432 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3436 void MacroAssembler::CallCFunction(ExternalReference function,
3437 int num_arguments) {
3438 CallCFunction(function, num_arguments, 0);
3442 void MacroAssembler::CallCFunction(Register function,
3443 int num_arguments) {
3444 CallCFunction(function, num_arguments, 0);
3448 void MacroAssembler::CallCFunctionHelper(Register function,
3449 int num_reg_arguments,
3450 int num_double_arguments) {
3451 ASSERT(has_frame());
3452 // Make sure that the stack is aligned before calling a C function unless
3453 // running in the simulator. The simulator has its own alignment check which
3454 // provides more information.
3455 #if V8_HOST_ARCH_ARM
3456 if (emit_debug_code()) {
3457 int frame_alignment = OS::ActivationFrameAlignment();
3458 int frame_alignment_mask = frame_alignment - 1;
3459 if (frame_alignment > kPointerSize) {
3460 ASSERT(IsPowerOf2(frame_alignment));
3461 Label alignment_as_expected;
3462 tst(sp, Operand(frame_alignment_mask));
3463 b(eq, &alignment_as_expected);
3464 // Don't use Check here, as it will call Runtime_Abort possibly
3465 // re-entering here.
3466 stop("Unexpected alignment");
3467 bind(&alignment_as_expected);
3472 // Just call directly. The function called cannot cause a GC, or
3473 // allow preemption, so the return address in the link register
3476 int stack_passed_arguments = CalculateStackPassedWords(
3477 num_reg_arguments, num_double_arguments);
3478 if (ActivationFrameAlignment() > kPointerSize) {
3479 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3481 add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3486 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3488 const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3489 const int32_t kPCRegOffset = 2 * kPointerSize;
3490 ldr(result, MemOperand(ldr_location));
3491 if (emit_debug_code()) {
3492 // Check that the instruction is a ldr reg, [pc + offset].
3493 and_(result, result, Operand(kLdrPCPattern));
3494 cmp(result, Operand(kLdrPCPattern));
3495 Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
3496 // Result was clobbered. Restore it.
3497 ldr(result, MemOperand(ldr_location));
3499 // Get the address of the constant.
3500 and_(result, result, Operand(kLdrOffsetMask));
3501 add(result, ldr_location, Operand(result));
3502 add(result, result, Operand(kPCRegOffset));
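// The constant's address is ldr_location plus the 12-bit offset encoded in
// the instruction plus kPCRegOffset (8), because a pc-relative ldr on ARM
// reads pc as the instruction address plus two instructions.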
3506 void MacroAssembler::CheckPageFlag(
3511 Label* condition_met) {
3512 Bfc(scratch, object, 0, kPageSizeBits);
3513 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3514 tst(scratch, Operand(mask));
3515 b(cc, condition_met);
3519 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
3521 Label* if_deprecated) {
3522 if (map->CanBeDeprecated()) {
3523 mov(scratch, Operand(map));
3524 ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3525 tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
3526 b(ne, if_deprecated);
3531 void MacroAssembler::JumpIfBlack(Register object,
3535 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3536 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3540 void MacroAssembler::HasColor(Register object,
3541 Register bitmap_scratch,
3542 Register mask_scratch,
3546 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3548 GetMarkBits(object, bitmap_scratch, mask_scratch);
3550 Label other_color, word_boundary;
3551 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3552 tst(ip, Operand(mask_scratch));
3553 b(first_bit == 1 ? eq : ne, &other_color);
3554 // Shift left 1 by adding.
3555 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3556 b(eq, &word_boundary);
3557 tst(ip, Operand(mask_scratch));
3558 b(second_bit == 1 ? ne : eq, has_color);
3561 bind(&word_boundary);
3562 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3563 tst(ip, Operand(1));
3564 b(second_bit == 1 ? ne : eq, has_color);
3569 // Detect some, but not all, common pointer-free objects. This is used by the
3570 // incremental write barrier which doesn't care about oddballs (they are always
3571 // marked black immediately so this code is not hit).
3572 void MacroAssembler::JumpIfDataObject(Register value,
3574 Label* not_data_object) {
3575 Label is_data_object;
3576 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3577 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3578 b(eq, &is_data_object);
3579 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3580 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3581 // If it's a string and it's not a cons string then it's an object containing
3583 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3584 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3585 b(ne, not_data_object);
3586 bind(&is_data_object);
3590 void MacroAssembler::GetMarkBits(Register addr_reg,
3591 Register bitmap_reg,
3592 Register mask_reg) {
3593 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3594 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3595 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3596 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3597 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3598 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3599 mov(ip, Operand(1));
3600 mov(mask_reg, Operand(ip, LSL, mask_reg));
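// Net effect: bitmap_reg points at the page header, mask_reg becomes
// 1 << ((addr >> kPointerSizeLog2) & (Bitmap::kBitsPerCell - 1)), and the
// cell index taken from the next-higher address bits is shifted left by
// kPointerSizeLog2 and added to bitmap_reg, selecting the word-sized marking
// bitmap cell that covers this object.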
3604 void MacroAssembler::EnsureNotWhite(
3606 Register bitmap_scratch,
3607 Register mask_scratch,
3608 Register load_scratch,
3609 Label* value_is_white_and_not_data) {
3610 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3611 GetMarkBits(value, bitmap_scratch, mask_scratch);
3613 // If the value is black or grey we don't need to do anything.
3614 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3615 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3616 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3617 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3621 // Since both black and grey have a 1 in the first position and white does
3622 // not have a 1 there we only need to check one bit.
3623 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3624 tst(mask_scratch, load_scratch);
3627 if (emit_debug_code()) {
3628 // Check for impossible bit pattern.
3630 // LSL may overflow, making the check conservative.
3631 tst(load_scratch, Operand(mask_scratch, LSL, 1));
3633 stop("Impossible marking bit pattern");
3637 // Value is white. We check whether it is data that doesn't need scanning.
3638 // Currently only checks for HeapNumber and non-cons strings.
3639 Register map = load_scratch; // Holds map while checking type.
3640 Register length = load_scratch; // Holds length of object after testing type.
3641 Label is_data_object;
3643 // Check for heap-number
3644 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3645 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3646 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3647 b(eq, &is_data_object);
3649 // Check for strings.
3650 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3651 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3652 // If it's a string and it's not a cons string then it's an object containing
3654 Register instance_type = load_scratch;
3655 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3656 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3657 b(ne, value_is_white_and_not_data);
3658 // It's a non-indirect (non-cons and non-slice) string.
3659 // If it's external, the length is just ExternalString::kSize.
3660 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3661 // External strings are the only ones with the kExternalStringTag bit
3663 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
3664 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
3665 tst(instance_type, Operand(kExternalStringTag));
3666 mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3667 b(ne, &is_data_object);
3669 // Sequential string, either ASCII or UC16.
3670 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
3671 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3672 // getting the length multiplied by 2.
3673 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3674 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3675 ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3676 tst(instance_type, Operand(kStringEncodingMask));
3677 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3678 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3679 and_(length, length, Operand(~kObjectAlignmentMask));
3681 bind(&is_data_object);
3682 // Value is a data object, and it is white. Mark it black. Since we know
3683 // that the object is white we can make it black by flipping one bit.
3684 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3685 orr(ip, ip, Operand(mask_scratch));
3686 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3688 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3689 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3690 add(ip, ip, Operand(length));
3691 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3697 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3698 Usat(output_reg, 8, Operand(input_reg));
3702 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3703 DwVfpRegister input_reg,
3704 LowDwVfpRegister double_scratch) {
3709 VFPCompareAndSetFlags(input_reg, 0.0);
3712 // Double value is less than zero, NaN or Inf, return 0.
3713 mov(result_reg, Operand::Zero());
3716 // Double value is >= 255, return 255.
3718 Vmov(double_scratch, 255.0, result_reg);
3719 VFPCompareAndSetFlags(input_reg, double_scratch);
3721 mov(result_reg, Operand(255));
3724 // In 0-255 range, round and truncate.
3728 // Set rounding mode to round to the nearest integer by clearing bits[23:22].
3729 bic(result_reg, ip, Operand(kVFPRoundingModeMask));
3731 vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3732 vmov(result_reg, double_scratch.low());
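// Clearing FPSCR bits [23:22] selects round-to-nearest (the 00 encoding of
// the VFP rounding-mode field) for the vcvt above; ip, which supplied the
// current FPSCR bits to the bic, presumably still holds the original value so
// the caller's rounding mode can be written back once the conversion is done.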
3739 void MacroAssembler::LoadInstanceDescriptors(Register map,
3740 Register descriptors) {
3741 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3745 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3746 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3747 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3751 void MacroAssembler::EnumLength(Register dst, Register map) {
3752 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3753 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3754 and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
3758 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3759 Register empty_fixed_array_value = r6;
3760 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3764 // Check if the enum length field is properly initialized, indicating that
3765 // there is an enum cache.
3766 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3769 cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
3770 b(eq, call_runtime);
3775 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3777 // For all objects but the receiver, check that the cache is empty.
3779 cmp(r3, Operand(Smi::FromInt(0)));
3780 b(ne, call_runtime);
3784 // Check that there are no elements. Register r2 contains the current JS
3785 // object we've reached through the prototype chain.
3786 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3787 cmp(r2, empty_fixed_array_value);
3788 b(ne, call_runtime);
3790 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3791 cmp(r2, null_value);
3796 void MacroAssembler::TestJSArrayForAllocationMemento(
3797 Register receiver_reg,
3798 Register scratch_reg) {
3799 Label no_memento_available;
3800 ExternalReference new_space_start =
3801 ExternalReference::new_space_start(isolate());
3802 ExternalReference new_space_allocation_top =
3803 ExternalReference::new_space_allocation_top_address(isolate());
3804 add(scratch_reg, receiver_reg,
3805 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3806 cmp(scratch_reg, Operand(new_space_start));
3807 b(lt, &no_memento_available);
3808 mov(ip, Operand(new_space_allocation_top));
3809 ldr(ip, MemOperand(ip));
3810 cmp(scratch_reg, ip);
3811 b(gt, &no_memento_available);
3812 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3814 Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
3815 bind(&no_memento_available);
3820 bool AreAliased(Register reg1,
3826 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
3827 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
3830 if (reg1.is_valid()) regs |= reg1.bit();
3831 if (reg2.is_valid()) regs |= reg2.bit();
3832 if (reg3.is_valid()) regs |= reg3.bit();
3833 if (reg4.is_valid()) regs |= reg4.bit();
3834 if (reg5.is_valid()) regs |= reg5.bit();
3835 if (reg6.is_valid()) regs |= reg6.bit();
3836 int n_of_non_aliasing_regs = NumRegs(regs);
3838 return n_of_valid_regs != n_of_non_aliasing_regs;
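// Example: if reg1 and reg3 are both r2 and the rest are no_reg, two registers
// are valid but their combined mask contains a single bit, so NumRegs(regs)
// is 1 != 2 and the function reports aliasing.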
3843 CodePatcher::CodePatcher(byte* address, int instructions)
3844 : address_(address),
3845 size_(instructions * Assembler::kInstrSize),
3846 masm_(NULL, address, size_ + Assembler::kGap) {
3847 // Create a new macro assembler pointing to the address of the code to patch.
3848 // The size is adjusted with kGap in order for the assembler to generate size
3849 // bytes of instructions without failing with buffer size constraints.
3850 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3854 CodePatcher::~CodePatcher() {
3855 // Indicate that code has changed.
3856 CPU::FlushICache(address_, size_);
3858 // Check that the code was patched as expected.
3859 ASSERT(masm_.pc_ == address_ + size_);
3860 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3864 void CodePatcher::Emit(Instr instr) {
3865 masm()->emit(instr);
3869 void CodePatcher::Emit(Address addr) {
3870 masm()->emit(reinterpret_cast<Instr>(addr));
3874 void CodePatcher::EmitCondition(Condition cond) {
3875 Instr instr = Assembler::instr_at(masm_.pc_);
3876 instr = (instr & ~kCondMask) | cond;
3881 } } // namespace v8::internal
3883 #endif // V8_TARGET_ARCH_ARM