// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

void MacroAssembler::Jump(Register target, Condition cond) {
  bx(target, cond);
}

void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}

void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}

void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code.
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}

int MacroAssembler::CallSize(Register target, Condition cond) {
  return kInstrSize;
}

void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
  blx(target, cond);
  DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}

int MacroAssembler::CallSize(
    Address target, RelocInfo::Mode rmode, Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return kInstrSize +
         mov_operand.instructions_required(this, mov_instr) * kInstrSize;
}

int MacroAssembler::CallStubSize(
    CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
  return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}

int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
                                                   Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return kInstrSize +
         mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
}

void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);

  bool old_predictable_code_size = predictable_code_size();
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(true);
  }

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the
  // same constant pool availability (e.g., whether constant pool is full or
  // not).
  int expected_size = CallSize(target, rmode, cond);
#endif

  // Call sequence on V7 or later may be:
  //  movw  ip, #... @ call address low 16
  //  movt  ip, #... @ call address high 16
  //  blx   ip
  //                 @ return address
  // Or for pre-V7 or values that may be back-patched
  // to avoid ICache flushes:
  //  ldr   ip, [pc, #...] @ call address
  //  blx   ip
  //                 @ return address

  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method will automatically record positions
  // when pc is the target; since that is not the case here, we have to do
  // it explicitly.
  positions_recorder()->WriteRecordedPositions();

  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  blx(ip, cond);

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(old_predictable_code_size);
  }
}

int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
                             Condition cond) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}

void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  Label start;
  bind(&start);
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  // 'code' is always generated ARM code, never THUMB code.
  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}

void MacroAssembler::Ret(Condition cond) {
  bx(lr, cond);
}

void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}

void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}

void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}

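// Worked example for the scratch-free path above: with reg1 = a and reg2 = b,
// the three eors compute reg1 = a ^ b, then reg2 = (a ^ b) ^ b = a, and
// finally reg1 = (a ^ b) ^ a = b, exchanging the two values in place.
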
void MacroAssembler::Call(Label* target) {
  bl(target);
}

void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));
  push(ip);
}

void MacroAssembler::Move(Register dst, Handle<Object> value) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    mov(dst, Operand(value));
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      mov(dst, Operand(cell));
      ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      mov(dst, Operand(value));
    }
  }
}

void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    mov(dst, src, LeaveCC, cond);
  }
}

void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}

void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                         Register srcA, Condition cond) {
  if (CpuFeatures::IsSupported(MLS)) {
    CpuFeatureScope scope(this, MLS);
    mls(dst, src1, src2, srcA, cond);
  } else {
    DCHECK(!srcA.is(ip));
    mul(ip, src1, src2, LeaveCC, cond);
    sub(dst, srcA, ip, LeaveCC, cond);
  }
}

void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_output_reloc_info(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!(src2.instructions_required(this) == 1) &&
             !src2.must_output_reloc_info(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
    ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}

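// Worked example for the ubfx rewrite above: a mask like 0xffff does not
// encode as a single ARM immediate, but 0xffff + 1 = 0x10000 is a power of
// two, so `and dst, src1, #0xffff` becomes `ubfx dst, src1, #0, #16`, which
// extracts the low 16 bits in a single instruction.
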
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}

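// Worked example for the fallback above: for lsb = 4 and width = 8,
// mask = (1 << 12) - 1 - ((1 << 4) - 1) = 0xff0; the and_ keeps bits 4..11
// and the LSR by 4 moves them down to bits 0..7, zero-extended, matching
// what ubfx would produce.
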
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}

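// Worked example for the sign-extending fallback above: for lsb = 4 and
// width = 8, shift_up = 32 - 4 - 8 = 20 and shift_down = 24. The LSL by 20
// places the field's sign bit (bit 11) at bit 31, and the ASR by 24 brings
// the field back to bits 0..7 while replicating the sign bit, matching sbfx.
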
void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  DCHECK(0 <= lsb && lsb < 32);
  DCHECK(0 <= width && width < 32);
  DCHECK(lsb + width < 32);
  DCHECK(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}

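// Worked example for the bit-insert fallback above: for lsb = 8 and
// width = 4, mask = 0xf00, so bic clears dst bits 8..11; src is masked to
// its low 4 bits, shifted left by 8, and orr'd in, which is exactly the
// effect of bfi.
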
void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}

void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    DCHECK(!dst.is(pc) && !src.rm().is(pc));
    DCHECK((satpos >= 0) && (satpos <= 31));

    // These asserts are required to ensure compatibility with the ARMv7
    // implementation.
    DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
    DCHECK(src.rs().is(no_reg));

    Label done;
    int satval = (1 << satpos) - 1;

    if (cond != al) {
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    }
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src);
    }
    tst(dst, Operand(~satval));
    b(eq, &done);
    mov(dst, Operand::Zero(), LeaveCC, mi);  // 0 if negative.
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    bind(&done);
  } else {
    usat(dst, satpos, src, cond);
  }
}

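// Saturation walkthrough for the fallback above: for satpos = 8,
// satval = 255. The tst with ~satval checks whether any bit outside 0..7 is
// set; if so, the value is out of range, and the conditional movs clamp it
// to 0 when negative (mi) or to satval when positive (pl).
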
void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    ldrsb(dst, src);
  } else if (r.IsUInteger8()) {
    ldrb(dst, src);
  } else if (r.IsInteger16()) {
    ldrsh(dst, src);
  } else if (r.IsUInteger16()) {
    ldrh(dst, src);
  } else {
    ldr(dst, src);
  }
}

void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    strb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    strh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    str(src, dst);
  }
}

void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
      !predictable_code_size()) {
    // The CPU supports fast immediate values, and this root will never
    // change. We will load it as a relocatable immediate value.
    Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, Operand(root), LeaveCC, cond);
    return;
  }
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}

void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}

void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
  b(cond, branch);
}

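// The check above relies on new space being a contiguous, power-of-two
// aligned region: masking an address with new_space_mask and comparing
// against new_space_start yields equality exactly for addresses inside new
// space, so a single and_/cmp pair implements the predicate.
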
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}

// Will clobber 4 registers: object, map, dst, ip. The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    cmp(dst, Operand(isolate()->factory()->meta_map()));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    cmp(ip, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}

// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    ldr(ip, MemOperand(address));
    cmp(ip, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   address);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  ldr(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  str(address, MemOperand(scratch, kPointerSize, PostIndex));
  // Write back new top of buffer.
  str(scratch, MemOperand(ip));
  // Call stub on end of buffer.
  // Check for end of buffer.
  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    b(eq, &done);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(eq);
  }
  push(lr);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(lr);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

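// Store buffer walkthrough: the PostIndex str above appends 'address' at the
// current buffer top and bumps the top by kPointerSize in one instruction.
// The overflow condition is encoded as a bit in the top address itself, so a
// single tst detects a full buffer and routes execution into
// StoreBufferOverflowStub.
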
void MacroAssembler::PushFixedFrame(Register marker_reg) {
  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                cp.bit() |
                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
                fp.bit() |
                lr.bit());
}

void MacroAssembler::PopFixedFrame(Register marker_reg) {
  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                cp.bit() |
                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
                fp.bit() |
                lr.bit());
}

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0:
  DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}

void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}

void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}

MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}

MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // Number of d-regs not known at snapshot time.
  DCHECK(!serializer_enabled());
  // General purpose registers are pushed last on the stack.
  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}

void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  DCHECK(src.rm().is(no_reg));
  DCHECK(!dst1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));

  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
      MemOperand src2(src);
      src2.set_offset(src2.offset() + 4);
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
      } else {
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      }
    } else {  // PostIndex or NegPostIndex.
      DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
      if (dst1.is(src.rn())) {
        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
        ldr(dst1, src, cond);
      } else {
        MemOperand src2(src);
        src2.set_offset(src2.offset() - 4);
        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
        ldr(dst2, src2, cond);
      }
    }
  }
}

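// Ordering note for the two-ldr fallback above: when dst1 aliases the base
// register src.rn(), loading dst1 first would clobber the base before the
// second load, so the pair is emitted in the opposite order (dst2 first).
// The PostIndex variants rewrite the addressing so the base write-back still
// happens exactly once.
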
void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  DCHECK(dst.rm().is(no_reg));
  DCHECK(!src1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
      dst2.set_offset(dst2.offset() + 4);
      str(src1, dst, cond);
      str(src2, dst2, cond);
    } else {  // PostIndex or NegPostIndex.
      DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
      dst2.set_offset(dst2.offset() - 4);
      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
      str(src2, dst2, cond);
    }
  }
}

void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
  // If needed, restore wanted bits of FPSCR.
  Label fpscr_done;
  vmrs(scratch);
  if (emit_debug_code()) {
    Label rounding_mode_correct;
    tst(scratch, Operand(kVFPRoundingModeMask));
    b(eq, &rounding_mode_correct);
    // Don't call Assert here, since Runtime_Abort could re-enter here.
    stop("Default rounding mode not set");
    bind(&rounding_mode_correct);
  }
  tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
  b(ne, &fpscr_done);
  orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
  vmsr(scratch);
  bind(&fpscr_done);
}

void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,
                                        const Condition cond) {
  vsub(dst, src, kDoubleRegZero, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const double imm,
                          const Register scratch) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
  if (value_rep == zero) {
    vmov(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero) {
    vneg(dst, kDoubleRegZero);
  } else {
    vmov(dst, imm, scratch);
  }
}

void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}

void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}

void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}

void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}

void MacroAssembler::LoadConstantPoolPointerRegister() {
  if (FLAG_enable_ool_constant_pool) {
    int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
                               pc_offset() - Instruction::kPCReadOffset;
    DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
    ldr(pp, MemOperand(pc, constant_pool_offset));
  }
}

void MacroAssembler::StubPrologue() {
  PushFixedFrame();
  Push(Smi::FromInt(StackFrame::STUB));
  // Adjust FP to point to saved FP.
  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  if (FLAG_enable_ool_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_ool_constant_pool_available(true);
  }
}

void MacroAssembler::Prologue(bool code_pre_aging) {
  { PredictableCodeSizeScope predictable_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    // The following three instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      add(r0, pc, Operand(-8));
      ldr(pc, MemOperand(pc, -4));
      emit_code_stub_address(stub);
    } else {
      PushFixedFrame(r1);
      nop(ip.code());
      // Adjust FP to point to saved FP.
      add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }
  }
  if (FLAG_enable_ool_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_ool_constant_pool_available(true);
  }
}

void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // r0-r3: preserved
  PushFixedFrame();
  if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
    LoadConstantPoolPointerRegister();
  }
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  // Adjust FP to point to saved FP.
  add(fp, sp,
      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}

int MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer
  // (if FLAG_enable_ool_constant_pool).
  int frame_ends;
  if (FLAG_enable_ool_constant_pool) {
    add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
    frame_ends = pc_offset();
    ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
  } else {
    mov(sp, fp);
    frame_ends = pc_offset();
    ldm(ia_w, sp, fp.bit() | lr.bit());
  }
  return frame_ends;
}

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  Push(lr, fp);
  mov(fp, Operand(sp));  // Set up new frame pointer.
  // Reserve room for saved entry sp and code object.
  sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
  if (emit_debug_code()) {
    mov(ip, Operand::Zero());
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_ool_constant_pool) {
    str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(ip, Operand(CodeObject()));
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    SaveFPRegs(sp, ip);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   DwVfpRegister::kMaxNumRegisters * kDoubleSize,
    // since the sp slot, code slot and constant pool slot (if
    // FLAG_enable_ool_constant_pool) were pushed after the fp.
  }

  // Reserve place for the return address and stack space and align the frame
  // preparing for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(ip, sp, Operand(kPointerSize));
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

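// Alignment walkthrough: with frame_alignment = 8, Operand(-frame_alignment)
// is 0xfffffff8, so the and_ above clears the low three bits and rounds sp
// down to the next 8-byte boundary after the stack_space reservation.
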
void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  SmiTag(scratch1, length);
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}

int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);

  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = ExitFrameConstants::kFrameSize;
    add(r3, fp,
        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
    RestoreFPRegs(r3, ip);
  }

  // Clear top frame.
  mov(r3, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    ldr(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  if (FLAG_enable_ool_constant_pool) {
    ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    if (argument_count_is_length) {
      add(sp, sp, argument_count);
    } else {
      add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
    }
  }
}

void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}

// On ARM this is just a synonym to make the purpose clear.
void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
  MovFromFloatResult(dst);
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  DCHECK(actual.is_immediate() || actual.reg().is(r0));
  DCHECK(expected.is_immediate() || expected.reg().is(r2));
  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}

void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
                 call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}

void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  SmiUntag(expected_reg);
  ldr(code_reg,
      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(function.is(r1));

  // Get the function and setup the context.
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  InvokeCode(r3, expected, actual, flag, call_wrapper);
}

void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(r1, function);
  InvokeFunction(r1, expected, actual, flag, call_wrapper);
}

void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}

void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(lt, fail);
  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(gt, fail);
}

void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  DCHECK(kNotStringTag != 0);

  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsNotStringMask));
  b(ne, fail);
}

void MacroAssembler::IsObjectNameType(Register object,
                                      Register scratch,
                                      Label* fail) {
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(LAST_NAME_TYPE));
  b(hi, fail);
}

void MacroAssembler::DebugBreak() {
  mov(r0, Operand::Zero());
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}

void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
  // We will build up the handler from the bottom by pushing on the stack.
  // Set up the code object (r5) and the state (r6) for pushing.
  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  mov(r5, Operand(CodeObject()));
  mov(r6, Operand(state));

  // Push the frame pointer, context, state, and code object.
  if (kind == StackHandler::JS_ENTRY) {
    mov(cp, Operand(Smi::FromInt(0)));  // Indicates no context.
    mov(ip, Operand::Zero());  // NULL frame pointer.
    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
  } else {
    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
  }

  // Link the current handler as the next handler.
  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(r5, MemOperand(r6));
  push(r5);
  // Set this new handler as the current one.
  str(sp, MemOperand(r6));
}

void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r1);
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  str(r1, MemOperand(ip));
}

void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // r0 = exception, r1 = code object, r2 = state.

  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  if (FLAG_enable_ool_constant_pool) {
    ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));  // Constant pool.
  }
  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
  add(pc, r1, Operand::SmiUntag(r2));  // Jump.
}

void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in r0.
  if (!value.is(r0)) {
    mov(r0, value);
  }
  // Drop the stack pointer to the top of the top handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));
  // Restore the next handler.
  pop(r2);
  str(r2, MemOperand(r3));

  // Get the code object (r1) and state (r2). Restore the context and frame
  // pointer.
  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  tst(cp, cp);
  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);

  JumpToHandlerEntry();
}

void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in r0.
  if (!value.is(r0)) {
    mov(r0, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind);
  bind(&fetch_next);
  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  tst(r2, Operand(StackHandler::KindField::kMask));
  b(ne, &fetch_next);

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(r2);
  str(r2, MemOperand(r3));
  // Get the code object (r1) and state (r2). Clear the context and frame
  // pointer (0 was saved in the handler).
  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());

  JumpToHandlerEntry();
}

void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch));
  DCHECK(!holder_reg.is(ip));
  DCHECK(!scratch.is(ip));

  // Load current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand::Zero());
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the native_context_map.
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull);

    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
    // Restoring ip is not needed; ip is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to holder's context.
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ldr(scratch, FieldMemOperand(scratch, token_offset));
  ldr(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}

// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  eor(t0, t0, Operand(scratch));

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  mvn(scratch, Operand(t0));
  add(t0, scratch, Operand(t0, LSL, 15));
  // hash = hash ^ (hash >> 12);
  eor(t0, t0, Operand(t0, LSR, 12));
  // hash = hash + (hash << 2);
  add(t0, t0, Operand(t0, LSL, 2));
  // hash = hash ^ (hash >> 4);
  eor(t0, t0, Operand(t0, LSR, 4));
  // hash = hash * 2057;
  mov(scratch, Operand(t0, LSL, 11));
  add(t0, t0, Operand(t0, LSL, 3));
  add(t0, t0, scratch);
  // hash = hash ^ (hash >> 16);
  eor(t0, t0, Operand(t0, LSR, 16));
}

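// For reference, a scalar sketch of the sequence above (assuming 'key' is
// the untagged key and 'seed' the untagged hash seed):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;  // emitted as hash + (hash << 3) + (hash << 11)
//   hash ^= hash >> 16;
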
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register t0,
                                              Register t1,
                                              Register t2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'elements'.
  //            Unchanged on bailout so 'key' or 'elements' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // t0 - holds the untagged key on entry and holds the hash once computed.
  //
  // t1 - used to hold the capacity mask of the dictionary.
  //
  // t2 - used for the index into the dictionary.
  Label done;

  GetNumberHash(t0, t1);

  // Compute the capacity mask.
  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  SmiUntag(t1);
  sub(t1, t1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use t2 for index calculations and keep the hash intact in t0.
    mov(t2, t0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(t2, t2, Operand(t1));

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3

    // Check if the key is identical to the name.
    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
    ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
    cmp(key, Operand(ip));
    if (i != kNumberDictionaryProbes - 1) {
      b(eq, &done);
    } else {
      b(ne, miss);
    }
  }

  bind(&done);
  // Check that the value is a field property.
  // t2: elements + (index * kPointerSize)
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  b(ne, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  ldr(result, FieldMemOperand(t2, kValueOffset));
}

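// Probing walkthrough: with a power-of-two capacity, the mask in t1 keeps
// each probe index in range, and the quadratic offsets i + i * i
// (i = 1, 2, ...) visit distinct buckets: hash + 2, hash + 6, hash + 12, ...
// Each index is then tripled (t2 + (t2 << 1)) because every entry spans
// three pointers: key, value, and details.
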
void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register scratch1,
                              Register scratch2,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  DCHECK(!scratch1.is(ip));
  DCHECK(!scratch2.is(ip));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  DCHECK_EQ(0, object_size & kObjectAlignmentMask);

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  // Also, assert that the registers are numbered such that the values
  // are loaded in the correct order.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  intptr_t top =
      reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);
  DCHECK(result.code() < ip.code());

  // Set up allocation top address register.
  Register topaddr = scratch1;
  mov(topaddr, Operand(allocation_top));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      ldr(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, kUnexpectedAllocationTop);
    }
    // Load allocation limit into ip. Result already contains allocation top.
    ldr(ip, MemOperand(topaddr, limit - top));
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
    Label aligned;
    b(eq, &aligned);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand(ip));
      b(hs, gc_required);
    }
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
    bind(&aligned);
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. We must preserve the ip register at this
  // point, so we cannot just use add().
  DCHECK(object_size > 0);
  Register source = result;
  Condition cond = al;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;
    } else {
      int bits = object_size & (0xff << shift);
      object_size -= bits;
      shift += 8;
      Operand bits_operand(bits);
      DCHECK(bits_operand.instructions_required(this) == 1);
      add(scratch2, source, bits_operand, SetCC, cond);
      source = scratch2;
      cond = cc;
    }
  }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);
  str(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}

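// Size-add walkthrough: the while loop above splits object_size into 8-bit
// chunks that each encode as a single immediate. For object_size = 0x10008
// it emits add(scratch2, result, #0x8, SetCC) followed by a conditional
// add(scratch2, scratch2, #0x10000, SetCC, cc), so a carry from either add
// survives to the b(cs, gc_required) overflow check while ip stays intact.
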
void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register scratch1,
                              Register scratch2,
                              Label* gc_required,
                              AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  // Assert that the register arguments are different and that none of
  // them are ip. ip is used explicitly in the code generated below.
  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  DCHECK(!object_size.is(ip));
  DCHECK(!result.is(ip));
  DCHECK(!scratch1.is(ip));
  DCHECK(!scratch2.is(ip));

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  // Also, assert that the registers are numbered such that the values
  // are loaded in the correct order.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top =
      reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);
  DCHECK(result.code() < ip.code());

  // Set up allocation top address.
  Register topaddr = scratch1;
  mov(topaddr, Operand(allocation_top));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      ldr(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, kUnexpectedAllocationTop);
    }
    // Load allocation limit into ip. Result already contains allocation top.
    ldr(ip, MemOperand(topaddr, limit - top));
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
    Label aligned;
    b(eq, &aligned);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand(ip));
      b(hs, gc_required);
    }
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
    bind(&aligned);
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
  } else {
    add(scratch2, result, Operand(object_size), SetCC);
  }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);

  // Update allocation top. result temporarily holds the new top.
  if (emit_debug_code()) {
    tst(scratch2, Operand(kObjectAlignmentMask));
    Check(eq, kUnalignedAllocationInNewSpace);
  }
  str(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}

1947 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1949 ExternalReference new_space_allocation_top =
1950 ExternalReference::new_space_allocation_top_address(isolate());
1952 // Make sure the object has no tag before resetting top.
1953 and_(object, object, Operand(~kHeapObjectTagMask));
1955 // Check that the object un-allocated is below the current top.
1956 mov(scratch, Operand(new_space_allocation_top));
1957 ldr(scratch, MemOperand(scratch));
1958 cmp(object, scratch);
1959 Check(lt, kUndoAllocationOfNonAllocatedMemory);
1961 // Write the address of the object to un-allocate as the current top.
1962 mov(scratch, Operand(new_space_allocation_top));
1963 str(object, MemOperand(scratch));
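// Note: this is the inverse of a bump-pointer Allocate that the GC has not
// yet observed; callers use it to back out of an allocation when a later
// initialization step has to bail out.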
1967 void MacroAssembler::AllocateTwoByteString(Register result,
1968 Register length,
1969 Register scratch1,
1970 Register scratch2,
1971 Register scratch3,
1972 Label* gc_required) {
1973 // Calculate the number of bytes needed for the characters in the string while
1974 // observing object alignment.
1975 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1976 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1977 add(scratch1, scratch1,
1978 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1979 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
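// Worked example (assuming 4-byte object alignment): a 3-character two-byte
// string needs 6 payload bytes, so scratch1 = (6 + kHeaderSize + 3) & ~3,
// i.e. the header size plus 8 -- the payload rounded up to the next aligned
// boundary.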
1981 // Allocate two-byte string in new space.
1982 Allocate(scratch1,
1983 result,
1984 scratch2,
1985 scratch3,
1986 gc_required,
1987 TAG_OBJECT);
1989 // Set the map, length and hash field.
1990 InitializeNewString(result,
1991 length,
1992 Heap::kStringMapRootIndex,
1993 scratch1,
1994 scratch2);
1995 }
1998 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1999 Register scratch1, Register scratch2,
2000 Register scratch3,
2001 Label* gc_required) {
2002 // Calculate the number of bytes needed for the characters in the string while
2003 // observing object alignment.
2004 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2005 DCHECK(kCharSize == 1);
2006 add(scratch1, length,
2007 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
2008 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2010 // Allocate one-byte string in new space.
2011 Allocate(scratch1,
2012 result,
2013 scratch2,
2014 scratch3,
2015 gc_required,
2016 TAG_OBJECT);
2018 // Set the map, length and hash field.
2019 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
2020 scratch1, scratch2);
2024 void MacroAssembler::AllocateTwoByteConsString(Register result,
2025 Register length,
2026 Register scratch1,
2027 Register scratch2,
2028 Label* gc_required) {
2029 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2030 TAG_OBJECT);
2032 InitializeNewString(result,
2033 length,
2034 Heap::kConsStringMapRootIndex,
2035 scratch1,
2036 scratch2);
2037 }
2040 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
2041 Register scratch1,
2042 Register scratch2,
2043 Label* gc_required) {
2044 Allocate(ConsString::kSize,
2045 result,
2046 scratch1,
2047 scratch2,
2048 gc_required,
2049 TAG_OBJECT);
2051 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
2052 scratch1, scratch2);
2056 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2057 Register length,
2058 Register scratch1,
2059 Register scratch2,
2060 Label* gc_required) {
2061 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2062 TAG_OBJECT);
2064 InitializeNewString(result,
2065 length,
2066 Heap::kSlicedStringMapRootIndex,
2067 scratch1,
2068 scratch2);
2069 }
2072 void MacroAssembler::AllocateOneByteSlicedString(Register result,
2073 Register length,
2074 Register scratch1,
2075 Register scratch2,
2076 Label* gc_required) {
2077 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2078 TAG_OBJECT);
2080 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
2081 scratch1, scratch2);
2085 void MacroAssembler::CompareObjectType(Register object,
2086 Register map,
2087 Register type_reg,
2088 InstanceType type) {
2089 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2091 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2092 CompareInstanceType(map, temp, type);
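// Typical use (illustrative): branch on the instance type while keeping the
// map and raw type around for further checks:
//   CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
//   b(ne, &not_a_function);  // r1 now holds the map, r2 the instance type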
2096 void MacroAssembler::CheckObjectTypeRange(Register object,
2097 Register map,
2098 InstanceType min_type,
2099 InstanceType max_type,
2100 Label* false_label) {
2101 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2102 STATIC_ASSERT(LAST_TYPE < 256);
2103 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2104 ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
2105 sub(ip, ip, Operand(min_type));
2106 cmp(ip, Operand(max_type - min_type));
2107 b(hi, false_label); // Unsigned hi catches both type < min_type (wraps) and type > max_type.
2108 }
2111 void MacroAssembler::CompareInstanceType(Register map,
2112 Register type_reg,
2113 InstanceType type) {
2114 // Registers map and type_reg can be ip. These two lines assert
2115 // that ip can be used with the two instructions (the constants
2116 // will never need ip).
2117 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2118 STATIC_ASSERT(LAST_TYPE < 256);
2119 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2120 cmp(type_reg, Operand(type));
2124 void MacroAssembler::CompareRoot(Register obj,
2125 Heap::RootListIndex index) {
2126 DCHECK(!obj.is(ip));
2127 LoadRoot(ip, index);
2128 cmp(obj, ip);
2129 }
2132 void MacroAssembler::CheckFastElements(Register map,
2133 Register scratch,
2134 Label* fail) {
2135 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2136 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2137 STATIC_ASSERT(FAST_ELEMENTS == 2);
2138 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2139 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2140 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2141 b(hi, fail);
2142 }
2145 void MacroAssembler::CheckFastObjectElements(Register map,
2146 Register scratch,
2147 Label* fail) {
2148 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2149 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2150 STATIC_ASSERT(FAST_ELEMENTS == 2);
2151 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2152 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2153 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2154 b(ls, fail);
2155 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2156 b(hi, fail);
2157 }
2160 void MacroAssembler::CheckFastSmiElements(Register map,
2161 Register scratch,
2162 Label* fail) {
2163 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2164 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2165 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2166 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2167 b(hi, fail);
2168 }
2171 void MacroAssembler::StoreNumberToDoubleElements(
2172 Register value_reg,
2173 Register key_reg,
2174 Register elements_reg,
2175 Register scratch1,
2176 LowDwVfpRegister double_scratch,
2177 Label* fail,
2178 int elements_offset) {
2179 Label smi_value, store;
2181 // Handle smi values specially.
2182 JumpIfSmi(value_reg, &smi_value);
2184 // Ensure that the object is a heap number.
2185 CheckMap(value_reg,
2186 scratch1,
2187 isolate()->factory()->heap_number_map(),
2188 fail,
2189 DONT_DO_SMI_CHECK);
2191 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2192 // Force a canonical NaN.
2193 if (emit_debug_code()) {
2194 vmrs(ip);
2195 tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2196 Assert(ne, kDefaultNaNModeNotSet);
2197 }
2198 VFPCanonicalizeNaN(double_scratch);
2199 b(&store);
2201 bind(&smi_value);
2202 SmiToDouble(double_scratch, value_reg);
2204 bind(&store);
2205 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2206 vstr(double_scratch,
2207 FieldMemOperand(scratch1,
2208 FixedDoubleArray::kHeaderSize - elements_offset));
2212 void MacroAssembler::CompareMap(Register obj,
2213 Register scratch,
2214 Handle<Map> map,
2215 Label* early_success) {
2216 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2217 CompareMap(scratch, map, early_success);
2221 void MacroAssembler::CompareMap(Register obj_map,
2222 Handle<Map> map,
2223 Label* early_success) {
2224 cmp(obj_map, Operand(map));
2228 void MacroAssembler::CheckMap(Register obj,
2229 Register scratch,
2230 Handle<Map> map,
2231 Label* fail,
2232 SmiCheckType smi_check_type) {
2233 if (smi_check_type == DO_SMI_CHECK) {
2234 JumpIfSmi(obj, fail);
2235 }
2237 Label success;
2238 CompareMap(obj, scratch, map, &success);
2239 b(ne, fail);
2240 bind(&success);
2241 }
2244 void MacroAssembler::CheckMap(Register obj,
2245 Register scratch,
2246 Heap::RootListIndex index,
2247 Label* fail,
2248 SmiCheckType smi_check_type) {
2249 if (smi_check_type == DO_SMI_CHECK) {
2250 JumpIfSmi(obj, fail);
2251 }
2252 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2253 LoadRoot(ip, index);
2254 cmp(scratch, ip);
2255 b(ne, fail);
2256 }
2259 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2260 Register scratch2, Handle<WeakCell> cell,
2261 Handle<Code> success,
2262 SmiCheckType smi_check_type) {
2263 Label fail;
2264 if (smi_check_type == DO_SMI_CHECK) {
2265 JumpIfSmi(obj, &fail);
2266 }
2267 ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2268 CmpWeakValue(scratch1, cell, scratch2);
2269 Jump(success, RelocInfo::CODE_TARGET, eq);
2270 bind(&fail);
2271 }
2274 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2275 Register scratch) {
2276 mov(scratch, Operand(cell));
2277 ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2278 cmp(value, scratch);
2282 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2283 mov(value, Operand(cell));
2284 ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
2288 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2289 Label* miss) {
2290 GetWeakValue(value, cell);
2291 JumpIfSmi(value, miss);
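// A cleared WeakCell holds the smi zero in its value slot, so the JumpIfSmi
// above doubles as the "referent was collected" check.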
2295 void MacroAssembler::TryGetFunctionPrototype(Register function,
2296 Register result,
2297 Register scratch,
2298 Label* miss,
2299 bool miss_on_bound_function) {
2300 Label non_instance;
2301 if (miss_on_bound_function) {
2302 // Check that the receiver isn't a smi.
2303 JumpIfSmi(function, miss);
2305 // Check that the function really is a function. Load map into result reg.
2306 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2307 b(ne, miss);
2309 ldr(scratch,
2310 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2311 ldr(scratch,
2312 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2313 tst(scratch,
2314 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2315 b(ne, miss);
2317 // Make sure that the function has an instance prototype.
2318 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2319 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2320 b(ne, &non_instance);
2321 }
2323 // Get the prototype or initial map from the function.
2324 ldr(result,
2325 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2327 // If the prototype or initial map is the hole, don't return it and
2328 // simply miss the cache instead. This will allow us to allocate a
2329 // prototype object on-demand in the runtime system.
2330 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2331 cmp(result, ip);
2332 b(eq, miss);
2334 // If the function does not have an initial map, we're done.
2335 Label done;
2336 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2337 b(ne, &done);
2339 // Get the prototype from the initial map.
2340 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2342 if (miss_on_bound_function) {
2343 jmp(&done);
2345 // Non-instance prototype: Fetch prototype from constructor field
2346 // in initial map.
2347 bind(&non_instance);
2348 ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2349 }
2351 // All done.
2352 bind(&done);
2353 }
2356 void MacroAssembler::CallStub(CodeStub* stub,
2357 TypeFeedbackId ast_id,
2358 Condition cond) {
2359 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2360 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2364 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2365 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2369 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2370 return has_frame_ || !stub->SometimesSetsUpAFrame();
2374 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2375 // If the hash field contains an array index pick it out. The assert checks
2376 // that the constants for the maximum number of digits for an array index
2377 // cached in the hash field and the number of bits reserved for it do not
2378 // conflict.
2379 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2380 (1 << String::kArrayIndexValueBits));
2381 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2385 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2386 if (CpuFeatures::IsSupported(VFP3)) {
2387 vmov(value.low(), smi);
2388 vcvt_f64_s32(value, 1);
2389 } else {
2390 SmiUntag(ip, smi);
2391 vmov(value.low(), ip);
2392 vcvt_f64_s32(value, value.low());
2393 }
2394 }
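// The VFP3 path converts the tagged value directly: vcvt_f64_s32 with one
// fixed fraction bit divides by two during the conversion, which exactly
// undoes the smi tag shift, so no separate untagging instruction is needed;
// the fallback untags into ip first.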
2397 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2398 LowDwVfpRegister double_scratch) {
2399 DCHECK(!double_input.is(double_scratch));
2400 vcvt_s32_f64(double_scratch.low(), double_input);
2401 vcvt_f64_s32(double_scratch, double_scratch.low());
2402 VFPCompareAndSetFlags(double_input, double_scratch);
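// The round trip through vcvt_s32_f64/vcvt_f64_s32 reproduces the input
// exactly only for values representable as a signed 32-bit integer; callers
// branch on eq afterwards (NaN inputs set vs and so compare not-equal, while
// -0.0 survives the round trip and must be handled separately if it matters).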
2406 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2407 DwVfpRegister double_input,
2408 LowDwVfpRegister double_scratch) {
2409 DCHECK(!double_input.is(double_scratch));
2410 vcvt_s32_f64(double_scratch.low(), double_input);
2411 vmov(result, double_scratch.low());
2412 vcvt_f64_s32(double_scratch, double_scratch.low());
2413 VFPCompareAndSetFlags(double_input, double_scratch);
2417 void MacroAssembler::TryInt32Floor(Register result,
2418 DwVfpRegister double_input,
2419 Register input_high,
2420 LowDwVfpRegister double_scratch,
2421 Label* done,
2422 Label* exact) {
2423 DCHECK(!result.is(input_high));
2424 DCHECK(!double_input.is(double_scratch));
2425 Label negative, exception;
2427 VmovHigh(input_high, double_input);
2429 // Test for NaN and infinities.
2430 Sbfx(result, input_high,
2431 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2432 cmp(result, Operand(-1));
2433 b(eq, &exception);
2434 // Test for values that can be exactly represented as a
2435 // signed 32-bit integer.
2436 TryDoubleToInt32Exact(result, double_input, double_scratch);
2437 // If exact, return (result already fetched).
2438 b(eq, exact);
2439 cmp(input_high, Operand::Zero());
2440 b(mi, &negative);
2442 // Input is in ]+0, +inf[.
2443 // If result equals 0x7fffffff input was out of range or
2444 // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2445 // could fit into an int32; that means we always treat such input as
2446 // out of range and always go to exception.
2447 // If result < 0x7fffffff, go to done, result fetched.
2448 cmn(result, Operand(1));
2449 b(mi, &exception);
2450 b(done);
2452 // Input is in ]-inf, -0[.
2453 // If x is a non integer negative number,
2454 // floor(x) <=> round_to_zero(x) - 1.
2455 bind(&negative);
2456 sub(result, result, Operand(1), SetCC);
2457 // If result is still negative, go to done, result fetched.
2458 // Else, we had an overflow and we fall through to exception.
2459 b(mi, done);
2461 bind(&exception);
2462 }
2463 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2464 DwVfpRegister double_input,
2465 Label* done) {
2466 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2467 vcvt_s32_f64(double_scratch.low(), double_input);
2468 vmov(result, double_scratch.low());
2470 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2471 sub(ip, result, Operand(1));
2472 cmp(ip, Operand(0x7ffffffe));
2473 b(lt, done);
2474 }
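// vcvt_s32_f64 saturates out-of-range inputs to 0x80000000 or 0x7fffffff.
// Subtracting 1 maps exactly those two values to 0x7fffffff and 0x7ffffffe,
// so the single signed compare above filters both saturated cases while all
// genuinely converted values take the early exit.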
2477 void MacroAssembler::TruncateDoubleToI(Register result,
2478 DwVfpRegister double_input) {
2479 Label done;
2481 TryInlineTruncateDoubleToI(result, double_input, &done);
2483 // If we fell through then inline version didn't succeed - call stub instead.
2484 push(lr);
2485 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2486 vstr(double_input, MemOperand(sp, 0));
2488 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2489 CallStub(&stub);
2491 add(sp, sp, Operand(kDoubleSize));
2492 pop(lr);
2494 bind(&done);
2495 }
2498 void MacroAssembler::TruncateHeapNumberToI(Register result,
2499 Register object) {
2500 Label done;
2501 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2502 DCHECK(!result.is(object));
2504 vldr(double_scratch,
2505 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2506 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2508 // If we fell through then inline version didn't succeed - call stub instead.
2509 push(lr);
2510 DoubleToIStub stub(isolate(),
2511 object,
2512 result,
2513 HeapNumber::kValueOffset - kHeapObjectTag,
2514 true,
2515 true);
2516 CallStub(&stub);
2517 pop(lr);
2519 bind(&done);
2520 }
2523 void MacroAssembler::TruncateNumberToI(Register object,
2524 Register result,
2525 Register heap_number_map,
2526 Register scratch1,
2527 Label* not_number) {
2528 Label done;
2529 DCHECK(!result.is(object));
2531 UntagAndJumpIfSmi(result, object, &done);
2532 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2533 TruncateHeapNumberToI(result, object);
2535 bind(&done);
2536 }
2539 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2540 Register src,
2541 int num_least_bits) {
2542 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2543 ubfx(dst, src, kSmiTagSize, num_least_bits);
2544 } else {
2545 SmiUntag(dst, src);
2546 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2547 }
2548 }
2551 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2552 Register src,
2553 int num_least_bits) {
2554 and_(dst, src, Operand((1 << num_least_bits) - 1));
2558 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2559 int num_arguments,
2560 SaveFPRegsMode save_doubles) {
2561 // All parameters are on the stack. r0 has the return value after call.
2563 // If the expected number of arguments of the runtime function is
2564 // constant, we check that the actual number of arguments matches the
2565 // expectation.
2566 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2568 // TODO(1236192): Most runtime routines don't need the number of
2569 // arguments passed in because it is constant. At some point we
2570 // should remove this need and make the runtime routine entry code
2571 // smarter.
2572 mov(r0, Operand(num_arguments));
2573 mov(r1, Operand(ExternalReference(f, isolate())));
2574 CEntryStub stub(isolate(), 1, save_doubles);
2575 CallStub(&stub);
2576 }
2579 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2580 int num_arguments) {
2581 mov(r0, Operand(num_arguments));
2582 mov(r1, Operand(ext));
2584 CEntryStub stub(isolate(), 1);
2585 CallStub(&stub);
2586 }
2589 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2590 int num_arguments,
2591 int result_size) {
2592 // TODO(1236192): Most runtime routines don't need the number of
2593 // arguments passed in because it is constant. At some point we
2594 // should remove this need and make the runtime routine entry code
2595 // smarter.
2596 mov(r0, Operand(num_arguments));
2597 JumpToExternalReference(ext);
2601 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2602 int num_arguments,
2603 int result_size) {
2604 TailCallExternalReference(ExternalReference(fid, isolate()),
2605 num_arguments,
2606 result_size);
2607 }
2610 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2611 #if defined(__thumb__)
2612 // Thumb mode builtin.
2613 DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2614 #endif // def __thumb__
2615 mov(r1, Operand(builtin));
2616 CEntryStub stub(isolate(), 1);
2617 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2621 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2622 InvokeFlag flag,
2623 const CallWrapper& call_wrapper) {
2624 // You can't call a builtin without a valid frame.
2625 DCHECK(flag == JUMP_FUNCTION || has_frame());
2627 GetBuiltinEntry(r2, id);
2628 if (flag == CALL_FUNCTION) {
2629 call_wrapper.BeforeCall(CallSize(r2));
2630 Call(r2);
2631 call_wrapper.AfterCall();
2632 } else {
2633 DCHECK(flag == JUMP_FUNCTION);
2634 Jump(r2);
2635 }
2636 }
2639 void MacroAssembler::GetBuiltinFunction(Register target,
2640 Builtins::JavaScript id) {
2641 // Load the builtins object into target register.
2642 ldr(target,
2643 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2644 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2645 // Load the JavaScript builtin function from the builtins object.
2646 ldr(target, FieldMemOperand(target,
2647 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2651 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2652 DCHECK(!target.is(r1));
2653 GetBuiltinFunction(r1, id);
2654 // Load the code entry point from the builtins object.
2655 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2659 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2660 Register scratch1, Register scratch2) {
2661 if (FLAG_native_code_counters && counter->Enabled()) {
2662 mov(scratch1, Operand(value));
2663 mov(scratch2, Operand(ExternalReference(counter)));
2664 str(scratch1, MemOperand(scratch2));
2669 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2670 Register scratch1, Register scratch2) {
2672 if (FLAG_native_code_counters && counter->Enabled()) {
2673 mov(scratch2, Operand(ExternalReference(counter)));
2674 ldr(scratch1, MemOperand(scratch2));
2675 add(scratch1, scratch1, Operand(value));
2676 str(scratch1, MemOperand(scratch2));
2681 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2682 Register scratch1, Register scratch2) {
2684 if (FLAG_native_code_counters && counter->Enabled()) {
2685 mov(scratch2, Operand(ExternalReference(counter)));
2686 ldr(scratch1, MemOperand(scratch2));
2687 sub(scratch1, scratch1, Operand(value));
2688 str(scratch1, MemOperand(scratch2));
2693 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2694 if (emit_debug_code())
2695 Check(cond, reason);
2699 void MacroAssembler::AssertFastElements(Register elements) {
2700 if (emit_debug_code()) {
2701 DCHECK(!elements.is(ip));
2702 Label ok;
2703 push(elements);
2704 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2705 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2706 cmp(elements, ip);
2707 b(eq, &ok);
2708 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2709 cmp(elements, ip);
2710 b(eq, &ok);
2711 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2712 cmp(elements, ip);
2713 b(eq, &ok);
2714 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2715 bind(&ok);
2716 pop(elements);
2717 }
2718 }
2721 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2722 Label L;
2723 b(cond, &L);
2724 Abort(reason);
2725 // will not return here
2726 bind(&L);
2727 }
2730 void MacroAssembler::Abort(BailoutReason reason) {
2731 Label abort_start;
2732 bind(&abort_start);
2733 #ifdef DEBUG
2734 const char* msg = GetBailoutReason(reason);
2735 if (msg != NULL) {
2736 RecordComment("Abort message: ");
2737 RecordComment(msg);
2738 }
2740 if (FLAG_trap_on_abort) {
2741 stop(msg);
2742 return;
2743 }
2744 #endif
2746 mov(r0, Operand(Smi::FromInt(reason)));
2747 push(r0);
2749 // Disable stub call restrictions to always allow calls to abort.
2750 if (!has_frame_) {
2751 // We don't actually want to generate a pile of code for this, so just
2752 // claim there is a stack frame, without generating one.
2753 FrameScope scope(this, StackFrame::NONE);
2754 CallRuntime(Runtime::kAbort, 1);
2755 } else {
2756 CallRuntime(Runtime::kAbort, 1);
2757 }
2758 // will not return here
2759 if (is_const_pool_blocked()) {
2760 // If the calling code cares about the exact number of
2761 // instructions generated, we insert padding here to keep the size
2762 // of the Abort macro constant.
2763 static const int kExpectedAbortInstructions = 7;
2764 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2765 DCHECK(abort_instructions <= kExpectedAbortInstructions);
2766 while (abort_instructions++ < kExpectedAbortInstructions) {
2767 nop();
2768 }
2769 }
2770 }
2773 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2774 if (context_chain_length > 0) {
2775 // Move up the chain of contexts to the context containing the slot.
2776 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2777 for (int i = 1; i < context_chain_length; i++) {
2778 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2779 }
2780 } else {
2781 // Slot is in the current function context. Move it into the
2782 // destination register in case we store into it (the write barrier
2783 // cannot be allowed to destroy the context in cp).
2784 mov(dst, cp);
2785 }
2786 }
2789 void MacroAssembler::LoadTransitionedArrayMapConditional(
2790 ElementsKind expected_kind,
2791 ElementsKind transitioned_kind,
2792 Register map_in_out,
2793 Register scratch,
2794 Label* no_map_match) {
2795 // Load the global or builtins object from the current context.
2796 ldr(scratch,
2797 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2798 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2800 // Check that the function's map is the same as the expected cached map.
2801 ldr(scratch,
2802 MemOperand(scratch,
2803 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2804 size_t offset = expected_kind * kPointerSize +
2805 FixedArrayBase::kHeaderSize;
2806 ldr(ip, FieldMemOperand(scratch, offset));
2807 cmp(map_in_out, ip);
2808 b(ne, no_map_match);
2810 // Use the transitioned cached map.
2811 offset = transitioned_kind * kPointerSize +
2812 FixedArrayBase::kHeaderSize;
2813 ldr(map_in_out, FieldMemOperand(scratch, offset));
2817 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2818 // Load the global or builtins object from the current context.
2819 ldr(function,
2820 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2821 // Load the native context from the global or builtins object.
2822 ldr(function, FieldMemOperand(function,
2823 GlobalObject::kNativeContextOffset));
2824 // Load the function from the native context.
2825 ldr(function, MemOperand(function, Context::SlotOffset(index)));
2829 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2830 Register map,
2831 Register scratch) {
2832 // Load the initial map. The global functions all have initial maps.
2833 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2834 if (emit_debug_code()) {
2835 Label ok, fail;
2836 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2837 b(&ok);
2838 bind(&fail);
2839 Abort(kGlobalFunctionsMustHaveInitialMap);
2840 bind(&ok);
2841 }
2842 }
2845 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2846 Register reg,
2847 Register scratch,
2848 Label* not_power_of_two_or_zero) {
2849 sub(scratch, reg, Operand(1), SetCC);
2850 b(mi, not_power_of_two_or_zero);
2851 tst(scratch, reg);
2852 b(ne, not_power_of_two_or_zero);
2853 }
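// Worked example: for reg == 8, scratch becomes 7 and 8 & 7 == 0, so no
// branch is taken; for reg == 6, scratch is 5 and 6 & 5 == 4 != 0, which
// branches out; reg == 0 yields a negative scratch and branches via mi.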
2856 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2857 Register reg,
2858 Register scratch,
2859 Label* zero_and_neg,
2860 Label* not_power_of_two) {
2861 sub(scratch, reg, Operand(1), SetCC);
2862 b(mi, zero_and_neg);
2863 tst(scratch, reg);
2864 b(ne, not_power_of_two);
2865 }
2868 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2869 Register reg2,
2870 Label* on_not_both_smi) {
2871 STATIC_ASSERT(kSmiTag == 0);
2872 tst(reg1, Operand(kSmiTagMask));
2873 tst(reg2, Operand(kSmiTagMask), eq);
2874 b(ne, on_not_both_smi);
2878 void MacroAssembler::UntagAndJumpIfSmi(
2879 Register dst, Register src, Label* smi_case) {
2880 STATIC_ASSERT(kSmiTag == 0);
2881 SmiUntag(dst, src, SetCC);
2882 b(cc, smi_case); // Shifter carry is not set for a smi.
2886 void MacroAssembler::UntagAndJumpIfNotSmi(
2887 Register dst, Register src, Label* non_smi_case) {
2888 STATIC_ASSERT(kSmiTag == 0);
2889 SmiUntag(dst, src, SetCC);
2890 b(cs, non_smi_case); // Shifter carry is set for a non-smi.
2894 void MacroAssembler::JumpIfEitherSmi(Register reg1,
2895 Register reg2,
2896 Label* on_either_smi) {
2897 STATIC_ASSERT(kSmiTag == 0);
2898 tst(reg1, Operand(kSmiTagMask));
2899 tst(reg2, Operand(kSmiTagMask), ne);
2900 b(eq, on_either_smi);
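// This pair of tst instructions is a predicated idiom: the second tst only
// executes when the first one left ne (reg1 is not a smi), so the final eq
// means "at least one of reg1/reg2 carries a smi tag".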
2904 void MacroAssembler::AssertNotSmi(Register object) {
2905 if (emit_debug_code()) {
2906 STATIC_ASSERT(kSmiTag == 0);
2907 tst(object, Operand(kSmiTagMask));
2908 Check(ne, kOperandIsASmi);
2913 void MacroAssembler::AssertSmi(Register object) {
2914 if (emit_debug_code()) {
2915 STATIC_ASSERT(kSmiTag == 0);
2916 tst(object, Operand(kSmiTagMask));
2917 Check(eq, kOperandIsNotSmi);
2922 void MacroAssembler::AssertString(Register object) {
2923 if (emit_debug_code()) {
2924 STATIC_ASSERT(kSmiTag == 0);
2925 tst(object, Operand(kSmiTagMask));
2926 Check(ne, kOperandIsASmiAndNotAString);
2927 push(object);
2928 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2929 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2930 pop(object);
2931 Check(lo, kOperandIsNotAString);
2932 }
2933 }
2936 void MacroAssembler::AssertName(Register object) {
2937 if (emit_debug_code()) {
2938 STATIC_ASSERT(kSmiTag == 0);
2939 tst(object, Operand(kSmiTagMask));
2940 Check(ne, kOperandIsASmiAndNotAName);
2941 push(object);
2942 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2943 CompareInstanceType(object, object, LAST_NAME_TYPE);
2944 pop(object);
2945 Check(le, kOperandIsNotAName);
2946 }
2947 }
2950 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2951 Register scratch) {
2952 if (emit_debug_code()) {
2953 Label done_checking;
2954 AssertNotSmi(object);
2955 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2956 b(eq, &done_checking);
2957 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2958 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2959 Assert(eq, kExpectedUndefinedOrCell);
2960 bind(&done_checking);
2965 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2966 if (emit_debug_code()) {
2967 CompareRoot(reg, index);
2968 Check(eq, kHeapNumberMapRegisterClobbered);
2973 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2974 Register heap_number_map,
2975 Register scratch,
2976 Label* on_not_heap_number) {
2977 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2978 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2979 cmp(scratch, heap_number_map);
2980 b(ne, on_not_heap_number);
2984 void MacroAssembler::LookupNumberStringCache(Register object,
2985 Register result,
2986 Register scratch1,
2987 Register scratch2,
2988 Register scratch3,
2989 Label* not_found) {
2990 // Use of registers. Register result is used as a temporary.
2991 Register number_string_cache = result;
2992 Register mask = scratch3;
2994 // Load the number string cache.
2995 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2997 // Make the hash mask from the length of the number string cache. It
2998 // contains two elements (number and string) for each cache entry.
2999 ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
3000 // Divide length by two (length is a smi).
3001 mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
3002 sub(mask, mask, Operand(1)); // Make mask.
3004 // Calculate the entry in the number string cache. The hash value in the
3005 // number string cache for smis is just the smi value, and the hash for
3006 // doubles is the xor of the upper and lower words. See
3007 // Heap::GetNumberStringCache.
3008 Label is_smi;
3009 Label load_result_from_cache;
3010 JumpIfSmi(object, &is_smi);
3011 CheckMap(object,
3012 scratch1,
3013 Heap::kHeapNumberMapRootIndex,
3014 not_found,
3015 DONT_DO_SMI_CHECK);
3017 STATIC_ASSERT(8 == kDoubleSize);
3018 add(scratch1,
3019 object,
3020 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
3021 ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
3022 eor(scratch1, scratch1, Operand(scratch2));
3023 and_(scratch1, scratch1, Operand(mask));
3025 // Calculate address of entry in string cache: each entry consists
3026 // of two pointer sized fields.
3027 add(scratch1,
3028 number_string_cache,
3029 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
3031 Register probe = mask;
3032 ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3033 JumpIfSmi(probe, not_found);
3034 sub(scratch2, object, Operand(kHeapObjectTag));
3035 vldr(d0, scratch2, HeapNumber::kValueOffset);
3036 sub(probe, probe, Operand(kHeapObjectTag));
3037 vldr(d1, probe, HeapNumber::kValueOffset);
3038 VFPCompareAndSetFlags(d0, d1);
3039 b(ne, not_found); // The cache did not contain this value.
3040 b(&load_result_from_cache);
3042 bind(&is_smi);
3043 Register scratch = scratch1;
3044 and_(scratch, mask, Operand(object, ASR, 1));
3045 // Calculate address of entry in string cache: each entry consists
3046 // of two pointer sized fields.
3047 add(scratch,
3048 number_string_cache,
3049 Operand(scratch, LSL, kPointerSizeLog2 + 1));
3051 // Check if the entry is the smi we are looking for.
3052 ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3053 cmp(object, probe);
3054 b(ne, not_found);
3056 // Get the result from the cache.
3057 bind(&load_result_from_cache);
3058 ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
3059 IncrementCounter(isolate()->counters()->number_to_string_native(),
3060 1,
3061 scratch1,
3062 scratch2);
3063 }
3066 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
3067 Register first, Register second, Register scratch1, Register scratch2,
3068 Label* failure) {
3069 // Test that both first and second are sequential one-byte strings.
3070 // Assume that they are non-smis.
3071 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3072 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3073 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3074 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3076 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
3077 scratch2, failure);
3078 }
3080 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
3081 Register second,
3082 Register scratch1,
3083 Register scratch2,
3084 Label* failure) {
3085 // Check that neither is a smi.
3086 and_(scratch1, first, Operand(second));
3087 JumpIfSmi(scratch1, failure);
3088 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
3089 scratch2, failure);
3090 }
3093 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3094 Label* not_unique_name) {
3095 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3096 Label succeed;
3097 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3098 b(eq, &succeed);
3099 cmp(reg, Operand(SYMBOL_TYPE));
3100 b(ne, not_unique_name);
3102 bind(&succeed);
3103 }
3106 // Allocates a heap number or jumps to the need_gc label if the young space
3107 // is full and a scavenge is needed.
3108 void MacroAssembler::AllocateHeapNumber(Register result,
3109 Register scratch1,
3110 Register scratch2,
3111 Register heap_number_map,
3112 Label* gc_required,
3113 TaggingMode tagging_mode,
3114 MutableMode mode) {
3115 // Allocate an object in the heap for the heap number and tag it as a heap
3116 // object.
3117 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3118 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3120 Heap::RootListIndex map_index = mode == MUTABLE
3121 ? Heap::kMutableHeapNumberMapRootIndex
3122 : Heap::kHeapNumberMapRootIndex;
3123 AssertIsRoot(heap_number_map, map_index);
3125 // Store heap number map in the allocated object.
3126 if (tagging_mode == TAG_RESULT) {
3127 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3128 } else {
3129 str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3130 }
3131 }
3134 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3135 DwVfpRegister value,
3136 Register scratch1,
3137 Register scratch2,
3138 Register heap_number_map,
3139 Label* gc_required) {
3140 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3141 sub(scratch1, result, Operand(kHeapObjectTag));
3142 vstr(value, scratch1, HeapNumber::kValueOffset);
3146 // Copies a fixed number of fields of heap objects from src to dst.
3147 void MacroAssembler::CopyFields(Register dst,
3148 Register src,
3149 LowDwVfpRegister double_scratch,
3150 int field_count) {
3151 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
3152 for (int i = 0; i < double_count; i++) {
3153 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
3154 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
3157 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
3158 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
3160 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
3161 if (remain != 0) {
3162 vldr(double_scratch.low(),
3163 FieldMemOperand(src, (field_count - 1) * kPointerSize));
3164 vstr(double_scratch.low(),
3165 FieldMemOperand(dst, (field_count - 1) * kPointerSize));
3166 }
3167 }
3170 void MacroAssembler::CopyBytes(Register src,
3171 Register dst,
3172 Register length,
3173 Register scratch) {
3174 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3176 // Align src before copying in word size chunks.
3177 cmp(length, Operand(kPointerSize));
3178 b(le, &byte_loop);
3180 bind(&align_loop_1);
3181 tst(src, Operand(kPointerSize - 1));
3182 b(eq, &word_loop);
3183 ldrb(scratch, MemOperand(src, 1, PostIndex));
3184 strb(scratch, MemOperand(dst, 1, PostIndex));
3185 sub(length, length, Operand(1), SetCC);
3186 b(&align_loop_1);
3187 // Copy bytes in word size chunks.
3188 bind(&word_loop);
3189 if (emit_debug_code()) {
3190 tst(src, Operand(kPointerSize - 1));
3191 Assert(eq, kExpectingAlignmentForCopyBytes);
3192 }
3193 cmp(length, Operand(kPointerSize));
3194 b(lt, &byte_loop);
3195 ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3196 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3197 str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3198 } else {
3199 strb(scratch, MemOperand(dst, 1, PostIndex));
3200 mov(scratch, Operand(scratch, LSR, 8));
3201 strb(scratch, MemOperand(dst, 1, PostIndex));
3202 mov(scratch, Operand(scratch, LSR, 8));
3203 strb(scratch, MemOperand(dst, 1, PostIndex));
3204 mov(scratch, Operand(scratch, LSR, 8));
3205 strb(scratch, MemOperand(dst, 1, PostIndex));
3206 }
3207 sub(length, length, Operand(kPointerSize));
3208 b(&word_loop);
3210 // Copy the last bytes if any left.
3211 bind(&byte_loop);
3212 cmp(length, Operand::Zero());
3213 b(eq, &done);
3214 bind(&byte_loop_1);
3215 ldrb(scratch, MemOperand(src, 1, PostIndex));
3216 strb(scratch, MemOperand(dst, 1, PostIndex));
3217 sub(length, length, Operand(1), SetCC);
3218 b(ne, &byte_loop_1);
3219 bind(&done);
3220 }
3223 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3224 Register end_offset,
3225 Register filler) {
3226 Label loop, entry;
3227 b(&entry);
3228 bind(&loop);
3229 str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3230 bind(&entry);
3231 cmp(start_offset, end_offset);
3232 b(lt, &loop);
3233 }
3236 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3237 mov(scratch, Operand(ExternalReference::cpu_features()));
3238 ldr(scratch, MemOperand(scratch));
3239 tst(scratch, Operand(1u << VFP32DREGS));
3240 }
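// CheckFor32DRegs leaves the flags for SaveFPRegs/RestoreFPRegs below:
// ne means the full d0-d31 bank is present (VFP32DREGS), eq means only
// d0-d15 exist, in which case the conditional sub/add keeps the frame
// layout identical by reserving space for the missing 16 registers.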
3243 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3244 CheckFor32DRegs(scratch);
3245 vstm(db_w, location, d16, d31, ne);
3246 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3247 vstm(db_w, location, d0, d15);
3251 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3252 CheckFor32DRegs(scratch);
3253 vldm(ia_w, location, d0, d15);
3254 vldm(ia_w, location, d16, d31, ne);
3255 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3259 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3260 Register first, Register second, Register scratch1, Register scratch2,
3261 Label* failure) {
3262 const int kFlatOneByteStringMask =
3263 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3264 const int kFlatOneByteStringTag =
3265 kStringTag | kOneByteStringTag | kSeqStringTag;
3266 and_(scratch1, first, Operand(kFlatOneByteStringMask));
3267 and_(scratch2, second, Operand(kFlatOneByteStringMask));
3268 cmp(scratch1, Operand(kFlatOneByteStringTag));
3269 // Ignore second test if first test failed.
3270 cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
3271 b(ne, failure);
3272 }
3275 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3276 Register scratch,
3277 Label* failure) {
3278 const int kFlatOneByteStringMask =
3279 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3280 const int kFlatOneByteStringTag =
3281 kStringTag | kOneByteStringTag | kSeqStringTag;
3282 and_(scratch, type, Operand(kFlatOneByteStringMask));
3283 cmp(scratch, Operand(kFlatOneByteStringTag));
3284 b(ne, failure);
3285 }
3287 static const int kRegisterPassedArguments = 4;
3290 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3291 int num_double_arguments) {
3292 int stack_passed_words = 0;
3293 if (use_eabi_hardfloat()) {
3294 // In the hard floating point calling convention, we can use
3295 // all double registers to pass doubles.
3296 if (num_double_arguments > DoubleRegister::NumRegisters()) {
3297 stack_passed_words +=
3298 2 * (num_double_arguments - DoubleRegister::NumRegisters());
3299 }
3300 } else {
3301 // In the soft floating point calling convention, every double
3302 // argument is passed using two registers.
3303 num_reg_arguments += 2 * num_double_arguments;
3304 }
3305 // Up to four simple arguments are passed in registers r0..r3.
3306 if (num_reg_arguments > kRegisterPassedArguments) {
3307 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3309 return stack_passed_words;
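// Worked example (soft-float ABI): one integer argument plus two doubles
// becomes 1 + 2 * 2 = 5 register-sized arguments; with only r0-r3 available,
// one word spills, so the function returns 1. Under hard-float, the same
// signature needs no stack words at all.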
3313 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3314 Register index,
3315 Register value,
3316 uint32_t encoding_mask) {
3318 SmiTst(string);
3319 Check(ne, kNonObject);
3321 ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3322 ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3324 and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3325 cmp(ip, Operand(encoding_mask));
3326 Check(eq, kUnexpectedStringType);
3328 // The index is assumed to be untagged coming in, tag it to compare with the
3329 // string length without using a temp register; it is restored at the end of
3330 // this function.
3331 Label index_tag_ok, index_tag_bad;
3332 TrySmiTag(index, index, &index_tag_bad);
3333 b(&index_tag_ok);
3334 bind(&index_tag_bad);
3335 Abort(kIndexIsTooLarge);
3336 bind(&index_tag_ok);
3338 ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3339 cmp(index, ip);
3340 Check(lt, kIndexIsTooLarge);
3342 cmp(index, Operand(Smi::FromInt(0)));
3343 Check(ge, kIndexIsNegative);
3345 SmiUntag(index, index);
3349 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3350 int num_double_arguments,
3351 Register scratch) {
3352 int frame_alignment = ActivationFrameAlignment();
3353 int stack_passed_arguments = CalculateStackPassedWords(
3354 num_reg_arguments, num_double_arguments);
3355 if (frame_alignment > kPointerSize) {
3356 // Make stack end at alignment and make room for num_arguments - 4 words
3357 // and the original value of sp.
3358 mov(scratch, sp);
3359 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3360 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3361 and_(sp, sp, Operand(-frame_alignment));
3362 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3363 } else {
3364 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3365 }
3366 }
3369 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3370 Register scratch) {
3371 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3372 }
3375 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
3376 DCHECK(src.is(d0));
3377 if (!use_eabi_hardfloat()) {
3378 vmov(r0, r1, src);
3379 }
3380 }
3383 // On ARM this is just a synonym to make the purpose clear.
3384 void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
3385 MovToFloatParameter(src);
3389 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
3390 DwVfpRegister src2) {
3391 DCHECK(src1.is(d0));
3392 DCHECK(src2.is(d1));
3393 if (!use_eabi_hardfloat()) {
3394 vmov(r0, r1, src1);
3395 vmov(r2, r3, src2);
3396 }
3397 }
3400 void MacroAssembler::CallCFunction(ExternalReference function,
3401 int num_reg_arguments,
3402 int num_double_arguments) {
3403 mov(ip, Operand(function));
3404 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3408 void MacroAssembler::CallCFunction(Register function,
3409 int num_reg_arguments,
3410 int num_double_arguments) {
3411 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3415 void MacroAssembler::CallCFunction(ExternalReference function,
3416 int num_arguments) {
3417 CallCFunction(function, num_arguments, 0);
3421 void MacroAssembler::CallCFunction(Register function,
3422 int num_arguments) {
3423 CallCFunction(function, num_arguments, 0);
3427 void MacroAssembler::CallCFunctionHelper(Register function,
3428 int num_reg_arguments,
3429 int num_double_arguments) {
3430 DCHECK(has_frame());
3431 // Make sure that the stack is aligned before calling a C function unless
3432 // running in the simulator. The simulator has its own alignment check which
3433 // provides more information.
3434 #if V8_HOST_ARCH_ARM
3435 if (emit_debug_code()) {
3436 int frame_alignment = base::OS::ActivationFrameAlignment();
3437 int frame_alignment_mask = frame_alignment - 1;
3438 if (frame_alignment > kPointerSize) {
3439 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3440 Label alignment_as_expected;
3441 tst(sp, Operand(frame_alignment_mask));
3442 b(eq, &alignment_as_expected);
3443 // Don't use Check here, as it will call Runtime_Abort possibly
3444 // re-entering here.
3445 stop("Unexpected alignment");
3446 bind(&alignment_as_expected);
3447 }
3448 }
3449 #endif
3451 // Just call directly. The function called cannot cause a GC, or
3452 // allow preemption, so the return address in the link register
3453 // stays correct.
3454 Call(function);
3455 int stack_passed_arguments = CalculateStackPassedWords(
3456 num_reg_arguments, num_double_arguments);
3457 if (ActivationFrameAlignment() > kPointerSize) {
3458 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3459 } else {
3460 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
3461 }
3462 }
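// Usage sketch (illustrative; ref_to_helper stands for any ExternalReference
// to a C function, and first_arg/second_arg are hypothetical):
//   PrepareCallCFunction(2, r5);   // aligns sp; r5 is a scratch register
//   mov(r0, Operand(first_arg));
//   mov(r1, Operand(second_arg));
//   CallCFunction(ref_to_helper, 2);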
3465 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3466 Register result,
3467 Register scratch) {
3468 Label small_constant_pool_load, load_result;
3469 ldr(result, MemOperand(ldr_location));
3471 if (FLAG_enable_ool_constant_pool) {
3472 // Check if this is an extended constant pool load.
3473 and_(scratch, result, Operand(GetConsantPoolLoadMask()));
3474 teq(scratch, Operand(GetConsantPoolLoadPattern()));
3475 b(eq, &small_constant_pool_load);
3476 if (emit_debug_code()) {
3477 // Check that the instruction sequence is:
3478 // movw reg, #offset_low
3479 // movt reg, #offset_high
3480 // ldr reg, [pp, reg]
3481 Instr patterns[] = {GetMovWPattern(), GetMovTPattern(),
3482 GetLdrPpRegOffsetPattern()};
3483 for (int i = 0; i < 3; i++) {
3484 ldr(result, MemOperand(ldr_location, i * kInstrSize));
3485 and_(result, result, Operand(patterns[i]));
3486 cmp(result, Operand(patterns[i]));
3487 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3489 // Result was clobbered. Restore it.
3490 ldr(result, MemOperand(ldr_location));
3493 // Get the offset into the constant pool. First extract the movw immediate
3494 // into result.
3495 and_(scratch, result, Operand(0xfff));
3496 mov(ip, Operand(result, LSR, 4));
3497 and_(ip, ip, Operand(0xf000));
3498 orr(result, scratch, Operand(ip));
3499 // Then extract movt immediate and or into result.
3500 ldr(scratch, MemOperand(ldr_location, kInstrSize));
3501 and_(ip, scratch, Operand(0xf0000));
3502 orr(result, result, Operand(ip, LSL, 12));
3503 and_(scratch, scratch, Operand(0xfff));
3504 orr(result, result, Operand(scratch, LSL, 16));
3505 b(&load_result);
3506 }
3509 bind(&small_constant_pool_load);
3510 if (emit_debug_code()) {
3511 // Check that the instruction is a ldr reg, [<pc or pp> + offset] .
3512 and_(result, result, Operand(GetConsantPoolLoadPattern()));
3513 cmp(result, Operand(GetConsantPoolLoadPattern()));
3514 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3515 // Result was clobbered. Restore it.
3516 ldr(result, MemOperand(ldr_location));
3519 // Get the offset into the constant pool.
3520 const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3521 and_(result, result, Operand(kLdrOffsetMask));
3523 bind(&load_result);
3524 // Get the address of the constant.
3525 if (FLAG_enable_ool_constant_pool) {
3526 add(result, pp, Operand(result));
3527 } else {
3528 add(result, ldr_location, Operand(result));
3529 add(result, result, Operand(Instruction::kPCReadOffset));
3530 }
3531 }
3534 void MacroAssembler::CheckPageFlag(
3535 Register object,
3536 Register scratch,
3537 int mask,
3538 Condition cc,
3539 Label* condition_met) {
3540 Bfc(scratch, object, 0, kPageSizeBits);
3541 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3542 tst(scratch, Operand(mask));
3543 b(cc, condition_met);
3547 void MacroAssembler::JumpIfBlack(Register object,
3548 Register scratch0,
3549 Register scratch1,
3550 Label* on_black) {
3551 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3552 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3556 void MacroAssembler::HasColor(Register object,
3557 Register bitmap_scratch,
3558 Register mask_scratch,
3559 Label* has_color,
3560 int first_bit,
3561 int second_bit) {
3562 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3564 GetMarkBits(object, bitmap_scratch, mask_scratch);
3566 Label other_color, word_boundary;
3567 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3568 tst(ip, Operand(mask_scratch));
3569 b(first_bit == 1 ? eq : ne, &other_color);
3570 // Shift left 1 by adding.
3571 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3572 b(eq, &word_boundary);
3573 tst(ip, Operand(mask_scratch));
3574 b(second_bit == 1 ? ne : eq, has_color);
3575 jmp(&other_color);
3577 bind(&word_boundary);
3578 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3579 tst(ip, Operand(1));
3580 b(second_bit == 1 ? ne : eq, has_color);
3581 bind(&other_color);
3582 }
3585 // Detect some, but not all, common pointer-free objects. This is used by the
3586 // incremental write barrier which doesn't care about oddballs (they are always
3587 // marked black immediately so this code is not hit).
3588 void MacroAssembler::JumpIfDataObject(Register value,
3589 Register scratch,
3590 Label* not_data_object) {
3591 Label is_data_object;
3592 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3593 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3594 b(eq, &is_data_object);
3595 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3596 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3597 // If it's a string and it's not a cons string then it's an object containing
3599 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3600 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3601 b(ne, not_data_object);
3602 bind(&is_data_object);
3606 void MacroAssembler::GetMarkBits(Register addr_reg,
3607 Register bitmap_reg,
3608 Register mask_reg) {
3609 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3610 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3611 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3612 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3613 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3614 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3615 mov(ip, Operand(1));
3616 mov(mask_reg, Operand(ip, LSL, mask_reg));
3620 void MacroAssembler::EnsureNotWhite(
3621 Register value,
3622 Register bitmap_scratch,
3623 Register mask_scratch,
3624 Register load_scratch,
3625 Label* value_is_white_and_not_data) {
3626 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3627 GetMarkBits(value, bitmap_scratch, mask_scratch);
3629 // If the value is black or grey we don't need to do anything.
3630 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3631 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3632 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
3633 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3635 Label done;
3637 // Since both black and grey have a 1 in the first position and white does
3638 // not have a 1 there we only need to check one bit.
3639 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3640 tst(mask_scratch, load_scratch);
3641 b(ne, &done);
3643 if (emit_debug_code()) {
3644 // Check for impossible bit pattern.
3645 Label ok;
3646 // LSL may overflow, making the check conservative.
3647 tst(load_scratch, Operand(mask_scratch, LSL, 1));
3648 b(eq, &ok);
3649 stop("Impossible marking bit pattern");
3650 bind(&ok);
3651 }
3653 // Value is white. We check whether it is data that doesn't need scanning.
3654 // Currently only checks for HeapNumber and non-cons strings.
3655 Register map = load_scratch; // Holds map while checking type.
3656 Register length = load_scratch; // Holds length of object after testing type.
3657 Label is_data_object;
3659 // Check for heap-number
3660 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3661 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3662 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3663 b(eq, &is_data_object);
3665 // Check for strings.
3666 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3667 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3668 // If it's a string and it's not a cons string then it's an object containing
3670 Register instance_type = load_scratch;
3671 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3672 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3673 b(ne, value_is_white_and_not_data);
3674 // It's a non-indirect (non-cons and non-slice) string.
3675 // If it's external, the length is just ExternalString::kSize.
3676 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3677 // External strings are the only ones with the kExternalStringTag bit
3678 // set.
3679 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
3680 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
3681 tst(instance_type, Operand(kExternalStringTag));
3682 mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3683 b(ne, &is_data_object);
3685 // Sequential string, either Latin1 or UC16.
3686 // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
3687 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3688 // getting the length multiplied by 2.
3689 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3690 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
3691 ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3692 tst(instance_type, Operand(kStringEncodingMask));
3693 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3694 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3695 and_(length, length, Operand(~kObjectAlignmentMask));
3697 bind(&is_data_object);
3698 // Value is a data object, and it is white. Mark it black. Since we know
3699 // that the object is white we can make it black by flipping one bit.
3700 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3701 orr(ip, ip, Operand(mask_scratch));
3702 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3704 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3705 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3706 add(ip, ip, Operand(length));
3707 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3709 bind(&done);
3710 }
3713 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3714 Usat(output_reg, 8, Operand(input_reg));
3718 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3719 DwVfpRegister input_reg,
3720 LowDwVfpRegister double_scratch) {
3721 Label done;
3723 // Handle inputs >= 255 (including +infinity).
3724 Vmov(double_scratch, 255.0, result_reg);
3725 mov(result_reg, Operand(255));
3726 VFPCompareAndSetFlags(input_reg, double_scratch);
3727 b(ge, &done);
3729 // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
3730 // rounding mode will provide the correct result.
3731 vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3732 vmov(result_reg, double_scratch.low());
3734 bind(&done);
3735 }
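// Resulting clamp behaviour: inputs >= 255 (and +infinity) take the early
// exit with 255; negative inputs come out as 0 because vcvt_u32_f64
// saturates at zero; in-range values are rounded to nearest, e.g. 127.6
// becomes 128.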
3738 void MacroAssembler::LoadInstanceDescriptors(Register map,
3739 Register descriptors) {
3740 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3744 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3745 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3746 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3750 void MacroAssembler::EnumLength(Register dst, Register map) {
3751 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3752 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3753 and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
3758 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3759 int accessor_index,
3760 AccessorComponent accessor) {
3761 ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3762 LoadInstanceDescriptors(dst, dst);
3763 ldr(dst,
3764 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3765 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3766 : AccessorPair::kSetterOffset;
3767 ldr(dst, FieldMemOperand(dst, offset));
3771 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3772 Register empty_fixed_array_value = r6;
3773 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3774 Label next, start;
3775 mov(r2, r0);
3777 // Check if the enum length field is properly initialized, indicating that
3778 // there is an enum cache.
3779 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3781 EnumLength(r3, r1);
3782 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3783 b(eq, call_runtime);
3785 jmp(&start);
3787 bind(&next);
3788 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3790 // For all objects but the receiver, check that the cache is empty.
3791 EnumLength(r3, r1);
3792 cmp(r3, Operand(Smi::FromInt(0)));
3793 b(ne, call_runtime);
3795 bind(&start);
3797 // Check that there are no elements. Register r2 contains the current JS
3798 // object we've reached through the prototype chain.
3799 Label no_elements;
3800 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3801 cmp(r2, empty_fixed_array_value);
3802 b(eq, &no_elements);
3804 // Second chance, the object may be using the empty slow element dictionary.
3805 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3806 b(ne, call_runtime);
3808 bind(&no_elements);
3809 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3810 cmp(r2, null_value);
3811 b(ne, &next);
3812 }
3815 void MacroAssembler::TestJSArrayForAllocationMemento(
3816 Register receiver_reg,
3817 Register scratch_reg,
3818 Label* no_memento_found) {
3819 ExternalReference new_space_start =
3820 ExternalReference::new_space_start(isolate());
3821 ExternalReference new_space_allocation_top =
3822 ExternalReference::new_space_allocation_top_address(isolate());
3823 add(scratch_reg, receiver_reg,
3824 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3825 cmp(scratch_reg, Operand(new_space_start));
3826 b(lt, no_memento_found);
3827 mov(ip, Operand(new_space_allocation_top));
3828 ldr(ip, MemOperand(ip));
3829 cmp(scratch_reg, ip);
3830 b(gt, no_memento_found);
3831 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3832 cmp(scratch_reg,
3833 Operand(isolate()->factory()->allocation_memento_map()));
3834 }
3837 Register GetRegisterThatIsNotOneOf(Register reg1,
3838 Register reg2,
3839 Register reg3,
3840 Register reg4,
3841 Register reg5,
3842 Register reg6) {
3843 RegList regs = 0;
3844 if (reg1.is_valid()) regs |= reg1.bit();
3845 if (reg2.is_valid()) regs |= reg2.bit();
3846 if (reg3.is_valid()) regs |= reg3.bit();
3847 if (reg4.is_valid()) regs |= reg4.bit();
3848 if (reg5.is_valid()) regs |= reg5.bit();
3849 if (reg6.is_valid()) regs |= reg6.bit();
3851 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
3852 Register candidate = Register::FromAllocationIndex(i);
3853 if (regs & candidate.bit()) continue;
3854 return candidate;
3855 }
3856 UNREACHABLE();
3857 return no_reg;
3858 }
3861 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3862 Register object,
3863 Register scratch0,
3864 Register scratch1,
3865 Label* found) {
3866 DCHECK(!scratch1.is(scratch0));
3867 Factory* factory = isolate()->factory();
3868 Register current = scratch0;
3869 Label loop_again;
3871 // scratch contains the elements pointer.
3872 mov(current, object);
3874 // Loop based on the map going up the prototype chain.
3875 bind(&loop_again);
3876 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
3877 ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
3878 DecodeField<Map::ElementsKindBits>(scratch1);
3879 cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
3880 b(eq, found);
3881 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
3882 cmp(current, Operand(factory->null_value()));
3883 b(ne, &loop_again);
3884 }
3888 bool AreAliased(Register reg1,
3889 Register reg2,
3890 Register reg3,
3891 Register reg4,
3892 Register reg5,
3893 Register reg6,
3894 Register reg7,
3895 Register reg8) {
3896 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
3897 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
3898 reg7.is_valid() + reg8.is_valid();
3900 RegList regs = 0;
3901 if (reg1.is_valid()) regs |= reg1.bit();
3902 if (reg2.is_valid()) regs |= reg2.bit();
3903 if (reg3.is_valid()) regs |= reg3.bit();
3904 if (reg4.is_valid()) regs |= reg4.bit();
3905 if (reg5.is_valid()) regs |= reg5.bit();
3906 if (reg6.is_valid()) regs |= reg6.bit();
3907 if (reg7.is_valid()) regs |= reg7.bit();
3908 if (reg8.is_valid()) regs |= reg8.bit();
3909 int n_of_non_aliasing_regs = NumRegs(regs);
3911 return n_of_valid_regs != n_of_non_aliasing_regs;
3916 CodePatcher::CodePatcher(byte* address,
3917 int instructions,
3918 FlushICache flush_cache)
3919 : address_(address),
3920 size_(instructions * Assembler::kInstrSize),
3921 masm_(NULL, address, size_ + Assembler::kGap),
3922 flush_cache_(flush_cache) {
3923 // Create a new macro assembler pointing to the address of the code to patch.
3924 // The size is adjusted with kGap in order for the assembler to generate size
3925 // bytes of instructions without failing with buffer size constraints.
3926 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3930 CodePatcher::~CodePatcher() {
3931 // Indicate that code has changed.
3932 if (flush_cache_ == FLUSH) {
3933 CpuFeatures::FlushICache(address_, size_);
3936 // Check that the code was patched as expected.
3937 DCHECK(masm_.pc_ == address_ + size_);
3938 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3942 void CodePatcher::Emit(Instr instr) {
3943 masm()->emit(instr);
3947 void CodePatcher::Emit(Address addr) {
3948 masm()->emit(reinterpret_cast<Instr>(addr));
3952 void CodePatcher::EmitCondition(Condition cond) {
3953 Instr instr = Assembler::instr_at(masm_.pc_);
3954 instr = (instr & ~kCondMask) | cond;
3955 masm_.emit(instr);
3956 }
3959 void MacroAssembler::TruncatingDiv(Register result,
3960 Register dividend,
3961 int32_t divisor) {
3962 DCHECK(!dividend.is(result));
3963 DCHECK(!dividend.is(ip));
3964 DCHECK(!result.is(ip));
3965 base::MagicNumbersForDivision<uint32_t> mag =
3966 base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
3967 mov(ip, Operand(mag.multiplier));
3968 bool neg = (mag.multiplier & (1U << 31)) != 0;
3969 if (divisor > 0 && neg) {
3970 smmla(result, dividend, ip, dividend);
3971 } else {
3972 smmul(result, dividend, ip);
3973 if (divisor < 0 && !neg && mag.multiplier > 0) {
3974 sub(result, result, Operand(dividend));
3975 }
3976 }
3977 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
3978 add(result, result, Operand(dividend, LSR, 31));
3979 }
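// Worked example (illustrative): for divisor == 3 the computed multiplier is
// 0x55555556 with shift 0. For dividend == 7, smmul gives hi32(7 * M) == 2
// and (7 >>> 31) == 0, so result == 2; for dividend == -7 the high word is
// -3 (rounded toward minus infinity) and the sign fix-up adds
// (-7 >>> 31) == 1, giving the truncated quotient -2.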
3981 } // namespace internal
3982 } // namespace v8
3984 #endif // V8_TARGET_ARCH_ARM