1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/cpu-profiler.h"
14 #include "src/debug/debug.h"
15 #include "src/runtime/runtime.h"
17 #include "src/arm/macro-assembler-arm.h"
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
23 : Assembler(arg_isolate, buffer, size),
24 generating_stub_(false),
26 if (isolate() != NULL) {
27 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
33 void MacroAssembler::Jump(Register target, Condition cond) {
38 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
40 DCHECK(RelocInfo::IsCodeTarget(rmode));
41 mov(pc, Operand(target, rmode), LeaveCC, cond);
45 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
47 DCHECK(!RelocInfo::IsCodeTarget(rmode));
48 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
52 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
54 DCHECK(RelocInfo::IsCodeTarget(rmode));
55 // 'code' is always generated ARM code, never THUMB code
56 AllowDeferredHandleDereference embedding_raw_address;
57 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
61 int MacroAssembler::CallSize(Register target, Condition cond) {
66 void MacroAssembler::Call(Register target, Condition cond) {
67 // Block constant pool for the call instruction sequence.
68 BlockConstPoolScope block_const_pool(this);
72 DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
76 int MacroAssembler::CallSize(
77 Address target, RelocInfo::Mode rmode, Condition cond) {
78 Instr mov_instr = cond | MOV | LeaveCC;
79 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
81 mov_operand.instructions_required(this, mov_instr) * kInstrSize;
85 int MacroAssembler::CallStubSize(
86 CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
87 return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
91 int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
93 RelocInfo::Mode rmode,
95 Instr mov_instr = cond | MOV | LeaveCC;
96 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
98 mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
102 void MacroAssembler::Call(Address target,
103 RelocInfo::Mode rmode,
105 TargetAddressStorageMode mode) {
106 // Block constant pool for the call instruction sequence.
107 BlockConstPoolScope block_const_pool(this);
111 bool old_predictable_code_size = predictable_code_size();
112 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
113 set_predictable_code_size(true);
117 // Check the expected size before generating code to ensure we assume the same
118 // constant pool availability (e.g., whether constant pool is full or not).
119 int expected_size = CallSize(target, rmode, cond);
122 // Call sequence on V7 or later may be:
123 // movw ip, #... @ call address low 16
124 // movt ip, #... @ call address high 16
127 // Or for pre-V7 or values that may be back-patched
128 // to avoid ICache flushes:
129 // ldr ip, [pc, #...] @ call address
133 // Statement positions are expected to be recorded when the target
134 // address is loaded. The mov method will automatically record
135 // positions when pc is the target; since that is not the case here,
136 // we have to do it explicitly.
137 positions_recorder()->WriteRecordedPositions();
139 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
142 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
143 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
144 set_predictable_code_size(old_predictable_code_size);
149 int MacroAssembler::CallSize(Handle<Code> code,
150 RelocInfo::Mode rmode,
151 TypeFeedbackId ast_id,
153 AllowDeferredHandleDereference using_raw_address;
154 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
158 void MacroAssembler::Call(Handle<Code> code,
159 RelocInfo::Mode rmode,
160 TypeFeedbackId ast_id,
162 TargetAddressStorageMode mode) {
165 DCHECK(RelocInfo::IsCodeTarget(rmode));
166 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
167 SetRecordedAstId(ast_id);
168 rmode = RelocInfo::CODE_TARGET_WITH_ID;
170 // 'code' is always generated ARM code, never THUMB code
171 AllowDeferredHandleDereference embedding_raw_address;
172 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
176 void MacroAssembler::Ret(Condition cond) {
181 void MacroAssembler::Drop(int count, Condition cond) {
183 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
188 void MacroAssembler::Ret(int drop, Condition cond) {
194 void MacroAssembler::Swap(Register reg1,
198 if (scratch.is(no_reg)) {
199 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
200 eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
201 eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
203 mov(scratch, reg1, LeaveCC, cond);
204 mov(reg1, reg2, LeaveCC, cond);
205 mov(reg2, scratch, LeaveCC, cond);
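// For illustration, the scratch-free branch above swaps the registers in place
// with three exclusive-ors: starting from reg1 = 0xA, reg2 = 0x6 the values
// become reg1 = 0xC, then reg2 = 0xA, then reg1 = 0x6, so the two registers
// end up exchanged without using a third register.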
210 void MacroAssembler::Call(Label* target) {
215 void MacroAssembler::Push(Handle<Object> handle) {
216 mov(ip, Operand(handle));
221 void MacroAssembler::Move(Register dst, Handle<Object> value) {
222 AllowDeferredHandleDereference smi_check;
223 if (value->IsSmi()) {
224 mov(dst, Operand(value));
226 DCHECK(value->IsHeapObject());
227 if (isolate()->heap()->InNewSpace(*value)) {
228 Handle<Cell> cell = isolate()->factory()->NewCell(value);
229 mov(dst, Operand(cell));
230 ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
232 mov(dst, Operand(value));
238 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
240 mov(dst, src, LeaveCC, cond);
245 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
252 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
253 Register srcA, Condition cond) {
254 if (CpuFeatures::IsSupported(MLS)) {
255 CpuFeatureScope scope(this, MLS);
256 mls(dst, src1, src2, srcA, cond);
258 DCHECK(!srcA.is(ip));
259 mul(ip, src1, src2, LeaveCC, cond);
260 sub(dst, srcA, ip, LeaveCC, cond);
265 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
267 if (!src2.is_reg() &&
268 !src2.must_output_reloc_info(this) &&
269 src2.immediate() == 0) {
270 mov(dst, Operand::Zero(), LeaveCC, cond);
271 } else if (!(src2.instructions_required(this) == 1) &&
272 !src2.must_output_reloc_info(this) &&
273 CpuFeatures::IsSupported(ARMv7) &&
274 base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
276 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
278 and_(dst, src1, src2, LeaveCC, cond);
283 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
286 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
287 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
288 and_(dst, src1, Operand(mask), LeaveCC, cond);
290 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
293 ubfx(dst, src1, lsb, width, cond);
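// For illustration of the pre-ARMv7 fallback: with lsb = 4 and width = 8 the
// mask is (1 << 12) - 1 - ((1 << 4) - 1) = 0xFF0, so the and_ keeps bits
// [4..11] and the LSR #4 shifts them down to bit 0, which is the same result
// that ubfx dst, src1, #4, #8 produces in one instruction on ARMv7.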
298 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
301 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
302 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
303 and_(dst, src1, Operand(mask), LeaveCC, cond);
304 int shift_up = 32 - lsb - width;
305 int shift_down = lsb + shift_up;
307 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
309 if (shift_down != 0) {
310 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
313 sbfx(dst, src1, lsb, width, cond);
318 void MacroAssembler::Bfi(Register dst,
324 DCHECK(0 <= lsb && lsb < 32);
325 DCHECK(0 <= width && width < 32);
326 DCHECK(lsb + width < 32);
327 DCHECK(!scratch.is(dst));
328 if (width == 0) return;
329 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
330 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
331 bic(dst, dst, Operand(mask));
332 and_(scratch, src, Operand((1 << width) - 1));
333 mov(scratch, Operand(scratch, LSL, lsb));
334 orr(dst, dst, scratch);
336 bfi(dst, src, lsb, width, cond);
341 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
344 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
345 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
346 bic(dst, src, Operand(mask));
348 Move(dst, src, cond);
349 bfc(dst, lsb, width, cond);
354 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
356 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
357 DCHECK(!dst.is(pc) && !src.rm().is(pc));
358 DCHECK((satpos >= 0) && (satpos <= 31));
360 // These asserts are required to ensure compatibility with the ARMv7 implementation.
362 DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
363 DCHECK(src.rs().is(no_reg));
366 int satval = (1 << satpos) - 1;
369 b(NegateCondition(cond), &done); // Skip saturate if !condition.
371 if (!(src.is_reg() && dst.is(src.rm()))) {
374 tst(dst, Operand(~satval));
376 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
377 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
380 usat(dst, satpos, src, cond);
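// For illustration: with satpos = 8, satval is 255. An input of 300 has bits
// set outside 0..255 and is positive, so it is clamped to 255; a negative
// input such as -5 is clamped to 0; an input like 200 already lies in
// [0, 255] and is left unchanged, matching what the ARMv7 usat above does.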
385 void MacroAssembler::Load(Register dst,
386 const MemOperand& src,
388 DCHECK(!r.IsDouble());
389 if (r.IsInteger8()) {
391 } else if (r.IsUInteger8()) {
393 } else if (r.IsInteger16()) {
395 } else if (r.IsUInteger16()) {
403 void MacroAssembler::Store(Register src,
404 const MemOperand& dst,
406 DCHECK(!r.IsDouble());
407 if (r.IsInteger8() || r.IsUInteger8()) {
409 } else if (r.IsInteger16() || r.IsUInteger16()) {
412 if (r.IsHeapObject()) {
414 } else if (r.IsSmi()) {
422 void MacroAssembler::LoadRoot(Register destination,
423 Heap::RootListIndex index,
425 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
426 isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
427 !predictable_code_size()) {
428 // The CPU supports fast immediate values, and this root will never
429 // change. We will load it as a relocatable immediate value.
430 Handle<Object> root = isolate()->heap()->root_handle(index);
431 mov(destination, Operand(root), LeaveCC, cond);
434 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
438 void MacroAssembler::StoreRoot(Register source,
439 Heap::RootListIndex index,
441 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
442 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
446 void MacroAssembler::InNewSpace(Register object,
450 DCHECK(cond == eq || cond == ne);
451 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
452 cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
457 void MacroAssembler::RecordWriteField(
462 LinkRegisterStatus lr_status,
463 SaveFPRegsMode save_fp,
464 RememberedSetAction remembered_set_action,
466 PointersToHereCheck pointers_to_here_check_for_value) {
467 // First, check if a write barrier is even needed. The tests below
468 // catch stores of Smis.
471 // Skip barrier if writing a smi.
472 if (smi_check == INLINE_SMI_CHECK) {
473 JumpIfSmi(value, &done);
476 // Although the object register is tagged, the offset is relative to the start
477 // of the object, so the offset must be a multiple of kPointerSize.
478 DCHECK(IsAligned(offset, kPointerSize));
480 add(dst, object, Operand(offset - kHeapObjectTag));
481 if (emit_debug_code()) {
483 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
485 stop("Unaligned cell in write barrier");
494 remembered_set_action,
496 pointers_to_here_check_for_value);
500 // Clobber clobbered input registers when running with the debug-code flag
501 // turned on to provoke errors.
502 if (emit_debug_code()) {
503 mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
504 mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
509 // Will clobber 4 registers: object, map, dst, ip. The
510 // register 'object' contains a heap object pointer.
511 void MacroAssembler::RecordWriteForMap(Register object,
514 LinkRegisterStatus lr_status,
515 SaveFPRegsMode fp_mode) {
516 if (emit_debug_code()) {
517 ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
518 cmp(dst, Operand(isolate()->factory()->meta_map()));
519 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
522 if (!FLAG_incremental_marking) {
526 if (emit_debug_code()) {
527 ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
529 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
534 // A single check of the map's pages interesting flag suffices, since it is
535 // only set during incremental collection, in which case the from object's
536 // page's interesting flag is guaranteed to be set as well. This optimization
537 // relies on the fact that maps can never be in new space.
539 map, // Used as scratch.
540 MemoryChunk::kPointersToHereAreInterestingMask,
544 add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
545 if (emit_debug_code()) {
547 tst(dst, Operand((1 << kPointerSizeLog2) - 1));
549 stop("Unaligned cell in write barrier");
553 // Record the actual write.
554 if (lr_status == kLRHasNotBeenSaved) {
557 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
560 if (lr_status == kLRHasNotBeenSaved) {
566 // Count number of write barriers in generated code.
567 isolate()->counters()->write_barriers_static()->Increment();
568 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
570 // Clobber clobbered registers when running with the debug-code flag
571 // turned on to provoke errors.
572 if (emit_debug_code()) {
573 mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
574 mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
579 // Will clobber 4 registers: object, address, scratch, ip. The
580 // register 'object' contains a heap object pointer. The heap object
581 // tag is shifted away.
582 void MacroAssembler::RecordWrite(
586 LinkRegisterStatus lr_status,
587 SaveFPRegsMode fp_mode,
588 RememberedSetAction remembered_set_action,
590 PointersToHereCheck pointers_to_here_check_for_value) {
591 DCHECK(!object.is(value));
592 if (emit_debug_code()) {
593 ldr(ip, MemOperand(address));
595 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
598 if (remembered_set_action == OMIT_REMEMBERED_SET &&
599 !FLAG_incremental_marking) {
603 // First, check if a write barrier is even needed. The tests below
604 // catch stores of smis and stores into the young generation.
607 if (smi_check == INLINE_SMI_CHECK) {
608 JumpIfSmi(value, &done);
611 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
613 value, // Used as scratch.
614 MemoryChunk::kPointersToHereAreInterestingMask,
618 CheckPageFlag(object,
619 value, // Used as scratch.
620 MemoryChunk::kPointersFromHereAreInterestingMask,
624 // Record the actual write.
625 if (lr_status == kLRHasNotBeenSaved) {
628 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
631 if (lr_status == kLRHasNotBeenSaved) {
637 // Count number of write barriers in generated code.
638 isolate()->counters()->write_barriers_static()->Increment();
639 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
642 // Clobber clobbered registers when running with the debug-code flag
643 // turned on to provoke errors.
644 if (emit_debug_code()) {
645 mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
646 mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
651 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
654 SaveFPRegsMode fp_mode,
655 RememberedSetFinalAction and_then) {
657 if (emit_debug_code()) {
659 JumpIfNotInNewSpace(object, scratch, &ok);
660 stop("Remembered set pointer is in new space");
663 // Load store buffer top.
664 ExternalReference store_buffer =
665 ExternalReference::store_buffer_top(isolate());
666 mov(ip, Operand(store_buffer));
667 ldr(scratch, MemOperand(ip));
668 // Store pointer to buffer and increment buffer top.
669 str(address, MemOperand(scratch, kPointerSize, PostIndex));
670 // Write back new top of buffer.
671 str(scratch, MemOperand(ip));
672 // Call stub on end of buffer.
673 // Check for end of buffer.
674 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
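// For illustration: the sequence above appends the slot address at the current
// store buffer top, advances the top by one word via the PostIndex store, and
// writes the new top back. If the advanced top has the overflow bit set, the
// StoreBufferOverflowStub below is called to process the buffer; otherwise the
// code either falls through or returns, depending on and_then.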
675 if (and_then == kFallThroughAtEnd) {
678 DCHECK(and_then == kReturnAtEnd);
682 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
683 CallStub(&store_buffer_overflow);
686 if (and_then == kReturnAtEnd) {
692 void MacroAssembler::PushFixedFrame(Register marker_reg) {
693 DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
694 stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
695 (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
696 fp.bit() | lr.bit());
700 void MacroAssembler::PopFixedFrame(Register marker_reg) {
701 DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
702 ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
703 (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
704 fp.bit() | lr.bit());
708 // Push and pop all registers that can hold pointers.
709 void MacroAssembler::PushSafepointRegisters() {
710 // Safepoints expect a block of contiguous register values starting with r0,
711 // except when FLAG_enable_embedded_constant_pool is set, which omits pp.
712 DCHECK(kSafepointSavedRegisters ==
713 (FLAG_enable_embedded_constant_pool
714 ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
715 : (1 << kNumSafepointSavedRegisters) - 1));
716 // Safepoints expect a block of kNumSafepointRegisters values on the
717 // stack, so adjust the stack for unsaved registers.
718 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
719 DCHECK(num_unsaved >= 0);
720 sub(sp, sp, Operand(num_unsaved * kPointerSize));
721 stm(db_w, sp, kSafepointSavedRegisters);
725 void MacroAssembler::PopSafepointRegisters() {
726 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
727 ldm(ia_w, sp, kSafepointSavedRegisters);
728 add(sp, sp, Operand(num_unsaved * kPointerSize));
732 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
733 str(src, SafepointRegisterSlot(dst));
737 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
738 ldr(dst, SafepointRegisterSlot(src));
742 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
743 // The registers are pushed starting with the highest encoding,
744 // which means that lowest encodings are closest to the stack pointer.
745 if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
749 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
754 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
755 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
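// For illustration: stm(db_w, ...) stores lower-numbered registers at lower
// addresses, so after PushSafepointRegisters() the value of r0 sits at [sp]
// and, say, r2 can be read back from MemOperand(sp, 2 * kPointerSize). When
// FLAG_enable_embedded_constant_pool is set, pp itself is not pushed, which is
// what the reg_code > pp.code() adjustment in SafepointRegisterStackIndex
// accounts for.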
759 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
760 // Number of d-regs not known at snapshot time.
761 DCHECK(!serializer_enabled());
762 // General purpose registers are pushed last on the stack.
763 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
764 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
765 return MemOperand(sp, doubles_size + register_offset);
769 void MacroAssembler::Ldrd(Register dst1, Register dst2,
770 const MemOperand& src, Condition cond) {
771 DCHECK(src.rm().is(no_reg));
772 DCHECK(!dst1.is(lr)); // r14.
774 // V8 does not use this addressing mode, so the fallback code
775 // below doesn't support it yet.
776 DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
778 // Generate two ldr instructions if ldrd is not available.
779 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
780 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
781 CpuFeatureScope scope(this, ARMv7);
782 ldrd(dst1, dst2, src, cond);
784 if ((src.am() == Offset) || (src.am() == NegOffset)) {
785 MemOperand src2(src);
786 src2.set_offset(src2.offset() + 4);
787 if (dst1.is(src.rn())) {
788 ldr(dst2, src2, cond);
789 ldr(dst1, src, cond);
791 ldr(dst1, src, cond);
792 ldr(dst2, src2, cond);
794 } else { // PostIndex or NegPostIndex.
795 DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
796 if (dst1.is(src.rn())) {
797 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
798 ldr(dst1, src, cond);
800 MemOperand src2(src);
801 src2.set_offset(src2.offset() - 4);
802 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
803 ldr(dst2, src2, cond);
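// For illustration: Ldrd(r4, r5, MemOperand(r6, 8)) on a pre-ARMv7 target
// becomes ldr r4, [r6, #8] followed by ldr r5, [r6, #12]. When the base
// register doubles as the first destination, e.g. MemOperand(r4, 8) with
// dst1 = r4, the second word is loaded first so the base is not clobbered
// before the remaining access uses it.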
810 void MacroAssembler::Strd(Register src1, Register src2,
811 const MemOperand& dst, Condition cond) {
812 DCHECK(dst.rm().is(no_reg));
813 DCHECK(!src1.is(lr)); // r14.
815 // V8 does not use this addressing mode, so the fallback code
816 // below doesn't support it yet.
817 DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
819 // Generate two str instructions if strd is not available.
820 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
821 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
822 CpuFeatureScope scope(this, ARMv7);
823 strd(src1, src2, dst, cond);
825 MemOperand dst2(dst);
826 if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
827 dst2.set_offset(dst2.offset() + 4);
828 str(src1, dst, cond);
829 str(src2, dst2, cond);
830 } else { // PostIndex or NegPostIndex.
831 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
832 dst2.set_offset(dst2.offset() - 4);
833 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
834 str(src2, dst2, cond);
840 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
841 // If needed, restore wanted bits of FPSCR.
844 if (emit_debug_code()) {
845 Label rounding_mode_correct;
846 tst(scratch, Operand(kVFPRoundingModeMask));
847 b(eq, &rounding_mode_correct);
848 // Don't call Assert here, since Runtime_Abort could re-enter here.
849 stop("Default rounding mode not set");
850 bind(&rounding_mode_correct);
852 tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
854 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
860 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
861 const DwVfpRegister src,
862 const Condition cond) {
863 vsub(dst, src, kDoubleRegZero, cond);
867 void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
868 const SwVfpRegister src2,
869 const Condition cond) {
870 // Compare and move FPSCR flags to the normal condition flags.
871 VFPCompareAndLoadFlags(src1, src2, pc, cond);
874 void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
876 const Condition cond) {
877 // Compare and move FPSCR flags to the normal condition flags.
878 VFPCompareAndLoadFlags(src1, src2, pc, cond);
882 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
883 const DwVfpRegister src2,
884 const Condition cond) {
885 // Compare and move FPSCR flags to the normal condition flags.
886 VFPCompareAndLoadFlags(src1, src2, pc, cond);
889 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
891 const Condition cond) {
892 // Compare and move FPSCR flags to the normal condition flags.
893 VFPCompareAndLoadFlags(src1, src2, pc, cond);
897 void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
898 const SwVfpRegister src2,
899 const Register fpscr_flags,
900 const Condition cond) {
901 // Compare and load FPSCR.
902 vcmp(src1, src2, cond);
903 vmrs(fpscr_flags, cond);
906 void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
908 const Register fpscr_flags,
909 const Condition cond) {
910 // Compare and load FPSCR.
911 vcmp(src1, src2, cond);
912 vmrs(fpscr_flags, cond);
916 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
917 const DwVfpRegister src2,
918 const Register fpscr_flags,
919 const Condition cond) {
920 // Compare and load FPSCR.
921 vcmp(src1, src2, cond);
922 vmrs(fpscr_flags, cond);
925 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
927 const Register fpscr_flags,
928 const Condition cond) {
929 // Compare and load FPSCR.
930 vcmp(src1, src2, cond);
931 vmrs(fpscr_flags, cond);
935 void MacroAssembler::Vmov(const DwVfpRegister dst,
937 const Register scratch) {
938 static const DoubleRepresentation minus_zero(-0.0);
939 static const DoubleRepresentation zero(0.0);
940 DoubleRepresentation value_rep(imm);
941 // Handle special values first.
942 if (value_rep == zero) {
943 vmov(dst, kDoubleRegZero);
944 } else if (value_rep == minus_zero) {
945 vneg(dst, kDoubleRegZero);
947 vmov(dst, imm, scratch);
952 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
953 if (src.code() < 16) {
954 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
955 vmov(dst, loc.high());
957 vmov(dst, VmovIndexHi, src);
962 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
963 if (dst.code() < 16) {
964 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
965 vmov(loc.high(), src);
967 vmov(dst, VmovIndexHi, src);
972 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
973 if (src.code() < 16) {
974 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
975 vmov(dst, loc.low());
977 vmov(dst, VmovIndexLo, src);
982 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
983 if (dst.code() < 16) {
984 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
985 vmov(loc.low(), src);
987 vmov(dst, VmovIndexLo, src);
992 void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
993 Register code_target_address) {
994 DCHECK(FLAG_enable_embedded_constant_pool);
995 ldr(pp, MemOperand(code_target_address,
996 Code::kConstantPoolOffset - Code::kHeaderSize));
997 add(pp, pp, code_target_address);
1001 void MacroAssembler::LoadConstantPoolPointerRegister() {
1002 DCHECK(FLAG_enable_embedded_constant_pool);
1003 int entry_offset = pc_offset() + Instruction::kPCReadOffset;
1004 sub(ip, pc, Operand(entry_offset));
1005 LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
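// For illustration: reading pc on ARM yields the address of the current
// instruction plus 8 (Instruction::kPCReadOffset), so subtracting
// pc_offset() + kPCReadOffset leaves the start address of the generated
// instructions in ip. The helper above then reads the constant pool field
// from the Code header just before the instructions and adds it to that
// address to form pp.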
1009 void MacroAssembler::StubPrologue() {
1011 Push(Smi::FromInt(StackFrame::STUB));
1012 // Adjust FP to point to saved FP.
1013 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
1014 if (FLAG_enable_embedded_constant_pool) {
1015 LoadConstantPoolPointerRegister();
1016 set_constant_pool_available(true);
1021 void MacroAssembler::Prologue(bool code_pre_aging) {
1022 { PredictableCodeSizeScope predictable_code_size_scope(
1023 this, kNoCodeAgeSequenceLength);
1024 // The following three instructions must remain together and unmodified
1025 // for code aging to work properly.
1026 if (code_pre_aging) {
1027 // Pre-age the code.
1028 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
1029 add(r0, pc, Operand(-8));
1030 ldr(pc, MemOperand(pc, -4));
1031 emit_code_stub_address(stub);
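// For illustration: since reading pc yields the current instruction's address
// plus 8, add(r0, pc, Operand(-8)) leaves the address of the add itself (the
// start of the patchable sequence) in r0, and ldr(pc, MemOperand(pc, -4))
// loads the word immediately after the ldr, which is where
// emit_code_stub_address(stub) places the pre-aged stub's entry point.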
1035 // Adjust FP to point to saved FP.
1036 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
1039 if (FLAG_enable_embedded_constant_pool) {
1040 LoadConstantPoolPointerRegister();
1041 set_constant_pool_available(true);
1046 void MacroAssembler::EnterFrame(StackFrame::Type type,
1047 bool load_constant_pool_pointer_reg) {
1050 if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
1051 LoadConstantPoolPointerRegister();
1053 mov(ip, Operand(Smi::FromInt(type)));
1055 mov(ip, Operand(CodeObject()));
1057 // Adjust FP to point to saved FP.
1059 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
1063 int MacroAssembler::LeaveFrame(StackFrame::Type type) {
1068 // Drop the execution stack down to the frame pointer and restore
1069 // the caller frame pointer, return address and constant pool pointer
1070 // (if FLAG_enable_embedded_constant_pool).
1072 if (FLAG_enable_embedded_constant_pool) {
1073 add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
1074 frame_ends = pc_offset();
1075 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
1078 frame_ends = pc_offset();
1079 ldm(ia_w, sp, fp.bit() | lr.bit());
1085 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
1086 // Set up the frame structure on the stack.
1087 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1088 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1089 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1091 mov(fp, Operand(sp)); // Set up new frame pointer.
1092 // Reserve room for saved entry sp and code object.
1093 sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
1094 if (emit_debug_code()) {
1095 mov(ip, Operand::Zero());
1096 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1098 if (FLAG_enable_embedded_constant_pool) {
1099 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1101 mov(ip, Operand(CodeObject()));
1102 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1104 // Save the frame pointer and the context in top.
1105 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1106 str(fp, MemOperand(ip));
1107 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1108 str(cp, MemOperand(ip));
1110 // Optionally save all double registers.
1113 // Note that d0 will be accessible at
1114 // fp - ExitFrameConstants::kFrameSize -
1115 // DwVfpRegister::kMaxNumRegisters * kDoubleSize,
1116 // since the sp slot, code slot and constant pool slot (if
1117 // FLAG_enable_embedded_constant_pool) were pushed after the fp.
1120 // Reserve place for the return address and stack space and align the frame
1121 // preparing for calling the runtime function.
1122 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1123 sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
1124 if (frame_alignment > 0) {
1125 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
1126 and_(sp, sp, Operand(-frame_alignment));
1129 // Set the exit frame sp value to point just before the return address
1131 add(ip, sp, Operand(kPointerSize));
1132 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
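// For illustration: with stack_space = 3 and an 8-byte frame alignment this
// reserves (3 + 1) * kPointerSize = 16 bytes (the extra slot is for the return
// address), rounds sp down to an 8-byte boundary with and_(sp, sp, Operand(-8)),
// and then records sp + kPointerSize in kSPOffset as the location just before
// that return-address slot.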
1136 void MacroAssembler::InitializeNewString(Register string,
1138 Heap::RootListIndex map_index,
1140 Register scratch2) {
1141 SmiTag(scratch1, length);
1142 LoadRoot(scratch2, map_index);
1143 str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1144 mov(scratch1, Operand(String::kEmptyHashField));
1145 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1146 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
1150 int MacroAssembler::ActivationFrameAlignment() {
1151 #if V8_HOST_ARCH_ARM
1152 // Running on the real platform. Use the alignment as mandated by the local environment.
1154 // Note: This will break if we ever start generating snapshots on one ARM
1155 // platform for another ARM platform with a different alignment.
1156 return base::OS::ActivationFrameAlignment();
1157 #else // V8_HOST_ARCH_ARM
1158 // If we are using the simulator then we should always align to the expected
1159 // alignment. As the simulator is used to generate snapshots we do not know
1160 // if the target platform will need alignment, so this is controlled from a flag.
1162 return FLAG_sim_stack_alignment;
1163 #endif // V8_HOST_ARCH_ARM
1167 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1168 bool restore_context,
1169 bool argument_count_is_length) {
1170 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1172 // Optionally restore all double registers.
1174 // Calculate the stack location of the saved doubles and restore them.
1175 const int offset = ExitFrameConstants::kFrameSize;
1177 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1178 RestoreFPRegs(r3, ip);
1182 mov(r3, Operand::Zero());
1183 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1184 str(r3, MemOperand(ip));
1186 // Restore current context from top and clear it in debug mode.
1187 if (restore_context) {
1188 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1189 ldr(cp, MemOperand(ip));
1192 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1193 str(r3, MemOperand(ip));
1196 // Tear down the exit frame, pop the arguments, and return.
1197 if (FLAG_enable_embedded_constant_pool) {
1198 ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1200 mov(sp, Operand(fp));
1201 ldm(ia_w, sp, fp.bit() | lr.bit());
1202 if (argument_count.is_valid()) {
1203 if (argument_count_is_length) {
1204 add(sp, sp, argument_count);
1206 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1212 void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1213 if (use_eabi_hardfloat()) {
1221 // On ARM this is just a synonym to make the purpose clear.
1222 void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1223 MovFromFloatResult(dst);
1227 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1228 const ParameterCount& actual,
1229 Handle<Code> code_constant,
1232 bool* definitely_mismatches,
1234 const CallWrapper& call_wrapper) {
1235 bool definitely_matches = false;
1236 *definitely_mismatches = false;
1237 Label regular_invoke;
1239 // Check whether the expected and actual arguments count match. If not,
1240 // setup registers according to contract with ArgumentsAdaptorTrampoline:
1241 // r0: actual arguments count
1242 // r1: function (passed through to callee)
1243 // r2: expected arguments count
1245 // The code below is made a lot easier because the calling code already sets
1246 // up actual and expected registers according to the contract if values are
1247 // passed in registers.
1248 DCHECK(actual.is_immediate() || actual.reg().is(r0));
1249 DCHECK(expected.is_immediate() || expected.reg().is(r2));
1250 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
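// For illustration: invoking a function that formally expects 2 arguments with
// 3 actual arguments ends up with r0 = 3, r2 = 2 and the code to call in r3;
// since the counts differ, the code below routes the call through the
// ArgumentsAdaptorTrampoline, which adapts the frame before entering the
// callee.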
1252 if (expected.is_immediate()) {
1253 DCHECK(actual.is_immediate());
1254 if (expected.immediate() == actual.immediate()) {
1255 definitely_matches = true;
1257 mov(r0, Operand(actual.immediate()));
1258 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1259 if (expected.immediate() == sentinel) {
1260 // Don't worry about adapting arguments for builtins that
1261 // don't want that done. Skip the adaptation code by making it look
1262 // like we have a match between the expected and actual number of arguments.
1264 definitely_matches = true;
1266 *definitely_mismatches = true;
1267 mov(r2, Operand(expected.immediate()));
1271 if (actual.is_immediate()) {
1272 cmp(expected.reg(), Operand(actual.immediate()));
1273 b(eq, &regular_invoke);
1274 mov(r0, Operand(actual.immediate()));
1276 cmp(expected.reg(), Operand(actual.reg()));
1277 b(eq, &regular_invoke);
1281 if (!definitely_matches) {
1282 if (!code_constant.is_null()) {
1283 mov(r3, Operand(code_constant));
1284 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1287 Handle<Code> adaptor =
1288 isolate()->builtins()->ArgumentsAdaptorTrampoline();
1289 if (flag == CALL_FUNCTION) {
1290 call_wrapper.BeforeCall(CallSize(adaptor));
1292 call_wrapper.AfterCall();
1293 if (!*definitely_mismatches) {
1297 Jump(adaptor, RelocInfo::CODE_TARGET);
1299 bind(&regular_invoke);
1304 void MacroAssembler::InvokeCode(Register code,
1305 const ParameterCount& expected,
1306 const ParameterCount& actual,
1308 const CallWrapper& call_wrapper) {
1309 // You can't call a function without a valid frame.
1310 DCHECK(flag == JUMP_FUNCTION || has_frame());
1313 bool definitely_mismatches = false;
1314 InvokePrologue(expected, actual, Handle<Code>::null(), code,
1315 &done, &definitely_mismatches, flag,
1317 if (!definitely_mismatches) {
1318 if (flag == CALL_FUNCTION) {
1319 call_wrapper.BeforeCall(CallSize(code));
1321 call_wrapper.AfterCall();
1323 DCHECK(flag == JUMP_FUNCTION);
1327 // Continue here if InvokePrologue does handle the invocation due to
1328 // mismatched parameter counts.
1334 void MacroAssembler::InvokeFunction(Register fun,
1335 const ParameterCount& actual,
1337 const CallWrapper& call_wrapper) {
1338 // You can't call a function without a valid frame.
1339 DCHECK(flag == JUMP_FUNCTION || has_frame());
1341 // Contract with called JS functions requires that function is passed in r1.
1344 Register expected_reg = r2;
1345 Register code_reg = r3;
1347 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1348 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1350 FieldMemOperand(code_reg,
1351 SharedFunctionInfo::kFormalParameterCountOffset));
1352 SmiUntag(expected_reg);
1354 FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1356 ParameterCount expected(expected_reg);
1357 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
1361 void MacroAssembler::InvokeFunction(Register function,
1362 const ParameterCount& expected,
1363 const ParameterCount& actual,
1365 const CallWrapper& call_wrapper) {
1366 // You can't call a function without a valid frame.
1367 DCHECK(flag == JUMP_FUNCTION || has_frame());
1369 // Contract with called JS functions requires that function is passed in r1.
1370 DCHECK(function.is(r1));
1372 // Get the function and setup the context.
1373 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1375 // We call indirectly through the code field in the function to
1376 // allow recompilation to take effect without changing any of the call sites.
1378 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1379 InvokeCode(r3, expected, actual, flag, call_wrapper);
1383 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1384 const ParameterCount& expected,
1385 const ParameterCount& actual,
1387 const CallWrapper& call_wrapper) {
1389 InvokeFunction(r1, expected, actual, flag, call_wrapper);
1393 void MacroAssembler::IsObjectJSStringType(Register object,
1396 DCHECK(kNotStringTag != 0);
1398 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1399 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1400 tst(scratch, Operand(kIsNotStringMask));
1405 void MacroAssembler::IsObjectNameType(Register object,
1408 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1409 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1410 cmp(scratch, Operand(LAST_NAME_TYPE));
1415 void MacroAssembler::DebugBreak() {
1416 mov(r0, Operand::Zero());
1418 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1419 CEntryStub ces(isolate(), 1);
1420 DCHECK(AllowThisStubCall(&ces));
1421 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1425 void MacroAssembler::PushStackHandler() {
1426 // Adjust this code if not the case.
1427 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1428 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1430 // Link the current handler as the next handler.
1431 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1432 ldr(r5, MemOperand(r6));
1435 // Set this new handler as the current one.
1436 str(sp, MemOperand(r6));
1440 void MacroAssembler::PopStackHandler() {
1441 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1443 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1444 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1445 str(r1, MemOperand(ip));
1449 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1452 Label same_contexts;
1454 DCHECK(!holder_reg.is(scratch));
1455 DCHECK(!holder_reg.is(ip));
1456 DCHECK(!scratch.is(ip));
1458 // Load current lexical context from the stack frame.
1459 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1460 // In debug mode, make sure the lexical context is set.
1462 cmp(scratch, Operand::Zero());
1463 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1466 // Load the native context of the current context.
1468 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1469 ldr(scratch, FieldMemOperand(scratch, offset));
1470 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1472 // Check the context is a native context.
1473 if (emit_debug_code()) {
1474 // Cannot use ip as a temporary in this verification code, because ip is
1475 // clobbered as part of cmp with an object Operand.
1476 push(holder_reg); // Temporarily save holder on the stack.
1477 // Read the first word and compare to the native_context_map.
1478 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1479 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1480 cmp(holder_reg, ip);
1481 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1482 pop(holder_reg); // Restore holder.
1485 // Check if both contexts are the same.
1486 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1487 cmp(scratch, Operand(ip));
1488 b(eq, &same_contexts);
1490 // Check the context is a native context.
1491 if (emit_debug_code()) {
1492 // Cannot use ip as a temporary in this verification code, because ip is
1493 // clobbered as part of cmp with an object Operand.
1494 push(holder_reg); // Temporarily save holder on the stack.
1495 mov(holder_reg, ip); // Move ip to its holding place.
1496 LoadRoot(ip, Heap::kNullValueRootIndex);
1497 cmp(holder_reg, ip);
1498 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1500 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1501 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1502 cmp(holder_reg, ip);
1503 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1504 // Restoring ip is not needed; it is reloaded below.
1505 pop(holder_reg); // Restore holder.
1506 // Restore ip to holder's context.
1507 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1510 // Check that the security token in the calling global object is
1511 // compatible with the security token in the receiving global object.
1513 int token_offset = Context::kHeaderSize +
1514 Context::SECURITY_TOKEN_INDEX * kPointerSize;
1516 ldr(scratch, FieldMemOperand(scratch, token_offset));
1517 ldr(ip, FieldMemOperand(ip, token_offset));
1518 cmp(scratch, Operand(ip));
1521 bind(&same_contexts);
1525 // Compute the hash code from the untagged key. This must be kept in sync with
1526 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1527 // code-stub-hydrogen.cc
1528 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1529 // First of all we assign the hash seed to scratch.
1530 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1533 // Xor original key with a seed.
1534 eor(t0, t0, Operand(scratch));
1536 // Compute the hash code from the untagged key. This must be kept in sync
1537 // with ComputeIntegerHash in utils.h.
1539 // hash = ~hash + (hash << 15);
1540 mvn(scratch, Operand(t0));
1541 add(t0, scratch, Operand(t0, LSL, 15));
1542 // hash = hash ^ (hash >> 12);
1543 eor(t0, t0, Operand(t0, LSR, 12));
1544 // hash = hash + (hash << 2);
1545 add(t0, t0, Operand(t0, LSL, 2));
1546 // hash = hash ^ (hash >> 4);
1547 eor(t0, t0, Operand(t0, LSR, 4));
1548 // hash = hash * 2057;
1549 mov(scratch, Operand(t0, LSL, 11));
1550 add(t0, t0, Operand(t0, LSL, 3));
1551 add(t0, t0, scratch);
1552 // hash = hash ^ (hash >> 16);
1553 eor(t0, t0, Operand(t0, LSR, 16));
1554 bic(t0, t0, Operand(0xc0000000u));
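// For illustration: the multiply by 2057 is emitted without a mul instruction
// because 2057 = 1 + 8 + 2048, so hash * 2057 == hash + (hash << 3) +
// (hash << 11), which is exactly the mov/add/add triple above. The final bic
// clears the top two bits, leaving a non-negative 30-bit hash.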
1558 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1567 // elements - holds the slow-case elements of the receiver on entry.
1568 // Unchanged unless 'result' is the same register.
1570 // key - holds the smi key on entry.
1571 // Unchanged unless 'result' is the same register.
1573 // result - holds the result on exit if the load succeeded.
1574 // Allowed to be the same as 'key' or 'result'.
1575 // Unchanged on bailout so 'key' or 'result' can be used
1576 // in further computation.
1578 // Scratch registers:
1580 // t0 - holds the untagged key on entry and holds the hash once computed.
1582 // t1 - used to hold the capacity mask of the dictionary
1584 // t2 - used for the index into the dictionary.
1587 GetNumberHash(t0, t1);
1589 // Compute the capacity mask.
1590 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1592 sub(t1, t1, Operand(1));
1594 // Generate an unrolled loop that performs a few probes before giving up.
1595 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1596 // Use t2 for index calculations and keep the hash intact in t0.
1598 // Compute the masked index: (hash + i + i * i) & mask.
1600 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1602 and_(t2, t2, Operand(t1));
1604 // Scale the index by multiplying by the element size.
1605 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1606 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
1608 // Check if the key is identical to the name.
1609 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1610 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1611 cmp(key, Operand(ip));
1612 if (i != kNumberDictionaryProbes - 1) {
1620 // Check that the value is a field property.
1621 // t2: elements + (index * kPointerSize)
1622 const int kDetailsOffset =
1623 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1624 ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1626 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1629 // Get the value at the masked, scaled index and return.
1630 const int kValueOffset =
1631 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1632 ldr(result, FieldMemOperand(t2, kValueOffset));
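// For illustration: each SeededNumberDictionary entry is kEntrySize == 3
// pointers laid out as (key, value, details), which is why the probe index is
// scaled by 3 (t2 + (t2 << 1)) and why the value is read at
// kElementsStartOffset + kPointerSize while the details word sits at
// kElementsStartOffset + 2 * kPointerSize.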
1636 void MacroAssembler::Allocate(int object_size,
1641 AllocationFlags flags) {
1642 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1643 if (!FLAG_inline_new) {
1644 if (emit_debug_code()) {
1645 // Trash the registers to simulate an allocation failure.
1646 mov(result, Operand(0x7091));
1647 mov(scratch1, Operand(0x7191));
1648 mov(scratch2, Operand(0x7291));
1654 DCHECK(!result.is(scratch1));
1655 DCHECK(!result.is(scratch2));
1656 DCHECK(!scratch1.is(scratch2));
1657 DCHECK(!scratch1.is(ip));
1658 DCHECK(!scratch2.is(ip));
1660 // Make object size into bytes.
1661 if ((flags & SIZE_IN_WORDS) != 0) {
1662 object_size *= kPointerSize;
1664 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1666 // Check relative positions of allocation top and limit addresses.
1667 // The values must be adjacent in memory to allow the use of LDM.
1668 // Also, assert that the registers are numbered such that the values
1669 // are loaded in the correct order.
1670 ExternalReference allocation_top =
1671 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1672 ExternalReference allocation_limit =
1673 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1676 reinterpret_cast<intptr_t>(allocation_top.address());
1678 reinterpret_cast<intptr_t>(allocation_limit.address());
1679 DCHECK((limit - top) == kPointerSize);
1680 DCHECK(result.code() < ip.code());
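// For illustration: because the top and limit words are adjacent and LDM loads
// lower-numbered registers from lower addresses, the single
// ldm(ia, topaddr, result.bit() | ip.bit()) below fills result with the
// allocation top and ip with the allocation limit; the two DCHECKs above guard
// exactly those preconditions.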
1682 // Set up allocation top address register.
1683 Register topaddr = scratch1;
1684 mov(topaddr, Operand(allocation_top));
1686 // This code stores a temporary value in ip. This is OK, as the code below
1687 // does not need ip for implicit literal generation.
1688 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1689 // Load allocation top into result and allocation limit into ip.
1690 ldm(ia, topaddr, result.bit() | ip.bit());
1692 if (emit_debug_code()) {
1693 // Assert that result actually contains top on entry. ip is used
1694 // immediately below, so this use of ip does not cause a difference in
1695 // register content between debug and release mode.
1696 ldr(ip, MemOperand(topaddr));
1698 Check(eq, kUnexpectedAllocationTop);
1700 // Load allocation limit into ip. Result already contains allocation top.
1701 ldr(ip, MemOperand(topaddr, limit - top));
1704 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1705 // Align the next allocation. Storing the filler map without checking top is
1706 // safe in new-space because the limit of the heap is aligned there.
1707 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1708 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1711 if ((flags & PRETENURE) != 0) {
1712 cmp(result, Operand(ip));
1715 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1716 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
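// For illustration: the and_ with SetCC tests the low bits of the allocation
// top. When the top is only word aligned, a one-pointer filler map is stored
// there and the PostIndex write bumps result by kDoubleSize / 2 (4 bytes),
// so the object itself starts double aligned.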
1720 // Calculate new top and bail out if new space is exhausted. Use result
1721 // to calculate the new top. We must preserve the ip register at this
1722 // point, so we cannot just use add().
1723 DCHECK(object_size > 0);
1724 Register source = result;
1725 Condition cond = al;
1727 while (object_size != 0) {
1728 if (((object_size >> shift) & 0x03) == 0) {
1731 int bits = object_size & (0xff << shift);
1732 object_size -= bits;
1734 Operand bits_operand(bits);
1735 DCHECK(bits_operand.instructions_required(this) == 1);
1736 add(scratch2, source, bits_operand, SetCC, cond);
1742 cmp(scratch2, Operand(ip));
1744 str(scratch2, MemOperand(topaddr));
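// For illustration: the loop above emits the size as a chain of adds whose
// immediates each fit ARM's rotated 8-bit encoding. An object_size of 0x2008
// bytes, for example, is split into add #0x8 followed by add #0x2000; each
// slice is a single-instruction immediate, which is what the DCHECK on
// instructions_required() verifies.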
1746 // Tag object if requested.
1747 if ((flags & TAG_OBJECT) != 0) {
1748 add(result, result, Operand(kHeapObjectTag));
1753 void MacroAssembler::Allocate(Register object_size,
1758 AllocationFlags flags) {
1759 if (!FLAG_inline_new) {
1760 if (emit_debug_code()) {
1761 // Trash the registers to simulate an allocation failure.
1762 mov(result, Operand(0x7091));
1763 mov(scratch1, Operand(0x7191));
1764 mov(scratch2, Operand(0x7291));
1770 // Assert that the register arguments are different and that none of
1771 // them are ip. ip is used explicitly in the code generated below.
1772 DCHECK(!result.is(scratch1));
1773 DCHECK(!result.is(scratch2));
1774 DCHECK(!scratch1.is(scratch2));
1775 DCHECK(!object_size.is(ip));
1776 DCHECK(!result.is(ip));
1777 DCHECK(!scratch1.is(ip));
1778 DCHECK(!scratch2.is(ip));
1780 // Check relative positions of allocation top and limit addresses.
1781 // The values must be adjacent in memory to allow the use of LDM.
1782 // Also, assert that the registers are numbered such that the values
1783 // are loaded in the correct order.
1784 ExternalReference allocation_top =
1785 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1786 ExternalReference allocation_limit =
1787 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1789 reinterpret_cast<intptr_t>(allocation_top.address());
1791 reinterpret_cast<intptr_t>(allocation_limit.address());
1792 DCHECK((limit - top) == kPointerSize);
1793 DCHECK(result.code() < ip.code());
1795 // Set up allocation top address.
1796 Register topaddr = scratch1;
1797 mov(topaddr, Operand(allocation_top));
1799 // This code stores a temporary value in ip. This is OK, as the code below
1800 // does not need ip for implicit literal generation.
1801 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1802 // Load allocation top into result and allocation limit into ip.
1803 ldm(ia, topaddr, result.bit() | ip.bit());
1805 if (emit_debug_code()) {
1806 // Assert that result actually contains top on entry. ip is used
1807 // immediately below, so this use of ip does not cause a difference in
1808 // register content between debug and release mode.
1809 ldr(ip, MemOperand(topaddr));
1811 Check(eq, kUnexpectedAllocationTop);
1813 // Load allocation limit into ip. Result already contains allocation top.
1814 ldr(ip, MemOperand(topaddr, limit - top));
1817 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1818 // Align the next allocation. Storing the filler map without checking top is
1819 // safe in new-space because the limit of the heap is aligned there.
1820 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1821 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1824 if ((flags & PRETENURE) != 0) {
1825 cmp(result, Operand(ip));
1828 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1829 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1833 // Calculate new top and bail out if new space is exhausted. Use result
1834 // to calculate the new top. Object size may be in words so a shift is
1835 // required to get the number of bytes.
1836 if ((flags & SIZE_IN_WORDS) != 0) {
1837 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1839 add(scratch2, result, Operand(object_size), SetCC);
1842 cmp(scratch2, Operand(ip));
1845 // Update allocation top. result temporarily holds the new top.
1846 if (emit_debug_code()) {
1847 tst(scratch2, Operand(kObjectAlignmentMask));
1848 Check(eq, kUnalignedAllocationInNewSpace);
1850 str(scratch2, MemOperand(topaddr));
1852 // Tag object if requested.
1853 if ((flags & TAG_OBJECT) != 0) {
1854 add(result, result, Operand(kHeapObjectTag));
1859 void MacroAssembler::AllocateTwoByteString(Register result,
1864 Label* gc_required) {
1865 // Calculate the number of bytes needed for the characters in the string while
1866 // observing object alignment.
1867 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1868 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1869 add(scratch1, scratch1,
1870 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1871 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
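// For illustration: the three instructions above compute
// (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask) &
// ~kObjectAlignmentMask, i.e. the payload plus header rounded up to the next
// kObjectAlignment boundary using the usual (x + mask) & ~mask idiom, e.g.
// 26 bytes rounds up to 32 with an 8-byte alignment.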
1873 // Allocate two-byte string in new space.
1881 // Set the map, length and hash field.
1882 InitializeNewString(result,
1884 Heap::kStringMapRootIndex,
1890 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1891 Register scratch1, Register scratch2,
1893 Label* gc_required) {
1894 // Calculate the number of bytes needed for the characters in the string while
1895 // observing object alignment.
1896 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1897 DCHECK(kCharSize == 1);
1898 add(scratch1, length,
1899 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1900 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1902 // Allocate one-byte string in new space.
1910 // Set the map, length and hash field.
1911 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
1912 scratch1, scratch2);
1916 void MacroAssembler::AllocateTwoByteConsString(Register result,
1920 Label* gc_required) {
1921 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1924 InitializeNewString(result,
1926 Heap::kConsStringMapRootIndex,
1932 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
1935 Label* gc_required) {
1936 Allocate(ConsString::kSize,
1943 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
1944 scratch1, scratch2);
1948 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1952 Label* gc_required) {
1953 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1956 InitializeNewString(result,
1958 Heap::kSlicedStringMapRootIndex,
1964 void MacroAssembler::AllocateOneByteSlicedString(Register result,
1968 Label* gc_required) {
1969 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1972 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
1973 scratch1, scratch2);
1977 void MacroAssembler::CompareObjectType(Register object,
1980 InstanceType type) {
1981 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
1983 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
1984 CompareInstanceType(map, temp, type);
1988 void MacroAssembler::CompareInstanceType(Register map,
1990 InstanceType type) {
1991 // Registers map and type_reg can be ip. These two lines assert
1992 // that ip can be used with the two instructions (the constants
1993 // will never need ip).
1994 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1995 STATIC_ASSERT(LAST_TYPE < 256);
1996 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1997 cmp(type_reg, Operand(type));
2001 void MacroAssembler::CompareRoot(Register obj,
2002 Heap::RootListIndex index) {
2003 DCHECK(!obj.is(ip));
2004 LoadRoot(ip, index);
2009 void MacroAssembler::CheckFastElements(Register map,
2012 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2013 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2014 STATIC_ASSERT(FAST_ELEMENTS == 2);
2015 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2016 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2017 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2022 void MacroAssembler::CheckFastObjectElements(Register map,
2025 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2026 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2027 STATIC_ASSERT(FAST_ELEMENTS == 2);
2028 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2029 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2030 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2032 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2037 void MacroAssembler::CheckFastSmiElements(Register map,
2040 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2041 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2042 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2043 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2048 void MacroAssembler::StoreNumberToDoubleElements(
2051 Register elements_reg,
2053 LowDwVfpRegister double_scratch,
2055 int elements_offset) {
2056 Label smi_value, store;
2058 // Handle smi values specially.
2059 JumpIfSmi(value_reg, &smi_value);
2061 // Ensure that the object is a heap number
2064 isolate()->factory()->heap_number_map(),
2068 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2069 // Force a canonical NaN.
2070 if (emit_debug_code()) {
2072 tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2073 Assert(ne, kDefaultNaNModeNotSet);
2075 VFPCanonicalizeNaN(double_scratch);
2079 SmiToDouble(double_scratch, value_reg);
2082 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2083 vstr(double_scratch,
2084 FieldMemOperand(scratch1,
2085 FixedDoubleArray::kHeaderSize - elements_offset));
2089 void MacroAssembler::CompareMap(Register obj,
2092 Label* early_success) {
2093 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2094 CompareMap(scratch, map, early_success);
2098 void MacroAssembler::CompareMap(Register obj_map,
2100 Label* early_success) {
2101 cmp(obj_map, Operand(map));
2105 void MacroAssembler::CheckMap(Register obj,
2109 SmiCheckType smi_check_type) {
2110 if (smi_check_type == DO_SMI_CHECK) {
2111 JumpIfSmi(obj, fail);
2115 CompareMap(obj, scratch, map, &success);
2121 void MacroAssembler::CheckMap(Register obj,
2123 Heap::RootListIndex index,
2125 SmiCheckType smi_check_type) {
2126 if (smi_check_type == DO_SMI_CHECK) {
2127 JumpIfSmi(obj, fail);
2129 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2130 LoadRoot(ip, index);
2136 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2137 Register scratch2, Handle<WeakCell> cell,
2138 Handle<Code> success,
2139 SmiCheckType smi_check_type) {
2141 if (smi_check_type == DO_SMI_CHECK) {
2142 JumpIfSmi(obj, &fail);
2144 ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2145 CmpWeakValue(scratch1, cell, scratch2);
2146 Jump(success, RelocInfo::CODE_TARGET, eq);
2151 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2153 mov(scratch, Operand(cell));
2154 ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2155 cmp(value, scratch);
2159 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2160 mov(value, Operand(cell));
2161 ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
2165 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2167 GetWeakValue(value, cell);
2168 JumpIfSmi(value, miss);
2172 void MacroAssembler::GetMapConstructor(Register result, Register map,
2173 Register temp, Register temp2) {
2175 ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2177 JumpIfSmi(result, &done);
2178 CompareObjectType(result, temp, temp2, MAP_TYPE);
2180 ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2186 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2187 Register scratch, Label* miss) {
2188 // Get the prototype or initial map from the function.
2190 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2192 // If the prototype or initial map is the hole, don't return it and
2193 // simply miss the cache instead. This will allow us to allocate a
2194 // prototype object on-demand in the runtime system.
2195 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2199 // If the function does not have an initial map, we're done.
2201 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2204 // Get the prototype from the initial map.
2205 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2212 void MacroAssembler::CallStub(CodeStub* stub,
2213 TypeFeedbackId ast_id,
2215 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2216 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2220 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2221 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2225 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2226 return has_frame_ || !stub->SometimesSetsUpAFrame();
2230 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2231 // If the hash field contains an array index, pick it out. The assert checks
2232 // that the constants for the maximum number of digits for an array index
2233 // cached in the hash field and the number of bits reserved for it do not
2234 // conflict.
2235 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2236 (1 << String::kArrayIndexValueBits));
2237 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2241 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2242 if (CpuFeatures::IsSupported(VFP3)) {
2243 vmov(value.low(), smi);
2244 vcvt_f64_s32(value, 1);
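// The second operand of vcvt_f64_s32 here is a fraction-bit count for a
// fixed-point conversion: converting the tagged smi (2 * n) with one fraction
// bit divides by two during the conversion, so no separate untag is needed.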
2247 vmov(value.low(), ip);
2248 vcvt_f64_s32(value, value.low());
2253 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2254 LowDwVfpRegister double_scratch) {
2255 DCHECK(!double_input.is(double_scratch));
2256 vcvt_s32_f64(double_scratch.low(), double_input);
2257 vcvt_f64_s32(double_scratch, double_scratch.low());
2258 VFPCompareAndSetFlags(double_input, double_scratch);
2262 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2263 DwVfpRegister double_input,
2264 LowDwVfpRegister double_scratch) {
2265 DCHECK(!double_input.is(double_scratch));
2266 vcvt_s32_f64(double_scratch.low(), double_input);
2267 vmov(result, double_scratch.low());
2268 vcvt_f64_s32(double_scratch, double_scratch.low());
2269 VFPCompareAndSetFlags(double_input, double_scratch);
2273 void MacroAssembler::TryInt32Floor(Register result,
2274 DwVfpRegister double_input,
2275 Register input_high,
2276 LowDwVfpRegister double_scratch,
2279 DCHECK(!result.is(input_high));
2280 DCHECK(!double_input.is(double_scratch));
2281 Label negative, exception;
2283 VmovHigh(input_high, double_input);
2285 // Test for NaN and infinities.
2286 Sbfx(result, input_high,
2287 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2288 cmp(result, Operand(-1));
2290 // Test for values that can be exactly represented as a
2291 // signed 32-bit integer.
2292 TryDoubleToInt32Exact(result, double_input, double_scratch);
2293 // If exact, return (result already fetched).
2295 cmp(input_high, Operand::Zero());
2298 // Input is in ]+0, +inf[.
2299 // If result equals 0x7fffffff input was out of range or
2300 // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2301 // could fit into an int32; that means we always treat the input as
2302 // out of range and always go to the exception path.
2303 // If result < 0x7fffffff, go to done, result fetched.
2304 cmn(result, Operand(1));
2308 // Input is in ]-inf, -0[.
2309 // If x is a non integer negative number,
2310 // floor(x) <=> round_to_zero(x) - 1.
2312 sub(result, result, Operand(1), SetCC);
2313 // If result is still negative, go to done, result fetched.
2314 // Else, we had an overflow and we fall through to the exception path.
2319 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2320 DwVfpRegister double_input,
2322 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2323 vcvt_s32_f64(double_scratch.low(), double_input);
2324 vmov(result, double_scratch.low());
2326 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2327 sub(ip, result, Operand(1));
2328 cmp(ip, Operand(0x7ffffffe));
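// Subtracting 1 folds the two saturation values 0x7fffffff and 0x80000000
// onto 0x7ffffffe and 0x7fffffff, so a single comparison against 0x7ffffffe
// can detect both saturation cases at once.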
2333 void MacroAssembler::TruncateDoubleToI(Register result,
2334 DwVfpRegister double_input) {
2337 TryInlineTruncateDoubleToI(result, double_input, &done);
2339 // If we fell through, the inline version didn't succeed; call the stub instead.
2341 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2342 vstr(double_input, MemOperand(sp, 0));
2344 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2347 add(sp, sp, Operand(kDoubleSize));
2354 void MacroAssembler::TruncateHeapNumberToI(Register result,
2357 LowDwVfpRegister double_scratch = kScratchDoubleReg;
2358 DCHECK(!result.is(object));
2360 vldr(double_scratch,
2361 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2362 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2364 // If we fell through, the inline version didn't succeed; call the stub instead.
2366 DoubleToIStub stub(isolate(),
2369 HeapNumber::kValueOffset - kHeapObjectTag,
2379 void MacroAssembler::TruncateNumberToI(Register object,
2381 Register heap_number_map,
2383 Label* not_number) {
2385 DCHECK(!result.is(object));
2387 UntagAndJumpIfSmi(result, object, &done);
2388 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2389 TruncateHeapNumberToI(result, object);
2395 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2397 int num_least_bits) {
2398 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2399 ubfx(dst, src, kSmiTagSize, num_least_bits);
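// ubfx extracts num_least_bits bits starting at bit kSmiTagSize, so it untags
// the smi and masks the requested bits in a single instruction.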
2402 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2407 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2409 int num_least_bits) {
2410 and_(dst, src, Operand((1 << num_least_bits) - 1));
2414 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2416 SaveFPRegsMode save_doubles) {
2417 // All parameters are on the stack. r0 has the return value after call.
2419 // If the expected number of arguments of the runtime function is
2420 // constant, we check that the actual number of arguments matches the
2421 // expected number.
2422 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2424 // TODO(1236192): Most runtime routines don't need the number of
2425 // arguments passed in because it is constant. At some point we
2426 // should remove this need and make the runtime routine entry code
2427 // smarter.
2428 mov(r0, Operand(num_arguments));
2429 mov(r1, Operand(ExternalReference(f, isolate())));
2430 CEntryStub stub(isolate(), 1, save_doubles);
2435 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2436 int num_arguments) {
2437 mov(r0, Operand(num_arguments));
2438 mov(r1, Operand(ext));
2440 CEntryStub stub(isolate(), 1);
2445 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2448 // TODO(1236192): Most runtime routines don't need the number of
2449 // arguments passed in because it is constant. At some point we
2450 // should remove this need and make the runtime routine entry code
2451 // smarter.
2452 mov(r0, Operand(num_arguments));
2453 JumpToExternalReference(ext);
2457 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2460 TailCallExternalReference(ExternalReference(fid, isolate()),
2466 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2467 #if defined(__thumb__)
2468 // Thumb mode builtin.
2469 DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2470 #endif
2471 mov(r1, Operand(builtin));
2472 CEntryStub stub(isolate(), 1);
2473 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2477 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
2478 const CallWrapper& call_wrapper) {
2479 // You can't call a builtin without a valid frame.
2480 DCHECK(flag == JUMP_FUNCTION || has_frame());
2482 GetBuiltinEntry(r2, native_context_index);
2483 if (flag == CALL_FUNCTION) {
2484 call_wrapper.BeforeCall(CallSize(r2));
2486 call_wrapper.AfterCall();
2488 DCHECK(flag == JUMP_FUNCTION);
2494 void MacroAssembler::GetBuiltinFunction(Register target,
2495 int native_context_index) {
2496 // Load the builtins object into target register.
2498 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2499 ldr(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
2500 // Load the JavaScript builtin function from the builtins object.
2501 ldr(target, ContextOperand(target, native_context_index));
2505 void MacroAssembler::GetBuiltinEntry(Register target,
2506 int native_context_index) {
2507 DCHECK(!target.is(r1));
2508 GetBuiltinFunction(r1, native_context_index);
2509 // Load the code entry point from the builtins object.
2510 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2514 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2515 Register scratch1, Register scratch2) {
2516 if (FLAG_native_code_counters && counter->Enabled()) {
2517 mov(scratch1, Operand(value));
2518 mov(scratch2, Operand(ExternalReference(counter)));
2519 str(scratch1, MemOperand(scratch2));
2524 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2525 Register scratch1, Register scratch2) {
2527 if (FLAG_native_code_counters && counter->Enabled()) {
2528 mov(scratch2, Operand(ExternalReference(counter)));
2529 ldr(scratch1, MemOperand(scratch2));
2530 add(scratch1, scratch1, Operand(value));
2531 str(scratch1, MemOperand(scratch2));
2536 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2537 Register scratch1, Register scratch2) {
2539 if (FLAG_native_code_counters && counter->Enabled()) {
2540 mov(scratch2, Operand(ExternalReference(counter)));
2541 ldr(scratch1, MemOperand(scratch2));
2542 sub(scratch1, scratch1, Operand(value));
2543 str(scratch1, MemOperand(scratch2));
2548 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2549 if (emit_debug_code())
2550 Check(cond, reason);
2554 void MacroAssembler::AssertFastElements(Register elements) {
2555 if (emit_debug_code()) {
2556 DCHECK(!elements.is(ip));
2559 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2560 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2563 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2566 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2569 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2576 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2580 // will not return here
2585 void MacroAssembler::Abort(BailoutReason reason) {
2589 const char* msg = GetBailoutReason(reason);
2591 RecordComment("Abort message: ");
2595 if (FLAG_trap_on_abort) {
2601 mov(r0, Operand(Smi::FromInt(reason)));
2604 // Disable stub call restrictions to always allow calls to abort.
2606 // We don't actually want to generate a pile of code for this, so just
2607 // claim there is a stack frame, without generating one.
2608 FrameScope scope(this, StackFrame::NONE);
2609 CallRuntime(Runtime::kAbort, 1);
2611 CallRuntime(Runtime::kAbort, 1);
2613 // will not return here
2614 if (is_const_pool_blocked()) {
2615 // If the calling code cares about the exact number of
2616 // instructions generated, we insert padding here to keep the size
2617 // of the Abort macro constant.
2618 static const int kExpectedAbortInstructions = 7;
2619 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2620 DCHECK(abort_instructions <= kExpectedAbortInstructions);
2621 while (abort_instructions++ < kExpectedAbortInstructions) {
2628 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2629 if (context_chain_length > 0) {
2630 // Move up the chain of contexts to the context containing the slot.
2631 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2632 for (int i = 1; i < context_chain_length; i++) {
2633 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2636 // Slot is in the current function context. Move it into the
2637 // destination register in case we store into it (the write barrier
2638 // cannot be allowed to destroy the context in cp).
2644 void MacroAssembler::LoadTransitionedArrayMapConditional(
2645 ElementsKind expected_kind,
2646 ElementsKind transitioned_kind,
2647 Register map_in_out,
2649 Label* no_map_match) {
2650 // Load the global or builtins object from the current context.
2652 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2653 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2655 // Check that the function's map is the same as the expected cached map.
2658 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2659 size_t offset = expected_kind * kPointerSize +
2660 FixedArrayBase::kHeaderSize;
2661 ldr(ip, FieldMemOperand(scratch, offset));
2662 cmp(map_in_out, ip);
2663 b(ne, no_map_match);
2665 // Use the transitioned cached map.
2666 offset = transitioned_kind * kPointerSize +
2667 FixedArrayBase::kHeaderSize;
2668 ldr(map_in_out, FieldMemOperand(scratch, offset));
2672 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2673 // Load the global or builtins object from the current context.
2675 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2676 // Load the native context from the global or builtins object.
2677 ldr(function, FieldMemOperand(function,
2678 GlobalObject::kNativeContextOffset));
2679 // Load the function from the native context.
2680 ldr(function, MemOperand(function, Context::SlotOffset(index)));
2684 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2687 // Load the initial map. The global functions all have initial maps.
2688 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2689 if (emit_debug_code()) {
2691 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2694 Abort(kGlobalFunctionsMustHaveInitialMap);
2700 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2703 Label* not_power_of_two_or_zero) {
2704 sub(scratch, reg, Operand(1), SetCC);
2705 b(mi, not_power_of_two_or_zero);
2707 b(ne, not_power_of_two_or_zero);
2711 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2714 Label* zero_and_neg,
2715 Label* not_power_of_two) {
2716 sub(scratch, reg, Operand(1), SetCC);
2717 b(mi, zero_and_neg);
2719 b(ne, not_power_of_two);
2723 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2725 Label* on_not_both_smi) {
2726 STATIC_ASSERT(kSmiTag == 0);
2727 tst(reg1, Operand(kSmiTagMask));
2728 tst(reg2, Operand(kSmiTagMask), eq);
2729 b(ne, on_not_both_smi);
2733 void MacroAssembler::UntagAndJumpIfSmi(
2734 Register dst, Register src, Label* smi_case) {
2735 STATIC_ASSERT(kSmiTag == 0);
2736 SmiUntag(dst, src, SetCC);
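// SmiUntag with SetCC is an arithmetic shift right by one, so the tag bit
// (bit 0) lands in the carry flag; smis are tagged with 0, hence carry clear.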
2737 b(cc, smi_case); // Shifter carry is not set for a smi.
2741 void MacroAssembler::UntagAndJumpIfNotSmi(
2742 Register dst, Register src, Label* non_smi_case) {
2743 STATIC_ASSERT(kSmiTag == 0);
2744 SmiUntag(dst, src, SetCC);
2745 b(cs, non_smi_case); // Shifter carry is set for a non-smi.
2749 void MacroAssembler::JumpIfEitherSmi(Register reg1,
2751 Label* on_either_smi) {
2752 STATIC_ASSERT(kSmiTag == 0);
2753 tst(reg1, Operand(kSmiTagMask));
2754 tst(reg2, Operand(kSmiTagMask), ne);
2755 b(eq, on_either_smi);
2759 void MacroAssembler::AssertNotSmi(Register object) {
2760 if (emit_debug_code()) {
2761 STATIC_ASSERT(kSmiTag == 0);
2762 tst(object, Operand(kSmiTagMask));
2763 Check(ne, kOperandIsASmi);
2768 void MacroAssembler::AssertSmi(Register object) {
2769 if (emit_debug_code()) {
2770 STATIC_ASSERT(kSmiTag == 0);
2771 tst(object, Operand(kSmiTagMask));
2772 Check(eq, kOperandIsNotSmi);
2777 void MacroAssembler::AssertString(Register object) {
2778 if (emit_debug_code()) {
2779 STATIC_ASSERT(kSmiTag == 0);
2780 tst(object, Operand(kSmiTagMask));
2781 Check(ne, kOperandIsASmiAndNotAString);
2783 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2784 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2786 Check(lo, kOperandIsNotAString);
2791 void MacroAssembler::AssertName(Register object) {
2792 if (emit_debug_code()) {
2793 STATIC_ASSERT(kSmiTag == 0);
2794 tst(object, Operand(kSmiTagMask));
2795 Check(ne, kOperandIsASmiAndNotAName);
2797 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2798 CompareInstanceType(object, object, LAST_NAME_TYPE);
2800 Check(le, kOperandIsNotAName);
2805 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2807 if (emit_debug_code()) {
2808 Label done_checking;
2809 AssertNotSmi(object);
2810 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2811 b(eq, &done_checking);
2812 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2813 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2814 Assert(eq, kExpectedUndefinedOrCell);
2815 bind(&done_checking);
2820 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2821 if (emit_debug_code()) {
2822 CompareRoot(reg, index);
2823 Check(eq, kHeapNumberMapRegisterClobbered);
2828 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2829 Register heap_number_map,
2831 Label* on_not_heap_number) {
2832 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2833 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2834 cmp(scratch, heap_number_map);
2835 b(ne, on_not_heap_number);
2839 void MacroAssembler::LookupNumberStringCache(Register object,
2845 // Register usage: result is used as a temporary.
2846 Register number_string_cache = result;
2847 Register mask = scratch3;
2849 // Load the number string cache.
2850 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2852 // Make the hash mask from the length of the number string cache. It
2853 // contains two elements (number and string) for each cache entry.
2854 ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
2855 // Divide length by two (length is a smi).
2856 mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
2857 sub(mask, mask, Operand(1)); // Make mask.
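// Example, assuming kSmiTagSize == 1: a cache with 64 entries is a FixedArray
// of length 128 whose smi-encoded length is 256; shifting right by 2 recovers
// 64, and subtracting 1 yields the mask 0x3f.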
2859 // Calculate the entry in the number string cache. The hash value in the
2860 // number string cache for smis is just the smi value, and the hash for
2861 // doubles is the xor of the upper and lower words. See
2862 // Heap::GetNumberStringCache.
2864 Label load_result_from_cache;
2865 JumpIfSmi(object, &is_smi);
2868 Heap::kHeapNumberMapRootIndex,
2872 STATIC_ASSERT(8 == kDoubleSize);
2875 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
2876 ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
2877 eor(scratch1, scratch1, Operand(scratch2));
2878 and_(scratch1, scratch1, Operand(mask));
2880 // Calculate address of entry in string cache: each entry consists
2881 // of two pointer sized fields.
2883 number_string_cache,
2884 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
2886 Register probe = mask;
2887 ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2888 JumpIfSmi(probe, not_found);
2889 sub(scratch2, object, Operand(kHeapObjectTag));
2890 vldr(d0, scratch2, HeapNumber::kValueOffset);
2891 sub(probe, probe, Operand(kHeapObjectTag));
2892 vldr(d1, probe, HeapNumber::kValueOffset);
2893 VFPCompareAndSetFlags(d0, d1);
2894 b(ne, not_found); // The cache did not contain this value.
2895 b(&load_result_from_cache);
2898 Register scratch = scratch1;
2899 and_(scratch, mask, Operand(object, ASR, 1));
2900 // Calculate address of entry in string cache: each entry consists
2901 // of two pointer sized fields.
2903 number_string_cache,
2904 Operand(scratch, LSL, kPointerSizeLog2 + 1));
2906 // Check if the entry is the smi we are looking for.
2907 ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2911 // Get the result from the cache.
2912 bind(&load_result_from_cache);
2913 ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
2914 IncrementCounter(isolate()->counters()->number_to_string_native(),
2921 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
2922 Register first, Register second, Register scratch1, Register scratch2,
2924 // Test that both first and second are sequential one-byte strings.
2925 // Assume that they are non-smis.
2926 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2927 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2928 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2929 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2931 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
2935 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
2940 // Check that neither is a smi.
2941 and_(scratch1, first, Operand(second));
2942 JumpIfSmi(scratch1, failure);
2943 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
2948 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2949 Label* not_unique_name) {
2950 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2952 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2954 cmp(reg, Operand(SYMBOL_TYPE));
2955 b(ne, not_unique_name);
2961 // Allocates a heap number or jumps to the gc_required label if the young space
2962 // is full and a scavenge is needed.
2963 void MacroAssembler::AllocateHeapNumber(Register result,
2966 Register heap_number_map,
2968 TaggingMode tagging_mode,
2970 // Allocate an object in the heap for the heap number and tag it as a heap
2971 // object.
2972 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
2973 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
2975 Heap::RootListIndex map_index = mode == MUTABLE
2976 ? Heap::kMutableHeapNumberMapRootIndex
2977 : Heap::kHeapNumberMapRootIndex;
2978 AssertIsRoot(heap_number_map, map_index);
2980 // Store heap number map in the allocated object.
2981 if (tagging_mode == TAG_RESULT) {
2982 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
2984 str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
2989 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
2990 DwVfpRegister value,
2993 Register heap_number_map,
2994 Label* gc_required) {
2995 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
2996 sub(scratch1, result, Operand(kHeapObjectTag));
2997 vstr(value, scratch1, HeapNumber::kValueOffset);
3001 // Copies a fixed number of fields of heap objects from src to dst.
3002 void MacroAssembler::CopyFields(Register dst,
3004 LowDwVfpRegister double_scratch,
3006 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
3007 for (int i = 0; i < double_count; i++) {
3008 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
3009 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
3012 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
3013 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
3015 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
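// For example, on a 32-bit build with field_count == 5, double_count is 2:
// fields 0..3 are copied as two doubles above, and the odd remaining field is
// copied below through the single-precision half of double_scratch.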
3017 vldr(double_scratch.low(),
3018 FieldMemOperand(src, (field_count - 1) * kPointerSize));
3019 vstr(double_scratch.low(),
3020 FieldMemOperand(dst, (field_count - 1) * kPointerSize));
3025 void MacroAssembler::CopyBytes(Register src,
3029 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3031 // Align src before copying in word size chunks.
3032 cmp(length, Operand(kPointerSize));
3035 bind(&align_loop_1);
3036 tst(src, Operand(kPointerSize - 1));
3038 ldrb(scratch, MemOperand(src, 1, PostIndex));
3039 strb(scratch, MemOperand(dst, 1, PostIndex));
3040 sub(length, length, Operand(1), SetCC);
3042 // Copy bytes in word size chunks.
3044 if (emit_debug_code()) {
3045 tst(src, Operand(kPointerSize - 1));
3046 Assert(eq, kExpectingAlignmentForCopyBytes);
3048 cmp(length, Operand(kPointerSize));
3050 ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3051 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3052 str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3054 strb(scratch, MemOperand(dst, 1, PostIndex));
3055 mov(scratch, Operand(scratch, LSR, 8));
3056 strb(scratch, MemOperand(dst, 1, PostIndex));
3057 mov(scratch, Operand(scratch, LSR, 8));
3058 strb(scratch, MemOperand(dst, 1, PostIndex));
3059 mov(scratch, Operand(scratch, LSR, 8));
3060 strb(scratch, MemOperand(dst, 1, PostIndex));
3062 sub(length, length, Operand(kPointerSize));
3065 // Copy the last bytes, if any are left.
3067 cmp(length, Operand::Zero());
3070 ldrb(scratch, MemOperand(src, 1, PostIndex));
3071 strb(scratch, MemOperand(dst, 1, PostIndex));
3072 sub(length, length, Operand(1), SetCC);
3073 b(ne, &byte_loop_1);
3078 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3079 Register end_offset,
3084 str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3086 cmp(start_offset, end_offset);
3091 void MacroAssembler::CheckFor32DRegs(Register scratch) {
3092 mov(scratch, Operand(ExternalReference::cpu_features()));
3093 ldr(scratch, MemOperand(scratch));
3094 tst(scratch, Operand(1u << VFP32DREGS));
3098 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3099 CheckFor32DRegs(scratch);
3100 vstm(db_w, location, d16, d31, ne);
3101 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3102 vstm(db_w, location, d0, d15);
3106 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3107 CheckFor32DRegs(scratch);
3108 vldm(ia_w, location, d0, d15);
3109 vldm(ia_w, location, d16, d31, ne);
3110 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3114 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3115 Register first, Register second, Register scratch1, Register scratch2,
3117 const int kFlatOneByteStringMask =
3118 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3119 const int kFlatOneByteStringTag =
3120 kStringTag | kOneByteStringTag | kSeqStringTag;
3121 and_(scratch1, first, Operand(kFlatOneByteStringMask));
3122 and_(scratch2, second, Operand(kFlatOneByteStringMask));
3123 cmp(scratch1, Operand(kFlatOneByteStringTag));
3124 // Ignore second test if first test failed.
3125 cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
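// Because the second cmp is predicated on eq, it only executes when the first
// string passed the check; if either comparison fails the flags end up ne, so
// one conditional branch afterwards rejects the pair.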
3130 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3133 const int kFlatOneByteStringMask =
3134 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3135 const int kFlatOneByteStringTag =
3136 kStringTag | kOneByteStringTag | kSeqStringTag;
3137 and_(scratch, type, Operand(kFlatOneByteStringMask));
3138 cmp(scratch, Operand(kFlatOneByteStringTag));
3142 static const int kRegisterPassedArguments = 4;
3145 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3146 int num_double_arguments) {
3147 int stack_passed_words = 0;
3148 if (use_eabi_hardfloat()) {
3149 // In the hard floating point calling convention, we can use
3150 // all double registers to pass doubles.
3151 if (num_double_arguments > DoubleRegister::NumRegisters()) {
3152 stack_passed_words +=
3153 2 * (num_double_arguments - DoubleRegister::NumRegisters());
3156 // In the soft floating point calling convention, every double
3157 // argument is passed using two registers.
3158 num_reg_arguments += 2 * num_double_arguments;
3160 // Up to four simple arguments are passed in registers r0..r3.
3161 if (num_reg_arguments > kRegisterPassedArguments) {
3162 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3164 return stack_passed_words;
3168 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3171 uint32_t encoding_mask) {
3174 Check(ne, kNonObject);
3176 ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3177 ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3179 and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3180 cmp(ip, Operand(encoding_mask));
3181 Check(eq, kUnexpectedStringType);
3183 // The index is assumed to be untagged coming in; tag it to compare with the
3184 // string length without using a temp register. It is restored at the end of
3185 // this function.
3186 Label index_tag_ok, index_tag_bad;
3187 TrySmiTag(index, index, &index_tag_bad);
3189 bind(&index_tag_bad);
3190 Abort(kIndexIsTooLarge);
3191 bind(&index_tag_ok);
3193 ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3195 Check(lt, kIndexIsTooLarge);
3197 cmp(index, Operand(Smi::FromInt(0)));
3198 Check(ge, kIndexIsNegative);
3200 SmiUntag(index, index);
3204 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3205 int num_double_arguments,
3207 int frame_alignment = ActivationFrameAlignment();
3208 int stack_passed_arguments = CalculateStackPassedWords(
3209 num_reg_arguments, num_double_arguments);
3210 if (frame_alignment > kPointerSize) {
3211 // Make stack end at alignment and make room for num_arguments - 4 words
3212 // and the original value of sp.
3214 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3215 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3216 and_(sp, sp, Operand(-frame_alignment));
3217 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
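// scratch is expected to hold the incoming sp (saved just before the
// alignment, not shown in this excerpt); keeping it in the slot above the
// outgoing arguments lets CallCFunctionHelper restore sp with a single load
// instead of undoing the alignment arithmetic.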
3219 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3224 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3226 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3230 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
3232 if (!use_eabi_hardfloat()) {
3238 // On ARM this is just a synonym for MovToFloatParameter, to make the purpose clear.
3239 void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
3240 MovToFloatParameter(src);
3244 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
3245 DwVfpRegister src2) {
3246 DCHECK(src1.is(d0));
3247 DCHECK(src2.is(d1));
3248 if (!use_eabi_hardfloat()) {
3255 void MacroAssembler::CallCFunction(ExternalReference function,
3256 int num_reg_arguments,
3257 int num_double_arguments) {
3258 mov(ip, Operand(function));
3259 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3263 void MacroAssembler::CallCFunction(Register function,
3264 int num_reg_arguments,
3265 int num_double_arguments) {
3266 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3270 void MacroAssembler::CallCFunction(ExternalReference function,
3271 int num_arguments) {
3272 CallCFunction(function, num_arguments, 0);
3276 void MacroAssembler::CallCFunction(Register function,
3277 int num_arguments) {
3278 CallCFunction(function, num_arguments, 0);
3282 void MacroAssembler::CallCFunctionHelper(Register function,
3283 int num_reg_arguments,
3284 int num_double_arguments) {
3285 DCHECK(has_frame());
3286 // Make sure that the stack is aligned before calling a C function unless
3287 // running in the simulator. The simulator has its own alignment check which
3288 // provides more information.
3289 #if V8_HOST_ARCH_ARM
3290 if (emit_debug_code()) {
3291 int frame_alignment = base::OS::ActivationFrameAlignment();
3292 int frame_alignment_mask = frame_alignment - 1;
3293 if (frame_alignment > kPointerSize) {
3294 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3295 Label alignment_as_expected;
3296 tst(sp, Operand(frame_alignment_mask));
3297 b(eq, &alignment_as_expected);
3298 // Don't use Check here, as it will call Runtime_Abort possibly
3299 // re-entering here.
3300 stop("Unexpected alignment");
3301 bind(&alignment_as_expected);
3306 // Just call directly. The function called cannot cause a GC, or
3307 // allow preemption, so the return address in the link register
3308 // stays correct.
3309 Call(function);
3310 int stack_passed_arguments = CalculateStackPassedWords(
3311 num_reg_arguments, num_double_arguments);
3312 if (ActivationFrameAlignment() > kPointerSize) {
3313 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3315 add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3320 void MacroAssembler::CheckPageFlag(
3325 Label* condition_met) {
3326 Bfc(scratch, object, 0, kPageSizeBits);
3327 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3328 tst(scratch, Operand(mask));
3329 b(cc, condition_met);
3333 void MacroAssembler::JumpIfBlack(Register object,
3337 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3338 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3342 void MacroAssembler::HasColor(Register object,
3343 Register bitmap_scratch,
3344 Register mask_scratch,
3348 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3350 GetMarkBits(object, bitmap_scratch, mask_scratch);
3352 Label other_color, word_boundary;
3353 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3354 tst(ip, Operand(mask_scratch));
3355 b(first_bit == 1 ? eq : ne, &other_color);
3356 // Shift left 1 by adding.
3357 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3358 b(eq, &word_boundary);
3359 tst(ip, Operand(mask_scratch));
3360 b(second_bit == 1 ? ne : eq, has_color);
3363 bind(&word_boundary);
3364 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3365 tst(ip, Operand(1));
3366 b(second_bit == 1 ? ne : eq, has_color);
3371 // Detect some, but not all, common pointer-free objects. This is used by the
3372 // incremental write barrier which doesn't care about oddballs (they are always
3373 // marked black immediately so this code is not hit).
3374 void MacroAssembler::JumpIfDataObject(Register value,
3376 Label* not_data_object) {
3377 Label is_data_object;
3378 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3379 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3380 b(eq, &is_data_object);
3381 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3382 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3383 // If it's a string and it's not a cons string then it's an object containing
3384 // no GC pointers.
3385 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3386 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3387 b(ne, not_data_object);
3388 bind(&is_data_object);
3392 void MacroAssembler::GetMarkBits(Register addr_reg,
3393 Register bitmap_reg,
3394 Register mask_reg) {
3395 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3396 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3397 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3398 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3399 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3400 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3401 mov(ip, Operand(1));
3402 mov(mask_reg, Operand(ip, LSL, mask_reg));
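// At this point bitmap_reg points at the marking bitmap cell for the object
// (page start plus cell index * kPointerSize; callers add
// MemoryChunk::kHeaderSize when loading), and mask_reg holds a single bit
// selecting the object's word within that 32-bit cell.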
3406 void MacroAssembler::EnsureNotWhite(
3408 Register bitmap_scratch,
3409 Register mask_scratch,
3410 Register load_scratch,
3411 Label* value_is_white_and_not_data) {
3412 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3413 GetMarkBits(value, bitmap_scratch, mask_scratch);
3415 // If the value is black or grey we don't need to do anything.
3416 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3417 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3418 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
3419 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3423 // Since both black and grey have a 1 in the first position and white does
3424 // not have a 1 there we only need to check one bit.
3425 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3426 tst(mask_scratch, load_scratch);
3429 if (emit_debug_code()) {
3430 // Check for impossible bit pattern.
3432 // LSL may overflow, making the check conservative.
3433 tst(load_scratch, Operand(mask_scratch, LSL, 1));
3435 stop("Impossible marking bit pattern");
3439 // Value is white. We check whether it is data that doesn't need scanning.
3440 // Currently only checks for HeapNumber and non-cons strings.
3441 Register map = load_scratch; // Holds map while checking type.
3442 Register length = load_scratch; // Holds length of object after testing type.
3443 Label is_data_object;
3445 // Check for heap-number
3446 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3447 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3448 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3449 b(eq, &is_data_object);
3451 // Check for strings.
3452 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3453 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3454 // If it's a string and it's not a cons string then it's an object containing
3455 // no GC pointers.
3456 Register instance_type = load_scratch;
3457 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3458 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3459 b(ne, value_is_white_and_not_data);
3460 // It's a non-indirect (non-cons and non-slice) string.
3461 // If it's external, the length is just ExternalString::kSize.
3462 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3463 // External strings are the only ones with the kExternalStringTag bit
3464 // set.
3465 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
3466 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
3467 tst(instance_type, Operand(kExternalStringTag));
3468 mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3469 b(ne, &is_data_object);
3471 // Sequential string, either Latin1 or UC16.
3472 // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
3473 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3474 // getting the length multiplied by 2.
3475 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3476 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
3477 ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3478 tst(instance_type, Operand(kStringEncodingMask));
3479 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3480 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3481 and_(length, length, Operand(~kObjectAlignmentMask));
3483 bind(&is_data_object);
3484 // Value is a data object, and it is white. Mark it black. Since we know
3485 // that the object is white we can make it black by flipping one bit.
3486 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3487 orr(ip, ip, Operand(mask_scratch));
3488 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3490 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3491 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3492 add(ip, ip, Operand(length));
3493 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3499 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3500 Usat(output_reg, 8, Operand(input_reg));
3504 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3505 DwVfpRegister input_reg,
3506 LowDwVfpRegister double_scratch) {
3509 // Handle inputs >= 255 (including +infinity).
3510 Vmov(double_scratch, 255.0, result_reg);
3511 mov(result_reg, Operand(255));
3512 VFPCompareAndSetFlags(input_reg, double_scratch);
3515 // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
3516 // rounding mode will provide the correct result.
3517 vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3518 vmov(result_reg, double_scratch.low());
3524 void MacroAssembler::LoadInstanceDescriptors(Register map,
3525 Register descriptors) {
3526 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3530 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3531 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3532 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3536 void MacroAssembler::EnumLength(Register dst, Register map) {
3537 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3538 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3539 and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
3544 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3546 AccessorComponent accessor) {
3547 ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3548 LoadInstanceDescriptors(dst, dst);
3550 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3551 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3552 : AccessorPair::kSetterOffset;
3553 ldr(dst, FieldMemOperand(dst, offset));
3557 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3558 Register empty_fixed_array_value = r6;
3559 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3563 // Check if the enum length field is properly initialized, indicating that
3564 // there is an enum cache.
3565 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3568 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3569 b(eq, call_runtime);
3574 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3576 // For all objects but the receiver, check that the cache is empty.
3578 cmp(r3, Operand(Smi::FromInt(0)));
3579 b(ne, call_runtime);
3583 // Check that there are no elements. Register r2 contains the current JS
3584 // object we've reached through the prototype chain.
3586 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3587 cmp(r2, empty_fixed_array_value);
3588 b(eq, &no_elements);
3590 // Second chance, the object may be using the empty slow element dictionary.
3591 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3592 b(ne, call_runtime);
3595 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3596 cmp(r2, null_value);
3601 void MacroAssembler::TestJSArrayForAllocationMemento(
3602 Register receiver_reg,
3603 Register scratch_reg,
3604 Label* no_memento_found) {
3605 ExternalReference new_space_start =
3606 ExternalReference::new_space_start(isolate());
3607 ExternalReference new_space_allocation_top =
3608 ExternalReference::new_space_allocation_top_address(isolate());
3609 add(scratch_reg, receiver_reg,
3610 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3611 cmp(scratch_reg, Operand(new_space_start));
3612 b(lt, no_memento_found);
3613 mov(ip, Operand(new_space_allocation_top));
3614 ldr(ip, MemOperand(ip));
3615 cmp(scratch_reg, ip);
3616 b(gt, no_memento_found);
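// scratch_reg now points one past where a trailing AllocationMemento would
// end (the -kHeapObjectTag above already untagged the address), so backing up
// by AllocationMemento::kSize lands exactly on the memento's map word.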
3617 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3618 cmp(scratch_reg,
3619 Operand(isolate()->factory()->allocation_memento_map()));
3623 Register GetRegisterThatIsNotOneOf(Register reg1,
3630 if (reg1.is_valid()) regs |= reg1.bit();
3631 if (reg2.is_valid()) regs |= reg2.bit();
3632 if (reg3.is_valid()) regs |= reg3.bit();
3633 if (reg4.is_valid()) regs |= reg4.bit();
3634 if (reg5.is_valid()) regs |= reg5.bit();
3635 if (reg6.is_valid()) regs |= reg6.bit();
3637 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
3638 Register candidate = Register::FromAllocationIndex(i);
3639 if (regs & candidate.bit()) continue;
3647 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3652 DCHECK(!scratch1.is(scratch0));
3653 Register current = scratch0;
3654 Label loop_again, end;
3656 // Walk up the prototype chain, starting from the object's map.
3657 mov(current, object);
3658 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
3659 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
3660 CompareRoot(current, Heap::kNullValueRootIndex);
3663 // Loop based on the map going up the prototype chain.
3665 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
3667 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
3668 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
3669 ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
3670 cmp(scratch1, Operand(JS_OBJECT_TYPE));
3673 ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
3674 DecodeField<Map::ElementsKindBits>(scratch1);
3675 cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
3677 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
3678 CompareRoot(current, Heap::kNullValueRootIndex);
3686 bool AreAliased(Register reg1,
3694 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
3695 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
3696 reg7.is_valid() + reg8.is_valid();
3699 if (reg1.is_valid()) regs |= reg1.bit();
3700 if (reg2.is_valid()) regs |= reg2.bit();
3701 if (reg3.is_valid()) regs |= reg3.bit();
3702 if (reg4.is_valid()) regs |= reg4.bit();
3703 if (reg5.is_valid()) regs |= reg5.bit();
3704 if (reg6.is_valid()) regs |= reg6.bit();
3705 if (reg7.is_valid()) regs |= reg7.bit();
3706 if (reg8.is_valid()) regs |= reg8.bit();
3707 int n_of_non_aliasing_regs = NumRegs(regs);
3709 return n_of_valid_regs != n_of_non_aliasing_regs;
3714 CodePatcher::CodePatcher(byte* address,
3716 FlushICache flush_cache)
3717 : address_(address),
3718 size_(instructions * Assembler::kInstrSize),
3719 masm_(NULL, address, size_ + Assembler::kGap),
3720 flush_cache_(flush_cache) {
3721 // Create a new macro assembler pointing to the address of the code to patch.
3722 // The size is adjusted with kGap in order for the assembler to generate size
3723 // bytes of instructions without failing with buffer size constraints.
3724 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3728 CodePatcher::~CodePatcher() {
3729 // Indicate that code has changed.
3730 if (flush_cache_ == FLUSH) {
3731 CpuFeatures::FlushICache(address_, size_);
3734 // Check that the code was patched as expected.
3735 DCHECK(masm_.pc_ == address_ + size_);
3736 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3740 void CodePatcher::Emit(Instr instr) {
3741 masm()->emit(instr);
3745 void CodePatcher::Emit(Address addr) {
3746 masm()->emit(reinterpret_cast<Instr>(addr));
3750 void CodePatcher::EmitCondition(Condition cond) {
3751 Instr instr = Assembler::instr_at(masm_.pc_);
3752 instr = (instr & ~kCondMask) | cond;
3757 void MacroAssembler::TruncatingDiv(Register result,
3760 DCHECK(!dividend.is(result));
3761 DCHECK(!dividend.is(ip));
3762 DCHECK(!result.is(ip));
3763 base::MagicNumbersForDivision<uint32_t> mag =
3764 base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
3765 mov(ip, Operand(mag.multiplier));
3766 bool neg = (mag.multiplier & (1U << 31)) != 0;
3767 if (divisor > 0 && neg) {
3768 smmla(result, dividend, ip, dividend);
3770 smmul(result, dividend, ip);
3771 if (divisor < 0 && !neg && mag.multiplier > 0) {
3772 sub(result, result, Operand(dividend));
3775 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
3776 add(result, result, Operand(dividend, LSR, 31));
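// Worked example (a sketch; the exact constants come from
// base::SignedDivisionByConstant): for divisor 3 the magic multiplier is
// 0x55555556 with shift 0. smmul keeps the high 32 bits of the 64-bit signed
// product, so dividend 7 yields 2 and dividend -7 yields -3; the final add of
// the sign bit (dividend, LSR, 31) corrects -3 to -2, i.e. the quotient
// rounded toward zero.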
3779 } // namespace internal
3782 #endif // V8_TARGET_ARCH_ARM