1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
6 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
8 #include "src/assembler.h"
9 #include "src/globals.h"
10 #include "src/mips64/assembler-mips64.h"
15 // Give alias names to registers for calling conventions.
// Return-value registers (MIPS v0/v1 per the calling convention).
16 const Register kReturnRegister0 = {kRegister_v0_Code};
17 const Register kReturnRegister1 = {kRegister_v1_Code};
// Function / context registers used when entering JS code.
18 const Register kJSFunctionRegister = {kRegister_a1_Code};
19 const Register kContextRegister = {kRegister_s7_Code};
// Registers dedicated to the bytecode interpreter's dispatch loop.
20 const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
21 const Register kInterpreterRegisterFileRegister = {kRegister_a7_Code};
22 const Register kInterpreterBytecodeOffsetRegister = {kRegister_t0_Code};
23 const Register kInterpreterBytecodeArrayRegister = {kRegister_t1_Code};
24 const Register kInterpreterDispatchTableRegister = {kRegister_t2_Code};
// Registers used when calling into the C++ runtime from generated code.
25 const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
26 const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};
28 // Forward declaration.
31 // Reserved Register Usage Summary.
33 // Registers t8, t9, and at are reserved for use by the MacroAssembler.
35 // The programmer should know that the MacroAssembler may clobber these three,
36 // but won't touch other registers except in special cases.
38 // Per the MIPS ABI, register t9 must be used for indirect function call
39 // via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
40 // trying to update gp register for position-independent-code. Whenever
41 // MIPS generated code calls C code, it must be via t9 register.
44 // Flags used for LeaveExitFrame function.
45 enum LeaveExitFrameMode {
47 NO_EMIT_RETURN = false
50 // Flags used for AllocateHeapNumber
58 // Flags used for the ObjectToDoubleFPURegister function.
59 enum ObjectToDoubleFlags {
// No special handling requested.
61   NO_OBJECT_TO_DOUBLE_FLAGS = 0,
62   // Object is known to be a non smi.
63   OBJECT_NOT_SMI = 1 << 0,
64   // Don't load NaNs or infinities, branch to the non number case instead.
65   AVOID_NANS_AND_INFINITIES = 1 << 1
68 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
69 enum BranchDelaySlot {
74 // Flags used for the li macro-assembler function.
76 // If the constant value can be represented in just 16 bits, then
77 // optimize the li to use a single instruction, rather than lui/ori/dsll
80 // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
81 // could be loaded with just one, so that this value is patchable later.
83 // For address loads only 4 instructions are required. Used to mark
84 // constant load that will be used as address without relocation
85 // information. It ensures predictable code size, so specific sites
86 // in code are patchable.
// Whether a write-barrier helper should also update the remembered set.
91 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether record-write helpers emit an inline smi check on the stored value
// (OMIT when the caller already knows the value is not a smi).
92 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Hint for the write barrier about whether the stored value can point to
// memory the incremental marker cares about — TODO(review): confirm exact GC
// semantics against the RecordWrite implementation.
93 enum PointersToHereCheck {
94   kPointersToHereMaybeInteresting,
95   kPointersToHereAreAlwaysInteresting
// Tracks whether the return address register (ra) has already been saved,
// so helpers know if they may clobber it.
97 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
// Returns some register that is distinct from every register passed in;
// unused slots default to no_reg and are ignored.
99 Register GetRegisterThatIsNotOneOf(Register reg1,
100                                   Register reg2 = no_reg,
101                                   Register reg3 = no_reg,
102                                   Register reg4 = no_reg,
103                                   Register reg5 = no_reg,
104                                   Register reg6 = no_reg);
// Predicate: returns true if any two of the supplied registers alias each
// other (no_reg slots are ignored). Used in debug checks.
106 bool AreAliased(Register reg1,
108                 Register reg3 = no_reg,
109                 Register reg4 = no_reg,
110                 Register reg5 = no_reg,
111                 Register reg6 = no_reg,
112                 Register reg7 = no_reg,
113                 Register reg8 = no_reg);
116 // -----------------------------------------------------------------------------
117 // Static helper functions.
119 #if defined(V8_TARGET_LITTLE_ENDIAN)
120 #define SmiWordOffset(offset) (offset + kPointerSize / 2)
122 #define SmiWordOffset(offset) offset
// MemOperand addressing slot |index| of the given context object.
126 inline MemOperand ContextOperand(Register context, int index) {
127   return MemOperand(context, Context::SlotOffset(index));
// MemOperand for the global-object slot of the current context (in cp).
131 inline MemOperand GlobalObjectOperand() {
132   return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
136 // Generate a MemOperand for loading a field from an object.
// The heap-object tag is subtracted so the operand addresses the raw field.
137 inline MemOperand FieldMemOperand(Register object, int offset) {
138   return MemOperand(object, offset - kHeapObjectTag);
// MemOperand that reads only the payload word of a 64-bit smi
// (the tag occupies the other 32-bit half; see SmiWordOffset).
142 inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
143   // Assumes that Smis are shifted by 32 bits.
144   STATIC_ASSERT(kSmiShift == 32);
145   return MemOperand(rm, SmiWordOffset(offset));
// As above, but |offset| is a field offset: the heap-object tag is
// presubtracted before forming the operand.
149 inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
150   return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
154 // Generate a MemOperand for storing arguments 5..N on the stack
155 // when calling CallCFunction().
156 // TODO(plind): Currently ONLY used for O32. Should be fixed for
157 // n64, and used in RegExp code, and other places
158 // with more than 8 arguments.
159 inline MemOperand CFunctionArgumentOperand(int index) {
// Arguments 1..kCArgSlotCount travel in registers / reserved arg slots,
// so only higher indices have a stack home here.
160   DCHECK(index > kCArgSlotCount);
161   // Argument 5 takes the slot just past the four Arg-slots.
162   int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
163   return MemOperand(sp, offset);
167 // MacroAssembler implements a collection of frequently used macros.
168 class MacroAssembler: public Assembler {
170 // The isolate parameter can be NULL if the macro assembler should
171 // not use isolate-dependent functionality. In this case, it's the
172 // responsibility of the caller to never invoke such function on the
174 MacroAssembler(Isolate* isolate, void* buffer, int size);
177 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
178 #define COND_ARGS cond, r1, r2
180 // Cases when relocation is not needed.
181 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
182 void Name(target_type target, BranchDelaySlot bd = PROTECT); \
183 inline void Name(BranchDelaySlot bd, target_type target) { \
186 void Name(target_type target, \
188 BranchDelaySlot bd = PROTECT); \
189 inline void Name(BranchDelaySlot bd, \
190 target_type target, \
192 Name(target, COND_ARGS, bd); \
195 #define DECLARE_BRANCH_PROTOTYPES(Name) \
196 DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
197 DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
199 DECLARE_BRANCH_PROTOTYPES(Branch)
200 DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
201 DECLARE_BRANCH_PROTOTYPES(BranchShort)
203 #undef DECLARE_BRANCH_PROTOTYPES
204 #undef COND_TYPED_ARGS
208 // Jump, Call, and Ret pseudo instructions implementing inter-working.
209 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
210 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
212 void Jump(Register target, COND_ARGS);
213 void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
214 void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
215 void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
216 static int CallSize(Register target, COND_ARGS);
217 void Call(Register target, COND_ARGS);
218 static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
219 void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
220 int CallSize(Handle<Code> code,
221 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
222 TypeFeedbackId ast_id = TypeFeedbackId::None(),
224 void Call(Handle<Code> code,
225 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
226 TypeFeedbackId ast_id = TypeFeedbackId::None(),
229 inline void Ret(BranchDelaySlot bd, Condition cond = al,
230 Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
231 Ret(cond, rs, rt, bd);
234 void Branch(Label* L,
237 Heap::RootListIndex index,
238 BranchDelaySlot bdslot = PROTECT);
242 // Emit code to discard a non-negative number of pointer-sized elements
243 // from the stack, clobbering only the sp register.
245 Condition cond = cc_always,
246 Register reg = no_reg,
247 const Operand& op = Operand(no_reg));
249 // Trivial case of DropAndRet that utilizes the delay slot and only emits
251 void DropAndRet(int drop);
253 void DropAndRet(int drop,
258 // Swap two registers. If the scratch register is omitted then a slightly
259 // less efficient form using xor instead of mov is emitted.
260 void Swap(Register reg1, Register reg2, Register scratch = no_reg);
262 void Call(Label* target);
264 void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
266 inline void Move(Register dst, Register src) {
272 inline void Move(FPURegister dst, FPURegister src) {
278 inline void Move(Register dst_low, Register dst_high, FPURegister src) {
280 mfhc1(dst_high, src);
283 inline void FmoveHigh(Register dst_high, FPURegister src) {
284 mfhc1(dst_high, src);
287 inline void FmoveHigh(FPURegister dst, Register src_high) {
288 mthc1(src_high, dst);
291 inline void FmoveLow(Register dst_low, FPURegister src) {
295 void FmoveLow(FPURegister dst, Register src_low);
297 inline void Move(FPURegister dst, Register src_low, Register src_high) {
299 mthc1(src_high, dst);
302 void Move(FPURegister dst, float imm);
303 void Move(FPURegister dst, double imm);
306 void Movz(Register rd, Register rs, Register rt);
307 void Movn(Register rd, Register rs, Register rt);
308 void Movt(Register rd, Register rs, uint16_t cc = 0);
309 void Movf(Register rd, Register rs, uint16_t cc = 0);
311 void Clz(Register rd, Register rs);
313 // Jump unconditionally to given label.
315 // We NEED a nop in the branch delay slot, as it is used by v8, for example in
315 // CodeGenerator::ProcessDeferred().
316 // Currently the branch delay slot is filled by the MacroAssembler.
317 // Use rather b(Label) for code generation.
322 void Load(Register dst, const MemOperand& src, Representation r);
323 void Store(Register src, const MemOperand& dst, Representation r);
325 void PushRoot(Heap::RootListIndex index) {
330 // Compare the object in a register to a value and jump if they are equal.
331 void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
333 Branch(if_equal, eq, with, Operand(at));
336 // Compare the object in a register to a value and jump if they are not equal.
337 void JumpIfNotRoot(Register with, Heap::RootListIndex index,
338 Label* if_not_equal) {
340 Branch(if_not_equal, ne, with, Operand(at));
343 // Load an object from the root table.
344 void LoadRoot(Register destination,
345 Heap::RootListIndex index);
346 void LoadRoot(Register destination,
347 Heap::RootListIndex index,
348 Condition cond, Register src1, const Operand& src2);
350 // Store an object to the root table.
351 void StoreRoot(Register source,
352 Heap::RootListIndex index);
353 void StoreRoot(Register source,
354 Heap::RootListIndex index,
355 Condition cond, Register src1, const Operand& src2);
357 // ---------------------------------------------------------------------------
360 void IncrementalMarkingRecordWriteHelper(Register object,
364 enum RememberedSetFinalAction {
370 // Record in the remembered set the fact that we have a pointer to new space
371 // at the address pointed to by the addr register. Only works if addr is not
373 void RememberedSetHelper(Register object, // Used for debug code.
376 SaveFPRegsMode save_fp,
377 RememberedSetFinalAction and_then);
379 void CheckPageFlag(Register object,
383 Label* condition_met);
385 // Check if object is in new space. Jumps if the object is not in new space.
386 // The register scratch can be object itself, but it will be clobbered.
387 void JumpIfNotInNewSpace(Register object,
390 InNewSpace(object, scratch, ne, branch);
393 // Check if object is in new space. Jumps if the object is in new space.
394 // The register scratch can be object itself, but scratch will be clobbered.
395 void JumpIfInNewSpace(Register object,
398 InNewSpace(object, scratch, eq, branch);
401 // Check if an object has a given incremental marking color.
402 void HasColor(Register object,
409 void JumpIfBlack(Register object,
414 // Checks the color of an object. If the object is already grey or black
415 // then we just fall through, since it is already live. If it is white and
416 // we can determine that it doesn't need to be scanned, then we just mark it
417 // black and fall through. For the rest we jump to the label so the
418 // incremental marker can fix its assumptions.
419 void EnsureNotWhite(Register object,
423 Label* object_is_white_and_not_data);
425 // Detects conservatively whether an object is data-only, i.e. it does not
426 // need to be scanned by the garbage collector.
427 void JumpIfDataObject(Register value,
429 Label* not_data_object);
431 // Notify the garbage collector that we wrote a pointer into an object.
432 // |object| is the object being stored into, |value| is the object being
433 // stored. value and scratch registers are clobbered by the operation.
434 // The offset is the offset from the start of the object, not the offset from
435 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
436 void RecordWriteField(
442 SaveFPRegsMode save_fp,
443 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
444 SmiCheck smi_check = INLINE_SMI_CHECK,
445 PointersToHereCheck pointers_to_here_check_for_value =
446 kPointersToHereMaybeInteresting);
448 // As above, but the offset has the tag presubtracted. For use with
449 // MemOperand(reg, off).
450 inline void RecordWriteContextSlot(
456 SaveFPRegsMode save_fp,
457 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
458 SmiCheck smi_check = INLINE_SMI_CHECK,
459 PointersToHereCheck pointers_to_here_check_for_value =
460 kPointersToHereMaybeInteresting) {
461 RecordWriteField(context,
462 offset + kHeapObjectTag,
467 remembered_set_action,
469 pointers_to_here_check_for_value);
472 void RecordWriteForMap(
477 SaveFPRegsMode save_fp);
479 // For a given |object| notify the garbage collector that the slot |address|
480 // has been written. |value| is the object being stored. The value and
481 // address registers are clobbered by the operation.
487 SaveFPRegsMode save_fp,
488 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
489 SmiCheck smi_check = INLINE_SMI_CHECK,
490 PointersToHereCheck pointers_to_here_check_for_value =
491 kPointersToHereMaybeInteresting);
494 // ---------------------------------------------------------------------------
495 // Inline caching support.
497 // Generate code for checking access rights - used for security checks
498 // on access to global objects across environments. The holder register
499 // is left untouched, whereas both scratch registers are clobbered.
500 void CheckAccessGlobalProxy(Register holder_reg,
504 void GetNumberHash(Register reg0, Register scratch);
506 void LoadFromNumberDictionary(Label* miss,
515 inline void MarkCode(NopMarkerTypes type) {
519 // Check if the given instruction is a 'type' marker.
520 // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
521 // nop(type)). These instructions are generated to mark special location in
522 // the code, like some special IC code.
523 static inline bool IsMarkedCode(Instr instr, int type) {
524 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
525 return IsNop(instr, type);
529 static inline int GetCodeMarker(Instr instr) {
530 uint32_t opcode = ((instr & kOpcodeMask));
531 uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
532 uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
533 uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
535 // Return <n> if we have a sll zero_reg, zero_reg, n
537 bool sllzz = (opcode == SLL &&
538 rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
539 rs == static_cast<uint32_t>(ToNumber(zero_reg)));
541 (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
542 DCHECK((type == -1) ||
543 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
549 // ---------------------------------------------------------------------------
550 // Allocation support.
552 // Allocate an object in new space or old space. The object_size is
553 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
554 // is passed. If the space is exhausted control continues at the gc_required
555 // label. The allocated object is returned in result. If the flag
556 // tag_allocated_object is true the result is tagged as a heap object.
557 // All registers are clobbered also when control continues at the gc_required
559 void Allocate(int object_size,
564 AllocationFlags flags);
566 void Allocate(Register object_size,
571 AllocationFlags flags);
573 void AllocateTwoByteString(Register result,
579 void AllocateOneByteString(Register result, Register length,
580 Register scratch1, Register scratch2,
581 Register scratch3, Label* gc_required);
582 void AllocateTwoByteConsString(Register result,
587 void AllocateOneByteConsString(Register result, Register length,
588 Register scratch1, Register scratch2,
590 void AllocateTwoByteSlicedString(Register result,
595 void AllocateOneByteSlicedString(Register result, Register length,
596 Register scratch1, Register scratch2,
599 // Allocates a heap number or jumps to the gc_required label if the young
600 // space is full and a scavenge is needed. All registers are clobbered also
601 // when control continues at the gc_required label.
602 void AllocateHeapNumber(Register result,
605 Register heap_number_map,
607 TaggingMode tagging_mode = TAG_RESULT,
608 MutableMode mode = IMMUTABLE);
610 void AllocateHeapNumberWithValue(Register result,
616 // ---------------------------------------------------------------------------
617 // Instruction macros.
619 #define DEFINE_INSTRUCTION(instr) \
620 void instr(Register rd, Register rs, const Operand& rt); \
621 void instr(Register rd, Register rs, Register rt) { \
622 instr(rd, rs, Operand(rt)); \
624 void instr(Register rs, Register rt, int32_t j) { \
625 instr(rs, rt, Operand(j)); \
628 #define DEFINE_INSTRUCTION2(instr) \
629 void instr(Register rs, const Operand& rt); \
630 void instr(Register rs, Register rt) { \
631 instr(rs, Operand(rt)); \
633 void instr(Register rs, int32_t j) { \
634 instr(rs, Operand(j)); \
637 DEFINE_INSTRUCTION(Addu);
638 DEFINE_INSTRUCTION(Daddu);
639 DEFINE_INSTRUCTION(Div);
640 DEFINE_INSTRUCTION(Divu);
641 DEFINE_INSTRUCTION(Ddivu);
642 DEFINE_INSTRUCTION(Mod);
643 DEFINE_INSTRUCTION(Modu);
644 DEFINE_INSTRUCTION(Ddiv);
645 DEFINE_INSTRUCTION(Subu);
646 DEFINE_INSTRUCTION(Dsubu);
647 DEFINE_INSTRUCTION(Dmod);
648 DEFINE_INSTRUCTION(Dmodu);
649 DEFINE_INSTRUCTION(Mul);
650 DEFINE_INSTRUCTION(Mulh);
651 DEFINE_INSTRUCTION(Mulhu);
652 DEFINE_INSTRUCTION(Dmul);
653 DEFINE_INSTRUCTION(Dmulh);
654 DEFINE_INSTRUCTION2(Mult);
655 DEFINE_INSTRUCTION2(Dmult);
656 DEFINE_INSTRUCTION2(Multu);
657 DEFINE_INSTRUCTION2(Dmultu);
658 DEFINE_INSTRUCTION2(Div);
659 DEFINE_INSTRUCTION2(Ddiv);
660 DEFINE_INSTRUCTION2(Divu);
661 DEFINE_INSTRUCTION2(Ddivu);
663 DEFINE_INSTRUCTION(And);
664 DEFINE_INSTRUCTION(Or);
665 DEFINE_INSTRUCTION(Xor);
666 DEFINE_INSTRUCTION(Nor);
667 DEFINE_INSTRUCTION2(Neg);
669 DEFINE_INSTRUCTION(Slt);
670 DEFINE_INSTRUCTION(Sltu);
672 // MIPS32 R2 instruction macro.
673 DEFINE_INSTRUCTION(Ror);
674 DEFINE_INSTRUCTION(Dror);
676 #undef DEFINE_INSTRUCTION
677 #undef DEFINE_INSTRUCTION2
679 void Pref(int32_t hint, const MemOperand& rs);
682 // ---------------------------------------------------------------------------
683 // Pseudo-instructions.
685 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
687 void Ulw(Register rd, const MemOperand& rs);
688 void Usw(Register rd, const MemOperand& rs);
689 void Uld(Register rd, const MemOperand& rs, Register scratch = at);
690 void Usd(Register rd, const MemOperand& rs, Register scratch = at);
692 void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
693 void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
695 // Load int32 in the rd register.
696 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
697 inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
698 li(rd, Operand(j), mode);
700 void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
702 // Push multiple registers on the stack.
703 // Registers are saved in numerical order, with higher numbered registers
704 // saved in higher memory addresses.
705 void MultiPush(RegList regs);
706 void MultiPushReversed(RegList regs);
708 void MultiPushFPU(RegList regs);
709 void MultiPushReversedFPU(RegList regs);
711 void push(Register src) {
712 Daddu(sp, sp, Operand(-kPointerSize));
713 sd(src, MemOperand(sp, 0));
715 void Push(Register src) { push(src); }
718 void Push(Handle<Object> handle);
719 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
721 // Push two registers. Pushes leftmost register first (to highest address).
722 void Push(Register src1, Register src2) {
723 Dsubu(sp, sp, Operand(2 * kPointerSize));
724 sd(src1, MemOperand(sp, 1 * kPointerSize));
725 sd(src2, MemOperand(sp, 0 * kPointerSize));
728 // Push three registers. Pushes leftmost register first (to highest address).
729 void Push(Register src1, Register src2, Register src3) {
730 Dsubu(sp, sp, Operand(3 * kPointerSize));
731 sd(src1, MemOperand(sp, 2 * kPointerSize));
732 sd(src2, MemOperand(sp, 1 * kPointerSize));
733 sd(src3, MemOperand(sp, 0 * kPointerSize));
736 // Push four registers. Pushes leftmost register first (to highest address).
737 void Push(Register src1, Register src2, Register src3, Register src4) {
738 Dsubu(sp, sp, Operand(4 * kPointerSize));
739 sd(src1, MemOperand(sp, 3 * kPointerSize));
740 sd(src2, MemOperand(sp, 2 * kPointerSize));
741 sd(src3, MemOperand(sp, 1 * kPointerSize));
742 sd(src4, MemOperand(sp, 0 * kPointerSize));
745 // Push five registers. Pushes leftmost register first (to highest address).
746 void Push(Register src1, Register src2, Register src3, Register src4,
748 Dsubu(sp, sp, Operand(5 * kPointerSize));
749 sd(src1, MemOperand(sp, 4 * kPointerSize));
750 sd(src2, MemOperand(sp, 3 * kPointerSize));
751 sd(src3, MemOperand(sp, 2 * kPointerSize));
752 sd(src4, MemOperand(sp, 1 * kPointerSize));
753 sd(src5, MemOperand(sp, 0 * kPointerSize));
756 void Push(Register src, Condition cond, Register tst1, Register tst2) {
757 // Since we don't have conditional execution we use a Branch.
758 Branch(3, cond, tst1, Operand(tst2));
759 Dsubu(sp, sp, Operand(kPointerSize));
760 sd(src, MemOperand(sp, 0));
763 void PushRegisterAsTwoSmis(Register src, Register scratch = at);
764 void PopRegisterAsTwoSmis(Register dst, Register scratch = at);
766 // Pops multiple values from the stack and load them in the
767 // registers specified in regs. Pop order is the opposite as in MultiPush.
768 void MultiPop(RegList regs);
769 void MultiPopReversed(RegList regs);
771 void MultiPopFPU(RegList regs);
772 void MultiPopReversedFPU(RegList regs);
774 void pop(Register dst) {
775 ld(dst, MemOperand(sp, 0));
776 Daddu(sp, sp, Operand(kPointerSize));
778 void Pop(Register dst) { pop(dst); }
780 // Pop two registers. Pops rightmost register first (from lower address).
781 void Pop(Register src1, Register src2) {
782 DCHECK(!src1.is(src2));
783 ld(src2, MemOperand(sp, 0 * kPointerSize));
784 ld(src1, MemOperand(sp, 1 * kPointerSize));
785 Daddu(sp, sp, 2 * kPointerSize);
788 // Pop three registers. Pops rightmost register first (from lower address).
789 void Pop(Register src1, Register src2, Register src3) {
790 ld(src3, MemOperand(sp, 0 * kPointerSize));
791 ld(src2, MemOperand(sp, 1 * kPointerSize));
792 ld(src1, MemOperand(sp, 2 * kPointerSize));
793 Daddu(sp, sp, 3 * kPointerSize);
796 void Pop(uint32_t count = 1) {
797 Daddu(sp, sp, Operand(count * kPointerSize));
800 // Push and pop the registers that can hold pointers, as defined by the
801 // RegList constant kSafepointSavedRegisters.
802 void PushSafepointRegisters();
803 void PopSafepointRegisters();
804 // Store value in register src in the safepoint stack slot for
806 void StoreToSafepointRegisterSlot(Register src, Register dst);
807 // Load the value of the src register from its safepoint stack slot
808 // into register dst.
809 void LoadFromSafepointRegisterSlot(Register dst, Register src);
811 // MIPS64 R2 instruction macro.
812 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
813 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
814 void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
816 // ---------------------------------------------------------------------------
817 // FPU macros. These do not handle special cases like NaN or +- inf.
819 // Convert unsigned word to double.
820 void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
821 void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
823 // Convert double to unsigned long.
824 void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
826 void Trunc_l_d(FPURegister fd, FPURegister fs);
827 void Round_l_d(FPURegister fd, FPURegister fs);
828 void Floor_l_d(FPURegister fd, FPURegister fs);
829 void Ceil_l_d(FPURegister fd, FPURegister fs);
831 // Convert double to unsigned word.
832 void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
833 void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
835 void Trunc_w_d(FPURegister fd, FPURegister fs);
836 void Round_w_d(FPURegister fd, FPURegister fs);
837 void Floor_w_d(FPURegister fd, FPURegister fs);
838 void Ceil_w_d(FPURegister fd, FPURegister fs);
840 void Madd_d(FPURegister fd,
844 FPURegister scratch);
846 // Wrapper functions for the different cmp/branch types.
847 inline void BranchF32(Label* target, Label* nan, Condition cc,
848 FPURegister cmp1, FPURegister cmp2,
849 BranchDelaySlot bd = PROTECT) {
850 BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
853 inline void BranchF64(Label* target, Label* nan, Condition cc,
854 FPURegister cmp1, FPURegister cmp2,
855 BranchDelaySlot bd = PROTECT) {
856 BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
859 // Alternate (inline) version for better readability with USE_DELAY_SLOT.
860 inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
861 Condition cc, FPURegister cmp1, FPURegister cmp2) {
862 BranchF64(target, nan, cc, cmp1, cmp2, bd);
865 inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
866 Condition cc, FPURegister cmp1, FPURegister cmp2) {
867 BranchF32(target, nan, cc, cmp1, cmp2, bd);
870 // Alias functions for backward compatibility.
871 inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
872 FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
873 BranchF64(target, nan, cc, cmp1, cmp2, bd);
876 inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
877 Condition cc, FPURegister cmp1, FPURegister cmp2) {
878 BranchF64(bd, target, nan, cc, cmp1, cmp2);
881 // Truncates a double using a specific rounding mode, and writes the value
882 // to the result register.
883 // The except_flag will contain any exceptions caused by the instruction.
884 // If check_inexact is kDontCheckForInexactConversion, then the inexact
885 // exception is masked.
886 void EmitFPUTruncate(FPURoundingMode rounding_mode,
888 DoubleRegister double_input,
890 DoubleRegister double_scratch,
891 Register except_flag,
892 CheckForInexactConversion check_inexact
893 = kDontCheckForInexactConversion);
895 // Performs a truncating conversion of a floating point number as used by
896 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
897 // succeeds, otherwise falls through if result is saturated. On return
898 // 'result' either holds answer, or is clobbered on fall through.
900 // Only public for the test code in test-code-stubs-arm.cc.
901 void TryInlineTruncateDoubleToI(Register result,
902 DoubleRegister input,
905 // Performs a truncating conversion of a floating point number as used by
906 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
907 // Exits with 'result' holding the answer.
908 void TruncateDoubleToI(Register result, DoubleRegister double_input);
910 // Performs a truncating conversion of a heap number as used by
911 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
912 // must be different registers. Exits with 'result' holding the answer.
913 void TruncateHeapNumberToI(Register result, Register object);
915 // Converts the smi or heap number in object to an int32 using the rules
916 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
917 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
918 // different registers.
919 void TruncateNumberToI(Register object,
921 Register heap_number_map,
925 // Loads the number from object into dst register.
926 // If |object| is neither smi nor heap number, |not_number| is jumped to
927 // with |object| still intact.
928 void LoadNumber(Register object,
930 Register heap_number_map,
934 // Loads the number from object into double_dst in the double format.
935 // Control will jump to not_int32 if the value cannot be exactly represented
936 // by a 32-bit integer.
937 // Floating point values in the 32-bit integer range that are not exact integers
939 void LoadNumberAsInt32Double(Register object,
940 DoubleRegister double_dst,
941 Register heap_number_map,
944 FPURegister double_scratch,
947 // Loads the number from object into dst as a 32-bit integer.
948 // Control will jump to not_int32 if the object cannot be exactly represented
949 // by a 32-bit integer.
950 // Floating point values in the 32-bit integer range that are not exact integers
951 // won't be converted.
952 void LoadNumberAsInt32(Register object,
954 Register heap_number_map,
957 FPURegister double_scratch0,
958 FPURegister double_scratch1,
962 // argc - argument count to be dropped by LeaveExitFrame.
963 // save_doubles - saves FPU registers on stack, currently disabled.
964 // stack_space - extra stack space.
965 void EnterExitFrame(bool save_doubles,
966 int stack_space = 0);
968 // Leave the current exit frame.
969 void LeaveExitFrame(bool save_doubles, Register arg_count,
970 bool restore_context, bool do_return = NO_EMIT_RETURN,
971 bool argument_count_is_length = false);
973 // Get the actual activation frame alignment for target environment.
974 static int ActivationFrameAlignment();
976 // Make sure the stack is aligned. Only emits code in debug mode.
977 void AssertStackIsAligned();
979 void LoadContext(Register dst, int context_chain_length);
981 // Load the global proxy from the current context.
982 void LoadGlobalProxy(Register dst);
984 // Conditionally load the cached Array transitioned map of type
985 // transitioned_kind from the native context if the map in register
986 // map_in_out is the cached Array map in the native context of
988 void LoadTransitionedArrayMapConditional(
989 ElementsKind expected_kind,
990 ElementsKind transitioned_kind,
993 Label* no_map_match);
995 void LoadGlobalFunction(int index, Register function);
997 // Load the initial map from the global function. The registers
998 // function and map can be the same, function is then overwritten.
999 void LoadGlobalFunctionInitialMap(Register function,
1003 void InitializeRootRegister() {
1004 ExternalReference roots_array_start =
1005 ExternalReference::roots_array_start(isolate());
1006 li(kRootRegister, Operand(roots_array_start));
1009 // -------------------------------------------------------------------------
1010 // JavaScript invokes.
1012 // Invoke the JavaScript function code by either calling or jumping.
1013 void InvokeCode(Register code,
1014 const ParameterCount& expected,
1015 const ParameterCount& actual,
1017 const CallWrapper& call_wrapper);
1019 // Invoke the JavaScript function in the given register. Changes the
1020 // current context to the context in the function before invoking.
1021 void InvokeFunction(Register function,
1022 const ParameterCount& actual,
1024 const CallWrapper& call_wrapper);
1026 void InvokeFunction(Register function,
1027 const ParameterCount& expected,
1028 const ParameterCount& actual,
1030 const CallWrapper& call_wrapper);
1032 void InvokeFunction(Handle<JSFunction> function,
1033 const ParameterCount& expected,
1034 const ParameterCount& actual,
1036 const CallWrapper& call_wrapper);
1039 void IsObjectJSStringType(Register object,
1043 void IsObjectNameType(Register object,
1047 // -------------------------------------------------------------------------
1048 // Debugger Support.
1052 // -------------------------------------------------------------------------
1053 // Exception handling.
1055 // Push a new stack handler and link into stack handler chain.
1056 void PushStackHandler();
1058 // Unlink the stack handler on top of the stack from the stack handler chain.
1059 // Must preserve the result register.
1060 void PopStackHandler();
1062 // Copies a fixed number of fields of heap objects from src to dst.
1063 void CopyFields(Register dst, Register src, RegList temps, int field_count);
1065 // Copies a number of bytes from src to dst. All registers are clobbered. On
1066 // exit src and dst will point to the place just after where the last byte was
1067 // read or written and length will be zero.
1068 void CopyBytes(Register src,
1073 // Initialize fields with filler values. Fields starting at |start_offset|
1074 // not including end_offset are overwritten with the value in |filler|. At
  // the end of the loop, |start_offset| takes the value of |end_offset|.
1076 void InitializeFieldsWithFiller(Register start_offset,
1077 Register end_offset,
1080 // -------------------------------------------------------------------------
1081 // Support functions.
1083 // Machine code version of Map::GetConstructor().
1084 // |temp| holds |result|'s map when done, and |temp2| its instance type.
1085 void GetMapConstructor(Register result, Register map, Register temp,
1088 // Try to get function prototype of a function and puts the value in
1089 // the result register. Checks that the function really is a
1090 // function and jumps to the miss label if the fast checks fail. The
1091 // function register will be untouched; the other registers may be
1093 void TryGetFunctionPrototype(Register function, Register result,
1094 Register scratch, Label* miss);
1096 void GetObjectType(Register function,
1100 // Check if a map for a JSObject indicates that the object has fast elements.
1101 // Jump to the specified label if it does not.
1102 void CheckFastElements(Register map,
1106 // Check if a map for a JSObject indicates that the object can have both smi
1107 // and HeapObject elements. Jump to the specified label if it does not.
1108 void CheckFastObjectElements(Register map,
1112 // Check if a map for a JSObject indicates that the object has fast smi only
1113 // elements. Jump to the specified label if it does not.
1114 void CheckFastSmiElements(Register map,
1118 // Check to see if maybe_number can be stored as a double in
1119 // FastDoubleElements. If it can, store it at the index specified by key in
1120 // the FastDoubleElements array elements. Otherwise jump to fail.
1121 void StoreNumberToDoubleElements(Register value_reg,
1123 Register elements_reg,
1127 int elements_offset = 0);
1129 // Compare an object's map with the specified map and its transitioned
1130 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
1131 // "branch_to" if the result of the comparison is "cond". If multiple map
1132 // compares are required, the compare sequences branches to early_success.
1133 void CompareMapAndBranch(Register obj,
1136 Label* early_success,
1140 // As above, but the map of the object is already loaded into the register
1141 // which is preserved by the code generated.
1142 void CompareMapAndBranch(Register obj_map,
1144 Label* early_success,
1148 // Check if the map of an object is equal to a specified map and branch to
1149 // label if not. Skip the smi check if not required (object is known to be a
1150 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
1152 void CheckMap(Register obj,
1156 SmiCheckType smi_check_type);
1159 void CheckMap(Register obj,
1161 Heap::RootListIndex index,
1163 SmiCheckType smi_check_type);
1165 // Check if the map of an object is equal to a specified weak map and branch
1166 // to a specified target if equal. Skip the smi check if not required
1167 // (object is known to be a heap object)
1168 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
1169 Handle<WeakCell> cell, Handle<Code> success,
1170 SmiCheckType smi_check_type);
1172 // If the value is a NaN, canonicalize the value else, do nothing.
1173 void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
1176 // Get value of the weak cell.
1177 void GetWeakValue(Register value, Handle<WeakCell> cell);
1179 // Load the value of the weak cell in the value register. Branch to the
  // given miss label if the weak cell was cleared.
1181 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
1183 // Load and check the instance type of an object for being a string.
1184 // Loads the type into the second argument register.
1185 // Returns a condition that will be enabled if the object was a string.
1186 Condition IsObjectStringType(Register obj,
1189 ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
1190 lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
1191 And(type, type, Operand(kIsNotStringMask));
1192 DCHECK_EQ(0u, kStringTag);
1197 // Picks out an array index from the hash field.
1199 // hash - holds the index's hash. Clobbered.
1200 // index - holds the overwritten index on exit.
1201 void IndexFromHash(Register hash, Register index);
1203 // Get the number of least significant bits from a register.
1204 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1205 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
1207 // Load the value of a number object into a FPU double register. If the
1208 // object is not a number a jump to the label not_number is performed
1209 // and the FPU double register is unchanged.
1210 void ObjectToDoubleFPURegister(
1215 Register heap_number_map,
1217 ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
1219 // Load the value of a smi object into a FPU double register. The register
1220 // scratch1 can be the same register as smi in which case smi will hold the
1221 // untagged value afterwards.
1222 void SmiToDoubleFPURegister(Register smi,
1226 // -------------------------------------------------------------------------
1227 // Overflow handling functions.
1228 // Usage: first call the appropriate arithmetic function, then call one of the
1229 // jump functions with the overflow_dst register as the second parameter.
1231 void AdduAndCheckForOverflow(Register dst,
1234 Register overflow_dst,
1235 Register scratch = at);
1237 void AdduAndCheckForOverflow(Register dst, Register left,
1238 const Operand& right, Register overflow_dst,
1241 void SubuAndCheckForOverflow(Register dst,
1244 Register overflow_dst,
1245 Register scratch = at);
1247 void SubuAndCheckForOverflow(Register dst, Register left,
1248 const Operand& right, Register overflow_dst,
1251 void DadduAndCheckForOverflow(Register dst, Register left, Register right,
1252 Register overflow_dst, Register scratch = at);
1254 void DadduAndCheckForOverflow(Register dst, Register left,
1255 const Operand& right, Register overflow_dst,
1258 void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
1259 Register overflow_dst, Register scratch = at);
1261 void DsubuAndCheckForOverflow(Register dst, Register left,
1262 const Operand& right, Register overflow_dst,
  // Branch to |label| if |overflow_check| is negative, i.e. the preceding
  // *AndCheckForOverflow macro signalled an overflow (overflow is encoded
  // as a value < 0 in the overflow register).
  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }
  // Branch to |label| if |overflow_check| is non-negative, i.e. no overflow
  // was detected by the preceding *AndCheckForOverflow macro.
  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }
  // Return from the current function if |overflow_check| is negative
  // (overflow occurred).
  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }
  // Return from the current function if |overflow_check| is non-negative
  // (no overflow occurred).
  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }
1285 // -------------------------------------------------------------------------
1288 // See comments at the beginning of CEntryStub::Generate.
1289 inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
  // Load the external reference of the target C function into a1 before
  // entering CEntryStub.
  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }
1295 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
1296 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
1298 // Call a code stub.
1299 void CallStub(CodeStub* stub,
1300 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1303 // Tail call a code stub (jump).
1304 void TailCallStub(CodeStub* stub, COND_ARGS);
1308 void CallJSExitStub(CodeStub* stub);
1310 // Call a runtime routine.
1311 void CallRuntime(const Runtime::Function* f, int num_arguments,
1312 SaveFPRegsMode save_doubles = kDontSaveFPRegs,
1313 BranchDelaySlot bd = PROTECT);
  // Convenience wrapper: call the runtime function |id| with its declared
  // argument count, saving FPU registers across the call.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }
  // Convenience function: same as the CallRuntime overload above, but takes
  // the function id and looks the Runtime::Function up itself.
  void CallRuntime(Runtime::FunctionId id, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
  }
1326 // Convenience function: call an external reference.
1327 void CallExternalReference(const ExternalReference& ext,
1329 BranchDelaySlot bd = PROTECT);
1331 // Tail call of a runtime routine (jump).
1332 // Like JumpToExternalReference, but also takes care of passing the number
1334 void TailCallExternalReference(const ExternalReference& ext,
1338 // Convenience function: tail call a runtime routine (jump).
1339 void TailCallRuntime(Runtime::FunctionId fid,
1343 int CalculateStackPassedWords(int num_reg_arguments,
1344 int num_double_arguments);
1346 // Before calling a C-function from generated code, align arguments on stack
1347 // and add space for the four mips argument slots.
1348 // After aligning the frame, non-register arguments must be stored on the
1349 // stack, after the argument-slots using helper: CFunctionArgumentOperand().
1350 // The argument count assumes all arguments are word sized.
1351 // Some compilers/platforms require the stack to be aligned when calling
1353 // Needs a scratch register to do some arithmetic. This register will be
1355 void PrepareCallCFunction(int num_reg_arguments,
1356 int num_double_registers,
1358 void PrepareCallCFunction(int num_reg_arguments,
1361 // Arguments 1-4 are placed in registers a0 thru a3 respectively.
1362 // Arguments 5..n are stored to stack using following:
1363 // sw(a4, CFunctionArgumentOperand(5));
1365 // Calls a C function and cleans up the space for arguments allocated
1366 // by PrepareCallCFunction. The called function is not allowed to trigger a
1367 // garbage collection, since that might move the code and invalidate the
1368 // return address (unless this is somehow accounted for by the called
1370 void CallCFunction(ExternalReference function, int num_arguments);
1371 void CallCFunction(Register function, int num_arguments);
1372 void CallCFunction(ExternalReference function,
1373 int num_reg_arguments,
1374 int num_double_arguments);
1375 void CallCFunction(Register function,
1376 int num_reg_arguments,
1377 int num_double_arguments);
1378 void MovFromFloatResult(DoubleRegister dst);
1379 void MovFromFloatParameter(DoubleRegister dst);
1381 // There are two ways of passing double arguments on MIPS, depending on
1382 // whether soft or hard floating point ABI is used. These functions
1383 // abstract parameter passing for the three different ways we call
1384 // C functions from generated code.
1385 void MovToFloatParameter(DoubleRegister src);
1386 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1387 void MovToFloatResult(DoubleRegister src);
1389 // Jump to the builtin routine.
1390 void JumpToExternalReference(const ExternalReference& builtin,
1391 BranchDelaySlot bd = PROTECT);
1393 // Invoke specified builtin JavaScript function.
1394 void InvokeBuiltin(int native_context_index, InvokeFlag flag,
1395 const CallWrapper& call_wrapper = NullCallWrapper());
1397 // Store the code object for the given builtin in the target register and
1398 // setup the function in a1.
1399 void GetBuiltinEntry(Register target, int native_context_index);
1401 // Store the function for the given builtin in the target register.
1402 void GetBuiltinFunction(Register target, int native_context_index);
1406 uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
  // Return the handle that is patched with this code object on installation
  // (see code_object_). Must not be called before the handle is set.
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }
1415 // Emit code for a truncating division by a constant. The dividend register is
1416 // unchanged and at gets clobbered. Dividend and result must be different.
1417 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1419 // -------------------------------------------------------------------------
1420 // StatsCounter support.
1422 void SetCounter(StatsCounter* counter, int value,
1423 Register scratch1, Register scratch2);
1424 void IncrementCounter(StatsCounter* counter, int value,
1425 Register scratch1, Register scratch2);
1426 void DecrementCounter(StatsCounter* counter, int value,
1427 Register scratch1, Register scratch2);
1430 // -------------------------------------------------------------------------
1433 // Calls Abort(msg) if the condition cc is not satisfied.
1434 // Use --debug_code to enable.
1435 void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
1436 void AssertFastElements(Register elements);
1438 // Like Assert(), but always enabled.
1439 void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
1441 // Print a message to stdout and abort execution.
1442 void Abort(BailoutReason msg);
  // Verify restrictions about code generated in stubs.
  // Flag: set while the assembler is generating stub code.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  // Flag: set when the generated code runs with a frame set up.
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);
1451 // ---------------------------------------------------------------------------
1452 // Number utilities.
1454 // Check whether the value of reg is a power of two and not zero. If not
1455 // control continues at the label not_power_of_two. If reg is a power of two
1456 // the register scratch contains the value of (reg - 1) when control falls
1458 void JumpIfNotPowerOfTwoOrZero(Register reg,
1460 Label* not_power_of_two_or_zero);
1462 // -------------------------------------------------------------------------
1465 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1466 void SmiTagCheckOverflow(Register reg, Register overflow);
1467 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1469 void SmiTag(Register dst, Register src) {
1470 STATIC_ASSERT(kSmiTag == 0);
1471 if (SmiValuesAre32Bits()) {
1472 STATIC_ASSERT(kSmiShift == 32);
1473 dsll32(dst, src, 0);
1475 Addu(dst, src, src);
1479 void SmiTag(Register reg) {
  // Try to convert an int32 to a smi in place. If the value is too large,
  // preserve the original value and jump to not_a_smi. Destroys scratch.
  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
    TrySmiTag(reg, reg, scratch, not_a_smi);
  }
1490 void TrySmiTag(Register dst,
1494 if (SmiValuesAre32Bits()) {
1497 SmiTagCheckOverflow(at, src, scratch);
1498 BranchOnOverflow(not_a_smi, scratch);
1503 void SmiUntag(Register dst, Register src) {
1504 if (SmiValuesAre32Bits()) {
1505 STATIC_ASSERT(kSmiShift == 32);
1506 dsra32(dst, src, 0);
1508 sra(dst, src, kSmiTagSize);
1512 void SmiUntag(Register reg) {
1516 // Left-shifted from int32 equivalent of Smi.
1517 void SmiScale(Register dst, Register src, int scale) {
1518 if (SmiValuesAre32Bits()) {
1519 // The int portion is upper 32-bits of 64-bit word.
1520 dsra(dst, src, kSmiShift - scale);
1522 DCHECK(scale >= kSmiTagSize);
1523 sll(dst, src, scale - kSmiTagSize);
1527 // Combine load with untagging or scaling.
1528 void SmiLoadUntag(Register dst, MemOperand src);
1530 void SmiLoadScale(Register dst, MemOperand src, int scale);
1532 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
1533 void SmiLoadWithScale(Register d_smi,
1538 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
1539 void SmiLoadUntagWithScale(Register d_int,
  // Test if the register contains a smi: leaves |value| & kSmiTagMask in
  // |scratch| (zero iff |value| is a smi); branch on scratch afterwards.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  // Like SmiTst, but also checks the sign bit: |scratch| is zero iff |value|
  // is a non-negative smi.
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }
1553 // Untag the source value into destination and jump if source is a smi.
1554 // Source and destination can be the same register.
1555 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1557 // Untag the source value into destination and jump if source is not a smi.
1558 // Source and destination can be the same register.
1559 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
  // Jump if the register contains a smi.
1562 void JumpIfSmi(Register value,
1564 Register scratch = at,
1565 BranchDelaySlot bd = PROTECT);
1567 // Jump if the register contains a non-smi.
1568 void JumpIfNotSmi(Register value,
1569 Label* not_smi_label,
1570 Register scratch = at,
1571 BranchDelaySlot bd = PROTECT);
1573 // Jump if either of the registers contain a non-smi.
1574 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1575 // Jump if either of the registers contain a smi.
1576 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1578 // Abort execution if argument is a smi, enabled via --debug-code.
1579 void AssertNotSmi(Register object);
1580 void AssertSmi(Register object);
1582 // Abort execution if argument is not a string, enabled via --debug-code.
1583 void AssertString(Register object);
1585 // Abort execution if argument is not a name, enabled via --debug-code.
1586 void AssertName(Register object);
1588 // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1589 void AssertFunction(Register object);
1591 // Abort execution if argument is not undefined or an AllocationSite, enabled
1592 // via --debug-code.
1593 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1595 // Abort execution if reg is not the root value with the given index,
1596 // enabled via --debug-code.
1597 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1599 // ---------------------------------------------------------------------------
1600 // HeapNumber utilities.
1602 void JumpIfNotHeapNumber(Register object,
1603 Register heap_number_map,
1605 Label* on_not_heap_number);
1607 // -------------------------------------------------------------------------
1608 // String utilities.
1610 // Checks if both instance types are sequential one-byte strings and jumps to
1611 // label if either is not.
1612 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1613 Register first_object_instance_type, Register second_object_instance_type,
1614 Register scratch1, Register scratch2, Label* failure);
1616 // Check if instance type is sequential one-byte string and jump to label if
1618 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1621 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1623 void EmitSeqStringSetCharCheck(Register string,
1627 uint32_t encoding_mask);
1629 // Checks if both objects are sequential one-byte strings and jumps to label
1630 // if either is not. Assumes that neither object is a smi.
1631 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
1637 // Checks if both objects are sequential one-byte strings and jumps to label
1638 // if either is not.
1639 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1642 Label* not_flat_one_byte_strings);
1644 void ClampUint8(Register output_reg, Register input_reg);
1646 void ClampDoubleToUint8(Register result_reg,
1647 DoubleRegister input_reg,
1648 DoubleRegister temp_double_reg);
1651 void LoadInstanceDescriptors(Register map, Register descriptors);
1652 void EnumLength(Register dst, Register map);
1653 void NumberOfOwnDescriptors(Register dst, Register map);
1654 void LoadAccessor(Register dst, Register holder, int accessor_index,
1655 AccessorComponent accessor);
  // Extract the bit field described by Field (kShift/kSize) from |src| into
  // |dst|, shifted down to bit 0.
  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }
  // In-place variant of DecodeField: decodes the field of |reg| into |reg|.
  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
  // Extract the bit field described by Field from |src| into |dst| and
  // smi-tag the result (value placed in the upper 32 bits, matching the
  // 32-bit smi encoding used by SmiTag above).
  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift;
    dsrl(dst, src, shift);
    And(dst, dst, Operand(mask));
    dsll32(dst, dst, 0);  // Smi-tag: shift the decoded value into the upper word.
  }
1676 template<typename Field>
1677 void DecodeFieldToSmi(Register reg) {
1678 DecodeField<Field>(reg, reg);
1680 // Generates function and stub prologue code.
1681 void StubPrologue();
1682 void Prologue(bool code_pre_aging);
1684 // Load the type feedback vector from a JavaScript frame.
1685 void EmitLoadTypeFeedbackVector(Register vector);
1687 // Activation support.
1688 void EnterFrame(StackFrame::Type type);
1689 void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
1690 void LeaveFrame(StackFrame::Type type);
1692 // Expects object in a0 and returns map with validated enum cache
1693 // in a0. Assumes that any other register can be used as a scratch.
1694 void CheckEnumCache(Register null_value, Label* call_runtime);
1696 // AllocationMemento support. Arrays may have an associated
1697 // AllocationMemento object that can be checked for in order to pretransition
1699 // On entry, receiver_reg should point to the array object.
1700 // scratch_reg gets clobbered.
1701 // If allocation info is present, jump to allocation_memento_present.
1702 void TestJSArrayForAllocationMemento(
1703 Register receiver_reg,
1704 Register scratch_reg,
1705 Label* no_memento_found,
1706 Condition cond = al,
1707 Label* allocation_memento_present = NULL);
  // Jump to |memento_found| if the JSArray pointed to by |receiver_reg| has
  // an associated AllocationMemento; otherwise fall through.
  // |scratch_reg| gets clobbered (see TestJSArrayForAllocationMemento).
  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found, eq, memento_found);
    bind(&no_memento_found);
  }
1718 // Jumps to found label if a prototype map has dictionary elements.
1719 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1720 Register scratch1, Label* found);
1723 void CallCFunctionHelper(Register function,
1724 int num_reg_arguments,
1725 int num_double_arguments);
1727 void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
1728 void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
1730 BranchDelaySlot bdslot = PROTECT);
1731 void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
1732 void BranchAndLinkShort(Label* L, Condition cond, Register rs,
1734 BranchDelaySlot bdslot = PROTECT);
1735 void J(Label* L, BranchDelaySlot bdslot);
1736 void Jal(Label* L, BranchDelaySlot bdslot);
1737 void Jr(Label* L, BranchDelaySlot bdslot);
1738 void Jalr(Label* L, BranchDelaySlot bdslot);
1740 // Common implementation of BranchF functions for the different formats.
1741 void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
1742 Condition cc, FPURegister cmp1, FPURegister cmp2,
1743 BranchDelaySlot bd = PROTECT);
1745 void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
1746 FPURegister cmp1, FPURegister cmp2,
1747 BranchDelaySlot bd = PROTECT);
1750 // Helper functions for generating invokes.
1751 void InvokePrologue(const ParameterCount& expected,
1752 const ParameterCount& actual,
1753 Handle<Code> code_constant,
1756 bool* definitely_mismatches,
1758 const CallWrapper& call_wrapper);
1760 void InitializeNewString(Register string,
1762 Heap::RootListIndex map_index,
1766 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1767 void InNewSpace(Register object,
1769 Condition cond, // eq for new space, ne otherwise.
1772 // Helper for finding the mark bits for an address. Afterwards, the
1773 // bitmap register points at the word with the mark bits and the mask
1774 // the position of the first bit. Leaves addr_reg unchanged.
1775 inline void GetMarkBits(Register addr_reg,
1776 Register bitmap_reg,
1779 // Compute memory operands for safepoint stack slots.
1780 static int SafepointRegisterStackIndex(int reg_code);
1781 MemOperand SafepointRegisterSlot(Register reg);
1782 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1784 bool generating_stub_;
1786 bool has_double_zero_reg_set_;
1787 // This handle will be patched with the code object on installation.
1788 Handle<Object> code_object_;
1790 // Needs access to SafepointRegisterStackIndex for compiled frame
1792 friend class StandardFrame;
1796 // The code patcher is used to patch (typically) small parts of code e.g. for
1797 // debugging and other types of instrumentation. When using the code patcher
1798 // the exact number of bytes specified must be emitted. It is not legal to emit
1799 // relocation information. If any of these constraints are violated it causes
1800 // an assertion to fail.
1808 CodePatcher(byte* address,
1810 FlushICache flush_cache = FLUSH);
1813 // Macro assembler to emit code.
1814 MacroAssembler* masm() { return &masm_; }
1816 // Emit an instruction directly.
1817 void Emit(Instr instr);
1819 // Emit an address directly.
1820 void Emit(Address addr);
1822 // Change the condition part of an instruction leaving the rest of the current
1823 // instruction unchanged.
1824 void ChangeBranchCondition(Condition cond);
1827 byte* address_; // The address of the code being patched.
1828 int size_; // Number of bytes of the expected patch size.
1829 MacroAssembler masm_; // Macro assembler used to generate the code.
1830 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1835 #ifdef GENERATED_CODE_COVERAGE
1836 #define CODE_COVERAGE_STRINGIFY(x) #x
1837 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1838 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1839 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1841 #define ACCESS_MASM(masm) masm->
1844 } // namespace internal
1847 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_