// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_

#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips64/assembler-mips64.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {kRegister_v0_Code};
const Register kReturnRegister1 = {kRegister_v1_Code};
const Register kJSFunctionRegister = {kRegister_a1_Code};
const Register kContextRegister = {kRegister_s7_Code};
const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_a7_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_t0_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_t1_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_t2_Code};
const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};

// Forward declaration.
class JumpTarget;

// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function calls
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update the gp register for position-independent code. Whenever
// MIPS generated code calls C code, it must be via the t9 register.
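//
// An illustrative sketch of that convention; the callee address below is
// hypothetical and not part of this interface:
//
//   li(t9, Operand(target_address));  // Load the callee address into t9.
//   Call(t9);                         // Indirect call through t9.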

// Flags used for LeaveExitFrame function.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,
  NO_EMIT_RETURN = false
};

// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};

// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities, branch to the non number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};

// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,
  PROTECT
};

// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than lui/ori/dsll
  // sequence.
  OPTIMIZE_SIZE = 0,
  // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
  // could be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1,
  // For address loads only 4 instructions are required. Used to mark
  // a constant load that will be used as an address without relocation
  // information. It ensures predictable code size, so specific sites
  // in code are patchable.
  ADDRESS_LOAD = 2
};
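
// Illustrative usage sketch; the register and constants are hypothetical:
//
//   li(t0, Operand(4), OPTIMIZE_SIZE);            // May emit one instruction.
//   li(t0, Operand(0x123456789), CONSTANT_SIZE);  // Full patchable sequence.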

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}


inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}


// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
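
// Illustrative usage sketch; the registers are hypothetical:
//
//   ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));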

inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
  // Assumes that Smis are shifted by 32 bits and little endianness.
  STATIC_ASSERT(kSmiShift == 32);
  return MemOperand(rm, offset + (kSmiShift / kBitsPerByte));
}


inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
  return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
}

// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
// TODO(plind): Currently ONLY used for O32. Should be fixed for
//              n64, and used in RegExp code, and other places
//              with more than 8 arguments.
inline MemOperand CFunctionArgumentOperand(int index) {
  DCHECK(index > kCArgSlotCount);
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
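
// Illustrative usage sketch; the register choice is hypothetical (see also
// the CallCFunction comments further below):
//
//   sw(a4, CFunctionArgumentOperand(5));  // Store the fifth C argument.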

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, target_type target) { \
    Name(target, bd); \
  } \
  void Name(target_type target, \
            COND_TYPED_ARGS, \
            BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, \
                   target_type target, \
                   COND_TYPED_ARGS) { \
    Name(target, COND_ARGS, bd); \
  }

#define DECLARE_BRANCH_PROTOTYPES(Name) \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)

  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)

#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  void Jump(Register target, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
  static int CallSize(Register target, COND_ARGS);
  void Call(Register target, COND_ARGS);
  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               COND_ARGS);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            COND_ARGS);
  void Ret(COND_ARGS);
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
                  Register rs = zero_reg,
                  const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }

  void Branch(Label* L,
              Condition cond,
              Register rs,
              Heap::RootListIndex index,
              BranchDelaySlot bdslot = PROTECT);

#undef COND_ARGS

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count,
            Condition cond = cc_always,
            Register reg = no_reg,
            const Operand& op = Operand(no_reg));

  // Trivial case of DropAndRet that utilizes the delay slot and only emits
  // 2 instructions.
  void DropAndRet(int drop);

  void DropAndRet(int drop,
                  Condition cond,
                  Register reg,
                  const Operand& op);

  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);

  void Call(Label* target);

  inline void Move(Register dst, Register src) {
    if (!dst.is(src)) {
      mov(dst, src);
    }
  }

  inline void Move(FPURegister dst, FPURegister src) {
    if (!dst.is(src)) {
      mov_d(dst, src);
    }
  }

  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    mfhc1(dst_high, src);
  }

  inline void FmoveHigh(Register dst_high, FPURegister src) {
    mfhc1(dst_high, src);
  }

  inline void FmoveHigh(FPURegister dst, Register src_high) {
    mthc1(src_high, dst);
  }

  inline void FmoveLow(Register dst_low, FPURegister src) {
    mfc1(dst_low, src);
  }

  void FmoveLow(FPURegister dst, Register src_low);

  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    mthc1(src_high, dst);
  }

  void Move(FPURegister dst, float imm);
  void Move(FPURegister dst, double imm);

  // Conditional move.
  void Movz(Register rd, Register rs, Register rt);
  void Movn(Register rd, Register rs, Register rt);
  void Movt(Register rd, Register rs, uint16_t cc = 0);
  void Movf(Register rd, Register rs, uint16_t cc = 0);

  void Clz(Register rd, Register rs);

  // Jump unconditionally to given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Prefer b(Label) for code generation.
  void jmp(Label* L) {
    Branch(L);
  }

  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  void PushRoot(Heap::RootListIndex index) {
    LoadRoot(at, index);
    Push(at);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    LoadRoot(at, index);
    Branch(if_equal, eq, with, Operand(at));
  }

  // Compare the object in a register to a value and jump if they are not
  // equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    LoadRoot(at, index);
    Branch(if_not_equal, ne, with, Operand(at));
  }

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index);
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond, Register src1, const Operand& src2);

  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);

  // ---------------------------------------------------------------------------
  // GC Support.

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register address,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
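
  // Illustrative write-barrier sketch; the field offset and registers are
  // hypothetical:
  //
  //   sd(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
  //   RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
  //                    kRAHasNotBeenSaved, kDontSaveFPRegs);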

  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     ra_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      RAStatus ra_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // ---------------------------------------------------------------------------
  // Inline caching support.

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register reg0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register reg0,
                                Register reg1,
                                Register reg2);

  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
  // nop(type)). These instructions are generated to mark special locations in
  // the code, like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  static inline int GetCodeMarker(Instr instr) {
    uint32_t opcode = ((instr & kOpcodeMask));
    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);

    // Return <n> if we have a sll zero_reg, zero_reg, n,
    // else return -1.
    bool sllzz = (opcode == SLL &&
                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
    int type =
        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }

  // ---------------------------------------------------------------------------
  // Allocation support.

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* need_gc,
                          TaggingMode tagging_mode = TAG_RESULT,
                          MutableMode mode = IMMUTABLE);

  void AllocateHeapNumberWithValue(Register result,
                                   FPURegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);

  // ---------------------------------------------------------------------------
  // Instruction macros.

#define DEFINE_INSTRUCTION(instr) \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) { \
    instr(rd, rs, Operand(rt)); \
  } \
  void instr(Register rs, Register rt, int32_t j) { \
    instr(rs, rt, Operand(j)); \
  }

#define DEFINE_INSTRUCTION2(instr) \
  void instr(Register rs, const Operand& rt); \
  void instr(Register rs, Register rt) { \
    instr(rs, Operand(rt)); \
  } \
  void instr(Register rs, int32_t j) { \
    instr(rs, Operand(j)); \
  }

  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Daddu);
  DEFINE_INSTRUCTION(Div);
  DEFINE_INSTRUCTION(Divu);
  DEFINE_INSTRUCTION(Ddivu);
  DEFINE_INSTRUCTION(Mod);
  DEFINE_INSTRUCTION(Modu);
  DEFINE_INSTRUCTION(Ddiv);
  DEFINE_INSTRUCTION(Subu);
  DEFINE_INSTRUCTION(Dsubu);
  DEFINE_INSTRUCTION(Dmod);
  DEFINE_INSTRUCTION(Dmodu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION(Mulh);
  DEFINE_INSTRUCTION(Mulhu);
  DEFINE_INSTRUCTION(Dmul);
  DEFINE_INSTRUCTION(Dmulh);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION2(Dmult);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Dmultu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Ddiv);
  DEFINE_INSTRUCTION2(Divu);
  DEFINE_INSTRUCTION2(Ddivu);

  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);
  DEFINE_INSTRUCTION2(Neg);

  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);

  // MIPS32 R2 instruction macro.
  DEFINE_INSTRUCTION(Ror);
  DEFINE_INSTRUCTION(Dror);

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
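
  // Illustrative usage of the macros defined above; registers and operands
  // are hypothetical:
  //
  //   Addu(v0, a0, Operand(4));  // Register-operand form.
  //   Dsubu(t0, t1, t2);         // Register-register form.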

  void Pref(int32_t hint, const MemOperand& rs);


  // ---------------------------------------------------------------------------
  // Pseudo-instructions.

  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  void Ulw(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);
  void Uld(Register rd, const MemOperand& rs, Register scratch = at);
  void Usd(Register rd, const MemOperand& rs, Register scratch = at);

  // Load int32 in the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);

  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher numbered registers
  // saved in higher memory addresses.
  void MultiPush(RegList regs);
  void MultiPushReversed(RegList regs);

  void MultiPushFPU(RegList regs);
  void MultiPushReversedFPU(RegList regs);
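
  // Illustrative usage sketch; the register set is hypothetical:
  //
  //   MultiPush(a0.bit() | a1.bit() | ra.bit());  // Save before a call.
  //   ...
  //   MultiPop(a0.bit() | a1.bit() | ra.bit());   // Restore afterwards.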

  void push(Register src) {
    Daddu(sp, sp, Operand(-kPointerSize));
    sd(src, MemOperand(sp, 0));
  }
  void Push(Register src) { push(src); }

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Dsubu(sp, sp, Operand(2 * kPointerSize));
    sd(src1, MemOperand(sp, 1 * kPointerSize));
    sd(src2, MemOperand(sp, 0 * kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Dsubu(sp, sp, Operand(3 * kPointerSize));
    sd(src1, MemOperand(sp, 2 * kPointerSize));
    sd(src2, MemOperand(sp, 1 * kPointerSize));
    sd(src3, MemOperand(sp, 0 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Dsubu(sp, sp, Operand(4 * kPointerSize));
    sd(src1, MemOperand(sp, 3 * kPointerSize));
    sd(src2, MemOperand(sp, 2 * kPointerSize));
    sd(src3, MemOperand(sp, 1 * kPointerSize));
    sd(src4, MemOperand(sp, 0 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    Dsubu(sp, sp, Operand(5 * kPointerSize));
    sd(src1, MemOperand(sp, 4 * kPointerSize));
    sd(src2, MemOperand(sp, 3 * kPointerSize));
    sd(src3, MemOperand(sp, 2 * kPointerSize));
    sd(src4, MemOperand(sp, 1 * kPointerSize));
    sd(src5, MemOperand(sp, 0 * kPointerSize));
  }

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Dsubu(sp, sp, Operand(kPointerSize));
    sd(src, MemOperand(sp, 0));
  }

  void PushRegisterAsTwoSmis(Register src, Register scratch = at);
  void PopRegisterAsTwoSmis(Register dst, Register scratch = at);

  // Pops multiple values from the stack and loads them into the
  // registers specified in regs. Pop order is the opposite as in MultiPush.
  void MultiPop(RegList regs);
  void MultiPopReversed(RegList regs);

  void MultiPopFPU(RegList regs);
  void MultiPopReversedFPU(RegList regs);

  void pop(Register dst) {
    ld(dst, MemOperand(sp, 0));
    Daddu(sp, sp, Operand(kPointerSize));
  }
  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(!src1.is(src2));
    ld(src2, MemOperand(sp, 0 * kPointerSize));
    ld(src1, MemOperand(sp, 1 * kPointerSize));
    Daddu(sp, sp, 2 * kPointerSize);
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    ld(src3, MemOperand(sp, 0 * kPointerSize));
    ld(src2, MemOperand(sp, 1 * kPointerSize));
    ld(src1, MemOperand(sp, 2 * kPointerSize));
    Daddu(sp, sp, 3 * kPointerSize);
  }

  void Pop(uint32_t count = 1) {
    Daddu(sp, sp, Operand(count * kPointerSize));
  }

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
  // from C.
  // Does not handle errors.
  void FlushICache(Register address, unsigned instructions);

  // MIPS64 R2 instruction macro.
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);

  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned long.
  void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);

  void Trunc_l_d(FPURegister fd, FPURegister fs);
  void Round_l_d(FPURegister fd, FPURegister fs);
  void Floor_l_d(FPURegister fd, FPURegister fs);
  void Ceil_l_d(FPURegister fd, FPURegister fs);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);

  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);

  void Madd_d(FPURegister fd,
              FPURegister fr,
              FPURegister fs,
              FPURegister ft,
              FPURegister scratch);

  // Wrapper functions for the different cmp/branch types.
  inline void BranchF32(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF64(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
  }

  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
  inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF32(target, nan, cc, cmp1, cmp2, bd);
  }

  // Alias functions for backward compatibility.
  inline void BranchF(Label* target, Label* nan, Condition cc,
                      FPURegister cmp1, FPURegister cmp2,
                      BranchDelaySlot bd = PROTECT) {
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
                      Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF64(bd, target, nan, cc, cmp1, cmp2);
  }
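
  // Illustrative usage sketch; labels and FPU registers are hypothetical:
  //
  //   Label done, is_nan;
  //   BranchF64(&done, &is_nan, le, f0, f2);  // Take 'done' if f0 <= f2.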

  // Truncates a double using a specific rounding mode, and writes the value
  // to the result register.
  // The except_flag will contain any exceptions caused by the instruction.
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
  // exception is masked.
  void EmitFPUTruncate(FPURoundingMode rounding_mode,
                       Register result,
                       DoubleRegister double_input,
                       Register scratch,
                       DoubleRegister double_scratch,
                       Register except_flag,
                       CheckForInexactConversion check_inexact
                           = kDontCheckForInexactConversion);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DoubleRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must
  // be different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch,
                         Label* not_int32);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  FPURegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32Double(Register object,
                               DoubleRegister double_dst,
                               Register heap_number_map,
                               Register scratch1,
                               Register scratch2,
                               FPURegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         FPURegister double_scratch0,
                         FPURegister double_scratch1,
                         Label* not_int32);

  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles,
                      int stack_space = 0);

  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles, Register arg_count,
                      bool restore_context, bool do_return = NO_EMIT_RETURN,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  void LoadContext(Register dst, int context_chain_length);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
  }

  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);


  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);

  // -------------------------------------------------------------------------
  // Debugger Support.

  void DebugBreak();

  // -------------------------------------------------------------------------
  // Exception handling.

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst, Register src, RegList temps, int field_count);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte
  // was read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values. Fields starting at |start_offset|
  // not including end_offset are overwritten with the value in |filler|. At
  // the end of the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // -------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  void GetObjectType(Register function,
                     Register map,
                     Register type_reg);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
  // compares are required, the compare sequences branches to early_success.
  void CompareMapAndBranch(Register obj,
                           Register scratch,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMapAndBranch(Register obj_map,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);


  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // If the value is a NaN, canonicalize the value; else, do nothing.
  void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);

  // Get value of the weak cell.
  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the
  // given miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type,
                               Register result) {
    ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    DCHECK_EQ(0u, kStringTag);
    return eq;
  }

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Load the value of a number object into a FPU double register. If the
  // object is not a number a jump to the label not_number is performed
  // and the FPU double register is unchanged.
  void ObjectToDoubleFPURegister(
      Register object,
      FPURegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into a FPU double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleFPURegister(Register smi,
                              FPURegister value,
                              Register scratch1);

  // -------------------------------------------------------------------------
  // Overflow handling functions.
  // Usage: first call the appropriate arithmetic function, then call one of
  // the jump functions with the overflow_dst register as the second parameter.
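  //
  // An illustrative sketch; the registers and label are hypothetical:
  //
  //   Label overflow;
  //   AdduAndCheckForOverflow(v0, a0, a1, t3);
  //   BranchOnOverflow(&overflow, t3);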

  void AdduAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void AdduAndCheckForOverflow(Register dst, Register left,
                               const Operand& right, Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst, Register left,
                               const Operand& right, Register overflow_dst,
                               Register scratch = at);

  void DadduAndCheckForOverflow(Register dst, Register left, Register right,
                                Register overflow_dst, Register scratch = at);

  void DadduAndCheckForOverflow(Register dst, Register left,
                                const Operand& right, Register overflow_dst,
                                Register scratch = at);

  void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
                                Register overflow_dst, Register scratch = at);

  void DsubuAndCheckForOverflow(Register dst, Register left,
                                const Operand& right, Register overflow_dst,
                                Register scratch = at);

  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }

  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }

  // -------------------------------------------------------------------------
  // Runtime calls.

  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }

  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }

#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                COND_ARGS);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, COND_ARGS);

#undef COND_ARGS

  void CallJSExitStub(CodeStub* stub);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments,
                             BranchDelaySlot bd = PROTECT);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of arguments.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack
  // and add space for the four mips argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument-slots using helper: CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C functions.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
  // Arguments 5..n are stored to stack using following:
  //  sw(a4, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
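
  // Illustrative call sequence; the external reference and registers are
  // hypothetical:
  //
  //   PrepareCallCFunction(5, 0, t0);
  //   sw(a4, CFunctionArgumentOperand(5));  // Fifth argument to the stack.
  //   CallCFunction(ExternalReference(...), 5);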

  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);

  // There are two ways of passing double arguments on MIPS, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               BranchDelaySlot bd = PROTECT);

  // Invoke specified builtin JavaScript function.
  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // set up the function in a1.
  void GetBuiltinEntry(Register target, int native_context_index);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, int native_context_index);

  struct Unresolved {
    int pc;
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Emit code for a truncating division by a constant. The dividend register
  // is unchanged and at gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

  // -------------------------------------------------------------------------
  // StatsCounter support.

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);

  // -------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities.

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);

  // -------------------------------------------------------------------------
  // Smi utilities.

  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  void SmiTag(Register dst, Register src) {
    STATIC_ASSERT(kSmiTag == 0);
    if (SmiValuesAre32Bits()) {
      STATIC_ASSERT(kSmiShift == 32);
      dsll32(dst, src, 0);
    } else {
      Addu(dst, src, src);
    }
  }

  void SmiTag(Register reg) {
    SmiTag(reg, reg);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
    TrySmiTag(reg, reg, scratch, not_a_smi);
  }

  void TrySmiTag(Register dst,
                 Register src,
                 Register scratch,
                 Label* not_a_smi) {
    if (SmiValuesAre32Bits()) {
      SmiTag(dst, src);
    } else {
      SmiTagCheckOverflow(at, src, scratch);
      BranchOnOverflow(not_a_smi, scratch);
      mov(dst, at);
    }
  }

  void SmiUntag(Register dst, Register src) {
    if (SmiValuesAre32Bits()) {
      STATIC_ASSERT(kSmiShift == 32);
      dsra32(dst, src, 0);
    } else {
      sra(dst, src, kSmiTagSize);
    }
  }

  void SmiUntag(Register reg) {
    SmiUntag(reg, reg);
  }

  // Left-shifted from int32 equivalent of Smi.
  void SmiScale(Register dst, Register src, int scale) {
    if (SmiValuesAre32Bits()) {
      // The int portion is upper 32-bits of 64-bit word.
      dsra(dst, src, kSmiShift - scale);
    } else {
      DCHECK(scale >= kSmiTagSize);
      sll(dst, src, scale - kSmiTagSize);
    }
  }
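
  // Illustrative tagging round-trip, assuming 32-bit smis; the registers are
  // hypothetical:
  //
  //   SmiTag(t0, a0);    // int32 in a0 -> smi in t0 (value shifted left 32).
  //   SmiUntag(a0, t0);  // smi in t0 -> int32 back in a0.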

  // Combine load with untagging or scaling.
  void SmiLoadUntag(Register dst, MemOperand src);

  void SmiLoadScale(Register dst, MemOperand src, int scale);

  // Returns 2 values: the Smi and a scaled version of the int within the Smi.
  void SmiLoadWithScale(Register d_smi,
                        Register d_scaled,
                        MemOperand src,
                        int scale);

  // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
  void SmiLoadUntagWithScale(Register d_int,
                             Register d_scaled,
                             MemOperand src,
                             int scale);


  // Test if the register contains a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // -------------------------------------------------------------------------
  // String utilities.

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
                                                    Register second,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift;
    dsrl(dst, src, shift);
    And(dst, dst, Operand(mask));
    dsll32(dst, dst, 0);
  }

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }
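
  // Illustrative usage of the field decoders; the bit field and registers are
  // hypothetical:
  //
  //   DecodeField<Map::ElementsKindBits>(t0, map_reg);  // Untagged field.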

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in a0 and returns map with validated enum cache
  // in a0. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value, Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, jump to allocation_memento_present.
  void TestJSArrayForAllocationMemento(
      Register receiver_reg,
      Register scratch_reg,
      Label* no_memento_found,
      Condition cond = al,
      Label* allocation_memento_present = NULL);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found, eq, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void J(Label* L, BranchDelaySlot bdslot);
  void Jal(Label* L, BranchDelaySlot bdslot);
  void Jr(Label* L, BranchDelaySlot bdslot);
  void Jalr(Label* L, BranchDelaySlot bdslot);

  // Common implementation of BranchF functions for the different formats.
  void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
                     Condition cc, FPURegister cmp1, FPURegister cmp2,
                     BranchDelaySlot bd = PROTECT);

  void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
                    FPURegister cmp1, FPURegister cmp2,
                    BranchDelaySlot bd = PROTECT);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  bool has_double_zero_reg_set_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(byte* address,
              int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction leaving the rest of the
  // current instruction unchanged.
  void ChangeBranchCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};

#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

} }  // namespace v8::internal

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_