// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_

#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips/assembler-mips.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {kRegister_v0_Code};
const Register kReturnRegister1 = {kRegister_v1_Code};
const Register kJSFunctionRegister = {kRegister_a1_Code};
const Register kContextRegister = {Register::kCpRegister};
const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_t3_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_t4_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_t5_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_t6_Code};
const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};

// Forward declaration.
class JumpTarget;

// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function calls
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update the gp register for position-independent code. Whenever
// MIPS generated code calls C code, it must be via the t9 register.

// Flags used for the LeaveExitFrame function.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,
  NO_EMIT_RETURN = false
};

// Flags used for AllocateHeapNumber.
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};

// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities, branch to the non number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};

// Allow programmers to use the branch delay slot of branches, jumps, and calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,
  PROTECT
};

// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than a lui/ori pair.
  OPTIMIZE_SIZE = 0,
  // Always use 2 instructions (a lui/ori pair), even if the constant could
  // be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}


inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}


// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
  DCHECK(index > kCArgSlotCount);
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
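
// Usage sketch (illustrative only, assuming a free scratch register): storing
// a fifth C argument before a call, after PrepareCallCFunction() has reserved
// the argument slots and the first four arguments are in a0..a3:
//
//   __ PrepareCallCFunction(5, scratch);
//   __ sw(t0, CFunctionArgumentOperand(5));  // Fifth argument to the stack.
//   __ CallCFunction(ref, 5);                // ref is some ExternalReference.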

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such a function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, target_type target) { \
    Name(target, bd); \
  } \
  void Name(target_type target, \
            COND_TYPED_ARGS, \
            BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, \
                   target_type target, \
                   COND_TYPED_ARGS) { \
    Name(target, COND_ARGS, bd); \
  }

#define DECLARE_BRANCH_PROTOTYPES(Name) \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)

  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)

#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  void Jump(Register target, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
  static int CallSize(Register target, COND_ARGS);
  void Call(Register target, COND_ARGS);
  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               COND_ARGS);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            COND_ARGS);
  void Ret(COND_ARGS);
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
    Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }

  void Branch(Label* L,
              Condition cond,
              Register rs,
              Heap::RootListIndex index,
              BranchDelaySlot bdslot = PROTECT);

#undef COND_ARGS

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count,
            Condition cond = cc_always,
            Register reg = no_reg,
            const Operand& op = Operand(no_reg));

  // Trivial case of DropAndRet that utilizes the delay slot and only emits
  // 2 instructions.
  void DropAndRet(int drop);

  void DropAndRet(int drop,
                  Condition cond,
                  Register reg,
                  const Operand& op);

  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
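
  // A minimal sketch of the scratch-less path (an assumption based on the
  // comment above; the emitted sequence lives in macro-assembler-mips.cc):
  //
  //   xor_(reg1, reg1, reg2);
  //   xor_(reg2, reg2, reg1);
  //   xor_(reg1, reg1, reg2);  // Classic three-XOR in-place exchange.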

  void Call(Label* target);

  void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }

  inline void Move(Register dst, Register src) {
    if (!dst.is(src)) {
      mov(dst, src);
    }
  }

  inline void Move(FPURegister dst, FPURegister src) {
    if (!dst.is(src)) {
      mov_d(dst, src);
    }
  }

  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(Register dst_high, FPURegister src) {
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(FPURegister dst, Register src_high) {
    Mthc1(src_high, dst);
  }

  inline void FmoveLow(Register dst_low, FPURegister src) {
    mfc1(dst_low, src);
  }

  void FmoveLow(FPURegister dst, Register src_low);

  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    Mthc1(src_high, dst);
  }

  void Move(FPURegister dst, float imm);
  void Move(FPURegister dst, double imm);

  // Conditional move.
  void Movz(Register rd, Register rs, Register rt);
  void Movn(Register rd, Register rs, Register rt);
  void Movt(Register rd, Register rs, uint16_t cc = 0);
  void Movf(Register rd, Register rs, uint16_t cc = 0);

  void Clz(Register rd, Register rs);

  // Jump unconditionally to the given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Use rather b(Label) for code generation.
  void jmp(Label* L) {
    Branch(L);
  }

  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  void PushRoot(Heap::RootListIndex index) {
    LoadRoot(at, index);
    Push(at);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    LoadRoot(at, index);
    Branch(if_equal, eq, with, Operand(at));
  }

  // Compare the object in a register to a value and jump if they are not
  // equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    LoadRoot(at, index);
    Branch(if_not_equal, ne, with, Operand(at));
  }

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index);
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond, Register src1, const Operand& src2);

  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);

  // ---------------------------------------------------------------------------
  // GC Support.

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     ra_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      RAStatus ra_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // ---------------------------------------------------------------------------
  // Inline caching support.

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register reg0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register reg0,
                                Register reg1,
                                Register reg2);

  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
  // nop(type)). These instructions are generated to mark special locations in
  // the code, like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  static inline int GetCodeMarker(Instr instr) {
    uint32_t opcode = ((instr & kOpcodeMask));
    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);

    // Return <n> if we have a sll zero_reg, zero_reg, n;
    // else return -1.
    bool sllzz = (opcode == SLL &&
                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
    int type =
        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }

  // ---------------------------------------------------------------------------
  // Allocation support.

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required,
                          TaggingMode tagging_mode = TAG_RESULT,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result,
                                   FPURegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);

  // ---------------------------------------------------------------------------
  // Instruction macros.

#define DEFINE_INSTRUCTION(instr) \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) { \
    instr(rd, rs, Operand(rt)); \
  } \
  void instr(Register rs, Register rt, int32_t j) { \
    instr(rs, rt, Operand(j)); \
  }

#define DEFINE_INSTRUCTION2(instr) \
  void instr(Register rs, const Operand& rt); \
  void instr(Register rs, Register rt) { \
    instr(rs, Operand(rt)); \
  } \
  void instr(Register rs, int32_t j) { \
    instr(rs, Operand(j)); \
  }

#define DEFINE_INSTRUCTION3(instr) \
  void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
  void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
    instr(rd_hi, rd_lo, rs, Operand(rt)); \
  } \
  void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
    instr(rd_hi, rd_lo, rs, Operand(j)); \
  }

  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Subu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION(Div);
  DEFINE_INSTRUCTION(Divu);
  DEFINE_INSTRUCTION(Mod);
  DEFINE_INSTRUCTION(Modu);
  DEFINE_INSTRUCTION(Mulh);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION(Mulhu);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Divu);

  DEFINE_INSTRUCTION3(Div);
  DEFINE_INSTRUCTION3(Mul);

  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);
  DEFINE_INSTRUCTION2(Neg);

  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);

  // MIPS32 R2 instruction macro.
  DEFINE_INSTRUCTION(Ror);

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3

  void Pref(int32_t hint, const MemOperand& rs);

  // ---------------------------------------------------------------------------
  // Pseudo-instructions.

  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  void Ulw(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);

  // Load int32 in the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
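
  // Usage sketch (illustrative only): OPTIMIZE_SIZE may emit a single
  // instruction for a 16-bit immediate, while CONSTANT_SIZE forces the
  // two-instruction lui/ori form so the constant stays patchable:
  //
  //   __ li(t0, Operand(0x1234));                 // One instruction suffices.
  //   __ li(t1, Operand(0x1234), CONSTANT_SIZE);  // Always lui/ori.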

  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher numbered registers
  // saved in higher memory addresses.
  void MultiPush(RegList regs);
  void MultiPushReversed(RegList regs);

  void MultiPushFPU(RegList regs);
  void MultiPushReversedFPU(RegList regs);

  void push(Register src) {
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }
  void Push(Register src) { push(src); }

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Subu(sp, sp, Operand(2 * kPointerSize));
    sw(src1, MemOperand(sp, 1 * kPointerSize));
    sw(src2, MemOperand(sp, 0 * kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Subu(sp, sp, Operand(3 * kPointerSize));
    sw(src1, MemOperand(sp, 2 * kPointerSize));
    sw(src2, MemOperand(sp, 1 * kPointerSize));
    sw(src3, MemOperand(sp, 0 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Subu(sp, sp, Operand(4 * kPointerSize));
    sw(src1, MemOperand(sp, 3 * kPointerSize));
    sw(src2, MemOperand(sp, 2 * kPointerSize));
    sw(src3, MemOperand(sp, 1 * kPointerSize));
    sw(src4, MemOperand(sp, 0 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    Subu(sp, sp, Operand(5 * kPointerSize));
    sw(src1, MemOperand(sp, 4 * kPointerSize));
    sw(src2, MemOperand(sp, 3 * kPointerSize));
    sw(src3, MemOperand(sp, 2 * kPointerSize));
    sw(src4, MemOperand(sp, 1 * kPointerSize));
    sw(src5, MemOperand(sp, 0 * kPointerSize));
  }

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Subu(sp, sp, Operand(kPointerSize));
    sw(src, MemOperand(sp, 0));
  }

  // Pops multiple values from the stack and loads them in the
  // registers specified in regs. Pop order is the opposite as in MultiPush.
  void MultiPop(RegList regs);
  void MultiPopReversed(RegList regs);

  void MultiPopFPU(RegList regs);
  void MultiPopReversedFPU(RegList regs);

  void pop(Register dst) {
    lw(dst, MemOperand(sp, 0));
    Addu(sp, sp, Operand(kPointerSize));
  }
  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(!src1.is(src2));
    lw(src2, MemOperand(sp, 0 * kPointerSize));
    lw(src1, MemOperand(sp, 1 * kPointerSize));
    Addu(sp, sp, 2 * kPointerSize);
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    lw(src3, MemOperand(sp, 0 * kPointerSize));
    lw(src2, MemOperand(sp, 1 * kPointerSize));
    lw(src1, MemOperand(sp, 2 * kPointerSize));
    Addu(sp, sp, 3 * kPointerSize);
  }

  void Pop(uint32_t count = 1) {
    Addu(sp, sp, Operand(count * kPointerSize));
  }
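
  // Usage sketch (illustrative only): multi-register Push and Pop calls pair
  // up, with the leftmost register always at the highest address:
  //
  //   __ Push(a0, a1, a2);  // a0 at sp+8, a1 at sp+4, a2 at sp+0.
  //   ...
  //   __ Pop(a0, a1, a2);   // Reloads the same layout, then adds 12 to sp.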

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // MIPS32 R2 instruction macro.
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);

  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);

  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);

  // FP32 mode: Move the general-purpose register into
  // the high part of the double-register pair.
  // FP64 mode: Move the general-purpose register into
  // the higher 32 bits of the 64-bit coprocessor register,
  // while leaving the low bits unchanged.
  void Mthc1(Register rt, FPURegister fs);

  // FP32 mode: Move the high part of the double-register pair into
  // the general-purpose register.
  // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
  // the general-purpose register.
  void Mfhc1(Register rt, FPURegister fs);

  // Wrapper functions for the different cmp/branch types.
  inline void BranchF32(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF64(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
  }

  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
  inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF32(target, nan, cc, cmp1, cmp2, bd);
  }

  // Alias functions for backward compatibility.
  inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
                      FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
                      Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF64(bd, target, nan, cc, cmp1, cmp2);
  }

  // Truncates a double using a specific rounding mode, and writes the value
  // to the result register.
  // The except_flag will contain any exceptions caused by the instruction.
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
  // exception is masked.
  void EmitFPUTruncate(FPURoundingMode rounding_mode,
                       Register result,
                       DoubleRegister double_input,
                       Register scratch,
                       DoubleRegister double_scratch,
                       Register except_flag,
                       CheckForInexactConversion check_inexact
                           = kDontCheckForInexactConversion);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DoubleRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must
  // be different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch,
                         Label* not_int32);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  FPURegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
  void LoadNumberAsInt32Double(Register object,
                               DoubleRegister double_dst,
                               Register heap_number_map,
                               Register scratch1,
                               Register scratch2,
                               FPURegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         FPURegister double_scratch0,
                         FPURegister double_scratch1,
                         Label* not_int32);

  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles,
                      int stack_space = 0);

  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles, Register arg_count,
                      bool restore_context, bool do_return = NO_EMIT_RETURN,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  void LoadContext(Register dst, int context_chain_length);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
  }

  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);

  // -------------------------------------------------------------------------
  // Debugger Support.

  void DebugBreak();

  // -------------------------------------------------------------------------
  // Exception handling.

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst, Register src, RegList temps, int field_count);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte
  // was read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values. Fields starting at |start_offset|
  // not including |end_offset| are overwritten with the value in |filler|. At
  // the end of the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // -------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  void GetObjectType(Register function,
                     Register map,
                     Register type_reg);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   Register scratch2,
                                   Register scratch3,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
  // compares are required, the compare sequences branch to early_success.
  void CompareMapAndBranch(Register obj,
                           Register scratch,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMapAndBranch(Register obj_map,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);


  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Get the value of the weak cell.
  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the
  // given miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type,
                               Register result) {
    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    DCHECK_EQ(0u, kStringTag);
    return eq;
  }


  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Load the value of a number object into an FPU double register. If the
  // object is not a number, a jump to the label not_number is performed
  // and the FPU double register is unchanged.
  void ObjectToDoubleFPURegister(
      Register object,
      FPURegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into an FPU double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleFPURegister(Register smi,
                              FPURegister value,
                              Register scratch1);

  // -------------------------------------------------------------------------
  // Overflow handling functions.
  // Usage: first call the appropriate arithmetic function, then call one of
  // the jump functions with the overflow_dst register as the second parameter.

  void AdduAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void AdduAndCheckForOverflow(Register dst, Register left,
                               const Operand& right, Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst, Register left,
                               const Operand& right, Register overflow_dst,
                               Register scratch = at);

  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }

  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }
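
  // Usage sketch (illustrative only), per the comment heading this section:
  // perform the arithmetic first, then branch on the overflow register.
  //
  //   __ AdduAndCheckForOverflow(v0, a0, a1, t9);  // t9 < 0 iff overflow.
  //   __ BranchOnOverflow(&bailout, t9);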

  // -------------------------------------------------------------------------
  // Runtime calls.

  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }

  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }

#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                COND_ARGS);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, COND_ARGS);

#undef COND_ARGS

  void CallJSExitStub(CodeStub* stub);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments,
                             BranchDelaySlot bd = PROTECT);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack
  // and add space for the four mips argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument slots, using the helper
  // CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
  // Arguments 5..n are stored to stack using the following:
  //  sw(t0, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
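
  // End-to-end sketch of a C call (illustrative only; 'ref' stands for some
  // ExternalReference and t8 is just one possible scratch register):
  //
  //   __ PrepareCallCFunction(5, t8);          // Align sp, reserve arg slots.
  //   __ li(a0, Operand(1));                   // Arguments 1-4 in a0..a3.
  //   __ sw(t0, CFunctionArgumentOperand(5));  // Argument 5 on the stack.
  //   __ CallCFunction(ref, 5);                // Dispatches through t9.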

  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);

  // There are two ways of passing double arguments on MIPS, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               BranchDelaySlot bd = PROTECT);

  // Invoke specified builtin JavaScript function.
  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // set up the function in a1.
  void GetBuiltinEntry(Register target, int native_context_index);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, int native_context_index);

  struct Unresolved {
    int pc;
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }


  // Emit code for a truncating division by a constant. The dividend register
  // is unchanged and at gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

  // -------------------------------------------------------------------------
  // StatsCounter support.

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // -------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities.

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);

  // -------------------------------------------------------------------------
  // Smi utilities.

  void SmiTag(Register reg) {
    Addu(reg, reg, reg);
  }

  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  void SmiTag(Register dst, Register src) {
    Addu(dst, src, src);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
    TrySmiTag(reg, reg, scratch, not_a_smi);
  }
  void TrySmiTag(Register dst,
                 Register src,
                 Register scratch,
                 Label* not_a_smi) {
    SmiTagCheckOverflow(at, src, scratch);
    BranchOnOverflow(not_a_smi, scratch);
    mov(dst, at);
  }

  void SmiUntag(Register reg) {
    sra(reg, reg, kSmiTagSize);
  }

  void SmiUntag(Register dst, Register src) {
    sra(dst, src, kSmiTagSize);
  }
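
  // On 32-bit MIPS kSmiTag == 0 and kSmiTagSize == 1, so tagging is a 1-bit
  // left shift: Addu(reg, reg, reg) computes reg + reg == reg << 1 without a
  // shift-amount operand, and sra by kSmiTagSize undoes it. For example, 5
  // tags to 10 and untags back to 5 (illustrative arithmetic only).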

  // Test if the register contains a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // -------------------------------------------------------------------------
  // String utilities.

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
                                                    Register second,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift << kSmiTagSize;
    STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
    STATIC_ASSERT(kSmiTag == 0);
    if (shift < kSmiTagSize) {
      sll(dst, src, kSmiTagSize - shift);
      And(dst, dst, Operand(mask));
    } else if (shift > kSmiTagSize) {
      srl(dst, src, shift - kSmiTagSize);
      And(dst, dst, Operand(mask));
    } else {
      And(dst, src, Operand(mask));
    }
  }

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }
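
  // Illustrative sketch with a hypothetical field of 3 bits at shift 2
  // (Field::kShift == 2, Field::kSize == 3, Field::kMask == 0x1c):
  //
  //   __ DecodeField<Field>(t0, t1);       // t0 = (t1 >> 2) & 0x7, via Ext.
  //   __ DecodeFieldToSmi<Field>(t0, t1);  // t0 = ((t1 >> 2) & 0x7) << 1.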

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in a0 and returns map with validated enum cache
  // in a0. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value, Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, jump to allocation_memento_present.
  void TestJSArrayForAllocationMemento(
      Register receiver_reg,
      Register scratch_reg,
      Label* no_memento_found,
      Condition cond = al,
      Label* allocation_memento_present = NULL);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found, eq, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void Jr(Label* L, BranchDelaySlot bdslot);
  void Jalr(Label* L, BranchDelaySlot bdslot);

  // Common implementation of BranchF functions for the different formats.
  void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
                     Condition cc, FPURegister cmp1, FPURegister cmp2,
                     BranchDelaySlot bd = PROTECT);

  void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
                    FPURegister cmp1, FPURegister cmp2,
                    BranchDelaySlot bd = PROTECT);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  bool has_double_zero_reg_set_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(byte* address,
              int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction leaving the rest of the
  // current instruction unchanged.
  void ChangeBranchCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I-cache after patching.
};
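
// Usage sketch (illustrative only): replace exactly two instructions at
// 'address'; the destructor checks the emitted size and flushes the I-cache.
//
//   CodePatcher patcher(address, 2);
//   patcher.ChangeBranchCondition(ne);  // Rewrite the branch in place.
//   patcher.masm()->nop();              // Fill the remaining slot.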

#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

} }  // namespace v8::internal

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_