// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_

#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips/assembler-mips.h"
namespace v8 {
namespace internal {

// Forward declaration.
class JumpTarget;
// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
// Per the MIPS ABI, register t9 must be used for indirect function calls
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update the gp register for position-independent code. Whenever
// MIPS generated code calls C code, it must do so via the t9 register.
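//
// A minimal sketch of that convention (the address value is hypothetical;
// Call() emits the 'jalr t9' itself):
//   li(t9, Operand(reinterpret_cast<int32_t>(c_function_address)));
//   Call(t9);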
// Flags used for the LeaveExitFrame function.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,
  NO_EMIT_RETURN = false
};
// Flags used for AllocateHeapNumber.
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};
// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non-smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities; branch to the non-number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};
// Allow programmers to use the branch delay slot of branches, jumps, and calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,
  PROTECT
};
// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than a lui/ori pair.
  OPTIMIZE_SIZE = 0,
  // Always use 2 instructions (a lui/ori pair), even if the constant could
  // be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1
};
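// A minimal usage sketch (hypothetical constant; 'li' is declared further
// below in MacroAssembler):
//   li(t0, Operand(0x1234), OPTIMIZE_SIZE);  // may emit a single instruction.
//   li(t1, Operand(0x1234), CONSTANT_SIZE);  // always lui/ori, patchable.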
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
  DCHECK(index > kCArgSlotCount);
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
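// For example, on MIPS32 the four argument slots occupy 16 bytes
// (kCArgsSlotsSize == 4 * kPointerSize), so CFunctionArgumentOperand(5)
// resolves to MemOperand(sp, 16) and CFunctionArgumentOperand(6) to
// MemOperand(sp, 20).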
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);
  // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, target_type target) { \
    Name(target, bd); \
  } \
  void Name(target_type target, \
            COND_TYPED_ARGS, \
            BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, \
                   target_type target, \
                   COND_TYPED_ARGS) { \
    Name(target, COND_ARGS, bd); \
  }

#define DECLARE_BRANCH_PROTOTYPES(Name) \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)

#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS
  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
  void Jump(Register target, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
  static int CallSize(Register target, COND_ARGS);
  void Call(Register target, COND_ARGS);
  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               COND_ARGS);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            COND_ARGS);
  void Ret(COND_ARGS);
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
    Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }
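  // A sketch of the delay-slot idiom this enables (the instruction emitted
  // right after a USE_DELAY_SLOT branch lands in the delay slot):
  //   Ret(USE_DELAY_SLOT);
  //   mov(v0, a0);  // Executed in the branch delay slot.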
  void Branch(Label* L,
              Condition cond,
              Register rs,
              Heap::RootListIndex index,
              BranchDelaySlot bdslot = PROTECT);
  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count,
            Condition cond = cc_always,
            Register reg = no_reg,
            const Operand& op = Operand(no_reg));
  // Trivial case of DropAndRet that utilizes the delay slot and only emits
  // 2 instructions.
  void DropAndRet(int drop);

  void DropAndRet(int drop,
                  Condition cond,
                  Register reg,
                  const Operand& op);
  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
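  // Without a scratch register the swap conceptually reduces to the classic
  // three-xor exchange (a sketch, not necessarily the exact emitted code):
  //   xor_(reg1, reg1, reg2);
  //   xor_(reg2, reg2, reg1);
  //   xor_(reg1, reg1, reg2);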
  void Call(Label* target);
  inline void Move(Register dst, Register src) {
    if (!dst.is(src)) {
      mov(dst, src);
    }
  }

  inline void Move(FPURegister dst, FPURegister src) {
    if (!dst.is(src)) {
      mov_d(dst, src);
    }
  }

  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(Register dst_high, FPURegister src) {
    Mfhc1(dst_high, src);
  }

  inline void FmoveLow(Register dst_low, FPURegister src) {
    mfc1(dst_low, src);
  }

  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    Mthc1(src_high, dst);
  }
  void Move(FPURegister dst, double imm);
  void Movz(Register rd, Register rs, Register rt);
  void Movn(Register rd, Register rs, Register rt);
  void Movt(Register rd, Register rs, uint16_t cc = 0);
  void Movf(Register rd, Register rs, uint16_t cc = 0);

  void Clz(Register rd, Register rs);
  // Jump unconditionally to the given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Prefer b(Label) for ordinary code generation.
  void jmp(Label* L) {
    Branch(L);
  }
  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);
  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index);
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond, Register src1, const Operand& src2);

  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);
  // ---------------------------------------------------------------------------
  // GC Support.

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);
  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };
  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);
  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);
  void CheckMapDeprecated(Handle<Map> map,
                          Register scratch,
                          Label* if_deprecated);
  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }
  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }
  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);
  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* object_is_white_and_not_data);
  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);
  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     ra_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }
  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      RAStatus ra_status,
      SaveFPRegsMode save_fp);
  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
  // ---------------------------------------------------------------------------
  // Inline caching support.

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);
  void GetNumberHash(Register reg0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register reg0,
                                Register reg1,
                                Register reg2);
  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }
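  // For example, MarkCode(type) emits 'sll zero_reg, zero_reg, type' (a typed
  // nop), which a scanner can later recognize via IsMarkedCode() below.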
  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
  // nop(type)). These instructions are generated to mark special locations in
  // the code, like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }
  static inline int GetCodeMarker(Instr instr) {
    uint32_t opcode = ((instr & kOpcodeMask));
    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);

    // Return <n> if we have a sll zero_reg, zero_reg, n,
    // else return -1.
    bool sllzz = (opcode == SLL &&
                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
    int type =
        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }
  // ---------------------------------------------------------------------------
  // Allocation support.

  // Allocate an object in new space or old pointer space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);
  void Allocate(Register object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);
  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. The caller must make sure that no pointers
  // are left to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object, Register scratch);
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);
  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required,
                          TaggingMode tagging_mode = TAG_RESULT,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result,
                                   FPURegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  // ---------------------------------------------------------------------------
  // Instruction macros.

#define DEFINE_INSTRUCTION(instr) \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) { \
    instr(rd, rs, Operand(rt)); \
  } \
  void instr(Register rs, Register rt, int32_t j) { \
    instr(rs, rt, Operand(j)); \
  }

#define DEFINE_INSTRUCTION2(instr) \
  void instr(Register rs, const Operand& rt); \
  void instr(Register rs, Register rt) { \
    instr(rs, Operand(rt)); \
  } \
  void instr(Register rs, int32_t j) { \
    instr(rs, Operand(j)); \
  }

#define DEFINE_INSTRUCTION3(instr) \
  void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
  void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
    instr(rd_hi, rd_lo, rs, Operand(rt)); \
  } \
  void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
    instr(rd_hi, rd_lo, rs, Operand(j)); \
  }
  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Subu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION(Div);
  DEFINE_INSTRUCTION(Divu);
  DEFINE_INSTRUCTION(Mod);
  DEFINE_INSTRUCTION(Modu);
  DEFINE_INSTRUCTION(Mulh);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION(Mulhu);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Divu);

  DEFINE_INSTRUCTION3(Div);
  DEFINE_INSTRUCTION3(Mul);

  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);
  DEFINE_INSTRUCTION2(Neg);

  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);

  // MIPS32 R2 instruction macro.
  DEFINE_INSTRUCTION(Ror);
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3

  void Pref(int32_t hint, const MemOperand& rs);
  // ---------------------------------------------------------------------------
  // Pseudo-instructions.

  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  void Ulw(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);
  // Load int32 in the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher numbered registers
  // saved in higher memory addresses.
  void MultiPush(RegList regs);
  void MultiPushReversed(RegList regs);
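  // For example (a sketch; a0 and a2 stand in for any register list):
  //   MultiPush(a0.bit() | a2.bit());
  // stores a2 at the higher address (sp + 4) and a0 at the lower (sp + 0).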
  void MultiPushFPU(RegList regs);
  void MultiPushReversedFPU(RegList regs);
  void push(Register src) {
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }
  void Push(Register src) { push(src); }

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Subu(sp, sp, Operand(2 * kPointerSize));
    sw(src1, MemOperand(sp, 1 * kPointerSize));
    sw(src2, MemOperand(sp, 0 * kPointerSize));
  }
  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Subu(sp, sp, Operand(3 * kPointerSize));
    sw(src1, MemOperand(sp, 2 * kPointerSize));
    sw(src2, MemOperand(sp, 1 * kPointerSize));
    sw(src3, MemOperand(sp, 0 * kPointerSize));
  }
  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Subu(sp, sp, Operand(4 * kPointerSize));
    sw(src1, MemOperand(sp, 3 * kPointerSize));
    sw(src2, MemOperand(sp, 2 * kPointerSize));
    sw(src3, MemOperand(sp, 1 * kPointerSize));
    sw(src4, MemOperand(sp, 0 * kPointerSize));
  }
  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Subu(sp, sp, Operand(kPointerSize));
    sw(src, MemOperand(sp, 0));
  }
  // Pops multiple values from the stack and loads them into the
  // registers specified in regs. Pop order is the opposite as in MultiPush.
  void MultiPop(RegList regs);
  void MultiPopReversed(RegList regs);

  void MultiPopFPU(RegList regs);
  void MultiPopReversedFPU(RegList regs);
  void pop(Register dst) {
    lw(dst, MemOperand(sp, 0));
    Addu(sp, sp, Operand(kPointerSize));
  }
  void Pop(Register dst) { pop(dst); }
  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(!src1.is(src2));
    lw(src2, MemOperand(sp, 0 * kPointerSize));
    lw(src1, MemOperand(sp, 1 * kPointerSize));
    Addu(sp, sp, 2 * kPointerSize);
  }
  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    lw(src3, MemOperand(sp, 0 * kPointerSize));
    lw(src2, MemOperand(sp, 1 * kPointerSize));
    lw(src1, MemOperand(sp, 2 * kPointerSize));
    Addu(sp, sp, 3 * kPointerSize);
  }
  void Pop(uint32_t count = 1) {
    Addu(sp, sp, Operand(count * kPointerSize));
  }
  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);
  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
  // from C.
  // Does not handle errors.
  void FlushICache(Register address, unsigned instructions);
  // MIPS32 R2 instruction macro.
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);

  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);
  // FP32 mode: Move the general-purpose register into
  // the high part of the double-register pair.
  // FP64 mode: Move the general-purpose register into
  // the higher 32 bits of the 64-bit coprocessor register,
  // while leaving the low bits unchanged.
  void Mthc1(Register rt, FPURegister fs);

  // FP32 mode: Move the high part of the double-register pair into
  // the general-purpose register.
  // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
  // the general-purpose register.
  void Mfhc1(Register rt, FPURegister fs);
  // Wrapper function for the different cmp/branch types.
  void BranchF(Label* target,
               Label* nan,
               Condition cc,
               FPURegister cmp1,
               FPURegister cmp2,
               BranchDelaySlot bd = PROTECT);

  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
  inline void BranchF(BranchDelaySlot bd,
                      Label* target,
                      Label* nan,
                      Condition cc,
                      FPURegister cmp1,
                      FPURegister cmp2) {
    BranchF(target, nan, cc, cmp1, cmp2, bd);
  }
  // Truncates a double using a specific rounding mode, and writes the value
  // to the result register.
  // The except_flag will contain any exceptions caused by the instruction.
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
  // exception is masked.
  void EmitFPUTruncate(FPURoundingMode rounding_mode,
                       Register result,
                       DoubleRegister double_input,
                       Register scratch,
                       DoubleRegister double_scratch,
                       Register except_flag,
                       CheckForInexactConversion check_inexact
                           = kDontCheckForInexactConversion);
  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-mips.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DoubleRegister input,
                                  Label* done);
  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);
  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);
  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMA-262 9.5: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch,
                         Label* not_int32);
  // Loads the number from object into the dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  FPURegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);
  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32Double(Register object,
                               DoubleRegister double_dst,
                               Register heap_number_map,
                               Register scratch1,
                               Register scratch2,
                               FPURegister double_scratch,
                               Label* not_int32);
  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         FPURegister double_scratch0,
                         FPURegister double_scratch1,
                         Label* not_int32);
  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles,
                      int stack_space = 0);
  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles,
                      Register arg_count,
                      bool restore_context,
                      bool do_return = NO_EMIT_RETURN);
  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  void LoadContext(Register dst, int context_chain_length);
  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
  }
  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);
  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  void IsInstanceJSObjectType(Register map,
                              Register scratch,
                              Label* fail);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);
  // -------------------------------------------------------------------------
  // Debugger Support.

  void DebugBreak();
  // -------------------------------------------------------------------------
  // Exception handling.

  // Push a new try handler and link into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  // Must preserve the result register.
  void PopTryHandler();

  // Passes thrown value to the handler of top of the try handler chain.
  void Throw(Register value);

  // Propagates an uncatchable exception to the top of the current JS stack's
  // handler chain.
  void ThrowUncatchable(Register value);
  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst, Register src, RegList temps, int field_count);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);
  // Initialize fields with filler values. Fields starting at |start_offset|
  // up to but not including |end_offset| are overwritten with the value in
  // |filler|. At the end of the loop, |start_offset| takes the value of
  // |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);
  // -------------------------------------------------------------------------
  // Support functions.

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               bool miss_on_bound_function = false);
  void GetObjectType(Register function,
                     Register map,
                     Register type_reg);
  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);
  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   Register scratch2,
                                   Register scratch3,
                                   Label* fail,
                                   int elements_offset = 0);
  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
  // compares are required, the compare sequences branches to early_success.
  void CompareMapAndBranch(Register obj,
                           Register scratch,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMapAndBranch(Register obj_map,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);
  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);
  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object).
  void DispatchMap(Register obj,
                   Register scratch,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);
  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type,
                               Register result) {
    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    DCHECK_EQ(0, kStringTag);
    return eq;
  }
  // Picks out an array index from the hash field.
  //
  // hash - holds the index's hash. Clobbered.
  // index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);
  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
  // Load the value of a number object into a FPU double register. If the
  // object is not a number a jump to the label not_number is performed
  // and the FPU double register is unchanged.
  void ObjectToDoubleFPURegister(
      Register object,
      FPURegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
  // Load the value of a smi object into a FPU double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleFPURegister(Register smi,
                              FPURegister value,
                              Register scratch1);
  // -------------------------------------------------------------------------
  // Overflow handling functions.
  // Usage: first call the appropriate arithmetic function, then call one of the
  // jump functions with the overflow_dst register as the second parameter.

  void AdduAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void AdduAndCheckForOverflow(Register dst, Register left,
                               const Operand& right, Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst, Register left,
                               const Operand& right, Register overflow_dst,
                               Register scratch = at);
  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }

  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }
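  // A minimal usage sketch (hypothetical registers; overflow_dst becomes
  // negative on overflow, which the lt/ge checks above rely on):
  //   AdduAndCheckForOverflow(v0, a0, a1, t3);
  //   BranchOnOverflow(&overflow, t3);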
  // -------------------------------------------------------------------------
  // Runtime calls.

  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }

  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                COND_ARGS);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, COND_ARGS);

#undef COND_ARGS

  void CallJSExitStub(CodeStub* stub);
  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }
  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments,
                             BranchDelaySlot bd = PROTECT);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);
  // Before calling a C-function from generated code, align arguments on stack
  // and add space for the four mips argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument slots, using the helper CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);
  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
  // Arguments 5..n are stored to stack using the following:
  //  sw(t0, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
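  // A typical call sequence, sketched with hypothetical arguments ('ref' is
  // an ExternalReference for the callee):
  //   PrepareCallCFunction(5, 0, t0);       // 5 word-sized arguments.
  //   li(a0, Operand(1));                   // Arguments 1-4 in a0..a3.
  //   sw(t1, CFunctionArgumentOperand(5));  // Argument 5 on the stack.
  //   CallCFunction(ref, 5);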
  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);
  // There are two ways of passing double arguments on MIPS, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);
  // Calls an API function. Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions. Restores context. stack_space
  // - space to be unwound on exit (includes the call JS arguments space and
  // the additional space allocated for the fast call).
  void CallApiFunctionAndReturn(Register function_address,
                                ExternalReference thunk_ref,
                                int stack_space,
                                MemOperand return_value_operand,
                                MemOperand* context_restore_operand);
  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               BranchDelaySlot bd = PROTECT);
  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());
  // Store the code object for the given builtin in the target register and
  // setup the function in a1.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  struct Unresolved {
    int pc;
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }
  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and at gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
  // -------------------------------------------------------------------------
  // StatsCounter support.

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  // -------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);
  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);
  // ---------------------------------------------------------------------------
  // Number utilities.

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);
  // -------------------------------------------------------------------------
  // Smi utilities.

  void SmiTag(Register reg) {
    Addu(reg, reg, reg);
  }
  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  void SmiTag(Register dst, Register src) {
    Addu(dst, src, src);
  }
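  // Smi tagging on 32-bit V8 is a left shift by one (kSmiTag == 0,
  // kSmiTagSize == 1), so adding a register to itself performs the shift
  // and lets the overflow variants above detect out-of-range values.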
  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch.
  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
    TrySmiTag(reg, reg, scratch, not_a_smi);
  }
  void TrySmiTag(Register dst,
                 Register src,
                 Register scratch,
                 Label* not_a_smi) {
    SmiTagCheckOverflow(at, src, scratch);
    BranchOnOverflow(not_a_smi, scratch);
    mov(dst, at);
  }
  void SmiUntag(Register reg) {
    sra(reg, reg, kSmiTagSize);
  }

  void SmiUntag(Register dst, Register src) {
    sra(dst, src, kSmiTagSize);
  }
  // Test if the register contains a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }
  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);
  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);
  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);
  // -------------------------------------------------------------------------
  // String utilities.

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);
  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);
  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);
  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 Register scratch,
                                 uint32_t encoding_mask);
  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
                                                    Register second,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);
  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);
  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
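  // For example, DecodeField<Map::ElementsKindBits>(t0, t1) extracts the
  // elements-kind bits of a map's bit field into t0 (registers chosen here
  // purely for illustration).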
  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift << kSmiTagSize;
    STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
    STATIC_ASSERT(kSmiTag == 0);
    if (shift < kSmiTagSize) {
      sll(dst, src, kSmiTagSize - shift);
      And(dst, dst, Operand(mask));
    } else if (shift > kSmiTagSize) {
      srl(dst, src, shift - kSmiTagSize);
      And(dst, dst, Operand(mask));
    } else {
      And(dst, src, Operand(mask));
    }
  }
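  // E.g. with a hypothetical field at kShift == 3 and kSmiTagSize == 1, the
  // value is shifted right by 2 and masked, yielding the field already in
  // smi form.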
  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }
  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);
  // Patch the relocated value (lui/ori pair).
  void PatchRelocatedValue(Register li_location,
                           Register scratch,
                           Register new_value);
  // Get the relocated value (loaded data) from the lui/ori pair.
  void GetRelocatedValue(Register li_location,
                         Register value,
                         Register scratch);
  // Expects object in a0 and returns map with validated enum cache
  // in a0. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value, Label* call_runtime);
  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, jump to allocation_memento_present.
  void TestJSArrayForAllocationMemento(
      Register receiver_reg,
      Register scratch_reg,
      Label* no_memento_found,
      Condition cond = al,
      Label* allocation_memento_present = NULL);
  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found, eq, memento_found);
    bind(&no_memento_found);
  }
  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);
 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);
  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void J(Label* L, BranchDelaySlot bdslot);
  void Jr(Label* L, BranchDelaySlot bdslot);
  void Jalr(Label* L, BranchDelaySlot bdslot);
  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  // Get the code for the given builtin. Indicates in the 'resolved' flag
  // whether the function could be resolved.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);
  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);
  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);
  // Helper for throwing exceptions. Compute a handler address and jump to
  // it. See the implementation for register usage.
  void JumpToHandlerEntry();
  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
  bool generating_stub_;
  bool has_frame_;
  bool has_double_zero_reg_set_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(byte* address,
              int instructions,
              FlushICache flush_cache = FLUSH);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }
  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction leaving the rest of the
  // current instruction unchanged.
  void ChangeBranchCondition(Condition cond);
 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Number of bytes of the expected patch size.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I-cache after patching.
};
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} }  // namespace v8::internal

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_