1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
6 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
8 #include "src/assembler.h"
9 #include "src/globals.h"
10 #include "src/mips64/assembler-mips64.h"
15 // Forward declaration.
18 // Reserved Register Usage Summary.
20 // Registers t8, t9, and at are reserved for use by the MacroAssembler.
22 // The programmer should know that the MacroAssembler may clobber these three,
23 // but won't touch other registers except in special cases.
25 // Per the MIPS ABI, register t9 must be used for indirect function call
26 // via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
27 // trying to update gp register for position-independent-code. Whenever
28 // MIPS generated code calls C code, it must be via t9 register.
31 // Flags used for LeaveExitFrame function.
32 enum LeaveExitFrameMode {
34 NO_EMIT_RETURN = false
37 // Flags used for AllocateHeapNumber
45 // Flags used for the ObjectToDoubleFPURegister function.
46 enum ObjectToDoubleFlags {
48 NO_OBJECT_TO_DOUBLE_FLAGS = 0,
49 // Object is known to be a non smi.
50 OBJECT_NOT_SMI = 1 << 0,
51 // Don't load NaNs or infinities, branch to the non number case instead.
52 AVOID_NANS_AND_INFINITIES = 1 << 1
55 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
56 enum BranchDelaySlot {
61 // Flags used for the li macro-assembler function.
63 // If the constant value can be represented in just 16 bits, then
64 // optimize the li to use a single instruction, rather than lui/ori/dsll
67 // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
68 // could be loaded with just one, so that this value is patchable later.
70 // For address loads only 4 instructions are required. Used to mark
71 // constant load that will be used as address without relocation
72 // information. It ensures predictable code size, so specific sites
73 // in code are patchable.
// Controls whether RecordWrite-style helpers also emit the remembered-set
// update (see the remembered_set_action parameters on RecordWriteField etc.).
78 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Controls whether RecordWrite-style helpers emit an inline smi check on the
// stored value (see the smi_check parameters on RecordWriteField etc.).
79 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
80 enum PointersToHereCheck {
81 kPointersToHereMaybeInteresting,
82 kPointersToHereAreAlwaysInteresting
// Whether the return-address register (ra) has already been saved; presumably
// consumed by the write-barrier helpers -- TODO(review): confirm call sites.
84 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
// Returns a general-purpose register distinct from all of the (up to six)
// given registers; unused slots default to no_reg.
86 Register GetRegisterThatIsNotOneOf(Register reg1,
87 Register reg2 = no_reg,
88 Register reg3 = no_reg,
89 Register reg4 = no_reg,
90 Register reg5 = no_reg,
91 Register reg6 = no_reg);
93 bool AreAliased(Register reg1,
95 Register reg3 = no_reg,
96 Register reg4 = no_reg,
97 Register reg5 = no_reg,
98 Register reg6 = no_reg,
99 Register reg7 = no_reg,
100 Register reg8 = no_reg);
103 // -----------------------------------------------------------------------------
104 // Static helper functions.
106 inline MemOperand ContextOperand(Register context, int index) {
107 return MemOperand(context, Context::SlotOffset(index));
111 inline MemOperand GlobalObjectOperand() {
112 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
116 // Generate a MemOperand for loading a field from an object.
117 inline MemOperand FieldMemOperand(Register object, int offset) {
118 return MemOperand(object, offset - kHeapObjectTag);
122 inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
123 // Assumes that Smis are shifted by 32 bits and little endianness.
124 STATIC_ASSERT(kSmiShift == 32);
125 return MemOperand(rm, offset + (kSmiShift / kBitsPerByte));
129 inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
130 return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
134 // Generate a MemOperand for storing arguments 5..N on the stack
135 // when calling CallCFunction().
136 // TODO(plind): Currently ONLY used for O32. Should be fixed for
137 // n64, and used in RegExp code, and other places
138 // with more than 8 arguments.
139 inline MemOperand CFunctionArgumentOperand(int index) {
140 DCHECK(index > kCArgSlotCount);
141 // Argument 5 takes the slot just past the four Arg-slots.
142 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
143 return MemOperand(sp, offset);
147 // MacroAssembler implements a collection of frequently used macros.
148 class MacroAssembler: public Assembler {
150 // The isolate parameter can be NULL if the macro assembler should
151 // not use isolate-dependent functionality. In this case, it's the
152 // responsibility of the caller to never invoke such function on the
154 MacroAssembler(Isolate* isolate, void* buffer, int size);
157 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
158 #define COND_ARGS cond, r1, r2
160 // Cases when relocation is not needed.
161 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
162 void Name(target_type target, BranchDelaySlot bd = PROTECT); \
163 inline void Name(BranchDelaySlot bd, target_type target) { \
166 void Name(target_type target, \
168 BranchDelaySlot bd = PROTECT); \
169 inline void Name(BranchDelaySlot bd, \
170 target_type target, \
172 Name(target, COND_ARGS, bd); \
175 #define DECLARE_BRANCH_PROTOTYPES(Name) \
176 DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
177 DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
179 DECLARE_BRANCH_PROTOTYPES(Branch)
180 DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
181 DECLARE_BRANCH_PROTOTYPES(BranchShort)
183 #undef DECLARE_BRANCH_PROTOTYPES
184 #undef COND_TYPED_ARGS
188 // Jump, Call, and Ret pseudo instructions implementing inter-working.
189 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
190 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
192 void Jump(Register target, COND_ARGS);
193 void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
194 void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
195 void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
196 static int CallSize(Register target, COND_ARGS);
197 void Call(Register target, COND_ARGS);
198 static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
199 void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
200 int CallSize(Handle<Code> code,
201 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
202 TypeFeedbackId ast_id = TypeFeedbackId::None(),
204 void Call(Handle<Code> code,
205 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
206 TypeFeedbackId ast_id = TypeFeedbackId::None(),
209 inline void Ret(BranchDelaySlot bd, Condition cond = al,
210 Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
211 Ret(cond, rs, rt, bd);
214 void Branch(Label* L,
217 Heap::RootListIndex index,
218 BranchDelaySlot bdslot = PROTECT);
222 // Emit code to discard a non-negative number of pointer-sized elements
223 // from the stack, clobbering only the sp register.
225 Condition cond = cc_always,
226 Register reg = no_reg,
227 const Operand& op = Operand(no_reg));
229 // Trivial case of DropAndRet that utilizes the delay slot and only emits
231 void DropAndRet(int drop);
233 void DropAndRet(int drop,
238 // Swap two registers. If the scratch register is omitted then a slightly
239 // less efficient form using xor instead of mov is emitted.
240 void Swap(Register reg1, Register reg2, Register scratch = no_reg);
242 void Call(Label* target);
244 inline void Move(Register dst, Register src) {
250 inline void Move(FPURegister dst, FPURegister src) {
256 inline void Move(Register dst_low, Register dst_high, FPURegister src) {
258 mfhc1(dst_high, src);
261 inline void FmoveHigh(Register dst_high, FPURegister src) {
262 mfhc1(dst_high, src);
265 inline void FmoveHigh(FPURegister dst, Register src_high) {
266 mthc1(src_high, dst);
269 inline void FmoveLow(Register dst_low, FPURegister src) {
273 void FmoveLow(FPURegister dst, Register src_low);
275 inline void Move(FPURegister dst, Register src_low, Register src_high) {
277 mthc1(src_high, dst);
280 void Move(FPURegister dst, float imm);
281 void Move(FPURegister dst, double imm);
284 void Movz(Register rd, Register rs, Register rt);
285 void Movn(Register rd, Register rs, Register rt);
286 void Movt(Register rd, Register rs, uint16_t cc = 0);
287 void Movf(Register rd, Register rs, uint16_t cc = 0);
289 void Clz(Register rd, Register rs);
291 // Jump unconditionally to given label.
292 // We NEED a nop in the branch delay slot, as it is used by v8, for example in
293 // CodeGenerator::ProcessDeferred().
294 // Currently the branch delay slot is filled by the MacroAssembler.
295 // Use rather b(Label) for code generation.
300 void Load(Register dst, const MemOperand& src, Representation r);
301 void Store(Register src, const MemOperand& dst, Representation r);
303 // Load an object from the root table.
304 void LoadRoot(Register destination,
305 Heap::RootListIndex index);
306 void LoadRoot(Register destination,
307 Heap::RootListIndex index,
308 Condition cond, Register src1, const Operand& src2);
310 // Store an object to the root table.
311 void StoreRoot(Register source,
312 Heap::RootListIndex index);
313 void StoreRoot(Register source,
314 Heap::RootListIndex index,
315 Condition cond, Register src1, const Operand& src2);
317 // ---------------------------------------------------------------------------
320 void IncrementalMarkingRecordWriteHelper(Register object,
324 enum RememberedSetFinalAction {
330 // Record in the remembered set the fact that we have a pointer to new space
331 // at the address pointed to by the addr register. Only works if addr is not
333 void RememberedSetHelper(Register object, // Used for debug code.
336 SaveFPRegsMode save_fp,
337 RememberedSetFinalAction and_then);
339 void CheckPageFlag(Register object,
343 Label* condition_met);
345 // Check if object is in new space. Jumps if the object is not in new space.
346 // The register scratch can be object itself, but it will be clobbered.
347 void JumpIfNotInNewSpace(Register object,
350 InNewSpace(object, scratch, ne, branch);
353 // Check if object is in new space. Jumps if the object is in new space.
354 // The register scratch can be object itself, but scratch will be clobbered.
355 void JumpIfInNewSpace(Register object,
358 InNewSpace(object, scratch, eq, branch);
361 // Check if an object has a given incremental marking color.
362 void HasColor(Register object,
369 void JumpIfBlack(Register object,
374 // Checks the color of an object. If the object is already grey or black
375 // then we just fall through, since it is already live. If it is white and
376 // we can determine that it doesn't need to be scanned, then we just mark it
377 // black and fall through. For the rest we jump to the label so the
378 // incremental marker can fix its assumptions.
379 void EnsureNotWhite(Register object,
383 Label* object_is_white_and_not_data);
385 // Detects conservatively whether an object is data-only, i.e. it does not need to
386 // be scanned by the garbage collector.
387 void JumpIfDataObject(Register value,
389 Label* not_data_object);
391 // Notify the garbage collector that we wrote a pointer into an object.
392 // |object| is the object being stored into, |value| is the object being
393 // stored. value and scratch registers are clobbered by the operation.
394 // The offset is the offset from the start of the object, not the offset from
395 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
396 void RecordWriteField(
402 SaveFPRegsMode save_fp,
403 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
404 SmiCheck smi_check = INLINE_SMI_CHECK,
405 PointersToHereCheck pointers_to_here_check_for_value =
406 kPointersToHereMaybeInteresting);
408 // As above, but the offset has the tag presubtracted. For use with
409 // MemOperand(reg, off).
410 inline void RecordWriteContextSlot(
416 SaveFPRegsMode save_fp,
417 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
418 SmiCheck smi_check = INLINE_SMI_CHECK,
419 PointersToHereCheck pointers_to_here_check_for_value =
420 kPointersToHereMaybeInteresting) {
421 RecordWriteField(context,
422 offset + kHeapObjectTag,
427 remembered_set_action,
429 pointers_to_here_check_for_value);
432 void RecordWriteForMap(
437 SaveFPRegsMode save_fp);
439 // For a given |object| notify the garbage collector that the slot |address|
440 // has been written. |value| is the object being stored. The value and
441 // address registers are clobbered by the operation.
447 SaveFPRegsMode save_fp,
448 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
449 SmiCheck smi_check = INLINE_SMI_CHECK,
450 PointersToHereCheck pointers_to_here_check_for_value =
451 kPointersToHereMaybeInteresting);
454 // ---------------------------------------------------------------------------
455 // Inline caching support.
457 // Generate code for checking access rights - used for security checks
458 // on access to global objects across environments. The holder register
459 // is left untouched, whereas both scratch registers are clobbered.
460 void CheckAccessGlobalProxy(Register holder_reg,
464 void GetNumberHash(Register reg0, Register scratch);
466 void LoadFromNumberDictionary(Label* miss,
475 inline void MarkCode(NopMarkerTypes type) {
479 // Check if the given instruction is a 'type' marker.
480 // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
481 // nop(type)). These instructions are generated to mark special location in
482 // the code, like some special IC code.
483 static inline bool IsMarkedCode(Instr instr, int type) {
484 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
485 return IsNop(instr, type);
489 static inline int GetCodeMarker(Instr instr) {
490 uint32_t opcode = ((instr & kOpcodeMask));
491 uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
492 uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
493 uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
495 // Return <n> if we have a sll zero_reg, zero_reg, n
497 bool sllzz = (opcode == SLL &&
498 rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
499 rs == static_cast<uint32_t>(ToNumber(zero_reg)));
501 (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
502 DCHECK((type == -1) ||
503 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
509 // ---------------------------------------------------------------------------
510 // Allocation support.
512 // Allocate an object in new space or old pointer space. The object_size is
513 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
514 // is passed. If the space is exhausted control continues at the gc_required
515 // label. The allocated object is returned in result. If the flag
516 // tag_allocated_object is true the result is tagged as a heap object.
517 // All registers are clobbered also when control continues at the gc_required
519 void Allocate(int object_size,
524 AllocationFlags flags);
526 void Allocate(Register object_size,
531 AllocationFlags flags);
533 // Undo allocation in new space. The object passed and objects allocated after
534 // it will no longer be allocated. The caller must make sure that no pointers
535 // are left to the object(s) no longer allocated as they would be invalid when
536 // allocation is undone.
537 void UndoAllocationInNewSpace(Register object, Register scratch);
540 void AllocateTwoByteString(Register result,
546 void AllocateOneByteString(Register result, Register length,
547 Register scratch1, Register scratch2,
548 Register scratch3, Label* gc_required);
549 void AllocateTwoByteConsString(Register result,
554 void AllocateOneByteConsString(Register result, Register length,
555 Register scratch1, Register scratch2,
557 void AllocateTwoByteSlicedString(Register result,
562 void AllocateOneByteSlicedString(Register result, Register length,
563 Register scratch1, Register scratch2,
566 // Allocates a heap number or jumps to the gc_required label if the young
567 // space is full and a scavenge is needed. All registers are clobbered also
568 // when control continues at the gc_required label.
569 void AllocateHeapNumber(Register result,
572 Register heap_number_map,
574 TaggingMode tagging_mode = TAG_RESULT,
575 MutableMode mode = IMMUTABLE);
577 void AllocateHeapNumberWithValue(Register result,
583 // ---------------------------------------------------------------------------
584 // Instruction macros.
586 #define DEFINE_INSTRUCTION(instr) \
587 void instr(Register rd, Register rs, const Operand& rt); \
588 void instr(Register rd, Register rs, Register rt) { \
589 instr(rd, rs, Operand(rt)); \
591 void instr(Register rs, Register rt, int32_t j) { \
592 instr(rs, rt, Operand(j)); \
595 #define DEFINE_INSTRUCTION2(instr) \
596 void instr(Register rs, const Operand& rt); \
597 void instr(Register rs, Register rt) { \
598 instr(rs, Operand(rt)); \
600 void instr(Register rs, int32_t j) { \
601 instr(rs, Operand(j)); \
604 DEFINE_INSTRUCTION(Addu);
605 DEFINE_INSTRUCTION(Daddu);
606 DEFINE_INSTRUCTION(Div);
607 DEFINE_INSTRUCTION(Divu);
608 DEFINE_INSTRUCTION(Ddivu);
609 DEFINE_INSTRUCTION(Mod);
610 DEFINE_INSTRUCTION(Modu);
611 DEFINE_INSTRUCTION(Ddiv);
612 DEFINE_INSTRUCTION(Subu);
613 DEFINE_INSTRUCTION(Dsubu);
614 DEFINE_INSTRUCTION(Dmod);
615 DEFINE_INSTRUCTION(Dmodu);
616 DEFINE_INSTRUCTION(Mul);
617 DEFINE_INSTRUCTION(Mulh);
618 DEFINE_INSTRUCTION(Mulhu);
619 DEFINE_INSTRUCTION(Dmul);
620 DEFINE_INSTRUCTION(Dmulh);
621 DEFINE_INSTRUCTION2(Mult);
622 DEFINE_INSTRUCTION2(Dmult);
623 DEFINE_INSTRUCTION2(Multu);
624 DEFINE_INSTRUCTION2(Dmultu);
625 DEFINE_INSTRUCTION2(Div);
626 DEFINE_INSTRUCTION2(Ddiv);
627 DEFINE_INSTRUCTION2(Divu);
628 DEFINE_INSTRUCTION2(Ddivu);
630 DEFINE_INSTRUCTION(And);
631 DEFINE_INSTRUCTION(Or);
632 DEFINE_INSTRUCTION(Xor);
633 DEFINE_INSTRUCTION(Nor);
634 DEFINE_INSTRUCTION2(Neg);
636 DEFINE_INSTRUCTION(Slt);
637 DEFINE_INSTRUCTION(Sltu);
639 // MIPS32 R2 instruction macro.
640 DEFINE_INSTRUCTION(Ror);
641 DEFINE_INSTRUCTION(Dror);
643 #undef DEFINE_INSTRUCTION
644 #undef DEFINE_INSTRUCTION2
646 void Pref(int32_t hint, const MemOperand& rs);
649 // ---------------------------------------------------------------------------
650 // Pseudo-instructions.
// Pseudo-instruction: register-to-register move, implemented as OR with
// zero_reg.
652 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
654 void Ulw(Register rd, const MemOperand& rs);
655 void Usw(Register rd, const MemOperand& rs);
656 void Uld(Register rd, const MemOperand& rs, Register scratch = at);
657 void Usd(Register rd, const MemOperand& rs, Register scratch = at);
659 // Load int32 in the rd register.
660 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
661 inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
662 li(rd, Operand(j), mode);
664 void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
666 // Push multiple registers on the stack.
667 // Registers are saved in numerical order, with higher numbered registers
668 // saved in higher memory addresses.
669 void MultiPush(RegList regs);
670 void MultiPushReversed(RegList regs);
672 void MultiPushFPU(RegList regs);
673 void MultiPushReversedFPU(RegList regs);
675 void push(Register src) {
676 Daddu(sp, sp, Operand(-kPointerSize));
677 sd(src, MemOperand(sp, 0));
// Capitalized alias for push().
679 void Push(Register src) { push(src); }
682 void Push(Handle<Object> handle);
// Wraps the raw Smi in a Handle (using this assembler's isolate), then pushes.
683 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
685 // Push two registers. Pushes leftmost register first (to highest address).
686 void Push(Register src1, Register src2) {
687 Dsubu(sp, sp, Operand(2 * kPointerSize));
688 sd(src1, MemOperand(sp, 1 * kPointerSize));
689 sd(src2, MemOperand(sp, 0 * kPointerSize));
692 // Push three registers. Pushes leftmost register first (to highest address).
693 void Push(Register src1, Register src2, Register src3) {
694 Dsubu(sp, sp, Operand(3 * kPointerSize));
695 sd(src1, MemOperand(sp, 2 * kPointerSize));
696 sd(src2, MemOperand(sp, 1 * kPointerSize));
697 sd(src3, MemOperand(sp, 0 * kPointerSize));
700 // Push four registers. Pushes leftmost register first (to highest address).
701 void Push(Register src1, Register src2, Register src3, Register src4) {
702 Dsubu(sp, sp, Operand(4 * kPointerSize));
703 sd(src1, MemOperand(sp, 3 * kPointerSize));
704 sd(src2, MemOperand(sp, 2 * kPointerSize));
705 sd(src3, MemOperand(sp, 1 * kPointerSize));
706 sd(src4, MemOperand(sp, 0 * kPointerSize));
709 void Push(Register src, Condition cond, Register tst1, Register tst2) {
710 // Since we don't have conditional execution we use a Branch.
711 Branch(3, cond, tst1, Operand(tst2));
712 Dsubu(sp, sp, Operand(kPointerSize));
713 sd(src, MemOperand(sp, 0));
716 void PushRegisterAsTwoSmis(Register src, Register scratch = at);
717 void PopRegisterAsTwoSmis(Register dst, Register scratch = at);
719 // Pops multiple values from the stack and load them in the
720 // registers specified in regs. Pop order is the opposite as in MultiPush.
721 void MultiPop(RegList regs);
722 void MultiPopReversed(RegList regs);
724 void MultiPopFPU(RegList regs);
725 void MultiPopReversedFPU(RegList regs);
727 void pop(Register dst) {
728 ld(dst, MemOperand(sp, 0));
729 Daddu(sp, sp, Operand(kPointerSize));
// Capitalized alias for pop().
731 void Pop(Register dst) { pop(dst); }
733 // Pop two registers. Pops rightmost register first (from lower address).
734 void Pop(Register src1, Register src2) {
735 DCHECK(!src1.is(src2));
736 ld(src2, MemOperand(sp, 0 * kPointerSize));
737 ld(src1, MemOperand(sp, 1 * kPointerSize));
738 Daddu(sp, sp, 2 * kPointerSize);
741 // Pop three registers. Pops rightmost register first (from lower address).
742 void Pop(Register src1, Register src2, Register src3) {
743 ld(src3, MemOperand(sp, 0 * kPointerSize));
744 ld(src2, MemOperand(sp, 1 * kPointerSize));
745 ld(src1, MemOperand(sp, 2 * kPointerSize));
746 Daddu(sp, sp, 3 * kPointerSize);
749 void Pop(uint32_t count = 1) {
750 Daddu(sp, sp, Operand(count * kPointerSize));
753 // Push and pop the registers that can hold pointers, as defined by the
754 // RegList constant kSafepointSavedRegisters.
755 void PushSafepointRegisters();
756 void PopSafepointRegisters();
757 // Store value in register src in the safepoint stack slot for
759 void StoreToSafepointRegisterSlot(Register src, Register dst);
760 // Load the value of the src register from its safepoint stack slot
761 // into register dst.
762 void LoadFromSafepointRegisterSlot(Register dst, Register src);
764 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
766 // Does not handle errors.
767 void FlushICache(Register address, unsigned instructions);
769 // MIPS64 R2 instruction macro.
770 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
771 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
772 void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
774 // ---------------------------------------------------------------------------
775 // FPU macros. These do not handle special cases like NaN or +- inf.
777 // Convert unsigned word to double.
778 void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
779 void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
781 // Convert double to unsigned long.
782 void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
784 void Trunc_l_d(FPURegister fd, FPURegister fs);
785 void Round_l_d(FPURegister fd, FPURegister fs);
786 void Floor_l_d(FPURegister fd, FPURegister fs);
787 void Ceil_l_d(FPURegister fd, FPURegister fs);
789 // Convert double to unsigned word.
790 void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
791 void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
793 void Trunc_w_d(FPURegister fd, FPURegister fs);
794 void Round_w_d(FPURegister fd, FPURegister fs);
795 void Floor_w_d(FPURegister fd, FPURegister fs);
796 void Ceil_w_d(FPURegister fd, FPURegister fs);
798 void Madd_d(FPURegister fd,
802 FPURegister scratch);
804 // Wrapper function for the different cmp/branch types.
805 void BranchF(Label* target,
810 BranchDelaySlot bd = PROTECT);
812 // Alternate (inline) version for better readability with USE_DELAY_SLOT.
813 inline void BranchF(BranchDelaySlot bd,
819 BranchF(target, nan, cc, cmp1, cmp2, bd);
822 // Truncates a double using a specific rounding mode, and writes the value
823 // to the result register.
824 // The except_flag will contain any exceptions caused by the instruction.
825 // If check_inexact is kDontCheckForInexactConversion, then the inexact
826 // exception is masked.
827 void EmitFPUTruncate(FPURoundingMode rounding_mode,
829 DoubleRegister double_input,
831 DoubleRegister double_scratch,
832 Register except_flag,
833 CheckForInexactConversion check_inexact
834 = kDontCheckForInexactConversion);
836 // Performs a truncating conversion of a floating point number as used by
837 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
838 // succeeds, otherwise falls through if result is saturated. On return
839 // 'result' either holds answer, or is clobbered on fall through.
841 // Only public for the test code in test-code-stubs-arm.cc.
842 void TryInlineTruncateDoubleToI(Register result,
843 DoubleRegister input,
846 // Performs a truncating conversion of a floating point number as used by
847 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
848 // Exits with 'result' holding the answer.
849 void TruncateDoubleToI(Register result, DoubleRegister double_input);
851 // Performs a truncating conversion of a heap number as used by
852 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
853 // must be different registers. Exits with 'result' holding the answer.
854 void TruncateHeapNumberToI(Register result, Register object);
856 // Converts the smi or heap number in object to an int32 using the rules
857 // for ToInt32 as described in ECMA-262 9.5: the value is truncated
858 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
859 // different registers.
860 void TruncateNumberToI(Register object,
862 Register heap_number_map,
866 // Loads the number from object into dst register.
867 // If |object| is neither smi nor heap number, |not_number| is jumped to
868 // with |object| still intact.
869 void LoadNumber(Register object,
871 Register heap_number_map,
875 // Loads the number from object into double_dst in the double format.
876 // Control will jump to not_int32 if the value cannot be exactly represented
877 // by a 32-bit integer.
878 // Floating point values in the 32-bit integer range that are not exact integers
880 void LoadNumberAsInt32Double(Register object,
881 DoubleRegister double_dst,
882 Register heap_number_map,
885 FPURegister double_scratch,
888 // Loads the number from object into dst as a 32-bit integer.
889 // Control will jump to not_int32 if the object cannot be exactly represented
890 // by a 32-bit integer.
891 // Floating point values in the 32-bit integer range that are not exact integers
892 // won't be converted.
893 void LoadNumberAsInt32(Register object,
895 Register heap_number_map,
898 FPURegister double_scratch0,
899 FPURegister double_scratch1,
903 // argc - argument count to be dropped by LeaveExitFrame.
904 // save_doubles - saves FPU registers on stack, currently disabled.
905 // stack_space - extra stack space.
906 void EnterExitFrame(bool save_doubles,
907 int stack_space = 0);
909 // Leave the current exit frame.
910 void LeaveExitFrame(bool save_doubles, Register arg_count,
911 bool restore_context, bool do_return = NO_EMIT_RETURN,
912 bool argument_count_is_length = false);
914 // Get the actual activation frame alignment for target environment.
915 static int ActivationFrameAlignment();
917 // Make sure the stack is aligned. Only emits code in debug mode.
918 void AssertStackIsAligned();
920 void LoadContext(Register dst, int context_chain_length);
922 // Conditionally load the cached Array transitioned map of type
923 // transitioned_kind from the native context if the map in register
924 // map_in_out is the cached Array map in the native context of
926 void LoadTransitionedArrayMapConditional(
927 ElementsKind expected_kind,
928 ElementsKind transitioned_kind,
931 Label* no_map_match);
933 void LoadGlobalFunction(int index, Register function);
935 // Load the initial map from the global function. The registers
936 // function and map can be the same, function is then overwritten.
937 void LoadGlobalFunctionInitialMap(Register function,
941 void InitializeRootRegister() {
942 ExternalReference roots_array_start =
943 ExternalReference::roots_array_start(isolate());
944 li(kRootRegister, Operand(roots_array_start));
947 // -------------------------------------------------------------------------
948 // JavaScript invokes.
950 // Invoke the JavaScript function code by either calling or jumping.
951 void InvokeCode(Register code,
952 const ParameterCount& expected,
953 const ParameterCount& actual,
955 const CallWrapper& call_wrapper);
957 // Invoke the JavaScript function in the given register. Changes the
958 // current context to the context in the function before invoking.
959 void InvokeFunction(Register function,
960 const ParameterCount& actual,
962 const CallWrapper& call_wrapper);
964 void InvokeFunction(Register function,
965 const ParameterCount& expected,
966 const ParameterCount& actual,
968 const CallWrapper& call_wrapper);
970 void InvokeFunction(Handle<JSFunction> function,
971 const ParameterCount& expected,
972 const ParameterCount& actual,
974 const CallWrapper& call_wrapper);
977 void IsObjectJSObjectType(Register heap_object,
982 void IsInstanceJSObjectType(Register map,
986 void IsObjectJSStringType(Register object,
990 void IsObjectNameType(Register object,
994 // -------------------------------------------------------------------------
999 // -------------------------------------------------------------------------
1000 // Exception handling.
1002 // Push a new stack handler and link into stack handler chain.
1003 void PushStackHandler();
1005 // Unlink the stack handler on top of the stack from the stack handler chain.
1006 // Must preserve the result register.
1007 void PopStackHandler();
1009 // Copies a fixed number of fields of heap objects from src to dst.
1010 void CopyFields(Register dst, Register src, RegList temps, int field_count);
1012 // Copies a number of bytes from src to dst. All registers are clobbered. On
1013 // exit src and dst will point to the place just after where the last byte was
1014 // read or written and length will be zero.
1015 void CopyBytes(Register src,
1020 // Initialize fields with filler values. Fields starting at |start_offset|
1021 // not including end_offset are overwritten with the value in |filler|. At
1022 // the end the loop, |start_offset| takes the value of |end_offset|.
1023 void InitializeFieldsWithFiller(Register start_offset,
1024 Register end_offset,
1027 // -------------------------------------------------------------------------
1028 // Support functions.
1030 // Machine code version of Map::GetConstructor().
1031 // |temp| holds |result|'s map when done, and |temp2| its instance type.
1032 void GetMapConstructor(Register result, Register map, Register temp,
1035 // Try to get function prototype of a function and puts the value in
1036 // the result register. Checks that the function really is a
1037 // function and jumps to the miss label if the fast checks fail. The
1038 // function register will be untouched; the other registers may be
1040 void TryGetFunctionPrototype(Register function,
1044 bool miss_on_bound_function = false);
1046 void GetObjectType(Register function,
1050 // Check if a map for a JSObject indicates that the object has fast elements.
1051 // Jump to the specified label if it does not.
1052 void CheckFastElements(Register map,
1056 // Check if a map for a JSObject indicates that the object can have both smi
1057 // and HeapObject elements. Jump to the specified label if it does not.
1058 void CheckFastObjectElements(Register map,
1062 // Check if a map for a JSObject indicates that the object has fast smi only
1063 // elements. Jump to the specified label if it does not.
1064 void CheckFastSmiElements(Register map,
1068 // Check to see if maybe_number can be stored as a double in
1069 // FastDoubleElements. If it can, store it at the index specified by key in
1070 // the FastDoubleElements array elements. Otherwise jump to fail.
1071 void StoreNumberToDoubleElements(Register value_reg,
1073 Register elements_reg,
1077 int elements_offset = 0);
1079 // Compare an object's map with the specified map and its transitioned
1080 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
1081 // "branch_to" if the result of the comparison is "cond". If multiple map
1082 // compares are required, the compare sequences branches to early_success.
1083 void CompareMapAndBranch(Register obj,
1086 Label* early_success,
1090 // As above, but the map of the object is already loaded into the register
1091 // which is preserved by the code generated.
1092 void CompareMapAndBranch(Register obj_map,
1094 Label* early_success,
1098 // Check if the map of an object is equal to a specified map and branch to
1099 // label if not. Skip the smi check if not required (object is known to be a
1100 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
1102 void CheckMap(Register obj,
1106 SmiCheckType smi_check_type);
1109 void CheckMap(Register obj,
1111 Heap::RootListIndex index,
1113 SmiCheckType smi_check_type);
1115 // Check if the map of an object is equal to a specified weak map and branch
1116 // to a specified target if equal. Skip the smi check if not required
1117 // (object is known to be a heap object)
1118 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
1119 Handle<WeakCell> cell, Handle<Code> success,
1120 SmiCheckType smi_check_type);
1122 // If the value is a NaN, canonicalize the value else, do nothing.
1123 void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
1126 // Get value of the weak cell.
1127 void GetWeakValue(Register value, Handle<WeakCell> cell);
1129 // Load the value of the weak cell in the value register. Branch to the
// given miss label if the weak cell was cleared.
1131 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
1133 // Load and check the instance type of an object for being a string.
1134 // Loads the type into the second argument register.
1135 // Returns a condition that will be enabled if the object was a string.
1136 Condition IsObjectStringType(Register obj,
1139 ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
1140 lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
1141 And(type, type, Operand(kIsNotStringMask));
1142 DCHECK_EQ(0u, kStringTag);
1147 // Picks out an array index from the hash field.
1149 // hash - holds the index's hash. Clobbered.
1150 // index - holds the overwritten index on exit.
1151 void IndexFromHash(Register hash, Register index);
1153 // Get the number of least significant bits from a register.
1154 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1155 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
1157 // Load the value of a number object into a FPU double register. If the
1158 // object is not a number a jump to the label not_number is performed
1159 // and the FPU double register is unchanged.
1160 void ObjectToDoubleFPURegister(
1165 Register heap_number_map,
1167 ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
1169 // Load the value of a smi object into a FPU double register. The register
1170 // scratch1 can be the same register as smi in which case smi will hold the
1171 // untagged value afterwards.
1172 void SmiToDoubleFPURegister(Register smi,
1176 // -------------------------------------------------------------------------
1177 // Overflow handling functions.
1178 // Usage: first call the appropriate arithmetic function, then call one of the
1179 // jump functions with the overflow_dst register as the second parameter.
1181 void AdduAndCheckForOverflow(Register dst,
1184 Register overflow_dst,
1185 Register scratch = at);
1187 void AdduAndCheckForOverflow(Register dst, Register left,
1188 const Operand& right, Register overflow_dst,
1189 Register scratch = at);
1191 void SubuAndCheckForOverflow(Register dst,
1194 Register overflow_dst,
1195 Register scratch = at);
1197 void SubuAndCheckForOverflow(Register dst, Register left,
1198 const Operand& right, Register overflow_dst,
1199 Register scratch = at);
1201 void BranchOnOverflow(Label* label,
1202 Register overflow_check,
1203 BranchDelaySlot bd = PROTECT) {
1204 Branch(label, lt, overflow_check, Operand(zero_reg), bd);
1207 void BranchOnNoOverflow(Label* label,
1208 Register overflow_check,
1209 BranchDelaySlot bd = PROTECT) {
1210 Branch(label, ge, overflow_check, Operand(zero_reg), bd);
1213 void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
1214 Ret(lt, overflow_check, Operand(zero_reg), bd);
1217 void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
1218 Ret(ge, overflow_check, Operand(zero_reg), bd);
1221 // -------------------------------------------------------------------------
1224 // See comments at the beginning of CEntryStub::Generate.
1225 inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
1227 inline void PrepareCEntryFunction(const ExternalReference& ref) {
1228 li(a1, Operand(ref));
1231 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
1232 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
1234 // Call a code stub.
1235 void CallStub(CodeStub* stub,
1236 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1239 // Tail call a code stub (jump).
1240 void TailCallStub(CodeStub* stub, COND_ARGS);
1244 void CallJSExitStub(CodeStub* stub);
1246 // Call a runtime routine.
1247 void CallRuntime(const Runtime::Function* f,
1249 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1250 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1251 const Runtime::Function* function = Runtime::FunctionForId(id);
1252 CallRuntime(function, function->nargs, kSaveFPRegs);
1255 // Convenience function: Same as above, but takes the fid instead.
1256 void CallRuntime(Runtime::FunctionId id,
1258 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1259 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1262 // Convenience function: call an external reference.
1263 void CallExternalReference(const ExternalReference& ext,
1265 BranchDelaySlot bd = PROTECT);
1267 // Tail call of a runtime routine (jump).
1268 // Like JumpToExternalReference, but also takes care of passing the number
1270 void TailCallExternalReference(const ExternalReference& ext,
1274 // Convenience function: tail call a runtime routine (jump).
1275 void TailCallRuntime(Runtime::FunctionId fid,
1279 int CalculateStackPassedWords(int num_reg_arguments,
1280 int num_double_arguments);
1282 // Before calling a C-function from generated code, align arguments on stack
1283 // and add space for the four mips argument slots.
1284 // After aligning the frame, non-register arguments must be stored on the
1285 // stack, after the argument-slots using helper: CFunctionArgumentOperand().
1286 // The argument count assumes all arguments are word sized.
1287 // Some compilers/platforms require the stack to be aligned when calling
1289 // Needs a scratch register to do some arithmetic. This register will be
1291 void PrepareCallCFunction(int num_reg_arguments,
1292 int num_double_registers,
1294 void PrepareCallCFunction(int num_reg_arguments,
1297 // Arguments 1-4 are placed in registers a0 thru a3 respectively.
1298 // Arguments 5..n are stored to stack using following:
1299 // sw(a4, CFunctionArgumentOperand(5));
1301 // Calls a C function and cleans up the space for arguments allocated
1302 // by PrepareCallCFunction. The called function is not allowed to trigger a
1303 // garbage collection, since that might move the code and invalidate the
1304 // return address (unless this is somehow accounted for by the called
1306 void CallCFunction(ExternalReference function, int num_arguments);
1307 void CallCFunction(Register function, int num_arguments);
1308 void CallCFunction(ExternalReference function,
1309 int num_reg_arguments,
1310 int num_double_arguments);
1311 void CallCFunction(Register function,
1312 int num_reg_arguments,
1313 int num_double_arguments);
1314 void MovFromFloatResult(DoubleRegister dst);
1315 void MovFromFloatParameter(DoubleRegister dst);
1317 // There are two ways of passing double arguments on MIPS, depending on
1318 // whether soft or hard floating point ABI is used. These functions
1319 // abstract parameter passing for the three different ways we call
1320 // C functions from generated code.
1321 void MovToFloatParameter(DoubleRegister src);
1322 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1323 void MovToFloatResult(DoubleRegister src);
1325 // Jump to the builtin routine.
1326 void JumpToExternalReference(const ExternalReference& builtin,
1327 BranchDelaySlot bd = PROTECT);
1329 // Invoke specified builtin JavaScript function. Adds an entry to
1330 // the unresolved list if the name does not resolve.
1331 void InvokeBuiltin(Builtins::JavaScript id,
1333 const CallWrapper& call_wrapper = NullCallWrapper());
1335 // Store the code object for the given builtin in the target register and
1336 // setup the function in a1.
1337 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1339 // Store the function for the given builtin in the target register.
1340 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1344 uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
1348 Handle<Object> CodeObject() {
1349 DCHECK(!code_object_.is_null());
1350 return code_object_;
1353 // Emit code for a truncating division by a constant. The dividend register is
1354 // unchanged and at gets clobbered. Dividend and result must be different.
1355 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1357 // -------------------------------------------------------------------------
1358 // StatsCounter support.
1360 void SetCounter(StatsCounter* counter, int value,
1361 Register scratch1, Register scratch2);
1362 void IncrementCounter(StatsCounter* counter, int value,
1363 Register scratch1, Register scratch2);
1364 void DecrementCounter(StatsCounter* counter, int value,
1365 Register scratch1, Register scratch2);
1368 // -------------------------------------------------------------------------
1371 // Calls Abort(msg) if the condition cc is not satisfied.
1372 // Use --debug_code to enable.
1373 void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
1374 void AssertFastElements(Register elements);
1376 // Like Assert(), but always enabled.
1377 void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
1379 // Print a message to stdout and abort execution.
1380 void Abort(BailoutReason msg);
1382 // Verify restrictions about code generated in stubs.
1383 void set_generating_stub(bool value) { generating_stub_ = value; }
1384 bool generating_stub() { return generating_stub_; }
1385 void set_has_frame(bool value) { has_frame_ = value; }
1386 bool has_frame() { return has_frame_; }
1387 inline bool AllowThisStubCall(CodeStub* stub);
1389 // ---------------------------------------------------------------------------
1390 // Number utilities.
1392 // Check whether the value of reg is a power of two and not zero. If not
1393 // control continues at the label not_power_of_two. If reg is a power of two
1394 // the register scratch contains the value of (reg - 1) when control falls
1396 void JumpIfNotPowerOfTwoOrZero(Register reg,
1398 Label* not_power_of_two_or_zero);
1400 // -------------------------------------------------------------------------
1403 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1404 void SmiTagCheckOverflow(Register reg, Register overflow);
1405 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1407 void SmiTag(Register dst, Register src) {
1408 STATIC_ASSERT(kSmiTag == 0);
1409 if (SmiValuesAre32Bits()) {
1410 STATIC_ASSERT(kSmiShift == 32);
1411 dsll32(dst, src, 0);
1413 Addu(dst, src, src);
1417 void SmiTag(Register reg) {
// Try to convert int32 to smi. If the value is too large, preserve
1422 // the original value and jump to not_a_smi. Destroys scratch and
1424 void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
1425 TrySmiTag(reg, reg, scratch, not_a_smi);
1428 void TrySmiTag(Register dst,
1432 if (SmiValuesAre32Bits()) {
1435 SmiTagCheckOverflow(at, src, scratch);
1436 BranchOnOverflow(not_a_smi, scratch);
1441 void SmiUntag(Register dst, Register src) {
1442 if (SmiValuesAre32Bits()) {
1443 STATIC_ASSERT(kSmiShift == 32);
1444 dsra32(dst, src, 0);
1446 sra(dst, src, kSmiTagSize);
1450 void SmiUntag(Register reg) {
1454 // Left-shifted from int32 equivalent of Smi.
1455 void SmiScale(Register dst, Register src, int scale) {
1456 if (SmiValuesAre32Bits()) {
1457 // The int portion is upper 32-bits of 64-bit word.
1458 dsra(dst, src, kSmiShift - scale);
1460 DCHECK(scale >= kSmiTagSize);
1461 sll(dst, src, scale - kSmiTagSize);
1465 // Combine load with untagging or scaling.
1466 void SmiLoadUntag(Register dst, MemOperand src);
1468 void SmiLoadScale(Register dst, MemOperand src, int scale);
1470 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
1471 void SmiLoadWithScale(Register d_smi,
1476 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
1477 void SmiLoadUntagWithScale(Register d_int,
1483 // Test if the register contains a smi.
1484 inline void SmiTst(Register value, Register scratch) {
1485 And(scratch, value, Operand(kSmiTagMask));
1487 inline void NonNegativeSmiTst(Register value, Register scratch) {
1488 And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
1491 // Untag the source value into destination and jump if source is a smi.
1492 // Source and destination can be the same register.
1493 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1495 // Untag the source value into destination and jump if source is not a smi.
1496 // Source and destination can be the same register.
1497 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Jump if the register contains a smi.
1500 void JumpIfSmi(Register value,
1502 Register scratch = at,
1503 BranchDelaySlot bd = PROTECT);
1505 // Jump if the register contains a non-smi.
1506 void JumpIfNotSmi(Register value,
1507 Label* not_smi_label,
1508 Register scratch = at,
1509 BranchDelaySlot bd = PROTECT);
1511 // Jump if either of the registers contain a non-smi.
1512 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1513 // Jump if either of the registers contain a smi.
1514 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1516 // Abort execution if argument is a smi, enabled via --debug-code.
1517 void AssertNotSmi(Register object);
1518 void AssertSmi(Register object);
1520 // Abort execution if argument is not a string, enabled via --debug-code.
1521 void AssertString(Register object);
1523 // Abort execution if argument is not a name, enabled via --debug-code.
1524 void AssertName(Register object);
1526 // Abort execution if argument is not undefined or an AllocationSite, enabled
1527 // via --debug-code.
1528 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1530 // Abort execution if reg is not the root value with the given index,
1531 // enabled via --debug-code.
1532 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1534 // ---------------------------------------------------------------------------
1535 // HeapNumber utilities.
1537 void JumpIfNotHeapNumber(Register object,
1538 Register heap_number_map,
1540 Label* on_not_heap_number);
1542 // -------------------------------------------------------------------------
1543 // String utilities.
1545 // Generate code to do a lookup in the number string cache. If the number in
1546 // the register object is found in the cache the generated code falls through
1547 // with the result in the result register. The object and the result register
1548 // can be the same. If the number is not found in the cache the code jumps to
1549 // the label not_found with only the content of register object unchanged.
1550 void LookupNumberStringCache(Register object,
1557 // Checks if both instance types are sequential one-byte strings and jumps to
1558 // label if either is not.
1559 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1560 Register first_object_instance_type, Register second_object_instance_type,
1561 Register scratch1, Register scratch2, Label* failure);
1563 // Check if instance type is sequential one-byte string and jump to label if
1565 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1568 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1570 void EmitSeqStringSetCharCheck(Register string,
1574 uint32_t encoding_mask);
1576 // Checks if both objects are sequential one-byte strings and jumps to label
1577 // if either is not. Assumes that neither object is a smi.
1578 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
1584 // Checks if both objects are sequential one-byte strings and jumps to label
1585 // if either is not.
1586 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1589 Label* not_flat_one_byte_strings);
1591 void ClampUint8(Register output_reg, Register input_reg);
1593 void ClampDoubleToUint8(Register result_reg,
1594 DoubleRegister input_reg,
1595 DoubleRegister temp_double_reg);
1598 void LoadInstanceDescriptors(Register map, Register descriptors);
1599 void EnumLength(Register dst, Register map);
1600 void NumberOfOwnDescriptors(Register dst, Register map);
1601 void LoadAccessor(Register dst, Register holder, int accessor_index,
1602 AccessorComponent accessor);
1604 template<typename Field>
1605 void DecodeField(Register dst, Register src) {
1606 Ext(dst, src, Field::kShift, Field::kSize);
1609 template<typename Field>
1610 void DecodeField(Register reg) {
1611 DecodeField<Field>(reg, reg);
// Extract the bit field described by Field from |src| and leave the value
// smi-tagged in |dst|. Assumes 32-bit smis (value held in the upper word);
// compare SmiTag(), which uses the same dsll32 in the SmiValuesAre32Bits()
// case.
template<typename Field>
void DecodeFieldToSmi(Register dst, Register src) {
  static const int shift = Field::kShift;
  static const int mask = Field::kMask >> shift;
  // Shift the field down to bit 0 and mask off surrounding bits.
  dsrl(dst, src, shift);
  And(dst, dst, Operand(mask));
  // Move the value into the upper 32 bits, i.e. smi-tag the result.
  dsll32(dst, dst, 0);
1623 template<typename Field>
1624 void DecodeFieldToSmi(Register reg) {
1625 DecodeField<Field>(reg, reg);
1627 // Generates function and stub prologue code.
1628 void StubPrologue();
1629 void Prologue(bool code_pre_aging);
1631 // Activation support.
1632 void EnterFrame(StackFrame::Type type);
1633 void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
1634 void LeaveFrame(StackFrame::Type type);
1636 // Patch the relocated value (lui/ori pair).
1637 void PatchRelocatedValue(Register li_location,
1639 Register new_value);
// Get the relocated value (loaded data) from the lui/ori pair.
1641 void GetRelocatedValue(Register li_location,
1645 // Expects object in a0 and returns map with validated enum cache
1646 // in a0. Assumes that any other register can be used as a scratch.
1647 void CheckEnumCache(Register null_value, Label* call_runtime);
1649 // AllocationMemento support. Arrays may have an associated
1650 // AllocationMemento object that can be checked for in order to pretransition
1652 // On entry, receiver_reg should point to the array object.
1653 // scratch_reg gets clobbered.
1654 // If allocation info is present, jump to allocation_memento_present.
1655 void TestJSArrayForAllocationMemento(
1656 Register receiver_reg,
1657 Register scratch_reg,
1658 Label* no_memento_found,
1659 Condition cond = al,
1660 Label* allocation_memento_present = NULL);
1662 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1663 Register scratch_reg,
1664 Label* memento_found) {
1665 Label no_memento_found;
1666 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1667 &no_memento_found, eq, memento_found);
1668 bind(&no_memento_found);
1671 // Jumps to found label if a prototype map has dictionary elements.
1672 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1673 Register scratch1, Label* found);
1676 void CallCFunctionHelper(Register function,
1677 int num_reg_arguments,
1678 int num_double_arguments);
1680 void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
1681 void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
1683 BranchDelaySlot bdslot = PROTECT);
1684 void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
1685 void BranchAndLinkShort(Label* L, Condition cond, Register rs,
1687 BranchDelaySlot bdslot = PROTECT);
1688 void J(Label* L, BranchDelaySlot bdslot);
1689 void Jr(Label* L, BranchDelaySlot bdslot);
1690 void Jalr(Label* L, BranchDelaySlot bdslot);
1692 // Helper functions for generating invokes.
1693 void InvokePrologue(const ParameterCount& expected,
1694 const ParameterCount& actual,
1695 Handle<Code> code_constant,
1698 bool* definitely_mismatches,
1700 const CallWrapper& call_wrapper);
1702 // Get the code for the given builtin. Returns if able to resolve
1703 // the function in the 'resolved' flag.
1704 Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
1706 void InitializeNewString(Register string,
1708 Heap::RootListIndex map_index,
1712 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1713 void InNewSpace(Register object,
1715 Condition cond, // eq for new space, ne otherwise.
1718 // Helper for finding the mark bits for an address. Afterwards, the
1719 // bitmap register points at the word with the mark bits and the mask
1720 // the position of the first bit. Leaves addr_reg unchanged.
1721 inline void GetMarkBits(Register addr_reg,
1722 Register bitmap_reg,
1725 // Compute memory operands for safepoint stack slots.
1726 static int SafepointRegisterStackIndex(int reg_code);
1727 MemOperand SafepointRegisterSlot(Register reg);
1728 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1730 bool generating_stub_;
1732 bool has_double_zero_reg_set_;
1733 // This handle will be patched with the code object on installation.
1734 Handle<Object> code_object_;
1736 // Needs access to SafepointRegisterStackIndex for compiled frame
1738 friend class StandardFrame;
1742 // The code patcher is used to patch (typically) small parts of code e.g. for
1743 // debugging and other types of instrumentation. When using the code patcher
1744 // the exact number of bytes specified must be emitted. It is not legal to emit
1745 // relocation information. If any of these constraints are violated it causes
1746 // an assertion to fail.
1754 CodePatcher(byte* address,
1756 FlushICache flush_cache = FLUSH);
1757 virtual ~CodePatcher();
1759 // Macro assembler to emit code.
1760 MacroAssembler* masm() { return &masm_; }
1762 // Emit an instruction directly.
1763 void Emit(Instr instr);
1765 // Emit an address directly.
1766 void Emit(Address addr);
1768 // Change the condition part of an instruction leaving the rest of the current
1769 // instruction unchanged.
1770 void ChangeBranchCondition(Condition cond);
1773 byte* address_; // The address of the code being patched.
1774 int size_; // Number of bytes of the expected patch size.
1775 MacroAssembler masm_; // Macro assembler used to generate the code.
1776 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1781 #ifdef GENERATED_CODE_COVERAGE
1782 #define CODE_COVERAGE_STRINGIFY(x) #x
1783 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1784 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1785 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1787 #define ACCESS_MASM(masm) masm->
1790 } } // namespace v8::internal
1792 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_