1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
6 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
8 #include "src/assembler.h"
9 #include "src/globals.h"
10 #include "src/mips/assembler-mips.h"
15 // Give alias names to registers for calling conventions.
// JS return values live in v0/v1 per the MIPS calling convention.
16 const Register kReturnRegister0 = {kRegister_v0_Code};
17 const Register kReturnRegister1 = {kRegister_v1_Code};
// a1 carries the callee JSFunction; cp is the dedicated context register.
18 const Register kJSFunctionRegister = {kRegister_a1_Code};
19 const Register kContextRegister = {Register::kCpRegister};
// Interpreter dispatch state is kept in t4-t6.
20 const Register kInterpreterBytecodeOffsetRegister = {kRegister_t4_Code};
21 const Register kInterpreterBytecodeArrayRegister = {kRegister_t5_Code};
22 const Register kInterpreterDispatchTableRegister = {kRegister_t6_Code};
// Runtime-call convention: target function in a1, argument count in a0.
23 const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
24 const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};
26 // Forward declaration.
29 // Reserved Register Usage Summary.
31 // Registers t8, t9, and at are reserved for use by the MacroAssembler.
33 // The programmer should know that the MacroAssembler may clobber these three,
34 // but won't touch other registers except in special cases.
36 // Per the MIPS ABI, register t9 must be used for indirect function call
37 // via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
38 // trying to update gp register for position-independent-code. Whenever
39 // MIPS generated code calls C code, it must be via t9 register.
42 // Flags used for LeaveExitFrame function.
43 enum LeaveExitFrameMode {
45 NO_EMIT_RETURN = false
48 // Flags used for AllocateHeapNumber
56 // Flags used for the ObjectToDoubleFPURegister function.
57 enum ObjectToDoubleFlags {
59 NO_OBJECT_TO_DOUBLE_FLAGS = 0,
60 // Object is known to be a non smi.
61 OBJECT_NOT_SMI = 1 << 0,
62 // Don't load NaNs or infinities, branch to the non number case instead.
63 AVOID_NANS_AND_INFINITIES = 1 << 1
66 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
67 enum BranchDelaySlot {
72 // Flags used for the li macro-assembler function.
74 // If the constant value can be represented in just 16 bits, then
75 // optimize the li to use a single instruction, rather than lui/ori pair.
77 // Always use 2 instructions (lui/ori pair), even if the constant could
78 // be loaded with just one, so that this value is patchable later.
83 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
84 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
85 enum PointersToHereCheck {
86 kPointersToHereMaybeInteresting,
87 kPointersToHereAreAlwaysInteresting
89 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
// Returns a register distinct from each of the (up to six) registers given;
// trailing parameters default to no_reg when fewer exclusions are needed.
91 Register GetRegisterThatIsNotOneOf(Register reg1,
92 Register reg2 = no_reg,
93 Register reg3 = no_reg,
94 Register reg4 = no_reg,
95 Register reg5 = no_reg,
96 Register reg6 = no_reg);
98 bool AreAliased(Register reg1,
100 Register reg3 = no_reg,
101 Register reg4 = no_reg,
102 Register reg5 = no_reg,
103 Register reg6 = no_reg,
104 Register reg7 = no_reg,
105 Register reg8 = no_reg);
108 // -----------------------------------------------------------------------------
109 // Static helper functions.
// Builds a MemOperand addressing slot `index` within the given context object.
111 inline MemOperand ContextOperand(Register context, int index) {
112 return MemOperand(context, Context::SlotOffset(index));
// MemOperand for the global-object slot of the current context (held in cp).
116 inline MemOperand GlobalObjectOperand() {
117 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
121 // Generate a MemOperand for loading a field from an object.
122 inline MemOperand FieldMemOperand(Register object, int offset) {
123 return MemOperand(object, offset - kHeapObjectTag);
127 // Generate a MemOperand for storing arguments 5..N on the stack
128 // when calling CallCFunction().
129 inline MemOperand CFunctionArgumentOperand(int index) {
130 DCHECK(index > kCArgSlotCount);
131 // Argument 5 takes the slot just past the four Arg-slots.
132 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
133 return MemOperand(sp, offset);
137 // MacroAssembler implements a collection of frequently used macros.
138 class MacroAssembler: public Assembler {
140 // The isolate parameter can be NULL if the macro assembler should
141 // not use isolate-dependent functionality. In this case, it's the
142 // responsibility of the caller to never invoke such function on the
144 MacroAssembler(Isolate* isolate, void* buffer, int size);
147 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
148 #define COND_ARGS cond, r1, r2
150 // Cases when relocation is not needed.
151 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
152 void Name(target_type target, BranchDelaySlot bd = PROTECT); \
153 inline void Name(BranchDelaySlot bd, target_type target) { \
156 void Name(target_type target, \
158 BranchDelaySlot bd = PROTECT); \
159 inline void Name(BranchDelaySlot bd, \
160 target_type target, \
162 Name(target, COND_ARGS, bd); \
165 #define DECLARE_BRANCH_PROTOTYPES(Name) \
166 DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
167 DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
169 DECLARE_BRANCH_PROTOTYPES(Branch)
170 DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
171 DECLARE_BRANCH_PROTOTYPES(BranchShort)
173 #undef DECLARE_BRANCH_PROTOTYPES
174 #undef COND_TYPED_ARGS
178 // Jump, Call, and Ret pseudo instructions implementing inter-working.
179 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
180 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
182 void Jump(Register target, COND_ARGS);
183 void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
184 void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
185 void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
186 static int CallSize(Register target, COND_ARGS);
187 void Call(Register target, COND_ARGS);
188 static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
189 void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
190 int CallSize(Handle<Code> code,
191 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
192 TypeFeedbackId ast_id = TypeFeedbackId::None(),
194 void Call(Handle<Code> code,
195 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
196 TypeFeedbackId ast_id = TypeFeedbackId::None(),
199 inline void Ret(BranchDelaySlot bd, Condition cond = al,
200 Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
201 Ret(cond, rs, rt, bd);
204 void Branch(Label* L,
207 Heap::RootListIndex index,
208 BranchDelaySlot bdslot = PROTECT);
212 // Emit code to discard a non-negative number of pointer-sized elements
213 // from the stack, clobbering only the sp register.
215 Condition cond = cc_always,
216 Register reg = no_reg,
217 const Operand& op = Operand(no_reg));
219 // Trivial case of DropAndRet that utilizes the delay slot and only emits
221 void DropAndRet(int drop);
223 void DropAndRet(int drop,
228 // Swap two registers. If the scratch register is omitted then a slightly
229 // less efficient form using xor instead of mov is emitted.
230 void Swap(Register reg1, Register reg2, Register scratch = no_reg);
232 void Call(Label* target);
234 inline void Move(Register dst, Register src) {
240 inline void Move(FPURegister dst, FPURegister src) {
246 inline void Move(Register dst_low, Register dst_high, FPURegister src) {
248 Mfhc1(dst_high, src);
251 inline void FmoveHigh(Register dst_high, FPURegister src) {
252 Mfhc1(dst_high, src);
255 inline void FmoveHigh(FPURegister dst, Register src_high) {
256 Mthc1(src_high, dst);
259 inline void FmoveLow(Register dst_low, FPURegister src) {
263 void FmoveLow(FPURegister dst, Register src_low);
265 inline void Move(FPURegister dst, Register src_low, Register src_high) {
267 Mthc1(src_high, dst);
270 void Move(FPURegister dst, float imm);
271 void Move(FPURegister dst, double imm);
274 void Movz(Register rd, Register rs, Register rt);
275 void Movn(Register rd, Register rs, Register rt);
276 void Movt(Register rd, Register rs, uint16_t cc = 0);
277 void Movf(Register rd, Register rs, uint16_t cc = 0);
279 void Clz(Register rd, Register rs);
281 // Jump unconditionally to given label.
282 // We NEED a nop in the branch delay slot, as it is used by v8, for example in
283 // CodeGenerator::ProcessDeferred().
284 // Currently the branch delay slot is filled by the MacroAssembler.
285 // Use rather b(Label) for code generation.
290 void Load(Register dst, const MemOperand& src, Representation r);
291 void Store(Register src, const MemOperand& dst, Representation r);
293 // Load an object from the root table.
294 void LoadRoot(Register destination,
295 Heap::RootListIndex index);
296 void LoadRoot(Register destination,
297 Heap::RootListIndex index,
298 Condition cond, Register src1, const Operand& src2);
300 // Store an object to the root table.
301 void StoreRoot(Register source,
302 Heap::RootListIndex index);
303 void StoreRoot(Register source,
304 Heap::RootListIndex index,
305 Condition cond, Register src1, const Operand& src2);
307 // ---------------------------------------------------------------------------
310 void IncrementalMarkingRecordWriteHelper(Register object,
314 enum RememberedSetFinalAction {
320 // Record in the remembered set the fact that we have a pointer to new space
321 // at the address pointed to by the addr register. Only works if addr is not
323 void RememberedSetHelper(Register object, // Used for debug code.
326 SaveFPRegsMode save_fp,
327 RememberedSetFinalAction and_then);
329 void CheckPageFlag(Register object,
333 Label* condition_met);
335 // Check if object is in new space. Jumps if the object is not in new space.
336 // The register scratch can be object itself, but it will be clobbered.
337 void JumpIfNotInNewSpace(Register object,
340 InNewSpace(object, scratch, ne, branch);
343 // Check if object is in new space. Jumps if the object is in new space.
344 // The register scratch can be object itself, but scratch will be clobbered.
345 void JumpIfInNewSpace(Register object,
348 InNewSpace(object, scratch, eq, branch);
351 // Check if an object has a given incremental marking color.
352 void HasColor(Register object,
359 void JumpIfBlack(Register object,
364 // Checks the color of an object. If the object is already grey or black
365 // then we just fall through, since it is already live. If it is white and
366 // we can determine that it doesn't need to be scanned, then we just mark it
367 // black and fall through. For the rest we jump to the label so the
368 // incremental marker can fix its assumptions.
369 void EnsureNotWhite(Register object,
373 Label* object_is_white_and_not_data);
375 // Detects conservatively whether an object is data-only, i.e. it does not need to
376 // be scanned by the garbage collector.
377 void JumpIfDataObject(Register value,
379 Label* not_data_object);
381 // Notify the garbage collector that we wrote a pointer into an object.
382 // |object| is the object being stored into, |value| is the object being
383 // stored. value and scratch registers are clobbered by the operation.
384 // The offset is the offset from the start of the object, not the offset from
385 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
386 void RecordWriteField(
392 SaveFPRegsMode save_fp,
393 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
394 SmiCheck smi_check = INLINE_SMI_CHECK,
395 PointersToHereCheck pointers_to_here_check_for_value =
396 kPointersToHereMaybeInteresting);
398 // As above, but the offset has the tag presubtracted. For use with
399 // MemOperand(reg, off).
400 inline void RecordWriteContextSlot(
406 SaveFPRegsMode save_fp,
407 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
408 SmiCheck smi_check = INLINE_SMI_CHECK,
409 PointersToHereCheck pointers_to_here_check_for_value =
410 kPointersToHereMaybeInteresting) {
411 RecordWriteField(context,
412 offset + kHeapObjectTag,
417 remembered_set_action,
419 pointers_to_here_check_for_value);
422 void RecordWriteForMap(
427 SaveFPRegsMode save_fp);
429 // For a given |object| notify the garbage collector that the slot |address|
430 // has been written. |value| is the object being stored. The value and
431 // address registers are clobbered by the operation.
437 SaveFPRegsMode save_fp,
438 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
439 SmiCheck smi_check = INLINE_SMI_CHECK,
440 PointersToHereCheck pointers_to_here_check_for_value =
441 kPointersToHereMaybeInteresting);
444 // ---------------------------------------------------------------------------
445 // Inline caching support.
447 // Generate code for checking access rights - used for security checks
448 // on access to global objects across environments. The holder register
449 // is left untouched, whereas both scratch registers are clobbered.
450 void CheckAccessGlobalProxy(Register holder_reg,
454 void GetNumberHash(Register reg0, Register scratch);
456 void LoadFromNumberDictionary(Label* miss,
465 inline void MarkCode(NopMarkerTypes type) {
469 // Check if the given instruction is a 'type' marker.
470 // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
471 // nop(type)). These instructions are generated to mark special location in
472 // the code, like some special IC code.
473 static inline bool IsMarkedCode(Instr instr, int type) {
474 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
475 return IsNop(instr, type);
479 static inline int GetCodeMarker(Instr instr) {
480 uint32_t opcode = ((instr & kOpcodeMask));
481 uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
482 uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
483 uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
485 // Return <n> if we have a sll zero_reg, zero_reg, n
487 bool sllzz = (opcode == SLL &&
488 rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
489 rs == static_cast<uint32_t>(ToNumber(zero_reg)));
491 (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
492 DCHECK((type == -1) ||
493 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
499 // ---------------------------------------------------------------------------
500 // Allocation support.
502 // Allocate an object in new space or old space. The object_size is
503 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
504 // is passed. If the space is exhausted control continues at the gc_required
505 // label. The allocated object is returned in result. If the flag
506 // tag_allocated_object is true the result is tagged as a heap object.
507 // All registers are clobbered also when control continues at the gc_required
509 void Allocate(int object_size,
514 AllocationFlags flags);
516 void Allocate(Register object_size,
521 AllocationFlags flags);
523 void AllocateTwoByteString(Register result,
529 void AllocateOneByteString(Register result, Register length,
530 Register scratch1, Register scratch2,
531 Register scratch3, Label* gc_required);
532 void AllocateTwoByteConsString(Register result,
537 void AllocateOneByteConsString(Register result, Register length,
538 Register scratch1, Register scratch2,
540 void AllocateTwoByteSlicedString(Register result,
545 void AllocateOneByteSlicedString(Register result, Register length,
546 Register scratch1, Register scratch2,
549 // Allocates a heap number or jumps to the gc_required label if the young
550 // space is full and a scavenge is needed. All registers are clobbered also
551 // when control continues at the gc_required label.
552 void AllocateHeapNumber(Register result,
555 Register heap_number_map,
557 TaggingMode tagging_mode = TAG_RESULT,
558 MutableMode mode = IMMUTABLE);
559 void AllocateHeapNumberWithValue(Register result,
565 // ---------------------------------------------------------------------------
566 // Instruction macros.
568 #define DEFINE_INSTRUCTION(instr) \
569 void instr(Register rd, Register rs, const Operand& rt); \
570 void instr(Register rd, Register rs, Register rt) { \
571 instr(rd, rs, Operand(rt)); \
573 void instr(Register rs, Register rt, int32_t j) { \
574 instr(rs, rt, Operand(j)); \
577 #define DEFINE_INSTRUCTION2(instr) \
578 void instr(Register rs, const Operand& rt); \
579 void instr(Register rs, Register rt) { \
580 instr(rs, Operand(rt)); \
582 void instr(Register rs, int32_t j) { \
583 instr(rs, Operand(j)); \
586 #define DEFINE_INSTRUCTION3(instr) \
587 void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
588 void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
589 instr(rd_hi, rd_lo, rs, Operand(rt)); \
591 void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
592 instr(rd_hi, rd_lo, rs, Operand(j)); \
595 DEFINE_INSTRUCTION(Addu);
596 DEFINE_INSTRUCTION(Subu);
597 DEFINE_INSTRUCTION(Mul);
598 DEFINE_INSTRUCTION(Div);
599 DEFINE_INSTRUCTION(Divu);
600 DEFINE_INSTRUCTION(Mod);
601 DEFINE_INSTRUCTION(Modu);
602 DEFINE_INSTRUCTION(Mulh);
603 DEFINE_INSTRUCTION2(Mult);
604 DEFINE_INSTRUCTION(Mulhu);
605 DEFINE_INSTRUCTION2(Multu);
606 DEFINE_INSTRUCTION2(Div);
607 DEFINE_INSTRUCTION2(Divu);
609 DEFINE_INSTRUCTION3(Div);
610 DEFINE_INSTRUCTION3(Mul);
612 DEFINE_INSTRUCTION(And);
613 DEFINE_INSTRUCTION(Or);
614 DEFINE_INSTRUCTION(Xor);
615 DEFINE_INSTRUCTION(Nor);
616 DEFINE_INSTRUCTION2(Neg);
618 DEFINE_INSTRUCTION(Slt);
619 DEFINE_INSTRUCTION(Sltu);
621 // MIPS32 R2 instruction macro.
622 DEFINE_INSTRUCTION(Ror);
624 #undef DEFINE_INSTRUCTION
625 #undef DEFINE_INSTRUCTION2
627 void Pref(int32_t hint, const MemOperand& rs);
630 // ---------------------------------------------------------------------------
631 // Pseudo-instructions.
// Register-to-register move, implemented as OR with the zero register.
633 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
635 void Ulw(Register rd, const MemOperand& rs);
636 void Usw(Register rd, const MemOperand& rs);
638 // Load int32 in the rd register.
639 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
640 inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
641 li(rd, Operand(j), mode);
643 void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
645 // Push multiple registers on the stack.
646 // Registers are saved in numerical order, with higher numbered registers
647 // saved in higher memory addresses.
648 void MultiPush(RegList regs);
649 void MultiPushReversed(RegList regs);
651 void MultiPushFPU(RegList regs);
652 void MultiPushReversedFPU(RegList regs);
// Pushes src: grows the stack by one pointer slot, then stores src at sp.
654 void push(Register src) {
655 Addu(sp, sp, Operand(-kPointerSize));
656 sw(src, MemOperand(sp, 0));
// Capitalized alias for push().
658 void Push(Register src) { push(src); }
661 void Push(Handle<Object> handle);
// Pushes a Smi, wrapped as a Handle in the current isolate.
662 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
664 // Push two registers. Pushes leftmost register first (to highest address).
665 void Push(Register src1, Register src2) {
666 Subu(sp, sp, Operand(2 * kPointerSize));
667 sw(src1, MemOperand(sp, 1 * kPointerSize));
668 sw(src2, MemOperand(sp, 0 * kPointerSize));
671 // Push three registers. Pushes leftmost register first (to highest address).
672 void Push(Register src1, Register src2, Register src3) {
673 Subu(sp, sp, Operand(3 * kPointerSize));
674 sw(src1, MemOperand(sp, 2 * kPointerSize));
675 sw(src2, MemOperand(sp, 1 * kPointerSize));
676 sw(src3, MemOperand(sp, 0 * kPointerSize));
679 // Push four registers. Pushes leftmost register first (to highest address).
680 void Push(Register src1, Register src2, Register src3, Register src4) {
681 Subu(sp, sp, Operand(4 * kPointerSize));
682 sw(src1, MemOperand(sp, 3 * kPointerSize));
683 sw(src2, MemOperand(sp, 2 * kPointerSize));
684 sw(src3, MemOperand(sp, 1 * kPointerSize));
685 sw(src4, MemOperand(sp, 0 * kPointerSize));
688 // Push five registers. Pushes leftmost register first (to highest address).
689 void Push(Register src1, Register src2, Register src3, Register src4,
691 Subu(sp, sp, Operand(5 * kPointerSize));
692 sw(src1, MemOperand(sp, 4 * kPointerSize));
693 sw(src2, MemOperand(sp, 3 * kPointerSize));
694 sw(src3, MemOperand(sp, 2 * kPointerSize));
695 sw(src4, MemOperand(sp, 1 * kPointerSize));
696 sw(src5, MemOperand(sp, 0 * kPointerSize));
699 void Push(Register src, Condition cond, Register tst1, Register tst2) {
700 // Since we don't have conditional execution we use a Branch.
701 Branch(3, cond, tst1, Operand(tst2));
702 Subu(sp, sp, Operand(kPointerSize));
703 sw(src, MemOperand(sp, 0));
706 // Pops multiple values from the stack and load them in the
707 // registers specified in regs. Pop order is the opposite as in MultiPush.
708 void MultiPop(RegList regs);
709 void MultiPopReversed(RegList regs);
711 void MultiPopFPU(RegList regs);
712 void MultiPopReversedFPU(RegList regs);
// Pops into dst: loads the value at sp, then shrinks the stack by one slot.
714 void pop(Register dst) {
715 lw(dst, MemOperand(sp, 0));
716 Addu(sp, sp, Operand(kPointerSize));
// Capitalized alias for pop().
718 void Pop(Register dst) { pop(dst); }
720 // Pop two registers. Pops rightmost register first (from lower address).
721 void Pop(Register src1, Register src2) {
722 DCHECK(!src1.is(src2));
723 lw(src2, MemOperand(sp, 0 * kPointerSize));
724 lw(src1, MemOperand(sp, 1 * kPointerSize));
725 Addu(sp, sp, 2 * kPointerSize);
728 // Pop three registers. Pops rightmost register first (from lower address).
729 void Pop(Register src1, Register src2, Register src3) {
730 lw(src3, MemOperand(sp, 0 * kPointerSize));
731 lw(src2, MemOperand(sp, 1 * kPointerSize));
732 lw(src1, MemOperand(sp, 2 * kPointerSize));
733 Addu(sp, sp, 3 * kPointerSize);
// Discards `count` pointer-sized stack slots without loading their values.
736 void Pop(uint32_t count = 1) {
737 Addu(sp, sp, Operand(count * kPointerSize));
740 // Push and pop the registers that can hold pointers, as defined by the
741 // RegList constant kSafepointSavedRegisters.
742 void PushSafepointRegisters();
743 void PopSafepointRegisters();
744 // Store value in register src in the safepoint stack slot for
746 void StoreToSafepointRegisterSlot(Register src, Register dst);
747 // Load the value of the src register from its safepoint stack slot
748 // into register dst.
749 void LoadFromSafepointRegisterSlot(Register dst, Register src);
751 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
753 // Does not handle errors.
754 void FlushICache(Register address, unsigned instructions);
756 // MIPS32 R2 instruction macro.
757 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
758 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
760 // ---------------------------------------------------------------------------
761 // FPU macros. These do not handle special cases like NaN or +- inf.
763 // Convert unsigned word to double.
764 void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
765 void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
767 // Convert double to unsigned word.
768 void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
769 void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
771 void Trunc_w_d(FPURegister fd, FPURegister fs);
772 void Round_w_d(FPURegister fd, FPURegister fs);
773 void Floor_w_d(FPURegister fd, FPURegister fs);
774 void Ceil_w_d(FPURegister fd, FPURegister fs);
776 // FP32 mode: Move the general purpose register into
777 // the high part of the double-register pair.
778 // FP64 mode: Move the general-purpose register into
779 // the higher 32 bits of the 64-bit coprocessor register,
780 // while leaving the low bits unchanged.
781 void Mthc1(Register rt, FPURegister fs);
783 // FP32 mode: move the high part of the double-register pair into
784 // general purpose register.
785 // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
786 // general-purpose register.
787 void Mfhc1(Register rt, FPURegister fs);
789 // Wrapper functions for the different cmp/branch types.
790 inline void BranchF32(Label* target, Label* nan, Condition cc,
791 FPURegister cmp1, FPURegister cmp2,
792 BranchDelaySlot bd = PROTECT) {
793 BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
796 inline void BranchF64(Label* target, Label* nan, Condition cc,
797 FPURegister cmp1, FPURegister cmp2,
798 BranchDelaySlot bd = PROTECT) {
799 BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
802 // Alternate (inline) version for better readability with USE_DELAY_SLOT.
803 inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
804 Condition cc, FPURegister cmp1, FPURegister cmp2) {
805 BranchF64(target, nan, cc, cmp1, cmp2, bd);
808 inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
809 Condition cc, FPURegister cmp1, FPURegister cmp2) {
810 BranchF32(target, nan, cc, cmp1, cmp2, bd);
813 // Alias functions for backward compatibility.
814 inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
815 FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
816 BranchF64(target, nan, cc, cmp1, cmp2, bd);
819 inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
820 Condition cc, FPURegister cmp1, FPURegister cmp2) {
821 BranchF64(bd, target, nan, cc, cmp1, cmp2);
824 // Truncates a double using a specific rounding mode, and writes the value
825 // to the result register.
826 // The except_flag will contain any exceptions caused by the instruction.
827 // If check_inexact is kDontCheckForInexactConversion, then the inexact
828 // exception is masked.
829 void EmitFPUTruncate(FPURoundingMode rounding_mode,
831 DoubleRegister double_input,
833 DoubleRegister double_scratch,
834 Register except_flag,
835 CheckForInexactConversion check_inexact
836 = kDontCheckForInexactConversion);
838 // Performs a truncating conversion of a floating point number as used by
839 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
840 // succeeds, otherwise falls through if result is saturated. On return
841 // 'result' either holds answer, or is clobbered on fall through.
843 // Only public for the test code in test-code-stubs-arm.cc.
844 void TryInlineTruncateDoubleToI(Register result,
845 DoubleRegister input,
848 // Performs a truncating conversion of a floating point number as used by
849 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
850 // Exits with 'result' holding the answer.
851 void TruncateDoubleToI(Register result, DoubleRegister double_input);
853 // Performs a truncating conversion of a heap number as used by
854 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
855 // must be different registers. Exits with 'result' holding the answer.
856 void TruncateHeapNumberToI(Register result, Register object);
858 // Converts the smi or heap number in object to an int32 using the rules
859 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
860 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
861 // different registers.
862 void TruncateNumberToI(Register object,
864 Register heap_number_map,
868 // Loads the number from object into dst register.
869 // If |object| is neither smi nor heap number, |not_number| is jumped to
870 // with |object| still intact.
871 void LoadNumber(Register object,
873 Register heap_number_map,
877 // Loads the number from object into double_dst in the double format.
878 // Control will jump to not_int32 if the value cannot be exactly represented
879 // by a 32-bit integer.
880 // Floating point value in the 32-bit integer range that are not exact integer
882 void LoadNumberAsInt32Double(Register object,
883 DoubleRegister double_dst,
884 Register heap_number_map,
887 FPURegister double_scratch,
890 // Loads the number from object into dst as a 32-bit integer.
891 // Control will jump to not_int32 if the object cannot be exactly represented
892 // by a 32-bit integer.
893 // Floating point value in the 32-bit integer range that are not exact integer
894 // won't be converted.
895 void LoadNumberAsInt32(Register object,
897 Register heap_number_map,
900 FPURegister double_scratch0,
901 FPURegister double_scratch1,
905 // argc - argument count to be dropped by LeaveExitFrame.
906 // save_doubles - saves FPU registers on stack, currently disabled.
907 // stack_space - extra stack space.
908 void EnterExitFrame(bool save_doubles,
909 int stack_space = 0);
911 // Leave the current exit frame.
912 void LeaveExitFrame(bool save_doubles, Register arg_count,
913 bool restore_context, bool do_return = NO_EMIT_RETURN,
914 bool argument_count_is_length = false);
916 // Get the actual activation frame alignment for target environment.
917 static int ActivationFrameAlignment();
919 // Make sure the stack is aligned. Only emits code in debug mode.
920 void AssertStackIsAligned();
922 void LoadContext(Register dst, int context_chain_length);
924 // Conditionally load the cached Array transitioned map of type
925 // transitioned_kind from the native context if the map in register
926 // map_in_out is the cached Array map in the native context of
928 void LoadTransitionedArrayMapConditional(
929 ElementsKind expected_kind,
930 ElementsKind transitioned_kind,
933 Label* no_map_match);
935 void LoadGlobalFunction(int index, Register function);
937 // Load the initial map from the global function. The registers
938 // function and map can be the same, function is then overwritten.
939 void LoadGlobalFunctionInitialMap(Register function,
943 void InitializeRootRegister() {
944 ExternalReference roots_array_start =
945 ExternalReference::roots_array_start(isolate());
946 li(kRootRegister, Operand(roots_array_start));
949 // -------------------------------------------------------------------------
950 // JavaScript invokes.
952 // Invoke the JavaScript function code by either calling or jumping.
953 void InvokeCode(Register code,
954 const ParameterCount& expected,
955 const ParameterCount& actual,
957 const CallWrapper& call_wrapper);
959 // Invoke the JavaScript function in the given register. Changes the
960 // current context to the context in the function before invoking.
961 void InvokeFunction(Register function,
962 const ParameterCount& actual,
964 const CallWrapper& call_wrapper);
966 void InvokeFunction(Register function,
967 const ParameterCount& expected,
968 const ParameterCount& actual,
970 const CallWrapper& call_wrapper);
972 void InvokeFunction(Handle<JSFunction> function,
973 const ParameterCount& expected,
974 const ParameterCount& actual,
976 const CallWrapper& call_wrapper);
979 void IsObjectJSObjectType(Register heap_object,
984 void IsInstanceJSObjectType(Register map,
988 void IsObjectJSStringType(Register object,
992 void IsObjectNameType(Register object,
996 // -------------------------------------------------------------------------
1001 // -------------------------------------------------------------------------
1002 // Exception handling.
1004 // Push a new stack handler and link into stack handler chain.
1005 void PushStackHandler();
1007 // Unlink the stack handler on top of the stack from the stack handler chain.
1008 // Must preserve the result register.
1009 void PopStackHandler();
1011 // Copies a fixed number of fields of heap objects from src to dst.
1012 void CopyFields(Register dst, Register src, RegList temps, int field_count);
1014 // Copies a number of bytes from src to dst. All registers are clobbered. On
1015 // exit src and dst will point to the place just after where the last byte was
1016 // read or written and length will be zero.
1017 void CopyBytes(Register src,
1022 // Initialize fields with filler values. Fields starting at |start_offset|
1023 // not including end_offset are overwritten with the value in |filler|. At
1024 // the end the loop, |start_offset| takes the value of |end_offset|.
1025 void InitializeFieldsWithFiller(Register start_offset,
1026 Register end_offset,
1029 // -------------------------------------------------------------------------
1030 // Support functions.
1032 // Machine code version of Map::GetConstructor().
1033 // |temp| holds |result|'s map when done, and |temp2| its instance type.
1034 void GetMapConstructor(Register result, Register map, Register temp,
1037 // Try to get function prototype of a function and puts the value in
1038 // the result register. Checks that the function really is a
1039 // function and jumps to the miss label if the fast checks fail. The
1040 // function register will be untouched; the other registers may be
1042 void TryGetFunctionPrototype(Register function,
1046 bool miss_on_bound_function = false);
1048 void GetObjectType(Register function,
1052 // Check if a map for a JSObject indicates that the object has fast elements.
1053 // Jump to the specified label if it does not.
1054 void CheckFastElements(Register map,
1058 // Check if a map for a JSObject indicates that the object can have both smi
1059 // and HeapObject elements. Jump to the specified label if it does not.
1060 void CheckFastObjectElements(Register map,
1064 // Check if a map for a JSObject indicates that the object has fast smi only
1065 // elements. Jump to the specified label if it does not.
1066 void CheckFastSmiElements(Register map,
1070 // Check to see if maybe_number can be stored as a double in
1071 // FastDoubleElements. If it can, store it at the index specified by key in
1072 // the FastDoubleElements array elements. Otherwise jump to fail.
1073 void StoreNumberToDoubleElements(Register value_reg,
1075 Register elements_reg,
1080 int elements_offset = 0);
1082 // Compare an object's map with the specified map and its transitioned
1083 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
1084 // "branch_to" if the result of the comparison is "cond". If multiple map
1085 // compares are required, the compare sequences branches to early_success.
1086 void CompareMapAndBranch(Register obj,
1089 Label* early_success,
1093 // As above, but the map of the object is already loaded into the register
1094 // which is preserved by the code generated.
1095 void CompareMapAndBranch(Register obj_map,
1097 Label* early_success,
1101 // Check if the map of an object is equal to a specified map and branch to
1102 // label if not. Skip the smi check if not required (object is known to be a
1103 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
1105 void CheckMap(Register obj,
1109 SmiCheckType smi_check_type);
1112 void CheckMap(Register obj,
1114 Heap::RootListIndex index,
1116 SmiCheckType smi_check_type);
1118 // Check if the map of an object is equal to a specified weak map and branch
1119 // to a specified target if equal. Skip the smi check if not required
1120 // (object is known to be a heap object)
1121 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
1122 Handle<WeakCell> cell, Handle<Code> success,
1123 SmiCheckType smi_check_type);
1125 // Get value of the weak cell.
1126 void GetWeakValue(Register value, Handle<WeakCell> cell);
1128 // Load the value of the weak cell in the value register. Branch to the
// given miss label if the weak cell was cleared.
1130 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
1132 // Load and check the instance type of an object for being a string.
1133 // Loads the type into the second argument register.
1134 // Returns a condition that will be enabled if the object was a string.
// Load and check the instance type of |obj| for being a string: loads the
// map, then the instance type, and masks it with kIsNotStringMask so the
// result is zero exactly for strings (kStringTag == 0, asserted below).
// NOTE(review): the remaining parameters (the |type| register) and the
// trailing return statement are missing from this copy of the file.
Condition IsObjectStringType(Register obj,
lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
DCHECK_EQ(0u, kStringTag);
1146 // Picks out an array index from the hash field.
1148 // hash - holds the index's hash. Clobbered.
1149 // index - holds the overwritten index on exit.
1150 void IndexFromHash(Register hash, Register index);
1152 // Get the number of least significant bits from a register.
1153 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1154 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
1156 // Load the value of a number object into a FPU double register. If the
1157 // object is not a number a jump to the label not_number is performed
1158 // and the FPU double register is unchanged.
1159 void ObjectToDoubleFPURegister(
1164 Register heap_number_map,
1166 ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
1168 // Load the value of a smi object into a FPU double register. The register
1169 // scratch1 can be the same register as smi in which case smi will hold the
1170 // untagged value afterwards.
1171 void SmiToDoubleFPURegister(Register smi,
1175 // -------------------------------------------------------------------------
1176 // Overflow handling functions.
1177 // Usage: first call the appropriate arithmetic function, then call one of the
1178 // jump functions with the overflow_dst register as the second parameter.
1180 void AdduAndCheckForOverflow(Register dst,
1183 Register overflow_dst,
1184 Register scratch = at);
1186 void AdduAndCheckForOverflow(Register dst, Register left,
1187 const Operand& right, Register overflow_dst,
1188 Register scratch = at);
1190 void SubuAndCheckForOverflow(Register dst,
1193 Register overflow_dst,
1194 Register scratch = at);
1196 void SubuAndCheckForOverflow(Register dst, Register left,
1197 const Operand& right, Register overflow_dst,
1198 Register scratch = at);
// Branch to |label| if the preceding *AndCheckForOverflow left a negative
// value in |overflow_check| (i.e. overflow occurred).
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
Branch(label, lt, overflow_check, Operand(zero_reg), bd);

// Branch to |label| if |overflow_check| is non-negative (no overflow).
void BranchOnNoOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
Branch(label, ge, overflow_check, Operand(zero_reg), bd);

// Return if |overflow_check| is negative (overflow occurred).
void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
Ret(lt, overflow_check, Operand(zero_reg), bd);

// Return if |overflow_check| is non-negative (no overflow).
void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
Ret(ge, overflow_check, Operand(zero_reg), bd);
1220 // -------------------------------------------------------------------------
1223 // See comments at the beginning of CEntryStub::Generate.
1224 inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
1226 inline void PrepareCEntryFunction(const ExternalReference& ref) {
1227 li(a1, Operand(ref));
1230 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
1231 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
1233 // Call a code stub.
1234 void CallStub(CodeStub* stub,
1235 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1238 // Tail call a code stub (jump).
1239 void TailCallStub(CodeStub* stub, COND_ARGS);
1243 void CallJSExitStub(CodeStub* stub);
1245 // Call a runtime routine.
1246 void CallRuntime(const Runtime::Function* f, int num_arguments,
1247 SaveFPRegsMode save_doubles = kDontSaveFPRegs,
1248 BranchDelaySlot bd = PROTECT);
// Convenience wrapper: call runtime function |id| with its declared arity,
// saving FPU registers across the call.
void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, kSaveFPRegs);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT) {
CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
1261 // Convenience function: call an external reference.
1262 void CallExternalReference(const ExternalReference& ext,
1264 BranchDelaySlot bd = PROTECT);
1266 // Tail call of a runtime routine (jump).
1267 // Like JumpToExternalReference, but also takes care of passing the number
1269 void TailCallExternalReference(const ExternalReference& ext,
1273 // Convenience function: tail call a runtime routine (jump).
1274 void TailCallRuntime(Runtime::FunctionId fid,
1278 int CalculateStackPassedWords(int num_reg_arguments,
1279 int num_double_arguments);
1281 // Before calling a C-function from generated code, align arguments on stack
1282 // and add space for the four mips argument slots.
1283 // After aligning the frame, non-register arguments must be stored on the
1284 // stack, after the argument-slots using helper: CFunctionArgumentOperand().
1285 // The argument count assumes all arguments are word sized.
1286 // Some compilers/platforms require the stack to be aligned when calling
1288 // Needs a scratch register to do some arithmetic. This register will be
1290 void PrepareCallCFunction(int num_reg_arguments,
1291 int num_double_registers,
1293 void PrepareCallCFunction(int num_reg_arguments,
1296 // Arguments 1-4 are placed in registers a0 thru a3 respectively.
1297 // Arguments 5..n are stored to stack using following:
1298 // sw(t0, CFunctionArgumentOperand(5));
1300 // Calls a C function and cleans up the space for arguments allocated
1301 // by PrepareCallCFunction. The called function is not allowed to trigger a
1302 // garbage collection, since that might move the code and invalidate the
1303 // return address (unless this is somehow accounted for by the called
1305 void CallCFunction(ExternalReference function, int num_arguments);
1306 void CallCFunction(Register function, int num_arguments);
1307 void CallCFunction(ExternalReference function,
1308 int num_reg_arguments,
1309 int num_double_arguments);
1310 void CallCFunction(Register function,
1311 int num_reg_arguments,
1312 int num_double_arguments);
1313 void MovFromFloatResult(DoubleRegister dst);
1314 void MovFromFloatParameter(DoubleRegister dst);
1316 // There are two ways of passing double arguments on MIPS, depending on
1317 // whether soft or hard floating point ABI is used. These functions
1318 // abstract parameter passing for the three different ways we call
1319 // C functions from generated code.
1320 void MovToFloatParameter(DoubleRegister src);
1321 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1322 void MovToFloatResult(DoubleRegister src);
1324 // Jump to the builtin routine.
1325 void JumpToExternalReference(const ExternalReference& builtin,
1326 BranchDelaySlot bd = PROTECT);
1328 // Invoke specified builtin JavaScript function. Adds an entry to
1329 // the unresolved list if the name does not resolve.
1330 void InvokeBuiltin(Builtins::JavaScript id,
1332 const CallWrapper& call_wrapper = NullCallWrapper());
1334 // Store the code object for the given builtin in the target register and
1335 // setup the function in a1.
1336 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1338 // Store the function for the given builtin in the target register.
1339 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1343 uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
// Return the handle that is patched with the code object on installation.
// Must not be called before |code_object_| has been initialized.
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
1352 // Emit code for a truncating division by a constant. The dividend register is
1353 // unchanged and at gets clobbered. Dividend and result must be different.
1354 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1356 // -------------------------------------------------------------------------
1357 // StatsCounter support.
1359 void SetCounter(StatsCounter* counter, int value,
1360 Register scratch1, Register scratch2);
1361 void IncrementCounter(StatsCounter* counter, int value,
1362 Register scratch1, Register scratch2);
1363 void DecrementCounter(StatsCounter* counter, int value,
1364 Register scratch1, Register scratch2);
1367 // -------------------------------------------------------------------------
1370 // Calls Abort(msg) if the condition cc is not satisfied.
1371 // Use --debug_code to enable.
1372 void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
1373 void AssertFastElements(Register elements);
1375 // Like Assert(), but always enabled.
1376 void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
1378 // Print a message to stdout and abort execution.
1379 void Abort(BailoutReason msg);
1381 // Verify restrictions about code generated in stubs.
1382 void set_generating_stub(bool value) { generating_stub_ = value; }
1383 bool generating_stub() { return generating_stub_; }
1384 void set_has_frame(bool value) { has_frame_ = value; }
1385 bool has_frame() { return has_frame_; }
1386 inline bool AllowThisStubCall(CodeStub* stub);
1388 // ---------------------------------------------------------------------------
1389 // Number utilities.
1391 // Check whether the value of reg is a power of two and not zero. If not
1392 // control continues at the label not_power_of_two. If reg is a power of two
1393 // the register scratch contains the value of (reg - 1) when control falls
1395 void JumpIfNotPowerOfTwoOrZero(Register reg,
1397 Label* not_power_of_two_or_zero);
1399 // -------------------------------------------------------------------------
// Smi-tag |reg| in place: reg + reg == reg << 1, which sets the smi tag
// (kSmiTagSize == 1 on 32-bit); overflows iff the value is not smi-range.
void SmiTag(Register reg) {
Addu(reg, reg, reg);

// Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
void SmiTagCheckOverflow(Register reg, Register overflow);
void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

// Smi-tag |src| into |dst| (dst = src << 1 via addition).
void SmiTag(Register dst, Register src) {
Addu(dst, src, src);
// Try to convert int32 to smi. If the value is too large, preserve
1415 // the original value and jump to not_a_smi. Destroys scratch and
1417 void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
1418 TrySmiTag(reg, reg, scratch, not_a_smi);
1420 void TrySmiTag(Register dst,
1424 SmiTagCheckOverflow(at, src, scratch);
1425 BranchOnOverflow(not_a_smi, scratch);
1429 void SmiUntag(Register reg) {
1430 sra(reg, reg, kSmiTagSize);
1433 void SmiUntag(Register dst, Register src) {
1434 sra(dst, src, kSmiTagSize);
1437 // Test if the register contains a smi.
// Set |scratch| to zero iff |value| is a smi (tag bit clear).
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
// Set |scratch| to zero iff |value| is a non-negative smi (tag and sign
// bits both clear).
inline void NonNegativeSmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
1445 // Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
1447 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1449 // Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
1451 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Jump if the register contains a smi.
1454 void JumpIfSmi(Register value,
1456 Register scratch = at,
1457 BranchDelaySlot bd = PROTECT);
1459 // Jump if the register contains a non-smi.
1460 void JumpIfNotSmi(Register value,
1461 Label* not_smi_label,
1462 Register scratch = at,
1463 BranchDelaySlot bd = PROTECT);
1465 // Jump if either of the registers contain a non-smi.
1466 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1467 // Jump if either of the registers contain a smi.
1468 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1470 // Abort execution if argument is a smi, enabled via --debug-code.
1471 void AssertNotSmi(Register object);
1472 void AssertSmi(Register object);
1474 // Abort execution if argument is not a string, enabled via --debug-code.
1475 void AssertString(Register object);
1477 // Abort execution if argument is not a name, enabled via --debug-code.
1478 void AssertName(Register object);
1480 // Abort execution if argument is not undefined or an AllocationSite, enabled
1481 // via --debug-code.
1482 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1484 // Abort execution if reg is not the root value with the given index,
1485 // enabled via --debug-code.
1486 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1488 // ---------------------------------------------------------------------------
1489 // HeapNumber utilities.
1491 void JumpIfNotHeapNumber(Register object,
1492 Register heap_number_map,
1494 Label* on_not_heap_number);
1496 // -------------------------------------------------------------------------
1497 // String utilities.
1499 // Generate code to do a lookup in the number string cache. If the number in
1500 // the register object is found in the cache the generated code falls through
1501 // with the result in the result register. The object and the result register
1502 // can be the same. If the number is not found in the cache the code jumps to
1503 // the label not_found with only the content of register object unchanged.
1504 void LookupNumberStringCache(Register object,
1511 // Checks if both instance types are sequential ASCII strings and jumps to
1512 // label if either is not.
1513 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1514 Register first_object_instance_type, Register second_object_instance_type,
1515 Register scratch1, Register scratch2, Label* failure);
1517 // Check if instance type is sequential one-byte string and jump to label if
1519 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1522 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1524 void EmitSeqStringSetCharCheck(Register string,
1528 uint32_t encoding_mask);
1530 // Checks if both objects are sequential one-byte strings and jumps to label
1531 // if either is not. Assumes that neither object is a smi.
1532 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
1538 // Checks if both objects are sequential one-byte strings and jumps to label
1539 // if either is not.
1540 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1543 Label* not_flat_one_byte_strings);
1545 void ClampUint8(Register output_reg, Register input_reg);
1547 void ClampDoubleToUint8(Register result_reg,
1548 DoubleRegister input_reg,
1549 DoubleRegister temp_double_reg);
1552 void LoadInstanceDescriptors(Register map, Register descriptors);
1553 void EnumLength(Register dst, Register map);
1554 void NumberOfOwnDescriptors(Register dst, Register map);
1555 void LoadAccessor(Register dst, Register holder, int accessor_index,
1556 AccessorComponent accessor);
// Extract the bit field described by |Field| (kShift/kSize) from |src|
// into |dst| using the MIPS Ext (bit-field extract) instruction.
template<typename Field>
void DecodeField(Register dst, Register src) {
Ext(dst, src, Field::kShift, Field::kSize);
// In-place overload of the above.
template<typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
1568 template<typename Field>
1569 void DecodeFieldToSmi(Register dst, Register src) {
1570 static const int shift = Field::kShift;
1571 static const int mask = Field::kMask >> shift << kSmiTagSize;
1572 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
1573 STATIC_ASSERT(kSmiTag == 0);
1574 if (shift < kSmiTagSize) {
1575 sll(dst, src, kSmiTagSize - shift);
1576 And(dst, dst, Operand(mask));
1577 } else if (shift > kSmiTagSize) {
1578 srl(dst, src, shift - kSmiTagSize);
1579 And(dst, dst, Operand(mask));
1581 And(dst, src, Operand(mask));
1585 template<typename Field>
1586 void DecodeFieldToSmi(Register reg) {
1587 DecodeField<Field>(reg, reg);
1590 // Generates function and stub prologue code.
1591 void StubPrologue();
1592 void Prologue(bool code_pre_aging);
1594 // Activation support.
1595 void EnterFrame(StackFrame::Type type);
1596 void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
1597 void LeaveFrame(StackFrame::Type type);
1599 // Patch the relocated value (lui/ori pair).
1600 void PatchRelocatedValue(Register li_location,
1602 Register new_value);
// Get the relocated value (loaded data) from the lui/ori pair.
1604 void GetRelocatedValue(Register li_location,
1608 // Expects object in a0 and returns map with validated enum cache
1609 // in a0. Assumes that any other register can be used as a scratch.
1610 void CheckEnumCache(Register null_value, Label* call_runtime);
1612 // AllocationMemento support. Arrays may have an associated
1613 // AllocationMemento object that can be checked for in order to pretransition
1615 // On entry, receiver_reg should point to the array object.
1616 // scratch_reg gets clobbered.
1617 // If allocation info is present, jump to allocation_memento_present.
1618 void TestJSArrayForAllocationMemento(
1619 Register receiver_reg,
1620 Register scratch_reg,
1621 Label* no_memento_found,
1622 Condition cond = al,
1623 Label* allocation_memento_present = NULL);
// Branch to |memento_found| iff the array in |receiver_reg| is directly
// followed in memory by an AllocationMemento; falls through otherwise.
// Clobbers |scratch_reg|.
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
&no_memento_found, eq, memento_found);
bind(&no_memento_found);
1634 // Jumps to found label if a prototype map has dictionary elements.
1635 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1636 Register scratch1, Label* found);
1639 void CallCFunctionHelper(Register function,
1640 int num_reg_arguments,
1641 int num_double_arguments);
1643 void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
1644 void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
1646 BranchDelaySlot bdslot = PROTECT);
1647 void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
1648 void BranchAndLinkShort(Label* L, Condition cond, Register rs,
1650 BranchDelaySlot bdslot = PROTECT);
1651 void Jr(Label* L, BranchDelaySlot bdslot);
1652 void Jalr(Label* L, BranchDelaySlot bdslot);
1654 // Common implementation of BranchF functions for the different formats.
1655 void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
1656 Condition cc, FPURegister cmp1, FPURegister cmp2,
1657 BranchDelaySlot bd = PROTECT);
1659 void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
1660 FPURegister cmp1, FPURegister cmp2,
1661 BranchDelaySlot bd = PROTECT);
1663 // Helper functions for generating invokes.
1664 void InvokePrologue(const ParameterCount& expected,
1665 const ParameterCount& actual,
1666 Handle<Code> code_constant,
1669 bool* definitely_mismatches,
1671 const CallWrapper& call_wrapper);
1673 // Get the code for the given builtin. Returns if able to resolve
1674 // the function in the 'resolved' flag.
1675 Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
1677 void InitializeNewString(Register string,
1679 Heap::RootListIndex map_index,
1683 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1684 void InNewSpace(Register object,
1686 Condition cond, // eq for new space, ne otherwise.
1689 // Helper for finding the mark bits for an address. Afterwards, the
1690 // bitmap register points at the word with the mark bits and the mask
1691 // the position of the first bit. Leaves addr_reg unchanged.
1692 inline void GetMarkBits(Register addr_reg,
1693 Register bitmap_reg,
1696 // Compute memory operands for safepoint stack slots.
1697 static int SafepointRegisterStackIndex(int reg_code);
1698 MemOperand SafepointRegisterSlot(Register reg);
1699 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1701 bool generating_stub_;
1703 bool has_double_zero_reg_set_;
1704 // This handle will be patched with the code object on installation.
1705 Handle<Object> code_object_;
1707 // Needs access to SafepointRegisterStackIndex for compiled frame
1709 friend class StandardFrame;
1713 // The code patcher is used to patch (typically) small parts of code e.g. for
1714 // debugging and other types of instrumentation. When using the code patcher
1715 // the exact number of bytes specified must be emitted. It is not legal to emit
1716 // relocation information. If any of these constraints are violated it causes
1717 // an assertion to fail.
1725 CodePatcher(byte* address,
1727 FlushICache flush_cache = FLUSH);
1730 // Macro assembler to emit code.
1731 MacroAssembler* masm() { return &masm_; }
1733 // Emit an instruction directly.
1734 void Emit(Instr instr);
1736 // Emit an address directly.
1737 void Emit(Address addr);
1739 // Change the condition part of an instruction leaving the rest of the current
1740 // instruction unchanged.
1741 void ChangeBranchCondition(Condition cond);
1744 byte* address_; // The address of the code being patched.
1745 int size_; // Number of bytes of the expected patch size.
1746 MacroAssembler masm_; // Macro assembler used to generate the code.
1747 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1752 #ifdef GENERATED_CODE_COVERAGE
1753 #define CODE_COVERAGE_STRINGIFY(x) #x
1754 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1755 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1756 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1758 #define ACCESS_MASM(masm) masm->
1761 } } // namespace v8::internal
1763 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_