// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_

#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips/assembler-mips.h"

namespace v8 {
namespace internal {

// Forward declaration.
class JumpTarget;

// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function calls
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update the gp register for position-independent code. Whenever
// MIPS generated code calls C code, it must be via the t9 register.
// Flags used for the LeaveExitFrame function.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,
  NO_EMIT_RETURN = false
};

// Flags used for AllocateHeapNumber.
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};

// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non-smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities; branch to the non-number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};

// Allow programmers to use the branch delay slot of branches, jumps, and
// calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,
  PROTECT
};

// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than a lui/ori pair.
  OPTIMIZE_SIZE = 0,
  // Always use 2 instructions (a lui/ori pair), even if the constant could
  // be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1
};
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}


inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}


// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
  DCHECK(index > kCArgSlotCount);
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
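

// Illustrative usage sketch: on MIPS the first four word-sized C arguments
// travel in registers a0..a3, so arguments 5..N go through
// CFunctionArgumentOperand() after PrepareCallCFunction(). Register choices
// and the external reference are hypothetical.
//
//   __ PrepareCallCFunction(6, t0);          // Six word-sized arguments.
//   __ sw(t1, CFunctionArgumentOperand(5));  // Store the 5th argument.
//   __ sw(t2, CFunctionArgumentOperand(6));  // Store the 6th argument.
//   __ CallCFunction(
//       ExternalReference::some_c_helper(isolate()), 6);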
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such functions on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, target_type target) { \
    Name(target, bd); \
  } \
  void Name(target_type target, \
            COND_TYPED_ARGS, \
            BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, \
                   target_type target, \
                   COND_TYPED_ARGS) { \
    Name(target, COND_ARGS, bd); \
  }

#define DECLARE_BRANCH_PROTOTYPES(Name) \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)

  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)

#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  void Jump(Register target, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
  static int CallSize(Register target, COND_ARGS);
  void Call(Register target, COND_ARGS);
  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               COND_ARGS);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            COND_ARGS);
  void Ret(COND_ARGS);
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
                  Register rs = zero_reg,
                  const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }
  void Branch(Label* L,
              Condition cond,
              Register rs,
              Heap::RootListIndex index,
              BranchDelaySlot bdslot = PROTECT);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count,
            Condition cond = cc_always,
            Register reg = no_reg,
            const Operand& op = Operand(no_reg));

  // Trivial case of DropAndRet that utilizes the delay slot and only emits
  // 2 instructions.
  void DropAndRet(int drop);

  void DropAndRet(int drop,
                  Condition cond,
                  Register reg,
                  const Operand& op);
  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);

  void Call(Label* target);

  inline void Move(Register dst, Register src) {
    if (!dst.is(src)) {
      mov(dst, src);
    }
  }

  inline void Move(FPURegister dst, FPURegister src) {
    if (!dst.is(src)) {
      mov_d(dst, src);
    }
  }

  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(Register dst_high, FPURegister src) {
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(FPURegister dst, Register src_high) {
    Mthc1(src_high, dst);
  }

  inline void FmoveLow(Register dst_low, FPURegister src) {
    mfc1(dst_low, src);
  }

  void FmoveLow(FPURegister dst, Register src_low);

  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    Mthc1(src_high, dst);
  }

  void Move(FPURegister dst, float imm);
  void Move(FPURegister dst, double imm);

  // Conditional move.
  void Movz(Register rd, Register rs, Register rt);
  void Movn(Register rd, Register rs, Register rt);
  void Movt(Register rd, Register rs, uint16_t cc = 0);
  void Movf(Register rd, Register rs, uint16_t cc = 0);

  void Clz(Register rd, Register rs);

  // Jump unconditionally to the given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Prefer b(Label) for plain code generation.
  void jmp(Label* L) {
    Branch(L);
  }
  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index);
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond, Register src1, const Operand& src2);

  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);
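
  // Illustrative usage sketch: compare a value against a root constant.
  // Register choices and labels are arbitrary.
  //
  //   Label is_undefined;
  //   __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  //   __ Branch(&is_undefined, eq, a0, Operand(t0));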
  // ---------------------------------------------------------------------------
  // GC Support.

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);
  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     ra_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      RAStatus ra_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
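
  // Illustrative usage sketch: emit the write barrier after storing a tagged
  // pointer into a heap object field. Register choices are arbitrary.
  //
  //   __ sw(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
  //   __ RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
  //                       kRAHasNotBeenSaved, kDontSaveFPRegs);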
  // ---------------------------------------------------------------------------
  // Inline caching support.

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register reg0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register reg0,
                                Register reg1,
                                Register reg2);

  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker,
  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
  // nop(type)). These instructions are generated to mark special locations in
  // the code, like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  static inline int GetCodeMarker(Instr instr) {
    uint32_t opcode = ((instr & kOpcodeMask));
    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);

    // Return <n> if we have a sll zero_reg, zero_reg, n.
    // Otherwise return -1.
    bool sllzz = (opcode == SLL &&
                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
    int type =
        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }
  // ---------------------------------------------------------------------------
  // Allocation support.

  // Allocate an object in new space or old pointer space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. The caller must make sure that no pointers
  // are left to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object, Register scratch);

  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required,
                          TaggingMode tagging_mode = TAG_RESULT,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result,
                                   FPURegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  // ---------------------------------------------------------------------------
  // Instruction macros.

#define DEFINE_INSTRUCTION(instr) \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) { \
    instr(rd, rs, Operand(rt)); \
  } \
  void instr(Register rs, Register rt, int32_t j) { \
    instr(rs, rt, Operand(j)); \
  }

#define DEFINE_INSTRUCTION2(instr) \
  void instr(Register rs, const Operand& rt); \
  void instr(Register rs, Register rt) { \
    instr(rs, Operand(rt)); \
  } \
  void instr(Register rs, int32_t j) { \
    instr(rs, Operand(j)); \
  }

#define DEFINE_INSTRUCTION3(instr) \
  void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
  void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
    instr(rd_hi, rd_lo, rs, Operand(rt)); \
  } \
  void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
    instr(rd_hi, rd_lo, rs, Operand(j)); \
  }

  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Subu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION(Div);
  DEFINE_INSTRUCTION(Divu);
  DEFINE_INSTRUCTION(Mod);
  DEFINE_INSTRUCTION(Modu);
  DEFINE_INSTRUCTION(Mulh);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION(Mulhu);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Divu);

  DEFINE_INSTRUCTION3(Div);
  DEFINE_INSTRUCTION3(Mul);

  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);
  DEFINE_INSTRUCTION2(Neg);

  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);

  // MIPS32 R2 instruction macro.
  DEFINE_INSTRUCTION(Ror);

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3

  void Pref(int32_t hint, const MemOperand& rs);
  // ---------------------------------------------------------------------------
  // Pseudo-instructions.

  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  void Ulw(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);

  // Load int32 in the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
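
  // Illustrative usage sketch, assuming the LiFlags values declared above:
  // CONSTANT_SIZE pins li to the two-instruction lui/ori form so the loaded
  // value can be patched later, while the default may fold to one instruction.
  //
  //   __ li(t0, Operand(0x1234), CONSTANT_SIZE);  // Always lui/ori.
  //   __ li(t1, Operand(42));                     // May emit a single instr.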
  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher numbered registers
  // saved in higher memory addresses.
  void MultiPush(RegList regs);
  void MultiPushReversed(RegList regs);

  void MultiPushFPU(RegList regs);
  void MultiPushReversedFPU(RegList regs);

  void push(Register src) {
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }
  void Push(Register src) { push(src); }

  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Subu(sp, sp, Operand(2 * kPointerSize));
    sw(src1, MemOperand(sp, 1 * kPointerSize));
    sw(src2, MemOperand(sp, 0 * kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Subu(sp, sp, Operand(3 * kPointerSize));
    sw(src1, MemOperand(sp, 2 * kPointerSize));
    sw(src2, MemOperand(sp, 1 * kPointerSize));
    sw(src3, MemOperand(sp, 0 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Subu(sp, sp, Operand(4 * kPointerSize));
    sw(src1, MemOperand(sp, 3 * kPointerSize));
    sw(src2, MemOperand(sp, 2 * kPointerSize));
    sw(src3, MemOperand(sp, 1 * kPointerSize));
    sw(src4, MemOperand(sp, 0 * kPointerSize));
  }

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Subu(sp, sp, Operand(kPointerSize));
    sw(src, MemOperand(sp, 0));
  }
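
  // Illustrative note: after Push(a0, a1, a2) the leftmost register ends up
  // deepest on the stack:
  //
  //   sp + 8 : a0   (pushed first, highest address)
  //   sp + 4 : a1
  //   sp + 0 : a2   (top of stack)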
  // Pops multiple values from the stack and loads them into the
  // registers specified in regs. Pop order is the opposite as in MultiPush.
  void MultiPop(RegList regs);
  void MultiPopReversed(RegList regs);

  void MultiPopFPU(RegList regs);
  void MultiPopReversedFPU(RegList regs);

  void pop(Register dst) {
    lw(dst, MemOperand(sp, 0));
    Addu(sp, sp, Operand(kPointerSize));
  }
  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(!src1.is(src2));
    lw(src2, MemOperand(sp, 0 * kPointerSize));
    lw(src1, MemOperand(sp, 1 * kPointerSize));
    Addu(sp, sp, 2 * kPointerSize);
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    lw(src3, MemOperand(sp, 0 * kPointerSize));
    lw(src2, MemOperand(sp, 1 * kPointerSize));
    lw(src1, MemOperand(sp, 2 * kPointerSize));
    Addu(sp, sp, 3 * kPointerSize);
  }

  void Pop(uint32_t count = 1) {
    Addu(sp, sp, Operand(count * kPointerSize));
  }
  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
  // from C.
  // Does not handle errors.
  void FlushICache(Register address, unsigned instructions);

  // MIPS32 R2 instruction macro.
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);

  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);

  // FP32 mode: Move the general-purpose register into
  // the high part of the double-register pair.
  // FP64 mode: Move the general-purpose register into
  // the higher 32 bits of the 64-bit coprocessor register,
  // while leaving the low bits unchanged.
  void Mthc1(Register rt, FPURegister fs);

  // FP32 mode: Move the high part of the double-register pair into
  // the general-purpose register.
  // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
  // the general-purpose register.
  void Mfhc1(Register rt, FPURegister fs);
  // Wrapper functions for the different cmp/branch types.
  inline void BranchF32(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF64(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
  }

  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
  inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF32(target, nan, cc, cmp1, cmp2, bd);
  }

  // Alias functions for backward compatibility.
  inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
                      FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
                      Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF64(bd, target, nan, cc, cmp1, cmp2);
  }
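
  // Illustrative usage sketch: branch to 'done' when f0 <= f2, diverting to
  // 'nan' if either operand is NaN. Labels and registers are arbitrary.
  //
  //   Label done, nan;
  //   __ BranchF(&done, &nan, le, f0, f2);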
  // Truncates a double using a specific rounding mode, and writes the value
  // to the result register.
  // The except_flag will contain any exceptions caused by the instruction.
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
  // exception is masked.
  void EmitFPUTruncate(FPURoundingMode rounding_mode,
                       Register result,
                       DoubleRegister double_input,
                       Register scratch,
                       DoubleRegister double_scratch,
                       Register except_flag,
                       CheckForInexactConversion check_inexact
                           = kDontCheckForInexactConversion);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if the result is saturated. On return
  // 'result' either holds the answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DoubleRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMA-262 9.5: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch,
                         Label* not_int32);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  FPURegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating-point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32Double(Register object,
                               DoubleRegister double_dst,
                               Register heap_number_map,
                               Register scratch1,
                               Register scratch2,
                               FPURegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating-point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         FPURegister double_scratch0,
                         FPURegister double_scratch1,
                         Label* not_int32);
  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles,
                      int stack_space = 0);

  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles, Register arg_count,
                      bool restore_context, bool do_return = NO_EMIT_RETURN,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
  }
  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  void IsInstanceJSObjectType(Register map,
                              Register scratch,
                              Label* fail);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);
  // -------------------------------------------------------------------------
  // Debugger Support.

  void DebugBreak();
  // -------------------------------------------------------------------------
  // Exception handling.

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();
  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst, Register src, RegList temps, int field_count);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values. Fields starting at |start_offset|
  // up to but not including |end_offset| are overwritten with the value in
  // |filler|. At the end of the loop, |start_offset| takes the value of
  // |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);
  // -------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               bool miss_on_bound_function = false);

  void GetObjectType(Register function,
                     Register map,
                     Register type_reg);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   Register scratch2,
                                   Register scratch3,
                                   Label* fail,
                                   int elements_offset = 0);
  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
  // compares are required, the compare sequence branches to early_success.
  void CompareMapAndBranch(Register obj,
                           Register scratch,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMapAndBranch(Register obj_map,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);


  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Get value of the weak cell.
  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the
  // given miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type) {
    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    DCHECK_EQ(0u, kStringTag);
    return eq;
  }
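
  // Illustrative usage sketch: the returned condition holds when the masked
  // instance type in 'type' is zero, i.e. the object is a string. Register
  // choices are arbitrary.
  //
  //   Label on_string;
  //   Condition is_string = __ IsObjectStringType(a0, t5);
  //   __ Branch(&on_string, is_string, t5, Operand(zero_reg));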
  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Load the value of a number object into a FPU double register. If the
  // object is not a number a jump to the label not_number is performed
  // and the FPU double register is unchanged.
  void ObjectToDoubleFPURegister(
      Register object,
      FPURegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into a FPU double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleFPURegister(Register smi,
                              FPURegister value,
                              Register scratch1);
  // -------------------------------------------------------------------------
  // Overflow handling functions.
  // Usage: first call the appropriate arithmetic function, then call one of the
  // jump functions with the overflow_dst register as the second parameter.

  void AdduAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void AdduAndCheckForOverflow(Register dst, Register left,
                               const Operand& right, Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst,
                               Register left,
                               Register right,
                               Register overflow_dst,
                               Register scratch = at);

  void SubuAndCheckForOverflow(Register dst, Register left,
                               const Operand& right, Register overflow_dst,
                               Register scratch = at);

  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }

  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }
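
  // Illustrative usage sketch: add two values and branch when the signed
  // addition wrapped. The overflow_dst register is negative exactly when
  // overflow occurred, which BranchOnOverflow tests with 'lt' against zero.
  // Register choices are arbitrary.
  //
  //   Label overflow;
  //   __ AdduAndCheckForOverflow(v0, a0, a1, t9);
  //   __ BranchOnOverflow(&overflow, t9);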
  // -------------------------------------------------------------------------
  // Runtime calls.

  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }

  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }

#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                COND_ARGS);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, COND_ARGS);

#undef COND_ARGS

  void CallJSExitStub(CodeStub* stub);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments,
                             BranchDelaySlot bd = PROTECT);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);
  // Before calling a C-function from generated code, align arguments on stack
  // and add space for the four mips argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument-slots using helper: CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C functions.
  // Needs a scratch register to do some arithmetic. This register will be
  // clobbered.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
  // Arguments 5..n are stored to stack using following:
  //  sw(t0, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);

  // There are two ways of passing double arguments on MIPS, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);
  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               BranchDelaySlot bd = PROTECT);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // set up the function in a1.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
  struct Unresolved {
    int pc;
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and at gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
  // -------------------------------------------------------------------------
  // StatsCounter support.

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // -------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities.

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);
  // -------------------------------------------------------------------------
  // Smi utilities.

  void SmiTag(Register reg) {
    Addu(reg, reg, reg);
  }

  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  void SmiTag(Register dst, Register src) {
    Addu(dst, src, src);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
    TrySmiTag(reg, reg, scratch, not_a_smi);
  }
  void TrySmiTag(Register dst,
                 Register src,
                 Register scratch,
                 Label* not_a_smi) {
    SmiTagCheckOverflow(at, src, scratch);
    BranchOnOverflow(not_a_smi, scratch);
    mov(dst, at);
  }

  void SmiUntag(Register reg) {
    sra(reg, reg, kSmiTagSize);
  }

  void SmiUntag(Register dst, Register src) {
    sra(dst, src, kSmiTagSize);
  }
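
  // Illustrative note: on 32-bit MIPS a smi is the int32 value shifted left
  // by kSmiTagSize (1), so tagging is a self-add and untagging an arithmetic
  // shift right:
  //
  //   value:  ...0000 0101  (5)
  //   smi:    ...0000 1010  (5 << 1, low tag bit is 0)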
  // Test if the register contains a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);
  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // -------------------------------------------------------------------------
  // String utilities.

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
                                                    Register second,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);
  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift << kSmiTagSize;
    STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
    STATIC_ASSERT(kSmiTag == 0);
    if (shift < kSmiTagSize) {
      sll(dst, src, kSmiTagSize - shift);
      And(dst, dst, Operand(mask));
    } else if (shift > kSmiTagSize) {
      srl(dst, src, shift - kSmiTagSize);
      And(dst, dst, Operand(mask));
    } else {
      And(dst, src, Operand(mask));
    }
  }

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }
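
  // Illustrative note: for a hypothetical Field with kShift = 3 and
  // kSize = 4, DecodeField extracts bits 3..6 of src into the low bits of
  // dst, while DecodeFieldToSmi leaves the extracted value shifted left by
  // kSmiTagSize so dst already holds a valid smi.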
  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Patch the relocated value (lui/ori pair).
  void PatchRelocatedValue(Register li_location,
                           Register scratch,
                           Register new_value);
  // Get the relocated value (loaded data) from the lui/ori pair.
  void GetRelocatedValue(Register li_location,
                         Register value,
                         Register scratch);

  // Expects object in a0 and returns map with validated enum cache
  // in a0. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value, Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, jump to allocation_memento_present.
  void TestJSArrayForAllocationMemento(
      Register receiver_reg,
      Register scratch_reg,
      Label* no_memento_found,
      Condition cond = al,
      Label* allocation_memento_present = NULL);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found, eq, memento_found);
    bind(&no_memento_found);
  }
  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
                          const Operand& rt,
                          BranchDelaySlot bdslot = PROTECT);
  void Jr(Label* L, BranchDelaySlot bdslot);
  void Jalr(Label* L, BranchDelaySlot bdslot);

  // Common implementation of BranchF functions for the different formats.
  void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
                     Condition cc, FPURegister cmp1, FPURegister cmp2,
                     BranchDelaySlot bd = PROTECT);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Get the code for the given builtin. Returns if able to resolve
  // the function in the 'resolved' flag.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  bool has_double_zero_reg_set_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(byte* address,
              int instructions,
              FlushICache flush_cache = FLUSH);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction leaving the rest of the
  // current instruction unchanged.
  void ChangeBranchCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

} }  // namespace v8::internal

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_