// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/base/cpu.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/serialize.h"

namespace v8 {
namespace internal {


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}


const char* DoubleRegister::AllocationIndexToString(int index) {
  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
  // Only the even-numbered FPU registers are allocatable.
  const char* const names[] = {
    "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
    "f16", "f18", "f20", "f22", "f24", "f26"
  };
  return names[index];
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
    0,  1,  2,  3,  4,  5,  6,  7,    // zero_reg, at, v0, v1, a0-a3
    8,  9,  10, 11, 12, 13, 14, 15,   // a4-a7, t0-t3
    16, 17, 18, 19, 20, 21, 22, 23,   // s0-s7
    24, 25, 26, 27, 28, 29, 30, 31    // t8, t9, k0, k1, gp, sp, fp, ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift)
      | (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift)
      | (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT
// ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
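// Masking with kRtMask makes these patterns register-independent: for
// example, IsPush() below tests (instr & ~kRtMask) == kPushRegPattern, so
// any "sd <reg>, 0(sp)" matches, regardless of which register is stored.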


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
//
// The link chain is terminated by a value of -1 in the instruction,
// which is an otherwise illegal value (a branch to -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the -4
// value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
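// For illustration: a branch whose 16-bit offset field holds -1 (0xFFFF)
// denotes the byte offset -1 << 2 = -4 after conversion, i.e. kEndOfChain.
// No real branch target can be at byte offset -4, so the sentinel is
// unambiguous.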


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}


int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


int64_t Assembler::target_at(int64_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
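  // For example, an imm16 field of 0x8000 (-32768) becomes 0x80000000 after
  // the left shift, and the arithmetic right shift by 14 then yields -131072,
  // i.e. the correctly sign-extended byte offset -32768 * 4.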
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    // TODO(plind) create named constants for shift values.
    int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
    imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
    imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
      int64_t delta = instr_address - imm;
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int64_t delta = instr_address - imm28;
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
}


void Assembler::target_at_put(int64_t pos, int64_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    DCHECK((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    DCHECK(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else {
    uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    DCHECK(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
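// For example, addu(v0, a0, a1) reaches here as GenInstrRegister(SPECIAL,
// a0, a1, v0, 0, ADDU) and assembles SPECIAL | (4 << kRsShift) |
// (5 << kRtShift) | (2 << kRdShift) | ADDU, the standard MIPS R-type
// layout (opcode, rs, rt, rd, sa, funct).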


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}
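// For example, addiu(a0, a1, -2) reaches here as GenInstrImmediate(ADDIU,
// a1, a0, -2) and assembles ADDIU | (5 << kRsShift) | (4 << kRtShift) |
// 0xFFFE, the standard MIPS I-type layout (opcode, rs, rt, imm16).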


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;
  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);

  return imm;
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}
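// For example, a branch emitted at pc_offset() 100 whose label is bound at
// position 112 returns offset 112 - (100 + 4) = 8 bytes (kBranchPCOffset
// accounts for the delay slot); the caller encodes 8 >> 2 = 2 in the
// instruction's 16-bit field.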


int32_t Assembler::branch_offset_compact(Label* L,
                                         bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}


int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21 bits wide.

  return offset;
}


int32_t Assembler::branch_offset21_compact(Label* L,
                                           bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21 bits wide.

  return offset;
}


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset);
}


void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset);
}


void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset);
}


void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
}


void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, rt, rt, offset);
}


void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset);
}


void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset);
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}


void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}


void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
}


void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset);
}


void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
}


void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset);
}


void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
}


void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset);
}


void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset);
}


void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}


void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}


void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}


void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}


void Assembler::j(int64_t target) {
#if DEBUG
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
  if (kArchVariant != kMips64r6) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (rs.is(ra)) {
      positions_recorder()->WriteRecordedPositions();
    }
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}


void Assembler::jal(int64_t target) {
#if DEBUG
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j_or_jr(int64_t target, Register rs) {
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    j(target);
  } else {
    jr(t9);
  }
}


void Assembler::jal_or_jalr(int64_t target, Register rs) {
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    jal(target);
  } else {
    jalr(t9);
  }
}


// -------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  } else {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  }
}


void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}


void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}


void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}


void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}


void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}


void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}


void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}


void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}


void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}


void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}


void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}


void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}


void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}


void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}


void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}


void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}


void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}


void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}


void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}


void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(kArchVariant == kMips64r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(kArchVariant == kMips64r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
}


void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}


void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
}


void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}


void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}


void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}


void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
}


void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}


void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
}


void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
}


void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
}


// ------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  DCHECK(is_int32(src.offset_));
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}
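// For illustration (register choices arbitrary): lw(v0, MemOperand(s0,
// 0x12345678)) expands via this helper to:
//   daddiu(at, zero_reg, 0x1234);
//   dsll(at, at, 16);
//   ori(at, at, 0x5678);
//   daddu(at, at, s0);
//   lw(v0, MemOperand(at, 0));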


void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::lwu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
  }
}


void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


void Assembler::aui(Register rs, Register rt, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}


void Assembler::daui(Register rs, Register rt, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(DAUI, rs, rt, j);
}


void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}


void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}


void Assembler::ldl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}


void Assembler::ldr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}


void Assembler::sdl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}


void Assembler::sdr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}


void Assembler::ld(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LD, at, rd, 0);  // Equiv to ld(rd, MemOperand(at, 0));
  }
}


void Assembler::sd(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sd(rd, MemOperand(at, 0));
  }
}


// -------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
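// Note: the code is placed in the 20-bit field at bits 6..25 of the break
// instruction (hence the (code & ~0xfffff) check above), which is how the
// simulator distinguishes watchpoints, stops, and plain breaks.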


void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(3);
  // The Simulator will handle the stop instruction and get the message
  // address. On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<uint64_t>(msg));
#endif
}


void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}


// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


void Assembler::sel(SecondaryField fmt, FPURegister fd,
    FPURegister ft, FPURegister fs, uint8_t sel) {
  DCHECK(kArchVariant == kMips64r6);

  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SEL;
  emit(instr);
}


// GPR.
void Assembler::seleqz(Register rs, Register rt, Register rd) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}


// FPR.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd,
    FPURegister ft, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);

  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C;
  emit(instr);
}


// GPR.
void Assembler::selnez(Register rs, Register rt, Register rd) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}


// FPR.
void Assembler::selnez(SecondaryField fmt, FPURegister fd,
    FPURegister ft, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);

  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C;
  emit(instr);
}


void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}


void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
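// For example, ins_(a0, a1, 4, 8) copies bits 0..7 of a1 into bits 4..11 of
// a0: msb = pos + size - 1 = 11 goes in the rd field, lsb = pos = 4 in sa.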


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}


// --------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
}


void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}


void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}


void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}


void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}


void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}
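

// Worked example (illustrative note, not from the original source): for
// d == 1.0 the IEEE-754 bit pattern is 0x3FF0000000000000, so this helper
// produces *lo == 0x00000000 and *hi == 0x3FF00000.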


// Arithmetic.

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}


void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}


void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}


void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}


// Conversions.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}


void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}


void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}


void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}


void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


// Conditions for >= MIPSr6.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}


void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}


// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


void Assembler::fcmp(FPURegister src1, const double src2,
    FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
  Instr instr = instr_at(pc);
  DCHECK(IsJ(instr) || IsLui(instr));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pc + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else {
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }

    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    DCHECK(is_uint26(imm26));

    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
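

// Worked example (illustrative note, not from the original source): if the
// three 16-bit immediates in the lui/ori/ori sequence are 0x0000, 0x1234 and
// 0x5678, the reassembled address is
//   ((0x0000LL << 48) | (0x1234LL << 32) | (0x5678LL << 16)) >> 16
//     == 0x0000000012345678,
// pc_delta is then added, and the three immediate fields are rewritten from
// bits [47:32], [31:16] and [15:0] of the result.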


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
      (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(p, pc_delta);
    }
  }

  DCHECK(!overflow());
}
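

// Note (illustrative, not from the original source): the growth policy above
// doubles buffers smaller than 1 MB and grows larger ones linearly, e.g.
// 256 KB -> 512 KB, but 4 MB -> 5 MB, bounding both reallocation frequency
// and worst-case memory overhead.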


void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) =
      reinterpret_cast<uint64_t>(stub->instruction_start());
  pc_ += sizeof(uint64_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_,
                                       rmode,
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint64_t imm64;
        imm64 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
          // TODO(plind): Verify this, presume I cannot use macro-assembler
          // li.
          lui(at, (imm64 >> 32) & kImm16Mask);
          ori(at, at, (imm64 >> 16) & kImm16Mask);
          dsll(at, at, 16);
          ori(at, at, imm64 & kImm16Mask);
        }
        jr(at);
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
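

// Worked example (illustrative note, not from the original source): each
// pool slot emitted above is the six-instruction sequence
//   lui  at, addr[47:32]
//   ori  at, at, addr[31:16]
//   dsll at, at, 16
//   ori  at, at, addr[15:0]
//   jr   at
//   nop
// so a branch whose 16-bit offset would overflow can be retargeted at a
// nearby slot that jumps the rest of the way through 'at'.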


Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 3 * kInstrSize);

  // Interpret 4 instructions for address generated by li: see listing in
  // Assembler::set_target_address_at() just below.
  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
      (GetOpcodeField(instr3) == ORI)) {
    // Assemble the 48-bit value.
    int64_t addr = static_cast<int64_t>(
        (static_cast<uint64_t>(GetImmediate16(instr0)) << 32) |
        (static_cast<uint64_t>(GetImmediate16(instr1)) << 16) |
        static_cast<uint64_t>(GetImmediate16(instr3)));

    // Sign extend to get canonical address.
    addr = (addr << 16) >> 16;
    return reinterpret_cast<Address>(addr);
  }
  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return reinterpret_cast<Address>(0x0);
}
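

// Worked example (illustrative note, not from the original source): with
// immediates lui: 0xFFFF, ori: 0x1234, ori2: 0x5678 the 48-bit value is
// 0xFFFF12345678; (addr << 16) >> 16 then copies bit 47 into bits [63:48],
// yielding the canonical address 0xFFFFFFFF12345678.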


// MIPS and ia32 use opposite encodings for qNaN and sNaN: an ia32 qNaN is a
// MIPS sNaN, and an ia32 sNaN is a MIPS qNaN. If running from a heap snapshot
// generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(base::OS::nan_value());
}
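

// Background sketch (illustrative; assumes the legacy, pre-R6 MIPS NaN
// encoding): on ia32 a quiet double NaN has the top mantissa bit set, e.g.
// 0x7FF8000000000000, whereas legacy MIPS treats that pattern as signaling
// and uses a cleared top mantissa bit (e.g. 0x7FF7FFFFFFFFFFFF) for quiet
// NaNs, which is why the value is rewritten here with a locally produced
// qNaN.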


// On MIPS64, a target address is stored in a 4-instruction sequence:
//   0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
//   1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
//   2: dsll(rd, rd, 16);
//   3: ori(rd, rd, j.imm64_ & kImm16Mask);
//
// Patching the address must replace all the lui & ori instructions,
// and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  // Only 4 instructions are used to load an address on MIPS64 because only
  // 48 bits of the address are effectively used. This relies on the fact
  // that bits [63:48] take no part in virtual-address translation; they only
  // have to match bit 47 to form a canonical address.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint64_t itarget = reinterpret_cast<uint64_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
         GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to ensure patchable code:
  //   lui rt, upper-16;
  //   ori rt, rt, middle-16;
  //   dsll rt, rt, 16;
  //   ori rt, rt, lower-16.
  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
      | ((itarget >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
      | (itarget & kImm16Mask);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
  }
}


void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to lui/ori instructions.
  // Jump to label may follow at pc + 2 * kInstrSize.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);
#endif
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 6 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    DCHECK(GetOpcodeField(instr1) == LUI);
    DCHECK(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
    *(p + 6) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    DCHECK(GetOpcodeField(instr1) == LUI);
    DCHECK(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    *(p + 6) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
    // Flush the word that was rewritten, at byte offset 6 * kInstrSize.
    CpuFeatures::FlushICache(pc + 6 * kInstrSize, sizeof(int32_t));
  }
}


Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64