// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#if V8_TARGET_ARCH_MIPS64

#include "src/base/cpu.h"
#include "src/mips64/assembler-mips64-inl.h"

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
#ifdef CAN_USE_FPU_INSTRUCTIONS
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0

const char* DoubleRegister::AllocationIndexToString(int index) {
  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {

void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;

  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
}

void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }

int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  // Registers appear in encoding order, so the table is the identity map.
  const int kNumbers[] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
  };
  return kNumbers[reg.code()];
}

Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}

// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that
  // is always the case inside code objects.
  return true;
}

bool RelocInfo::IsInConstantPool() {
  return false;
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}

MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) {
  offset_ = offset;
}

MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
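
// For illustration only (hypothetical operands): the scaled constructor turns
// an element count into a byte offset, e.g., assuming the default
// offset_addend of zero,
//   MemOperand(sp, 2, kPointerSize)  // offset_ = 2 * 8 = 16 bytes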

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
                              | (kRegister_sp_Code << kRtShift)
                              | (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
                               | (kRegister_sp_Code << kRtShift)
                               | (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
                              | (0 & kImm16Mask);  // NOLINT
// ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
                             | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
                                    | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
                                    | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
                                       | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
                                       | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
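
// For illustration only: with sp being GPR 29 and kPointerSize == 8,
// kPopInstruction assembles to
//   daddiu sp, sp, 8   ==  0x64000000 | (29 << 21) | (29 << 16) | 8
//                      ==  0x67bd0008
// and kPushInstruction to 0x67bdfff8 (immediate -8 & 0xffff == 0xfff8).
// IsPush() and IsPop() below compare against kPushRegPattern and
// kPopRegPattern with the rt field masked out, so they match the sd/ld of
// any register to/from the stack slot at sp + 0.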

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt
      : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}

void Assembler::GetCode(CodeDesc* desc) {
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}

void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than a single
  // instruction, that I am aware of.
  Align(4);
}

Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}

Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}

Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}

uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}

uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}

uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}

uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}

uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}

uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}

uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}

uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}

uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}

uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}

uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}

uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}

uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}

bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}

bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}

bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}

bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}

bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}

bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}

// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
//
// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (a branch with offset -1 branches to
// itself, an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in code
// it is converted to an 18-bit value addressing bytes, hence the -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
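
// For illustration only: a linked-but-unbound branch stores the previous
// link in its 16-bit offset field, and target_at() below recovers it as
//   imm18 = sign_extend16(instr & kImm16Mask) << 2
// so an offset field of 0xffff (-1 words) yields imm18 == -4 == kEndOfChain,
// the end-of-chain sentinel, while any other value is the byte distance to
// the next instruction on the label's chain.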

bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
}

bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label constant in the reg-exp engine.
}

bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}

bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}

bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}

bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}

bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}

bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}

bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}

bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}

bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate.
  return opcode == ORI;
}

bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.
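  //
  // For illustration only: sll zero_reg, zero_reg, 0 encodes as 0x00000000
  // (the canonical MIPS nop), while ssnop is sll zero_reg, zero_reg, 1 and
  // ehb is sll zero_reg, zero_reg, 3. Using rt == at for marker nops keeps
  // every marker type clear of those special sa encodings.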

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}

int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}

bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}

int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}

Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}

bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}

Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}

Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}

int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
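
    // Note (for illustration): on mips64 a 48-bit address is materialized by
    // a four-instruction sequence -- lui; ori; dsll 16; ori -- so the word at
    // pos + 2 * kInstrSize is the dsll, and the ori carrying the low 16 bits
    // sits at pos + 3 * kInstrSize, which is why it is read from there above.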
    // TODO(plind) create named constants for shift values.
    int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
    imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
    imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
      DCHECK(instr_address - imm < INT_MAX);
      int delta = static_cast<int>(instr_address - imm);
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int delta = static_cast<int>(instr_address - imm28);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
}

void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    DCHECK((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    DCHECK(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else {
    uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    DCHECK(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}

void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, internal_reference_positions_.find(l.pos()) !=
               internal_reference_positions_.end());
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}

void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = internal_reference_positions_.find(fixup_pos) !=
                  internal_reference_positions_.end();
    next(L, is_internal);  // Call next before overwriting link with target
                           // at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos, false);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos, false);
    } else {
      DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
      target_at_put(fixup_pos, pos, false);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
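
// A note on the trampoline rescue in bind_to() above: when a forward branch
// turns out to span more than kMaxBranchOffset bytes, its 16-bit offset
// cannot reach the target directly. The branch is therefore retargeted at a
// trampoline slot (a nearby jump reserved earlier by the pool machinery),
// which in turn jumps to the real target, keeping the short branch encoding
// valid.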

void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}

void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}

bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
}

// We have to use a temporary register for things that can be relocated even
// if they can be encoded in MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
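
// For reference (standard MIPS encoding, not specific to this file): this
// family of generators packs the three classic instruction formats:
//   R-type: opcode(31..26) rs(25..21) rt(20..16) rd(15..11) sa(10..6)
//           funct(5..0)
//   I-type: opcode(31..26) rs(25..21) rt(20..16) imm16(15..0)
//   J-type: opcode(31..26) target26(25..0)
// kRsShift == 21, kRtShift == 16, kRdShift == 11, and kSaShift == 6 follow
// directly from these field positions.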

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}

// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}

void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;
  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }
    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}

uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);

  return imm;
}

int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}
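
// Bookkeeping note: every label that is still unbound may eventually need a
// trampoline slot, so linking one bumps unbound_labels_count_ and pulls
// next_buffer_check_ forward by kTrampolineSlotsSize; bind_to() above
// reverses both when the label is resolved before the pool is emitted.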

int32_t Assembler::branch_offset_compact(Label* L,
                                         bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  // Compact branches have no delay slot, so no kBranchPCOffset here.
  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}

int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21-bit wide.

  return offset;
}

int32_t Assembler::branch_offset21_compact(Label* L,
                                           bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21-bit wide.

  return offset;
}

void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}

//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}

void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}

void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset);
}

void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset);
}

void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset);
}

void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
}

void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
}

void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, rt, rt, offset);
}

void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset);
}

void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset);
}

void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}

void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}

void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
}

void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset);
}

void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
}

void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset);
}

void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
}

void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset);
}

void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset);
}

void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}

void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  // Caveat: offset is OR'ed in unmasked and must fit the 21-bit immediate
  // field.
  Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}

void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}

void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  // Caveat: offset is OR'ed in unmasked and must fit the 21-bit immediate
  // field.
  Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}

void Assembler::j(int64_t target) {
#if DEBUG
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  // Note: the parentheses around the XOR matter; without them the shift
  // binds tighter and the range check is wrong.
  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}
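
// For illustration only: the XOR/shift check above verifies that the delay
// slot pc and the target agree in all bits above bit 27 (kImm26Bits +
// kImmFieldShift == 28), i.e. that both lie in the same 256 MB region, which
// is the reach of the J/JAL 26-bit word index.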

void Assembler::jr(Register rs) {
  if (kArchVariant != kMips64r6) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (rs.is(ra)) {
      positions_recorder()->WriteRecordedPositions();
    }
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}

void Assembler::jal(int64_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, target >> 2);
}

void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::j_or_jr(int64_t target, Register rs) {
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;

void Assembler::jal_or_jalr(int64_t target, Register rs) {
  // Get pc of delay slot.
  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;

// -------Data-processing-instructions---------

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}

void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}

void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}

void Assembler::mul(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  } else {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  }
}

void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}

void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}

void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}

void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}

void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}

void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}

void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}

void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}

void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}

void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}

void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}

void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}

void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}

void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}

void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}

void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}

void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}

void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}

void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}

void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}

void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}

void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}

void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}

void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}

void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}

void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}

void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}

void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}

void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}

void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}

void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}

void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}

void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}

void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}

void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}

void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}

void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}

void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}

void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(kArchVariant == kMips64r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}

void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(kArchVariant == kMips64r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}

void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
}

void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}

void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
}

void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}

void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}

void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}

void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
}

void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}

void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
}

void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
}

void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
}

// ------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  DCHECK(is_int32(src.offset_));
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}
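
// For illustration only (hypothetical offset): for src.offset_ == 0x12345678
// the helper above emits
//   daddiu at, zero_reg, 0x1234   // upper 16 bits
//   dsll   at, at, 16
//   ori    at, at, 0x5678         // lower 16 bits
//   daddu  at, at, base
// leaving base + 0x12345678 in at.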

void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}

void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}

void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}

void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}

void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}

void Assembler::lwu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
  }
}

void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}

void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}

void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}

void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}

void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}

void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}

void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}

void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}

void Assembler::aui(Register rs, Register rt, int32_t j) {
  // This instruction uses the same opcode as 'lui'; the difference in
  // encoding is that 'lui' has zero_reg in the rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}

void Assembler::daui(Register rs, Register rt, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(DAUI, rs, rt, j);
}

void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}

void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}

void Assembler::ldl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}

void Assembler::ldr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}

void Assembler::sdl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}

void Assembler::sdr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}

void Assembler::ld(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LD, at, rd, 0);  // Equiv to ld(rd, MemOperand(at, 0));
  }
}

void Assembler::sd(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sd(rd, MemOperand(at, 0));
  }
}

// -------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
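
// For illustration only: the break code occupies bits 6..25 of the
// instruction, so break_(0x54321) assembles to SPECIAL | BREAK |
// (0x54321 << 6). The simulator distinguishes stops from plain breaks
// purely by the code range (kMaxWatchpointCode < code <= kMaxStopCode).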

void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(3);
  // The Simulator will handle the stop instruction and get the message
  // address. On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<uint64_t>(msg));
#endif
}

void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}

void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}

// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}

void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}

void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}

void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}

// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}

void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}

void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}

void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}

void Assembler::sel(SecondaryField fmt, FPURegister fd,
                    FPURegister ft, FPURegister fs, uint8_t sel) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SEL;
  emit(instr);
}

void Assembler::seleqz(Register rs, Register rt, Register rd) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}

void Assembler::seleqz(SecondaryField fmt, FPURegister fd,
                       FPURegister ft, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C;
  emit(instr);
}

void Assembler::selnez(Register rs, Register rt, Register rd) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}

void Assembler::selnez(SecondaryField fmt, FPURegister fd,
                       FPURegister ft, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C;
  emit(instr);
}

void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}

void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}

void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}

void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}
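
// For illustration only (hypothetical operands): ins_ and ext_ reuse the
// R-type rd and sa slots to carry the bit-field bounds, e.g.
//   ins_(a0, a1, 4, 8)   // insert: msb = 4 + 8 - 1 = 11 in the rd slot,
//                        //         lsb = 4 in the sa slot
//   ext_(a0, a1, 4, 8)   // extract: msb = 8 - 1 = 7, lsb = 4
// matching the SPECIAL3 INS/EXT encodings.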

void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}
// --------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
}
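
// For illustration: a typical FPU spill/reload pair would be, e.g.
//   sdc1(f12, MemOperand(sp, 8));  // Store double f12 to sp + 8.
//   ldc1(f12, MemOperand(sp, 8));  // Reload it.
// lwc1/swc1 move 32-bit (single) values, ldc1/sdc1 move 64-bit (double).
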
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}


void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}


void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}


void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}
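
// For illustration: DoubleAsTwoUInt32(1.0, &lo, &hi) yields lo = 0x00000000
// and hi = 0x3ff00000, the two halves of the IEEE-754 bit pattern
// 0x3ff0000000000000.
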
// Arithmetic.
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}


void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}


void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}


void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
// Conversions.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
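
// For illustration: the four conversion families differ only in rounding
// mode: trunc_* rounds toward zero, round_* to nearest (ties to even),
// floor_* toward -infinity and ceil_* toward +infinity. The *_w_* forms
// produce 32-bit integer results and the *_l_* forms 64-bit results, e.g.
// trunc_w_d maps -1.5 to -1 while floor_w_d maps it to -2.
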
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}


void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}


void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}


void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
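
// For illustration: min/max pick the smaller/larger operand, while
// mina/maxa compare absolute values, e.g. mina over -3.0 and 2.0 yields
// 2.0 (the argument with the smaller magnitude).
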
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
// Conditions for >= MIPSr6.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}


void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
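
// For illustration: on r6 a compare-and-branch pairs cmp with bc1nez, e.g.
//   cmp(EQ, D, f2, f4, f6);  // f2 = all-ones if f4 == f6, else all-zeros.
//   bc1nez(offset, f2);      // Branch when bit 0 of f2 is set.
// This replaces the pre-r6 cc-flag scheme of c() plus bc1f/bc1t below.
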
// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


void Assembler::fcmp(FPURegister src1, const double src2,
    FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);  // Convert the zero word in f14 to double.
  c(cond, D, src1, f14, 0);
}
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  DCHECK(IsJ(instr) || IsLui(instr));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm >> 16 & kImm16Mask));
    instr_at_put(pc + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else {
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }

    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    DCHECK(is_uint26(imm26));

    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
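
// For illustration of the patching above: an encoded internal reference
// stores the absolute target across the three 16-bit immediates of the
// lui/ori/ori sequence, assembled as
//   (lui16 << 48 | ori16 << 32 | ori2_16 << 16) >> 16  (sign extended);
// relocation simply adds pc_delta to that address and writes the three
// halfwords back.
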
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
      (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
        rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
void Assembler::dd(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  if (label->is_bound()) {
    uint64_t data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
    *reinterpret_cast<uint64_t*>(pc_) = data;
    pc_ += sizeof(uint64_t);
  } else {
    uint64_t target_pos = jump_address(label);
    emit(target_pos);
    internal_reference_positions_.insert(label->pos());
  }
}
void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) =
      reinterpret_cast<uint64_t>(stub->instruction_start());
  pc_ += sizeof(uint64_t);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_,
                                       rmode,
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint64_t imm64;
        imm64 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
          // TODO(plind): Verify this, presume I cannot use macro-assembler
          // li.
          lui(at, (imm64 >> 32) & kImm16Mask);
          ori(at, at, (imm64 >> 16) & kImm16Mask);
          dsll(at, at, 16);
          ori(at, at, imm64 & kImm16Mask);
        }
        jr(at);
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 3 * kInstrSize);

  // Interpret 4 instructions for address generated by li: See listing in
  // Assembler::set_target_address_at() just below.
  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
      (GetOpcodeField(instr3) == ORI)) {
    // Assemble the 48 bit value.
    int64_t addr = static_cast<int64_t>(
        ((uint64_t)(GetImmediate16(instr0)) << 32) |
        ((uint64_t)(GetImmediate16(instr1)) << 16) |
        ((uint64_t)(GetImmediate16(instr3))));

    // Sign extend to get canonical address.
    addr = (addr << 16) >> 16;
    return reinterpret_cast<Address>(addr);
  }
  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}
// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
// On Mips64, a target address is stored in a 4-instruction sequence:
//    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
//    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
//    2: dsll(rd, rd, 16);
//    3: ori(rd, rd, j.imm64_ & kImm16Mask);
//
// Patching the address must replace all the lui & ori instructions,
// and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
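//
// For illustration: for a target of 0x0000123456789abc the sequence becomes
//    lui(rd, 0x1234);      // Bits [47:32].
//    ori(rd, rd, 0x5678);  // Bits [31:16].
//    dsll(rd, rd, 16);
//    ori(rd, rd, 0x9abc);  // Bits [15:0].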
void Assembler::set_target_address_at(Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  // There is an optimization where only 4 instructions are used to load an
  // address on MIPS64, because only the low 48 bits of the address are
  // effectively used. It relies on the fact that the upper bits [63:48] are
  // not used for virtual address translation; they must be set according to
  // the value of bit 47 in order to get a canonical address.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint64_t itarget = reinterpret_cast<uint64_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
         GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to ensure patchable code.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  // dsll rt, rt, 16.
  // ori rt, rt, lower-16.
  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
      | ((itarget >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
      | (itarget & kImm16Mask);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
  }
}
void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to lui/ori instructions.
  // Jump to label may follow at pc + 6 * kInstrSize.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);
#endif
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 6 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    DCHECK(GetOpcodeField(instr1) == LUI);
    DCHECK(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
    *(p+6) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    DCHECK(GetOpcodeField(instr1) == LUI);
    DCHECK(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    *(p+6) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
    CpuFeatures::FlushICache(pc+6, sizeof(int32_t));
  }
}
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
}
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64