// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "mips/assembler-mips-inl.h"
#include "serialize.h"

namespace v8 {
namespace internal {

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;


ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= static_cast<uint64_t>(1) << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

#ifdef __mips__
  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= static_cast<uint64_t>(1) << FPU;
#endif  // defined(__mips_hard_float) && __mips_hard_float != 0
#endif  // def __mips__

  return answer;
}


const char* DoubleRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
    "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
    "f16", "f18", "f20", "f22", "f24", "f26"
  };
  return names[index];
}


void CpuFeatures::Probe(bool serializer_enabled) {
  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
                                CpuFeaturesImpliedByCompiler());
  ASSERT(supported_ == 0 ||
         (supported_ & standard_features) == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (serializer_enabled) {
    // No probing for features if we might serialize (generate snapshot).
    return;
  }

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#if !defined(__mips__)
  // For the simulator build, use FPU.
  supported_ |= static_cast<uint64_t>(1) << FPU;
#else
  // Probe for additional features not already known to be available.
  CPU cpu;
  if (cpu.has_fpu()) {
    // This implementation also sets the FPU flags if
    // runtime detection of FPU returns true.
    supported_ |= static_cast<uint64_t>(1) << FPU;
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
  }
#endif
}
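
// Usage sketch (illustrative, not from the original file): generated-code
// emitters consult the bitmask that Probe() fills in, e.g.
//   if (CpuFeatures::IsSupported(FPU)) {
//     // It is safe to emit FPU instructions here.
//   }
// so snapshot builds only rely on features implied by the OS and compiler.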


int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    0,  1,  2,  3,  4,  5,  6,  7,    // zero_reg, at, v0, v1, a0-a3
    8,  9,  10, 11, 12, 13, 14, 15,   // t0-t7
    16, 17, 18, 19, 20, 21, 22, 23,   // s0-s7
    24, 25, 26, 27, 28, 29, 30, 31    // t8, t9, k0, k1, gp, sp, fp, ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);
// lw(r, MemOperand(fp, 0))
const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);
// sw(r, MemOperand(fp, 0))
const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);
// lw(r, MemOperand(fp, -4))
const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);
// sw(r, MemOperand(fp, -4))
const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
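
// Worked example (derived from the definitions above): kPushInstruction is
// addiu(sp, sp, -4). With the ADDIU opcode in the top six bits,
// kRegister_sp_Code (29) in both the rs and rt fields, and -4 masked to
// 0xfffc, the constant assembles to 0x27bdfffc, the familiar
// "addiu sp, sp, -4" stack-push word.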


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than a
  // single instruction, that I am aware of.
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is inf loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in
// the code it is converted to an 18-bit value addressing bytes, hence the
// -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
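
// Example of the sentinel at work: a linked branch whose 16-bit field holds
// -1 (0xffff) decodes via ((0xffff << 16) >> 14) to imm18 = -4 ==
// kEndOfChain. Real links are non-negative, word-aligned byte offsets, so
// they can never collide with the sentinel.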


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
       ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate.
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  ASSERT(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
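
// Example: the canonical nop, sll(zero_reg, zero_reg, 0), is the all-zero
// word 0x00000000. A marker nop such as sll(zero_reg, at, 1) differs only in
// the rt field (at == 1) and the sa field, so IsNop(instr, 1) recognizes the
// marker while IsNop(instr, 0) does not.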


int32_t Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  ASSERT(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  ASSERT(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  ASSERT(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  ASSERT(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;

    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      int32_t delta = instr_address - imm;
      ASSERT(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int32_t delta = instr_address - imm28;
      ASSERT(pos > delta);
      return pos - delta;
    }
  }
}


void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    ASSERT((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    ASSERT(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    ASSERT((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm & kHiMask) >> kLuiShift));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
  } else {
    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
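
// Worked example (illustrative): binding a label at byte offset 140 to a
// branch emitted at offset 100 stores
//   imm18 = 140 - (100 + kBranchPCOffset) = 36, imm16 = 36 >> 2 = 9
// into the branch; the CPU re-scales by 4 relative to the delay-slot pc.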


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  if (L->is_bound()) {
    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
  }
  return false;
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  ASSERT(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
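
// Encoding example (illustrative): addu(v0, a0, a1) flows through the first
// GenInstrRegister overload as
//   SPECIAL | (a0 = 4) << kRsShift | (a1 = 5) << kRtShift
//       | (v0 = 2) << kRdShift | (0 << kSaShift) | ADDU
// which is 0x00851021, the standard MIPS encoding of "addu v0, a0, a1".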


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  ASSERT((imm & 3) == 0);

  return imm;
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);
  ASSERT(is_int16(offset >> 2));

  return offset;
}


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      ASSERT(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
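
// Range note: the 16-bit branch offset counts words from the delay slot, so
// these conditional branches reach roughly +/-128 KB. Labels farther away
// than kMaxBranchOffset are reached indirectly through the trampoline pool
// emitted by CheckTrampolinePool() below.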


void Assembler::j(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (rs.is(ra)) {
    positions_recorder()->WriteRecordedPositions();
  }
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j_or_jr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    j(target);
  } else {
    jr(t9);
  }
}


void Assembler::jal_or_jalr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    jal(target);
  } else {
    jalr(t9);
  }
}


//-------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  ASSERT(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
  ASSERT(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


//------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  ASSERT(!src.rm().is(at));
  lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
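
// Expansion example (illustrative): lw(t0, MemOperand(s0, 0x12345)) has an
// offset that does not fit in 16 bits, so it is emitted as
//   lui   at, 0x0001        // High half of the offset.
//   ori   at, at, 0x2345    // Low half of the offset.
//   addu  at, at, s0        // Add the base register.
//   lw    t0, 0(at)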


void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


//-------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  ASSERT((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  ASSERT((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


void Assembler::stop(const char* msg, uint32_t code) {
  ASSERT(code > kMaxWatchpointCode);
  ASSERT(code <= kMaxStopCode);
#if V8_HOST_ARCH_MIPS
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(2);
  // The Simulator will handle the stop instruction and get the message
  // address. On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));
#endif
}


void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}


// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
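
// Encoding note: MOVCI packs the FPU condition code and the truth flag into
// the rt field, which is why movt/movf build rt.code_ as (cc & 7) << 2 | tf:
// the cc lands in bits 20:18 of the instruction and the tf bit (1 for movt,
// 0 for movf) in bit 16.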


void Assembler::clz(Register rd, Register rs) {
  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
}


void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


void Assembler::pref(int32_t hint, const MemOperand& rs) {
  ASSERT(kArchVariant != kLoongson);
  ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}


//--------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // load to two 32-bit loads.
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
      Register::kMantissaOffset);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
      Register::kExponentOffset);
}


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // store to two 32-bit stores.
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
      Register::kMantissaOffset);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
      Register::kExponentOffset);
}
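
// Layout assumption behind the split (on little-endian targets): the
// mantissa word of a HeapNumber double sits at Register::kMantissaOffset (0)
// and the exponent word at Register::kExponentOffset (4), so the two 32-bit
// halves land in the even/odd single-precision pair fd and fd + 1.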


void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}


void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  OS::MemCopy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}


// Arithmetic.

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}


void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                       FPURegister ft) {
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}


void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}


void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}


// Conversions.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
                  FPURegister fs, FPURegister ft, uint16_t cc) {
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


void Assembler::fcmp(FPURegister src1, const double src2,
                     FPUCondition cond) {
  ASSERT(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
  Instr instr = instr_at(pc);
  ASSERT(IsJ(instr) || IsLui(instr));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    ASSERT((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
    return 2;  // Number of instructions patched.
  } else {
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));

    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
  OS::MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(p, pc_delta);
    }
  }

  ASSERT(!overflow());
}
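
// Growth schedule implied above: 4 KB minimum, then doubling while below
// 1 MB (8 KB, 16 KB, ..., 1 MB), then linear 1 MB steps (2 MB, 3 MB, ...),
// which bounds both the number of copies and the worst-case slack.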


void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) =
      reinterpret_cast<uint32_t>(stub->instruction_start());
  pc_ += sizeof(uint32_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
      if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_,
                                       rmode,
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit a jump (2 instructions), then we emit the trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint32_t imm32;
        imm32 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
          lui(at, (imm32 & kHiMask) >> kLuiShift);
          ori(at, at, (imm32 & kImm16Mask));
        }
        jr(at);
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline once, we need to prevent
      // any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels is zero at this point, so we
    // can move the next buffer check to its maximum distance.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}


Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Interpret 2 instructions generated by li: lui/ori
  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
    // Assemble the 32 bit value.
    return reinterpret_cast<Address>(
        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
  }

  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}
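
// Example (illustrative): for the li pair
//   lui t9, 0x1234
//   ori t9, t9, 0x5678
// the two 16-bit immediates reassemble to the address 0x12345678.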


// MIPS and ia32 use opposite encodings for qNaN and sNaN, such that an ia32
// qNaN is a MIPS sNaN, and an ia32 sNaN is a MIPS qNaN. If running from a heap
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(OS::nan_value());
}


// On MIPS, a target address is stored in a lui/ori instruction pair, each
// of which loads 16 bits of the 32-bit address to a register.
// Patching the address must replace both instructions, and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Address pc, Address target) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  Instr instr1 = instr_at(pc);
  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif

  // Must use 2 instructions to ensure patchable code => just use lui and ori.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
  *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  // The following code is an optimization for the common case of Call()
  // or Jump() which is load to register, and jump through register:
  //   li(t9, address); jalr(t9)  (or jr(t9)).
  // If the destination address is in the same 256 MB page as the call, it
  // is faster to do a direct jal, or j, rather than jump through a register,
  // since that lets the cpu pipeline prefetch the target address. However,
  // each time the address above is patched, we have to patch the direct jal/j
  // instruction, as well as possibly revert to jalr/jr if we now cross a
  // 256 MB page. Note that with the jal/j instructions, we do not need to
  // load the register, but that code is left, since it makes it easy to
  // revert this process. A further optimization could try replacing the
  // li sequence with nops.
  // This optimization can only be applied if the rt-code from instr2 is the
  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
  // the mips return. Occasionally this lands after an li().

  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
  bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
  uint32_t target_field =
      static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
  bool patched_jump = false;

#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
  // This is a workaround to the 24k core E156 bug (affects some 34k cores
  // also). Since the excluded space is only 64 KB out of 256 MB (0.02 %), we
  // just apply this workaround for all cores so we don't have to identify
  // the core.
  if (in_range) {
    // The 24k core E156 bug has some very specific requirements, we only check
    // the most simple one: if the address of the delay slot instruction is in
    // the first or last 32 KB of the 256 MB segment.
    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
    uint32_t ipc_segment_addr = ipc & segment_mask;
    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
      in_range = false;
  }
#endif

  if (IsJalr(instr3)) {
    // Try to convert JALR to JAL.
    if (in_range && GetRt(instr2) == GetRs(instr3)) {
      *(p + 2) = JAL | target_field;
      patched_jump = true;
    }
  } else if (IsJr(instr3)) {
    // Try to convert JR to J, skip returns (jr ra).
    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
      *(p + 2) = J | target_field;
      patched_jump = true;
    }
  } else if (IsJal(instr3)) {
    if (in_range) {
      // We are patching an already converted JAL.
      *(p + 2) = JAL | target_field;
    } else {
      // Patching JAL, but out of range: revert to JALR.
      // JALR rs reg is the rt reg specified in the ORI instruction.
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
      *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
    }
    patched_jump = true;
  } else if (IsJ(instr3)) {
    if (in_range) {
      // We are patching an already converted J (jump).
      *(p + 2) = J | target_field;
    } else {
      // Patching J, but out of range: just go back to JR.
      // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      *(p + 2) = SPECIAL | rs_field | JR;
    }
    patched_jump = true;
  }

  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
}
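
// Example of the in_range test (illustrative): with a delay-slot pc of
// 0x00400120 and a target of 0x0ffffff0,
//   (0x00400120 ^ 0x0ffffff0) >> 28 == 0,
// so both addresses share one 256 MB segment and a direct j/jal is legal.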


void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to lui/ori instructions.
  // Jump to label may follow at pc + 2 * kInstrSize.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);
#endif
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
    *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    *(p + 2) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
    CPU::FlushICache(pc + 2, sizeof(Address));
  }
}


Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  ASSERT(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  ASSERT(!FLAG_enable_ool_constant_pool);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS