1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
39 #if V8_TARGET_ARCH_PPC
41 #include "src/base/bits.h"
42 #include "src/base/cpu.h"
43 #include "src/macro-assembler.h"
44 #include "src/ppc/assembler-ppc-inl.h"
49 // Get the CPU features enabled by the build.
50 static unsigned CpuFeaturesImpliedByCompiler() {
56 void CpuFeatures::ProbeImpl(bool cross_compile) {
57 supported_ |= CpuFeaturesImpliedByCompiler();
58 cache_line_size_ = 128;
60 // Only use statically determined features for cross compile (snapshot).
61 if (cross_compile) return;
63 // Detect whether frim instruction is supported (POWER5+)
64 // For now we will just check for processors we know do not
67 // Probe for additional features at runtime.
69 #if V8_TARGET_ARCH_PPC64
70 if (cpu.part() == base::CPU::PPC_POWER8) {
71 supported_ |= (1u << FPR_GPR_MOV);
74 if (cpu.part() == base::CPU::PPC_POWER6 ||
75 cpu.part() == base::CPU::PPC_POWER7 ||
76 cpu.part() == base::CPU::PPC_POWER8) {
77 supported_ |= (1u << LWSYNC);
79 if (cpu.part() == base::CPU::PPC_POWER7 ||
80 cpu.part() == base::CPU::PPC_POWER8) {
81 supported_ |= (1u << ISELECT);
84 if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
86 supported_ |= (1u << FPU);
89 // Assume support FP support and default cache line size
90 supported_ |= (1u << FPU);
93 supported_ |= (1u << FPU);
94 supported_ |= (1u << LWSYNC);
95 supported_ |= (1u << ISELECT);
96 #if V8_TARGET_ARCH_PPC64
97 supported_ |= (1u << FPR_GPR_MOV);
103 void CpuFeatures::PrintTarget() {
104 const char* ppc_arch = NULL;
106 #if V8_TARGET_ARCH_PPC64
112 printf("target %s\n", ppc_arch);
116 void CpuFeatures::PrintFeatures() {
117 printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
121 Register ToRegister(int num) {
122 DCHECK(num >= 0 && num < kNumRegisters);
123 const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
124 r8, r9, r10, r11, ip, r13, r14, r15,
125 r16, r17, r18, r19, r20, r21, r22, r23,
126 r24, r25, r26, r27, r28, r29, r30, fp};
127 return kRegisters[num];
131 const char* DoubleRegister::AllocationIndexToString(int index) {
132 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
133 const char* const names[] = {
134 "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
135 "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
136 "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
141 // -----------------------------------------------------------------------------
142 // Implementation of RelocInfo
144 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
145 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
148 bool RelocInfo::IsCodedSpecially() {
149 // The deserializer needs to know whether a pointer is specially
150 // coded. Being specially coded on PPC means that it is a lis/ori
151 // instruction sequence, and these are always the case inside code
157 bool RelocInfo::IsInConstantPool() {
162 // -----------------------------------------------------------------------------
163 // Implementation of Operand and MemOperand
164 // See assembler-ppc-inl.h for inlined constructors
166 Operand::Operand(Handle<Object> handle) {
167 AllowDeferredHandleDereference using_raw_address;
169 // Verify all Objects referred by code are NOT in new space.
170 Object* obj = *handle;
171 if (obj->IsHeapObject()) {
172 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
173 imm_ = reinterpret_cast<intptr_t>(handle.location());
174 rmode_ = RelocInfo::EMBEDDED_OBJECT;
176 // no relocation needed
177 imm_ = reinterpret_cast<intptr_t>(obj);
178 rmode_ = kRelocInfo_NONEPTR;
183 MemOperand::MemOperand(Register rn, int32_t offset) {
190 MemOperand::MemOperand(Register ra, Register rb) {
197 // -----------------------------------------------------------------------------
198 // Specific instructions, constants, and masks.
201 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
202 : AssemblerBase(isolate, buffer, buffer_size),
203 recorded_ast_id_(TypeFeedbackId::None()),
204 positions_recorder_(this) {
205 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
207 no_trampoline_pool_before_ = 0;
208 trampoline_pool_blocked_nesting_ = 0;
209 // We leave space (kMaxBlockTrampolineSectionSize)
210 // for BlockTrampolinePoolScope buffer.
212 FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach -
213 kMaxBlockTrampolineSectionSize;
214 internal_trampoline_exception_ = false;
216 trampoline_emitted_ = FLAG_force_long_branches;
217 unbound_labels_count_ = 0;
218 ClearRecordedAstId();
219 relocations_.reserve(128);
223 void Assembler::GetCode(CodeDesc* desc) {
226 // Set up code descriptor.
227 desc->buffer = buffer_;
228 desc->buffer_size = buffer_size_;
229 desc->instr_size = pc_offset();
230 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
235 void Assembler::Align(int m) {
236 #if V8_TARGET_ARCH_PPC64
237 DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m));
239 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
241 while ((pc_offset() & (m - 1)) != 0) {
247 void Assembler::CodeTargetAlign() { Align(8); }
250 Condition Assembler::GetCondition(Instr instr) {
251 switch (instr & kCondMask) {
263 bool Assembler::IsLis(Instr instr) {
264 return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
268 bool Assembler::IsLi(Instr instr) {
269 return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
273 bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
276 bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
279 bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
282 Register Assembler::GetRA(Instr instr) {
284 reg.code_ = Instruction::RAValue(instr);
289 Register Assembler::GetRB(Instr instr) {
291 reg.code_ = Instruction::RBValue(instr);
296 #if V8_TARGET_ARCH_PPC64
297 // This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
298 bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
299 Instr instr4, Instr instr5) {
300 // Check the instructions are indeed a five part load (into r12)
301 // 3d800000 lis r12, 0
302 // 618c0000 ori r12, r12, 0
303 // 798c07c6 rldicr r12, r12, 32, 31
304 // 658c00c3 oris r12, r12, 195
305 // 618ccd40 ori r12, r12, 52544
306 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
307 (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
308 ((instr5 >> 16) == 0x618c));
311 // This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
312 bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
313 // Check the instruction is indeed a two part load (into r12)
314 // 3d802553 lis r12, 9555
315 // 618c5000 ori r12, r12, 20480
316 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
321 bool Assembler::IsCmpRegister(Instr instr) {
322 return (((instr & kOpcodeMask) == EXT2) &&
323 ((instr & kExt2OpcodeMask) == CMP));
327 bool Assembler::IsRlwinm(Instr instr) {
328 return ((instr & kOpcodeMask) == RLWINMX);
332 #if V8_TARGET_ARCH_PPC64
333 bool Assembler::IsRldicl(Instr instr) {
334 return (((instr & kOpcodeMask) == EXT5) &&
335 ((instr & kExt5OpcodeMask) == RLDICL));
340 bool Assembler::IsCmpImmediate(Instr instr) {
341 return ((instr & kOpcodeMask) == CMPI);
345 bool Assembler::IsCrSet(Instr instr) {
346 return (((instr & kOpcodeMask) == EXT1) &&
347 ((instr & kExt1OpcodeMask) == CREQV));
351 Register Assembler::GetCmpImmediateRegister(Instr instr) {
352 DCHECK(IsCmpImmediate(instr));
357 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
358 DCHECK(IsCmpImmediate(instr));
359 return instr & kOff16Mask;
363 // Labels refer to positions in the (to be) generated code.
364 // There are bound, linked, and unused labels.
366 // Bound labels refer to known positions in the already
367 // generated code. pos() is the position the label refers to.
369 // Linked labels refer to unknown positions in the code
370 // to be generated; pos() is the position of the last
371 // instruction using the label.
374 // The link chain is terminated by a negative code position (must be aligned)
375 const int kEndOfChain = -4;
378 // Dummy opcodes for unbound label mov instructions or jump table entries.
380 kUnboundMovLabelOffsetOpcode = 0 << 26,
381 kUnboundAddLabelOffsetOpcode = 1 << 26,
382 kUnboundMovLabelAddrOpcode = 2 << 26,
383 kUnboundJumpTableEntryOpcode = 3 << 26
387 int Assembler::target_at(int pos) {
388 Instr instr = instr_at(pos);
389 // check which type of branch this is 16 or 26 bit offset
390 int opcode = instr & kOpcodeMask;
394 link = SIGN_EXT_IMM26(instr & kImm26Mask);
395 link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
398 link = SIGN_EXT_IMM16((instr & kImm16Mask));
399 link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
401 case kUnboundMovLabelOffsetOpcode:
402 case kUnboundAddLabelOffsetOpcode:
403 case kUnboundMovLabelAddrOpcode:
404 case kUnboundJumpTableEntryOpcode:
405 link = SIGN_EXT_IMM26(instr & kImm26Mask);
413 if (link == 0) return kEndOfChain;
418 void Assembler::target_at_put(int pos, int target_pos) {
419 Instr instr = instr_at(pos);
420 int opcode = instr & kOpcodeMask;
424 int imm26 = target_pos - pos;
425 DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
426 if (imm26 == kInstrSize && !(instr & kLKMask)) {
427 // Branch to next instr without link.
428 instr = ORI; // nop: ori, 0,0,0
430 instr &= ((~kImm26Mask) | kAAMask | kLKMask);
431 instr |= (imm26 & kImm26Mask);
433 instr_at_put(pos, instr);
437 int imm16 = target_pos - pos;
438 DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
439 if (imm16 == kInstrSize && !(instr & kLKMask)) {
440 // Branch to next instr without link.
441 instr = ORI; // nop: ori, 0,0,0
443 instr &= ((~kImm16Mask) | kAAMask | kLKMask);
444 instr |= (imm16 & kImm16Mask);
446 instr_at_put(pos, instr);
449 case kUnboundMovLabelOffsetOpcode: {
450 // Load the position of the label relative to the generated code object
451 // pointer in a register.
452 Register dst = Register::from_code(instr_at(pos + kInstrSize));
453 int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
454 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
455 CodePatcher::DONT_FLUSH);
456 patcher.masm()->bitwise_mov32(dst, offset);
459 case kUnboundAddLabelOffsetOpcode: {
460 // dst = base + position + immediate
461 Instr operands = instr_at(pos + kInstrSize);
462 Register dst = Register::from_code((operands >> 21) & 0x1f);
463 Register base = Register::from_code((operands >> 16) & 0x1f);
464 int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
465 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
466 CodePatcher::DONT_FLUSH);
467 patcher.masm()->bitwise_add32(dst, base, offset);
470 case kUnboundMovLabelAddrOpcode: {
471 // Load the address of the label in a register.
472 Register dst = Register::from_code(instr_at(pos + kInstrSize));
473 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
474 kMovInstructions, CodePatcher::DONT_FLUSH);
475 // Keep internal references relative until EmitRelocations.
476 patcher.masm()->bitwise_mov(dst, target_pos);
479 case kUnboundJumpTableEntryOpcode: {
480 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
481 kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
482 // Keep internal references relative until EmitRelocations.
483 patcher.masm()->emit_ptr(target_pos);
493 int Assembler::max_reach_from(int pos) {
494 Instr instr = instr_at(pos);
495 int opcode = instr & kOpcodeMask;
497 // check which type of branch this is 16 or 26 bit offset
503 case kUnboundMovLabelOffsetOpcode:
504 case kUnboundAddLabelOffsetOpcode:
505 case kUnboundMovLabelAddrOpcode:
506 case kUnboundJumpTableEntryOpcode:
507 return 0; // no limit on reach
515 void Assembler::bind_to(Label* L, int pos) {
516 DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
517 int32_t trampoline_pos = kInvalidSlotPos;
518 if (L->is_linked() && !trampoline_emitted_) {
519 unbound_labels_count_--;
520 next_buffer_check_ += kTrampolineSlotsSize;
523 while (L->is_linked()) {
524 int fixup_pos = L->pos();
525 int32_t offset = pos - fixup_pos;
526 int maxReach = max_reach_from(fixup_pos);
527 next(L); // call next before overwriting link with target at fixup_pos
528 if (maxReach && is_intn(offset, maxReach) == false) {
529 if (trampoline_pos == kInvalidSlotPos) {
530 trampoline_pos = get_trampoline_entry();
531 CHECK(trampoline_pos != kInvalidSlotPos);
532 target_at_put(trampoline_pos, pos);
534 target_at_put(fixup_pos, trampoline_pos);
536 target_at_put(fixup_pos, pos);
541 // Keep track of the last bound label so we don't eliminate any instructions
542 // before a bound label.
543 if (pos > last_bound_pos_) last_bound_pos_ = pos;
547 void Assembler::bind(Label* L) {
548 DCHECK(!L->is_bound()); // label can only be bound once
549 bind_to(L, pc_offset());
553 void Assembler::next(Label* L) {
554 DCHECK(L->is_linked());
555 int link = target_at(L->pos());
556 if (link == kEndOfChain) {
565 bool Assembler::is_near(Label* L, Condition cond) {
566 DCHECK(L->is_bound());
567 if (L->is_bound() == false) return false;
569 int maxReach = ((cond == al) ? 26 : 16);
570 int offset = L->pos() - pc_offset();
572 return is_intn(offset, maxReach);
576 void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
577 DoubleRegister frb, RCBit r) {
578 emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
582 void Assembler::d_form(Instr instr, Register rt, Register ra,
583 const intptr_t val, bool signed_disp) {
585 if (!is_int16(val)) {
586 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
588 DCHECK(is_int16(val));
590 if (!is_uint16(val)) {
591 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
592 ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
593 val, val, is_uint16(val), kImm16Mask);
595 DCHECK(is_uint16(val));
597 emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
601 void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
603 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
607 void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
609 emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
613 void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
614 int maskbit, RCBit r) {
615 int sh0_4 = shift & 0x1f;
616 int sh5 = (shift >> 5) & 0x1;
617 int m0_4 = maskbit & 0x1f;
618 int m5 = (maskbit >> 5) & 0x1;
620 emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
621 m5 * B5 | sh5 * B1 | r);
625 void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
626 int maskbit, RCBit r) {
627 int m0_4 = maskbit & 0x1f;
628 int m5 = (maskbit >> 5) & 0x1;
630 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
635 // Returns the next free trampoline entry.
636 int32_t Assembler::get_trampoline_entry() {
637 int32_t trampoline_entry = kInvalidSlotPos;
639 if (!internal_trampoline_exception_) {
640 trampoline_entry = trampoline_.take_slot();
642 if (kInvalidSlotPos == trampoline_entry) {
643 internal_trampoline_exception_ = true;
646 return trampoline_entry;
650 int Assembler::link(Label* L) {
655 if (L->is_linked()) {
656 position = L->pos(); // L's link
658 // was: target_pos = kEndOfChain;
659 // However, using self to mark the first reference
660 // should avoid most instances of branch offset overflow. See
661 // target_at() for where this is converted back to kEndOfChain.
662 position = pc_offset();
663 if (!trampoline_emitted_) {
664 unbound_labels_count_++;
665 next_buffer_check_ -= kTrampolineSlotsSize;
668 L->link_to(pc_offset());
675 // Branch instructions.
678 void Assembler::bclr(BOfield bo, LKBit lk) {
679 positions_recorder()->WriteRecordedPositions();
680 emit(EXT1 | bo | BCLRX | lk);
684 void Assembler::bcctr(BOfield bo, LKBit lk) {
685 positions_recorder()->WriteRecordedPositions();
686 emit(EXT1 | bo | BCCTRX | lk);
690 // Pseudo op - branch to link register
691 void Assembler::blr() { bclr(BA, LeaveLK); }
694 // Pseudo op - branch to count register -- used for "jump"
695 void Assembler::bctr() { bcctr(BA, LeaveLK); }
698 void Assembler::bctrl() { bcctr(BA, SetLK); }
701 void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
703 positions_recorder()->WriteRecordedPositions();
705 DCHECK(is_int16(branch_offset));
706 emit(BCX | bo | condition_bit * B16 | (kImm16Mask & branch_offset) | lk);
710 void Assembler::b(int branch_offset, LKBit lk) {
712 positions_recorder()->WriteRecordedPositions();
714 DCHECK((branch_offset & 3) == 0);
715 int imm26 = branch_offset;
716 DCHECK(is_int26(imm26));
717 // todo add AA and LK bits
718 emit(BX | (imm26 & kImm26Mask) | lk);
722 void Assembler::xori(Register dst, Register src, const Operand& imm) {
723 d_form(XORI, src, dst, imm.imm_, false);
727 void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
728 d_form(XORIS, rs, ra, imm.imm_, false);
732 void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
733 x_form(EXT2 | XORX, dst, src1, src2, rc);
737 void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
738 x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
742 void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
743 x_form(EXT2 | ANDX, ra, rs, rb, rc);
747 void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
752 emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
757 void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
761 emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
766 void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
771 emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
776 void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
777 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
778 rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
782 void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
783 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
784 rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
788 void Assembler::clrrwi(Register dst, Register src, const Operand& val,
790 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
791 rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
795 void Assembler::clrlwi(Register dst, Register src, const Operand& val,
797 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
798 rlwinm(dst, src, 0, val.imm_, 31, rc);
802 void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
803 emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
807 void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
808 x_form(EXT2 | SRWX, dst, src1, src2, r);
812 void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
813 x_form(EXT2 | SLWX, dst, src1, src2, r);
817 void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
818 x_form(EXT2 | SRAW, ra, rs, rb, r);
822 void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
823 rlwnm(ra, rs, rb, 0, 31, r);
827 void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
828 rlwinm(ra, rs, sh, 0, 31, r);
832 void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
833 rlwinm(ra, rs, 32 - sh, 0, 31, r);
837 void Assembler::subi(Register dst, Register src, const Operand& imm) {
838 addi(dst, src, Operand(-(imm.imm_)));
841 void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
843 xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
847 void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
849 emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
853 void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
855 xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
859 void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
861 xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
865 void Assembler::subfic(Register dst, Register src, const Operand& imm) {
866 d_form(SUBFIC, dst, src, imm.imm_, true);
870 void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
872 xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
877 void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
879 xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
884 void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
885 xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
889 // Multiply hi word unsigned
890 void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
891 xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
896 void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
898 xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
902 // Divide word unsigned
903 void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
905 xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
909 void Assembler::addi(Register dst, Register src, const Operand& imm) {
910 DCHECK(!src.is(r0)); // use li instead to show intent
911 d_form(ADDI, dst, src, imm.imm_, true);
915 void Assembler::addis(Register dst, Register src, const Operand& imm) {
916 DCHECK(!src.is(r0)); // use lis instead to show intent
917 d_form(ADDIS, dst, src, imm.imm_, true);
921 void Assembler::addic(Register dst, Register src, const Operand& imm) {
922 d_form(ADDIC, dst, src, imm.imm_, true);
926 void Assembler::andi(Register ra, Register rs, const Operand& imm) {
927 d_form(ANDIx, rs, ra, imm.imm_, false);
931 void Assembler::andis(Register ra, Register rs, const Operand& imm) {
932 d_form(ANDISx, rs, ra, imm.imm_, false);
936 void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
937 x_form(EXT2 | NORX, dst, src1, src2, r);
941 void Assembler::notx(Register dst, Register src, RCBit r) {
942 x_form(EXT2 | NORX, dst, src, src, r);
946 void Assembler::ori(Register ra, Register rs, const Operand& imm) {
947 d_form(ORI, rs, ra, imm.imm_, false);
951 void Assembler::oris(Register dst, Register src, const Operand& imm) {
952 d_form(ORIS, src, dst, imm.imm_, false);
956 void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
957 x_form(EXT2 | ORX, dst, src1, src2, rc);
961 void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
962 x_form(EXT2 | ORC, dst, src1, src2, rc);
966 void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
967 intptr_t imm16 = src2.imm_;
968 #if V8_TARGET_ARCH_PPC64
973 DCHECK(is_int16(imm16));
974 DCHECK(cr.code() >= 0 && cr.code() <= 7);
976 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
980 void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
981 uintptr_t uimm16 = src2.imm_;
982 #if V8_TARGET_ARCH_PPC64
987 DCHECK(is_uint16(uimm16));
988 DCHECK(cr.code() >= 0 && cr.code() <= 7);
989 uimm16 &= kImm16Mask;
990 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
994 void Assembler::cmp(Register src1, Register src2, CRegister cr) {
995 #if V8_TARGET_ARCH_PPC64
1000 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1001 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
1006 void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
1007 #if V8_TARGET_ARCH_PPC64
1012 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1013 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
1018 void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
1019 intptr_t imm16 = src2.imm_;
1021 DCHECK(is_int16(imm16));
1022 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1023 imm16 &= kImm16Mask;
1024 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
1028 void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
1029 uintptr_t uimm16 = src2.imm_;
1031 DCHECK(is_uint16(uimm16));
1032 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1033 uimm16 &= kImm16Mask;
1034 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
1038 void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
1040 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1041 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
1046 void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
1048 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1049 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
1054 void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
1055 emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1060 // Pseudo op - load immediate
1061 void Assembler::li(Register dst, const Operand& imm) {
1062 d_form(ADDI, dst, r0, imm.imm_, true);
1066 void Assembler::lis(Register dst, const Operand& imm) {
1067 d_form(ADDIS, dst, r0, imm.imm_, true);
1071 // Pseudo op - move register
1072 void Assembler::mr(Register dst, Register src) {
1073 // actually or(dst, src, src)
1078 void Assembler::lbz(Register dst, const MemOperand& src) {
1079 DCHECK(!src.ra_.is(r0));
1080 d_form(LBZ, dst, src.ra(), src.offset(), true);
1084 void Assembler::lbzx(Register rt, const MemOperand& src) {
1085 Register ra = src.ra();
1086 Register rb = src.rb();
1088 emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1093 void Assembler::lbzux(Register rt, const MemOperand& src) {
1094 Register ra = src.ra();
1095 Register rb = src.rb();
1097 emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1102 void Assembler::lhz(Register dst, const MemOperand& src) {
1103 DCHECK(!src.ra_.is(r0));
1104 d_form(LHZ, dst, src.ra(), src.offset(), true);
1108 void Assembler::lhzx(Register rt, const MemOperand& src) {
1109 Register ra = src.ra();
1110 Register rb = src.rb();
1112 emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1117 void Assembler::lhzux(Register rt, const MemOperand& src) {
1118 Register ra = src.ra();
1119 Register rb = src.rb();
1121 emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1126 void Assembler::lhax(Register rt, const MemOperand& src) {
1127 Register ra = src.ra();
1128 Register rb = src.rb();
1130 emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
1134 void Assembler::lwz(Register dst, const MemOperand& src) {
1135 DCHECK(!src.ra_.is(r0));
1136 d_form(LWZ, dst, src.ra(), src.offset(), true);
1140 void Assembler::lwzu(Register dst, const MemOperand& src) {
1141 DCHECK(!src.ra_.is(r0));
1142 d_form(LWZU, dst, src.ra(), src.offset(), true);
1146 void Assembler::lwzx(Register rt, const MemOperand& src) {
1147 Register ra = src.ra();
1148 Register rb = src.rb();
1150 emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1155 void Assembler::lwzux(Register rt, const MemOperand& src) {
1156 Register ra = src.ra();
1157 Register rb = src.rb();
1159 emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1164 void Assembler::lha(Register dst, const MemOperand& src) {
1165 DCHECK(!src.ra_.is(r0));
1166 d_form(LHA, dst, src.ra(), src.offset(), true);
1170 void Assembler::lwa(Register dst, const MemOperand& src) {
1171 #if V8_TARGET_ARCH_PPC64
1172 int offset = src.offset();
1173 DCHECK(!src.ra_.is(r0));
1174 DCHECK(!(offset & 3) && is_int16(offset));
1175 offset = kImm16Mask & offset;
1176 emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
1183 void Assembler::lwax(Register rt, const MemOperand& src) {
1184 #if V8_TARGET_ARCH_PPC64
1185 Register ra = src.ra();
1186 Register rb = src.rb();
1188 emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
1195 void Assembler::stb(Register dst, const MemOperand& src) {
1196 DCHECK(!src.ra_.is(r0));
1197 d_form(STB, dst, src.ra(), src.offset(), true);
1201 void Assembler::stbx(Register rs, const MemOperand& src) {
1202 Register ra = src.ra();
1203 Register rb = src.rb();
1205 emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1210 void Assembler::stbux(Register rs, const MemOperand& src) {
1211 Register ra = src.ra();
1212 Register rb = src.rb();
1214 emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1219 void Assembler::sth(Register dst, const MemOperand& src) {
1220 DCHECK(!src.ra_.is(r0));
1221 d_form(STH, dst, src.ra(), src.offset(), true);
1225 void Assembler::sthx(Register rs, const MemOperand& src) {
1226 Register ra = src.ra();
1227 Register rb = src.rb();
1229 emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1234 void Assembler::sthux(Register rs, const MemOperand& src) {
1235 Register ra = src.ra();
1236 Register rb = src.rb();
1238 emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1243 void Assembler::stw(Register dst, const MemOperand& src) {
1244 DCHECK(!src.ra_.is(r0));
1245 d_form(STW, dst, src.ra(), src.offset(), true);
1249 void Assembler::stwu(Register dst, const MemOperand& src) {
1250 DCHECK(!src.ra_.is(r0));
1251 d_form(STWU, dst, src.ra(), src.offset(), true);
1255 void Assembler::stwx(Register rs, const MemOperand& src) {
1256 Register ra = src.ra();
1257 Register rb = src.rb();
1259 emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1264 void Assembler::stwux(Register rs, const MemOperand& src) {
1265 Register ra = src.ra();
1266 Register rb = src.rb();
1268 emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1273 void Assembler::extsb(Register rs, Register ra, RCBit rc) {
1274 emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
1278 void Assembler::extsh(Register rs, Register ra, RCBit rc) {
1279 emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
1283 void Assembler::extsw(Register rs, Register ra, RCBit rc) {
1284 #if V8_TARGET_ARCH_PPC64
1285 emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
1288 DCHECK(rs.is(ra) && rc == LeaveRC);
1293 void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
1294 emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
1298 void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
1299 x_form(EXT2 | ANDCX, dst, src1, src2, rc);
#if V8_TARGET_ARCH_PPC64
// 64bit specific instructions

// Load doubleword: DS-form, so the offset must be word-aligned (low 2 bits
// of the instruction encode the DS-form opcode extension).
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}


void Assembler::ldx(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Load doubleword with update: low bit 1 selects LDU in the DS-form family.
void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}


void Assembler::ldux(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Store doubleword: DS-form, word-aligned offset required.
void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}


void Assembler::stdx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Store doubleword with update: low bit 1 selects STDU.
void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}


void Assembler::stdux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Rotate-left-doubleword-immediate-then-clear (MD-form) family.
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}


void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}


void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}


void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}


// Shift left doubleword immediate — pseudo-op built on rldicr.
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
}


// Shift right doubleword immediate — pseudo-op built on rldicl.
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
}


// Clear rightmost (low-order) val.imm_ bits.
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, 0, 63 - val.imm_, rc);
}


// Clear leftmost (high-order) val.imm_ bits.
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 0, val.imm_, rc);
}


void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}


// Shift right algebraic doubleword immediate: XS-form splits the 6-bit
// shift amount into sh[0:4] (B11) and sh[5] (B1).
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1f;
  int sh5 = (sh >> 5) & 0x1;
  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}


void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}


void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}


void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}


// Rotate left doubleword by register amount — rldcl with mb = 0.
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}


// Rotate left doubleword by immediate.
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}


// Rotate right doubleword by immediate — rotate left by (64 - sh).
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}


// Count leading zeros doubleword (RB field unused; r0 fills the slot).
void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}


void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}


void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}


void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
#endif
1487 // Function descriptor for AIX.
1488 // Code address skips the function descriptor "header".
1489 // TOC and static chain are ignored and set to 0.
1490 void Assembler::function_descriptor() {
1491 #if ABI_USES_FUNCTION_DESCRIPTORS
1493 DCHECK(pc_offset() == 0);
1494 emit_label_addr(&instructions);
1497 bind(&instructions);
1502 void Assembler::EnsureSpaceFor(int space_needed) {
1503 if (buffer_space() <= (kGap + space_needed)) {
1504 GrowBuffer(space_needed);
1509 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
1510 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
1511 if (assembler != NULL && assembler->predictable_code_size()) return true;
1512 return assembler->serializer_enabled();
1513 } else if (RelocInfo::IsNone(rmode_)) {
1520 // Primarily used for loading constants
1521 // This should really move to be in macro-assembler as it
1522 // is really a pseudo instruction
1523 // Some usages of this intend for a FIXED_SEQUENCE to be used
1524 // Todo - break this dependency so we can optimize mov() in general
1525 // and only use the generic version when we require a fixed sequence
1526 void Assembler::mov(Register dst, const Operand& src) {
1527 intptr_t value = src.immediate();
1528 bool relocatable = src.must_output_reloc_info(this);
1532 !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
1535 if (is_int16(value)) {
1536 li(dst, Operand(value));
1539 #if V8_TARGET_ARCH_PPC64
1540 if (is_int32(value)) {
1542 lis(dst, Operand(value >> 16));
1543 #if V8_TARGET_ARCH_PPC64
1545 if (is_int48(value)) {
1546 li(dst, Operand(value >> 32));
1548 lis(dst, Operand(value >> 48));
1549 u16 = ((value >> 32) & 0xffff);
1551 ori(dst, dst, Operand(u16));
1554 sldi(dst, dst, Operand(32));
1555 u16 = ((value >> 16) & 0xffff);
1557 oris(dst, dst, Operand(u16));
1561 u16 = (value & 0xffff);
1563 ori(dst, dst, Operand(u16));
1569 DCHECK(!canOptimize);
1571 RecordRelocInfo(src.rmode_);
1573 bitwise_mov(dst, value);
1577 void Assembler::bitwise_mov(Register dst, intptr_t value) {
1578 BlockTrampolinePoolScope block_trampoline_pool(this);
1579 #if V8_TARGET_ARCH_PPC64
1580 int32_t hi_32 = static_cast<int32_t>(value >> 32);
1581 int32_t lo_32 = static_cast<int32_t>(value);
1582 int hi_word = static_cast<int>(hi_32 >> 16);
1583 int lo_word = static_cast<int>(hi_32 & 0xffff);
1584 lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1585 ori(dst, dst, Operand(lo_word));
1586 sldi(dst, dst, Operand(32));
1587 hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
1588 lo_word = static_cast<int>(lo_32 & 0xffff);
1589 oris(dst, dst, Operand(hi_word));
1590 ori(dst, dst, Operand(lo_word));
1592 int hi_word = static_cast<int>(value >> 16);
1593 int lo_word = static_cast<int>(value & 0xffff);
1594 lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1595 ori(dst, dst, Operand(lo_word));
1600 void Assembler::bitwise_mov32(Register dst, int32_t value) {
1601 BlockTrampolinePoolScope block_trampoline_pool(this);
1602 int hi_word = static_cast<int>(value >> 16);
1603 int lo_word = static_cast<int>(value & 0xffff);
1604 lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1605 ori(dst, dst, Operand(lo_word));
1609 void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
1610 BlockTrampolinePoolScope block_trampoline_pool(this);
1611 if (is_int16(value)) {
1612 addi(dst, src, Operand(value));
1615 int hi_word = static_cast<int>(value >> 16);
1616 int lo_word = static_cast<int>(value & 0xffff);
1617 if (lo_word & 0x8000) hi_word++;
1618 addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
1619 addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
1624 void Assembler::mov_label_offset(Register dst, Label* label) {
1625 int position = link(label);
1626 if (label->is_bound()) {
1627 // Load the position of the label relative to the generated code object.
1628 mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
1630 // Encode internal reference to unbound label. We use a dummy opcode
1631 // such that it won't collide with any opcode that might appear in the
1632 // label's chain. Encode the destination register in the 2nd instruction.
1633 int link = position - pc_offset();
1634 DCHECK_EQ(0, link & 3);
1636 DCHECK(is_int26(link));
1638 // When the label is bound, these instructions will be patched
1639 // with a 2 instruction mov sequence that will load the
1640 // destination register with the position of the label from the
1641 // beginning of the code.
1643 // target_at extracts the link and target_at_put patches the instructions.
1644 BlockTrampolinePoolScope block_trampoline_pool(this);
1645 emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
1651 void Assembler::add_label_offset(Register dst, Register base, Label* label,
1653 int position = link(label);
1654 if (label->is_bound()) {
1655 // dst = base + position + delta
1657 bitwise_add32(dst, base, position);
1659 // Encode internal reference to unbound label. We use a dummy opcode
1660 // such that it won't collide with any opcode that might appear in the
1661 // label's chain. Encode the operands in the 2nd instruction.
1662 int link = position - pc_offset();
1663 DCHECK_EQ(0, link & 3);
1665 DCHECK(is_int26(link));
1666 DCHECK(is_int16(delta));
1668 BlockTrampolinePoolScope block_trampoline_pool(this);
1669 emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
1670 emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
1675 void Assembler::mov_label_addr(Register dst, Label* label) {
1677 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
1678 int position = link(label);
1679 if (label->is_bound()) {
1680 // Keep internal references relative until EmitRelocations.
1681 bitwise_mov(dst, position);
1683 // Encode internal reference to unbound label. We use a dummy opcode
1684 // such that it won't collide with any opcode that might appear in the
1685 // label's chain. Encode the destination register in the 2nd instruction.
1686 int link = position - pc_offset();
1687 DCHECK_EQ(0, link & 3);
1689 DCHECK(is_int26(link));
1691 // When the label is bound, these instructions will be patched
1692 // with a multi-instruction mov sequence that will load the
1693 // destination register with the address of the label.
1695 // target_at extracts the link and target_at_put patches the instructions.
1696 BlockTrampolinePoolScope block_trampoline_pool(this);
1697 emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
1699 DCHECK(kMovInstructions >= 2);
1700 for (int i = 0; i < kMovInstructions - 2; i++) nop();
1705 void Assembler::emit_label_addr(Label* label) {
1707 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
1708 int position = link(label);
1709 if (label->is_bound()) {
1710 // Keep internal references relative until EmitRelocations.
1713 // Encode internal reference to unbound label. We use a dummy opcode
1714 // such that it won't collide with any opcode that might appear in the
1716 int link = position - pc_offset();
1717 DCHECK_EQ(0, link & 3);
1719 DCHECK(is_int26(link));
1721 // When the label is bound, the instruction(s) will be patched
1722 // as a jump table entry containing the label address. target_at extracts
1723 // the link and target_at_put patches the instruction(s).
1724 BlockTrampolinePoolScope block_trampoline_pool(this);
1725 emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
1726 #if V8_TARGET_ARCH_PPC64
1733 // Special register instructions
1734 void Assembler::crxor(int bt, int ba, int bb) {
1735 emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
1739 void Assembler::creqv(int bt, int ba, int bb) {
1740 emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
1744 void Assembler::mflr(Register dst) {
1745 emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11); // Ignore RC bit
1749 void Assembler::mtlr(Register src) {
1750 emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11); // Ignore RC bit
1754 void Assembler::mtctr(Register src) {
1755 emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11); // Ignore RC bit
1759 void Assembler::mtxer(Register src) {
1760 emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
1764 void Assembler::mcrfs(int bf, int bfa) {
1765 emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
1769 void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
#if V8_TARGET_ARCH_PPC64
// Direct GPR <-> FPR moves (POWER8 mfvsr*/mtvsr* forms).

// Move doubleword from FP register to GPR.
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}


// Move low word from FP register to GPR, zero-extended.
void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}


// Move doubleword from GPR to FP register.
void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}


// Move word from GPR to FP register, zero-extended.
void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}


// Move word from GPR to FP register, sign-extended (algebraic).
void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif
1799 // Exception-generating instructions and debugging support.
1800 // Stops with a non-negative code less than kNumOfWatchedStops support
1801 // enabling/disabling and a counter feature. See simulator-ppc.h .
1802 void Assembler::stop(const char* msg, Condition cond, int32_t code,
1806 b(NegateCondition(cond), &skip, cr);
1815 void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
1818 void Assembler::dcbf(Register ra, Register rb) {
1819 emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
1823 void Assembler::sync() { emit(EXT2 | SYNC); }
1826 void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
1829 void Assembler::icbi(Register ra, Register rb) {
1830 emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
1834 void Assembler::isync() { emit(EXT1 | ISYNC); }
1837 // Floating point support
1839 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
1840 int offset = src.offset();
1841 Register ra = src.ra();
1842 DCHECK(is_int16(offset));
1843 int imm16 = offset & kImm16Mask;
1844 // could be x_form instruction with some casting magic
1845 emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
1849 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
1850 int offset = src.offset();
1851 Register ra = src.ra();
1852 DCHECK(is_int16(offset));
1853 int imm16 = offset & kImm16Mask;
1854 // could be x_form instruction with some casting magic
1855 emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
1859 void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
1860 Register ra = src.ra();
1861 Register rb = src.rb();
1863 emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1868 void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
1869 Register ra = src.ra();
1870 Register rb = src.rb();
1872 emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1877 void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
1878 int offset = src.offset();
1879 Register ra = src.ra();
1880 DCHECK(is_int16(offset));
1882 int imm16 = offset & kImm16Mask;
1883 // could be x_form instruction with some casting magic
1884 emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
1888 void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
1889 int offset = src.offset();
1890 Register ra = src.ra();
1891 DCHECK(is_int16(offset));
1893 int imm16 = offset & kImm16Mask;
1894 // could be x_form instruction with some casting magic
1895 emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
1899 void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
1900 Register ra = src.ra();
1901 Register rb = src.rb();
1903 emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1908 void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
1909 Register ra = src.ra();
1910 Register rb = src.rb();
1912 emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1917 void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
1918 int offset = src.offset();
1919 Register ra = src.ra();
1920 DCHECK(is_int16(offset));
1922 int imm16 = offset & kImm16Mask;
1923 // could be x_form instruction with some casting magic
1924 emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
1928 void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
1929 int offset = src.offset();
1930 Register ra = src.ra();
1931 DCHECK(is_int16(offset));
1933 int imm16 = offset & kImm16Mask;
1934 // could be x_form instruction with some casting magic
1935 emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
1939 void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
1940 Register ra = src.ra();
1941 Register rb = src.rb();
1943 emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1948 void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
1949 Register ra = src.ra();
1950 Register rb = src.rb();
1952 emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1957 void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
1958 int offset = src.offset();
1959 Register ra = src.ra();
1960 DCHECK(is_int16(offset));
1962 int imm16 = offset & kImm16Mask;
1963 // could be x_form instruction with some casting magic
1964 emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
1968 void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
1969 int offset = src.offset();
1970 Register ra = src.ra();
1971 DCHECK(is_int16(offset));
1973 int imm16 = offset & kImm16Mask;
1974 // could be x_form instruction with some casting magic
1975 emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
1979 void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
1980 Register ra = src.ra();
1981 Register rb = src.rb();
1983 emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1988 void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
1989 Register ra = src.ra();
1990 Register rb = src.rb();
1992 emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1997 void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
1998 const DoubleRegister frb, RCBit rc) {
1999 a_form(EXT4 | FSUB, frt, fra, frb, rc);
2003 void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
2004 const DoubleRegister frb, RCBit rc) {
2005 a_form(EXT4 | FADD, frt, fra, frb, rc);
2009 void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
2010 const DoubleRegister frc, RCBit rc) {
2011 emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
2016 void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
2017 const DoubleRegister frb, RCBit rc) {
2018 a_form(EXT4 | FDIV, frt, fra, frb, rc);
2022 void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
2024 DCHECK(cr.code() >= 0 && cr.code() <= 7);
2025 emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
2029 void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
2031 emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
2035 void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
2036 emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
2040 void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
2041 emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
2045 void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
2047 emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
2051 void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
2053 emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
2057 void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
2059 emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
2063 void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
2065 emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
2069 void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
2071 emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
2075 void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
2077 emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
2081 void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
2083 emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
2087 void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
2089 emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
2093 void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
2094 const DoubleRegister frc, const DoubleRegister frb,
2096 emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
2097 frc.code() * B6 | rc);
2101 void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
2103 emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
2107 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
2108 emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
2112 void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
2113 emit(EXT4 | MFFS | frt.code() * B21 | rc);
2117 void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
2119 emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
2123 void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
2125 emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
2129 void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
2131 emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
2135 void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
2136 const DoubleRegister frc, const DoubleRegister frb,
2138 emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
2139 frc.code() * B6 | rc);
2143 void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
2144 const DoubleRegister frc, const DoubleRegister frb,
2146 emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
2147 frc.code() * B6 | rc);
2151 // Pseudo instructions.
2152 void Assembler::nop(int type) {
2155 case NON_MARKING_NOP:
2158 case GROUP_ENDING_NOP:
2161 case DEBUG_BREAK_NOP:
2168 ori(reg, reg, Operand::Zero());
2172 bool Assembler::IsNop(Instr instr, int type) {
2175 case NON_MARKING_NOP:
2178 case GROUP_ENDING_NOP:
2181 case DEBUG_BREAK_NOP:
2187 return instr == (ORI | reg * B21 | reg * B16);
2191 void Assembler::GrowBuffer(int needed) {
2192 if (!own_buffer_) FATAL("external code buffer is too small");
2194 // Compute new buffer size.
2195 CodeDesc desc; // the new buffer
2196 if (buffer_size_ < 4 * KB) {
2197 desc.buffer_size = 4 * KB;
2198 } else if (buffer_size_ < 1 * MB) {
2199 desc.buffer_size = 2 * buffer_size_;
2201 desc.buffer_size = buffer_size_ + 1 * MB;
2203 int space = buffer_space() + (desc.buffer_size - buffer_size_);
2204 if (space < needed) {
2205 desc.buffer_size += needed - space;
2207 CHECK_GT(desc.buffer_size, 0); // no overflow
2209 // Set up new buffer.
2210 desc.buffer = NewArray<byte>(desc.buffer_size);
2212 desc.instr_size = pc_offset();
2213 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2216 intptr_t pc_delta = desc.buffer - buffer_;
2218 (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2219 memmove(desc.buffer, buffer_, desc.instr_size);
2220 memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
2224 DeleteArray(buffer_);
2225 buffer_ = desc.buffer;
2226 buffer_size_ = desc.buffer_size;
2228 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2229 reloc_info_writer.last_pc() + pc_delta);
2231 // Nothing else to do here since we keep all internal references and
2232 // deferred relocation entries relative to the buffer (until
2233 // EmitRelocations).
2237 void Assembler::db(uint8_t data) {
2239 *reinterpret_cast<uint8_t*>(pc_) = data;
2240 pc_ += sizeof(uint8_t);
2244 void Assembler::dd(uint32_t data) {
2246 *reinterpret_cast<uint32_t*>(pc_) = data;
2247 pc_ += sizeof(uint32_t);
2251 void Assembler::emit_ptr(intptr_t data) {
2253 *reinterpret_cast<intptr_t*>(pc_) = data;
2254 pc_ += sizeof(intptr_t);
2258 void Assembler::emit_double(double value) {
2260 *reinterpret_cast<double*>(pc_) = value;
2261 pc_ += sizeof(double);
2265 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2266 DeferredRelocInfo rinfo(pc_offset(), rmode, data);
2267 RecordRelocInfo(rinfo);
2271 void Assembler::RecordRelocInfo(const DeferredRelocInfo& rinfo) {
2272 if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
2273 rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
2274 // Adjust code for new modes.
2275 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) ||
2276 RelocInfo::IsJSReturn(rinfo.rmode()) ||
2277 RelocInfo::IsComment(rinfo.rmode()) ||
2278 RelocInfo::IsPosition(rinfo.rmode()));
2280 if (!RelocInfo::IsNone(rinfo.rmode())) {
2281 // Don't record external references unless the heap will be serialized.
2282 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
2283 if (!serializer_enabled() && !emit_debug_code()) {
2287 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
2288 DeferredRelocInfo reloc_info_with_ast_id(rinfo.position(), rinfo.rmode(),
2289 RecordedAstId().ToInt());
2290 ClearRecordedAstId();
2291 relocations_.push_back(reloc_info_with_ast_id);
2293 relocations_.push_back(rinfo);
2299 void Assembler::EmitRelocations() {
2300 EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
2302 for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
2303 it != relocations_.end(); it++) {
2304 RelocInfo::Mode rmode = it->rmode();
2305 Address pc = buffer_ + it->position();
2307 RelocInfo rinfo(pc, rmode, it->data(), code);
2309 // Fix up internal references now that they are guaranteed to be bound.
2310 if (RelocInfo::IsInternalReference(rmode)) {
2312 intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
2313 Memory::Address_at(pc) = buffer_ + pos;
2314 } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
2316 intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
2317 set_target_address_at(pc, code, buffer_ + pos, SKIP_ICACHE_FLUSH);
2320 reloc_info_writer.Write(&rinfo);
2323 reloc_info_writer.Finish();
2327 void Assembler::BlockTrampolinePoolFor(int instructions) {
2328 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2332 void Assembler::CheckTrampolinePool() {
2333 // Some small sequences of instructions must not be broken up by the
2334 // insertion of a trampoline pool; such sequences are protected by setting
2335 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2336 // which are both checked here. Also, recursive calls to CheckTrampolinePool
2337 // are blocked by trampoline_pool_blocked_nesting_.
2338 if ((trampoline_pool_blocked_nesting_ > 0) ||
2339 (pc_offset() < no_trampoline_pool_before_)) {
2340 // Emission is currently blocked; make sure we try again as soon as
2342 if (trampoline_pool_blocked_nesting_ > 0) {
2343 next_buffer_check_ = pc_offset() + kInstrSize;
2345 next_buffer_check_ = no_trampoline_pool_before_;
2350 DCHECK(!trampoline_emitted_);
2351 DCHECK(unbound_labels_count_ >= 0);
2352 if (unbound_labels_count_ > 0) {
2353 // First we emit jump, then we emit trampoline pool.
2355 BlockTrampolinePoolScope block_trampoline_pool(this);
2359 int pool_start = pc_offset();
2360 for (int i = 0; i < unbound_labels_count_; i++) {
2364 trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2366 trampoline_emitted_ = true;
2367 // As we are only going to emit trampoline once, we need to prevent any
2368 // further emission.
2369 next_buffer_check_ = kMaxInt;
2372 // Number of branches to unbound label at this point is zero, so we can
2373 // move next buffer check to maximum.
2374 next_buffer_check_ =
2375 pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
2381 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
2382 DCHECK(!FLAG_enable_ool_constant_pool);
2383 return isolate->factory()->empty_constant_pool_array();
2387 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2388 DCHECK(!FLAG_enable_ool_constant_pool);
2391 } // namespace v8::internal
2393 #endif // V8_TARGET_ARCH_PPC