// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Register InputRegister32(int index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputRegister64(int index) { return InputRegister(index); }

  Operand InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand InputOperand64(int index) { return InputOperand(index); }

  Operand InputOperand32(int index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
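
  // Decodes the addressing mode encoded in the opcode: kMode_MRI is a base
  // register plus immediate offset, kMode_MRR a base register plus
  // sign-extended register offset.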
  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          SXTW);
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand() {
    int index = 0;
    return MemoryOperand(&index);
  }
  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }
  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
    }
    UNREACHABLE();
    return Operand(-1);
  }
  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};

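// Shift instructions take the shift amount either in a register or as an
// immediate matched during instruction selection; `width` picks the W
// (32-bit) or X (64-bit) variants of the operand accessors.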
#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
  do {                                                                         \
    if (instr->InputAt(1)->IsRegister()) {                                     \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
                   i.InputRegister##width(1));                                 \
    } else {                                                                   \
      int64_t imm = i.InputOperand##width(1).immediate().value();              \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    }                                                                          \
  } while (0)

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Call(target);
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ B(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      }
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
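      // ARM64 has no integer remainder instruction, so compute
      // lhs - (lhs / rhs) * rhs with a divide followed by a multiply-subtract.
      // The same pattern implements all four modulus opcodes below.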
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    // TODO(dcarney): use mvn instr??
    case kArm64Not:
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      break;
    case kArm64Neg:
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Neg32:
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Xor:
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Xor32:
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Sub:
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      }
      break;
    case kArm64Shl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Shl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Shr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Shr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Sar:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Sar32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64Ror:
      ASSEMBLE_SHIFT(Ror, 64);
      break;
    case kArm64Ror32:
      ASSEMBLE_SHIFT(Ror, 32);
      break;
    case kArm64Mov32:
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
    case kArm64Claim: {
      int words = MiscField::decode(instr->opcode());
      __ Claim(words);
      break;
    }
    case kArm64Poke: {
      int slot = MiscField::decode(instr->opcode());
      Operand operand(slot * kPointerSize);
      __ Poke(i.InputRegister(0), operand);
      break;
    }
    case kArm64PokePairZero: {
      // TODO(dcarney): test slot offset and register order.
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
      break;
    }
    case kArm64PokePair: {
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
      break;
    }
    case kArm64Cmp:
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Cmn:
      __ Cmn(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmn32:
      __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float64Cmp:
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
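    // Narrow loads below either zero-extend (Ldrb, Ldrh) or sign-extend
    // (Ldrsb, Ldrsh) into the destination register; 32-bit float slots are
    // widened to double on load and narrowed again on store via Fcvt.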
    case kArm64Ldrb:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsb:
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strb:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Ldrh:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsh:
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strh:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrW:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StrW:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64Ldr:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Str:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrS: {
      UseScratchRegisterScope scope(masm());
      FPRegister scratch = scope.AcquireS();
      __ Ldr(scratch, i.MemoryOperand());
      __ Fcvt(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kArm64StrS: {
      UseScratchRegisterScope scope(masm());
      FPRegister scratch = scope.AcquireS();
      __ Fcvt(scratch, i.InputDoubleRegister(2));
      __ Str(scratch, i.MemoryOperand());
      break;
    }
    case kArm64LdrD:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64StrD:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
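      // Compute the absolute slot address as object + index, store the value,
      // then let RecordWrite tell the GC write barrier about the new pointer.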
      __ Add(index, object, Operand(index, SXTW));
      __ Str(value, MemOperand(index));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      // TODO(dcarney): we shouldn't test write barriers from c calls.
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
      if (csp.is(masm()->StackPointer())) {
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);  // Need to push a pair
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
  }
}

// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
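  // Map the high-level condition to ARM64 condition codes. The kUnordered*
  // variants first test the V flag, which Fcmp sets when either input is NaN.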
  switch (condition) {
    case kUnorderedEqual:
      __ B(vs, flabel);
    // Fall through.
    case kEqual:
      __ B(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kNotEqual:
      __ B(ne, tlabel);
      break;
    case kSignedLessThan:
      __ B(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ B(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ B(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ B(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThan:
      __ B(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ B(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ B(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ B(hi, tlabel);
      break;
    case kOverflow:
      __ B(vs, tlabel);
      break;
    case kNotOverflow:
      __ B(vc, tlabel);
      break;
  }
  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
  __ Bind(&done);
}

// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
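  // Pick a condition code and materialize it with Cset. For unordered float
  // comparisons, a set V flag (NaN operand) short-circuits to a fixed 0 or 1.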
  Condition cc = nv;
  switch (condition) {
    case kUnorderedEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
    case kOverflow:
      cc = vs;
      break;
    case kNotOverflow:
      cc = vc;
      break;
  }
  __ Bind(&check);
  __ Cset(reg, cc);
  __ Bind(&done);
}

void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

// TODO(dcarney): increase stack slots in frame once before first use.
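// csp must stay 16-byte aligned while it is the active stack pointer, so
// round an odd spill-slot count up to an even number of slots.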
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}

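// Frame construction depends on the call descriptor: C entry frames run on
// csp and save callee-saved registers, JS function frames run on jssp with a
// standard frame, and everything else gets a stub frame.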
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
    // TODO(dcarney): correct callee saved registers.
    __ PushCalleeSavedRegisters();
    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ Bind(&ok);
    }
  } else {
    __ SetStackPointer(jssp);
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      // Restore registers.
      // TODO(dcarney): correct callee saved registers.
      __ PopCalleeSavedRegisters();
    }
    __ Mov(csp, fp);
    __ Pop(fp, lr);
    __ Ret();
  } else {
    __ Mov(jssp, fp);
    __ Pop(fp, lr);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      Constant src = g.ToConstant(source);
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (destination->IsDoubleRegister()) {
      FPRegister result = g.ToDoubleRegister(destination);
      __ Fmov(result, g.ToDouble(constant_source));
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Fmov(temp, g.ToDouble(constant_source));
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    UseScratchRegisterScope scope(masm());
    CPURegister temp_0 = scope.AcquireX();
    CPURegister temp_1 = scope.AcquireX();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

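// A move to the zero register is architecturally a no-op; its distinctive
// encoding marks sites where smi code was inlined.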
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }

void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8