// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}
  Register InputRegister32(int index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputRegister64(int index) { return InputRegister(index); }

  Operand InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand InputOperand64(int index) { return InputOperand(index); }

  Operand InputOperand32(int index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          SXTW);
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand() {
    int index = 0;
    return MemoryOperand(&index);
  }
  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }
  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
    }
    UNREACHABLE();
    return Operand(-1);
  }
  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};
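
// Shift instructions take the shift amount either in a register or as an
// immediate encoded in the instruction; this macro dispatches on the kind of
// the second input operand.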
#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
  do {                                                                         \
    if (instr->InputAt(1)->IsRegister()) {                                     \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
                   i.InputRegister##width(1));                                 \
    } else {                                                                   \
      int64_t imm = i.InputOperand##width(1).immediate().value();              \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    }                                                                          \
  } while (0)

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  switch (ArchOpcodeField::decode(opcode)) {
    case kArchJmp:
      __ B(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // Don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchDeoptimize: {
      int deoptimization_id = MiscField::decode(instr->opcode());
      BuildTranslation(instr, deoptimization_id);

      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
          isolate(), deoptimization_id, Deoptimizer::LAZY);
      __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
      break;
    }
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      }
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
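    // ARM64 has no integer remainder instruction: compute the quotient with
    // Sdiv/Udiv, then derive the remainder as lhs - quotient * rhs via Msub.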
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    // TODO(dcarney): use mvn instr??
    case kArm64Not:
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      break;
    case kArm64Neg:
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Neg32:
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Xor:
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Xor32:
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Sub:
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      }
      break;
    case kArm64Shl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Shl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Shr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Shr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Sar:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Sar32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64CallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      } else {
        Register reg = i.InputRegister(0);
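        // Code::kHeaderSize - kHeapObjectTag is the offset from the tagged
        // Code pointer to the untagged start of the code body.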
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Ldr(reg, MemOperand(reg, entry));
        __ Call(reg);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      }
      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
      if (lazy_deopt) {
        RecordLazyDeoptimizationEntry(instr);
      }
      // Meaningless instruction for ICs to overwrite.
      AddNopForSmiCodeInlining();
      break;
    }
    case kArm64CallJSFunction: {
      Register func = i.InputRegister(0);

      // TODO(jarin) The load of the context should be separated from the call.
      __ Ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);

      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      RecordLazyDeoptimizationEntry(instr);
      break;
    }
    case kArm64CallAddress: {
      DirectCEntryStub stub(isolate());
      stub.GenerateCall(masm(), i.InputRegister(0));
      break;
    }
    case kArm64Claim: {
      int words = MiscField::decode(instr->opcode());
      __ Claim(words);
      break;
    }
    case kArm64Poke: {
      int slot = MiscField::decode(instr->opcode());
      Operand operand(slot * kPointerSize);
      __ Poke(i.InputRegister(0), operand);
      break;
    }
    case kArm64PokePairZero: {
      // TODO(dcarney): test slot offset and register order.
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
      break;
    }
    case kArm64PokePair: {
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
      break;
    }
    case kArm64Drop: {
      int words = MiscField::decode(instr->opcode());
      __ Drop(words);
      break;
    }
    case kArm64Cmp:
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float64Cmp:
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
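    // There is no ARM64 instruction for floating-point modulus, so call the
    // C library's fmod through a runtime helper; the inputs are pinned to
    // d0/d1 and the result is returned in d0 per the C calling convention.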
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Int32ToInt64:
      __ Sxtw(i.OutputRegister(), i.InputRegister(0));
      break;
    case kArm64Int64ToInt32:
      if (!i.OutputRegister().is(i.InputRegister(0))) {
        __ Mov(i.OutputRegister(), i.InputRegister(0));
      }
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
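    // Memory access cases: MemoryOperand() consumes the trailing inputs
    // according to the addressing mode encoded in the instruction.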
    case kArm64LoadWord8:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord8:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LoadWord16:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord16:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LoadWord32:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StoreWord32:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64LoadWord64:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord64:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Float64Load:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64Float64Store:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ Add(index, object, Operand(index, SXTW));
      __ Str(value, MemOperand(index));
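      // RecordWrite may clobber lr, so lr must be preserved when running on
      // csp; csp also requires 16-byte alignment, hence the pair push below.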
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      // TODO(dcarney): we shouldn't test write barriers from c calls.
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
      if (csp.is(masm()->StackPointer())) {
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);  // Need to push a pair
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
  }
}

// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  switch (condition) {
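    // The Unordered* conditions come from floating-point compares; Fcmp sets
    // the V flag when either operand is NaN, so those cases branch on vs
    // first and then fall through to the corresponding ordered condition.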
    case kUnorderedEqual:
      __ B(vs, flabel);
    // Fall through.
    case kEqual:
      __ B(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kNotEqual:
      __ B(ne, tlabel);
      break;
    case kSignedLessThan:
      __ B(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ B(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ B(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ B(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThan:
      __ B(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ B(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ B(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ B(hi, tlabel);
      break;
    case kOverflow:
      __ B(vs, tlabel);
      break;
    case kNotOverflow:
      __ B(vc, tlabel);
      break;
  }
  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
  __ Bind(&done);
}

// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = nv;
  switch (condition) {
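    // Unordered (NaN) outcomes materialize the result directly and jump to
    // done; every other condition just selects cc for the Cset at the end.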
    case kUnorderedEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
    case kOverflow:
      cc = vs;
      break;
    case kNotOverflow:
      cc = vc;
      break;
  }
  __ Bind(&check);
  __ Cset(reg, cc);
  __ Bind(&done);
}

// TODO(dcarney): increase stack slots in frame once before first use.
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}

void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
    // TODO(dcarney): correct callee saved registers.
    __ PushCalleeSavedRegisters();
    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ Bind(&ok);
    }
  } else {
    __ SetStackPointer(jssp);
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
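  // Reserve the spill area. csp must be kept 16-byte aligned, hence the
  // rounding in AlignedStackSlots().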
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      // Restore registers.
      // TODO(dcarney): correct callee saved registers.
      __ PopCalleeSavedRegisters();
    }
    __ Mov(csp, fp);
    __ Pop(fp, lr);
    __ Ret();
  } else {
    __ Mov(jssp, fp);
    __ Pop(fp, lr);
    int pop_count =
        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}

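// AssembleMove and AssembleSwap are invoked by the gap resolver to realize
// the parallel moves recorded between instructions.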
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      Constant src = g.ToConstant(source);
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (destination->IsDoubleRegister()) {
      FPRegister result = g.ToDoubleRegister(destination);
      __ Fmov(result, g.ToDouble(constant_source));
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Fmov(temp, g.ToDouble(constant_source));
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    UseScratchRegisterScope scope(masm());
    CPURegister temp_0 = scope.AcquireX();
    CPURegister temp_1 = scope.AcquireX();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
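    // Swap the two slots through the scratch pair so neither value is lost.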
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }

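// Writing the zero register makes "movz xzr, #0" an architectural no-op; the
// check below recognizes exactly the instruction emitted by
// AddNopForSmiCodeInlining() above.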
// Checks whether the code between start_pc and end_pc is a no-op.
bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                            int end_pc) {
  if (start_pc + 4 != end_pc) {
    return false;
  }
  Address instr_address = code->instruction_start() + start_pc;

  v8::internal::Instruction* instr =
      reinterpret_cast<v8::internal::Instruction*>(instr_address);
  return instr->IsMovz() && instr->Rd() == xzr.code() && instr->SixtyFourBits();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8