1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/code-generator.h"
7 #include "src/arm/macro-assembler-arm.h"
8 #include "src/compiler/code-generator-impl.h"
9 #include "src/compiler/gap-resolver.h"
10 #include "src/compiler/node-matchers.h"
11 #include "src/compiler/node-properties-inl.h"
12 #include "src/scopes.h"
21 #define kScratchReg r9
24 // Adds Arm-specific methods to convert InstructionOperands.
25 class ArmOperandConverter : public InstructionOperandConverter {
27 ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
28 : InstructionOperandConverter(gen, instr) {}
// Maps the instruction's flags mode to an SBit (SetCC/LeaveCC) for the
// assembler. NOTE(review): the switch body is elided in this excerpt.
30 SBit OutputSBit() const {
31 switch (instr_->flags_mode()) {
// Converts the constant input at |index| into an assembler Operand.
42 Operand InputImmediate(int index) {
43 Constant constant = ToConstant(instr_->InputAt(index));
44 switch (constant.type()) {
45 case Constant::kInt32:
46 return Operand(constant.ToInt32());
47 case Constant::kFloat64:
// Float64 constants are materialized as tenured heap numbers.
49 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
// Int64, external references and heap objects are not representable as
// an ARM immediate here; falls through to the unreachable default below.
50 case Constant::kInt64:
51 case Constant::kExternalReference:
52 case Constant::kHeapObject:
56 return Operand::Zero();
// Builds the flexible second operand (ARM "Operand2"): an immediate, a
// plain register, or a register shifted/rotated by an immediate or by
// another register, according to the addressing mode in the opcode.
59 Operand InputOperand2(int first_index) {
60 const int index = first_index;
61 switch (AddressingModeField::decode(instr_->opcode())) {
66 case kMode_Operand2_I:
67 return InputImmediate(index + 0);
68 case kMode_Operand2_R:
69 return Operand(InputRegister(index + 0));
70 case kMode_Operand2_R_ASR_I:
71 return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
72 case kMode_Operand2_R_ASR_R:
73 return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
74 case kMode_Operand2_R_LSL_I:
75 return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
76 case kMode_Operand2_R_LSL_R:
77 return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
78 case kMode_Operand2_R_LSR_I:
79 return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
80 case kMode_Operand2_R_LSR_R:
81 return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
82 case kMode_Operand2_R_ROR_I:
83 return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
84 case kMode_Operand2_R_ROR_R:
85 return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
// Unreachable for well-formed opcodes; Zero keeps the compiler happy.
88 return Operand::Zero();
// Decodes a load/store memory operand starting at *first_index.
// NOTE(review): lines that advance *first_index past the consumed inputs
// appear to be elided in this excerpt — confirm against the full file.
91 MemOperand InputOffset(int* first_index) {
92 const int index = *first_index;
93 switch (AddressingModeField::decode(instr_->opcode())) {
// Operand2 modes are not valid memory addressing modes here.
95 case kMode_Operand2_I:
96 case kMode_Operand2_R:
97 case kMode_Operand2_R_ASR_I:
98 case kMode_Operand2_R_ASR_R:
99 case kMode_Operand2_R_LSL_I:
100 case kMode_Operand2_R_LSL_R:
101 case kMode_Operand2_R_LSR_I:
102 case kMode_Operand2_R_LSR_R:
103 case kMode_Operand2_R_ROR_I:
104 case kMode_Operand2_R_ROR_R:
// [base + immediate offset]
106 case kMode_Offset_RI:
108 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
// [base + index register]
109 case kMode_Offset_RR:
111 return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
// Unreachable; dummy operand for the compiler.
114 return MemOperand(r0);
// Convenience overload; the local index it forwards (presumably starting
// at input 0) is declared on a line elided from this excerpt.
117 MemOperand InputOffset() {
119 return InputOffset(&index);
// Maps a spill-slot operand to a MemOperand relative to sp or fp, as
// computed by the linkage's frame-offset calculation.
122 MemOperand ToMemOperand(InstructionOperand* op) const {
124 DCHECK(!op->IsRegister());
125 DCHECK(!op->IsDoubleRegister());
126 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
127 // The linkage computes where all spill slots are located.
128 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
129 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
134 // Assembles an instruction after register allocation, producing machine code.
// Dispatches on the architecture opcode and emits the matching ARM code.
// Each case DCHECKs the expected SBit: SetCC for flag-setting compares,
// LeaveCC everywhere else. NOTE(review): many case labels, breaks and
// closing braces are elided in this excerpt.
135 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
136 ArmOperandConverter i(this, instr);
138 switch (ArchOpcodeField::decode(instr->opcode())) {
// Call to a code object: either an immediate Handle<Code>, or compute
// the entry address (code object + header size - heap tag) into ip.
139 case kArchCallCodeObject: {
140 EnsureSpaceForLazyDeopt();
141 if (instr->InputAt(0)->IsImmediate()) {
142 __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
143 RelocInfo::CODE_TARGET);
145 __ add(ip, i.InputRegister(0),
146 Operand(Code::kHeaderSize - kHeapObjectTag));
149 AddSafepointAndDeopt(instr);
150 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Call to a JSFunction: load its code entry and call through ip.
153 case kArchCallJSFunction: {
154 EnsureSpaceForLazyDeopt();
155 Register func = i.InputRegister(0);
156 if (FLAG_debug_code) {
157 // Check the function's context matches the context argument.
158 __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
159 __ cmp(cp, kScratchReg);
160 __ Assert(eq, kWrongFunctionContext);
162 __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
164 AddSafepointAndDeopt(instr);
165 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Unconditional jump to the target basic block.
169 __ b(code_->GetLabel(i.InputBlock(0)));
170 DCHECK_EQ(LeaveCC, i.OutputSBit());
173 // don't emit code for nops.
174 DCHECK_EQ(LeaveCC, i.OutputSBit());
178 DCHECK_EQ(LeaveCC, i.OutputSBit());
180 case kArchTruncateDoubleToI:
181 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
182 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Data-processing ops with a flexible Operand2; OutputSBit() decides
// whether flags are set.
185 __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
189 __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
193 __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
197 __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
// Multiply-accumulate: out = in0 * in1 + in2.
201 __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
202 i.InputRegister(2), i.OutputSBit());
// Multiply-subtract requires the MLS CPU feature (ARMv6T2+).
205 CpuFeatureScope scope(masm(), MLS);
206 __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
208 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Hardware signed division requires SUDIV.
212 CpuFeatureScope scope(masm(), SUDIV);
213 __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
214 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Hardware unsigned division requires SUDIV.
218 CpuFeatureScope scope(masm(), SUDIV);
219 __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
220 DCHECK_EQ(LeaveCC, i.OutputSBit());
224 __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
227 __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
230 __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
234 __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
238 __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
242 __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
// Bit-field clear; bfc is ARMv7-only.
246 CpuFeatureScope scope(masm(), ARMv7);
247 __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
248 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Unsigned bit-field extract; ubfx is ARMv7-only.
252 CpuFeatureScope scope(masm(), ARMv7);
253 __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
255 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Compare/test instructions always set flags (SetCC).
259 __ cmp(i.InputRegister(0), i.InputOperand2(1));
260 DCHECK_EQ(SetCC, i.OutputSBit());
263 __ cmn(i.InputRegister(0), i.InputOperand2(1));
264 DCHECK_EQ(SetCC, i.OutputSBit());
267 __ tst(i.InputRegister(0), i.InputOperand2(1));
268 DCHECK_EQ(SetCC, i.OutputSBit());
271 __ teq(i.InputRegister(0), i.InputOperand2(1));
272 DCHECK_EQ(SetCC, i.OutputSBit());
// VFP double comparison; sets the APSR flags.
275 __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
276 i.InputDoubleRegister(1));
277 DCHECK_EQ(SetCC, i.OutputSBit());
// VFP double arithmetic.
280 __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
281 i.InputDoubleRegister(1));
282 DCHECK_EQ(LeaveCC, i.OutputSBit());
285 __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
286 i.InputDoubleRegister(1));
287 DCHECK_EQ(LeaveCC, i.OutputSBit());
290 __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
291 i.InputDoubleRegister(1));
292 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Fused multiply-accumulate into the output register (inputs 1 and 2).
295 __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
296 i.InputDoubleRegister(2));
297 DCHECK_EQ(LeaveCC, i.OutputSBit());
300 __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
301 i.InputDoubleRegister(2));
302 DCHECK_EQ(LeaveCC, i.OutputSBit());
305 __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
306 i.InputDoubleRegister(1));
307 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Double modulus via a C runtime call (no ARM instruction for fmod).
310 // TODO(bmeurer): We should really get rid of this special instruction,
311 // and generate a CallAddress instruction instead.
312 FrameScope scope(masm(), StackFrame::MANUAL);
313 __ PrepareCallCFunction(0, 2, kScratchReg);
314 __ MovToFloatParameters(i.InputDoubleRegister(0),
315 i.InputDoubleRegister(1));
316 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
318 // Move the result into the double result register.
319 __ MovFromFloatResult(i.OutputDoubleRegister());
320 DCHECK_EQ(LeaveCC, i.OutputSBit());
324 __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
327 __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
// Int32 -> double conversion, staged through the low scratch single reg.
329 case kArmVcvtF64S32: {
330 SwVfpRegister scratch = kScratchDoubleReg.low();
331 __ vmov(scratch, i.InputRegister(0));
332 __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
333 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Uint32 -> double conversion.
336 case kArmVcvtF64U32: {
337 SwVfpRegister scratch = kScratchDoubleReg.low();
338 __ vmov(scratch, i.InputRegister(0));
339 __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
340 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Double -> int32 conversion.
343 case kArmVcvtS32F64: {
344 SwVfpRegister scratch = kScratchDoubleReg.low();
345 __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
346 __ vmov(i.OutputRegister(), scratch);
347 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Double -> uint32 conversion.
350 case kArmVcvtU32F64: {
351 SwVfpRegister scratch = kScratchDoubleReg.low();
352 __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
353 __ vmov(i.OutputRegister(), scratch);
354 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Byte/halfword/word loads and stores, signed and unsigned variants.
358 __ ldrb(i.OutputRegister(), i.InputOffset());
359 DCHECK_EQ(LeaveCC, i.OutputSBit());
362 __ ldrsb(i.OutputRegister(), i.InputOffset());
363 DCHECK_EQ(LeaveCC, i.OutputSBit());
367 MemOperand operand = i.InputOffset(&index);
368 __ strb(i.InputRegister(index), operand);
369 DCHECK_EQ(LeaveCC, i.OutputSBit());
373 __ ldrh(i.OutputRegister(), i.InputOffset());
376 __ ldrsh(i.OutputRegister(), i.InputOffset());
380 MemOperand operand = i.InputOffset(&index);
381 __ strh(i.InputRegister(index), operand);
382 DCHECK_EQ(LeaveCC, i.OutputSBit());
386 __ ldr(i.OutputRegister(), i.InputOffset());
390 MemOperand operand = i.InputOffset(&index);
391 __ str(i.InputRegister(index), operand);
392 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Float32 load: load the single, widen to double in the output.
396 SwVfpRegister scratch = kScratchDoubleReg.low();
397 __ vldr(scratch, i.InputOffset());
398 __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
399 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Float32 store: narrow the double to a single, then store it.
404 SwVfpRegister scratch = kScratchDoubleReg.low();
405 MemOperand operand = i.InputOffset(&index);
406 __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
407 __ vstr(scratch, operand);
408 DCHECK_EQ(LeaveCC, i.OutputSBit());
412 __ vldr(i.OutputDoubleRegister(), i.InputOffset());
413 DCHECK_EQ(LeaveCC, i.OutputSBit());
417 MemOperand operand = i.InputOffset(&index);
418 __ vstr(i.InputDoubleRegister(index), operand);
419 DCHECK_EQ(LeaveCC, i.OutputSBit());
423 __ Push(i.InputRegister(0));
424 DCHECK_EQ(LeaveCC, i.OutputSBit());
// Store with write barrier: |index| is clobbered to hold the absolute
// address, then RecordWrite informs the GC of the new reference.
426 case kArmStoreWriteBarrier: {
427 Register object = i.InputRegister(0);
428 Register index = i.InputRegister(1);
429 Register value = i.InputRegister(2);
430 __ add(index, object, index);
431 __ str(value, MemOperand(index));
// Only save FP registers if the frame actually allocated any.
432 SaveFPRegsMode mode =
433 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
434 LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
435 __ RecordWrite(object, index, value, lr_status, mode);
436 DCHECK_EQ(LeaveCC, i.OutputSBit());
443 // Assembles branches after an instruction.
// Emits a conditional branch for |condition| to the true block, and an
// unconditional branch to the false block unless it is next in assembly
// order (fallthrough). NOTE(review): the per-condition branch emission and
// the |done| label declaration are elided in this excerpt.
444 void CodeGenerator::AssembleArchBranch(Instruction* instr,
445 FlagsCondition condition) {
446 ArmOperandConverter i(this, instr);
449 // Emit a branch. The true and false targets are always the last two inputs
450 // to the instruction.
451 BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
452 BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
453 bool fallthru = IsNextInAssemblyOrder(fblock);
454 Label* tlabel = code()->GetLabel(tblock);
// On fallthrough, branch targets that would go to fblock bind to |done|
// (declared on an elided line) instead of an explicit label.
455 Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
457 case kUnorderedEqual:
463 case kUnorderedNotEqual:
469 case kSignedLessThan:
472 case kSignedGreaterThanOrEqual:
475 case kSignedLessThanOrEqual:
478 case kSignedGreaterThan:
481 case kUnorderedLessThan:
484 case kUnsignedLessThan:
487 case kUnorderedGreaterThanOrEqual:
490 case kUnsignedGreaterThanOrEqual:
493 case kUnorderedLessThanOrEqual:
496 case kUnsignedLessThanOrEqual:
499 case kUnorderedGreaterThan:
502 case kUnsignedGreaterThan:
512 if (!fallthru) __ b(flabel); // no fallthru to flabel.
517 // Assembles boolean materializations after an instruction.
// Materializes the boolean result of |condition| as 0 or 1 in the last
// output register. Unordered float conditions pre-load a fixed result for
// the NaN case before falling through to the ordered handling.
// NOTE(review): the condition-code assignments and breaks are elided in
// this excerpt.
518 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
519 FlagsCondition condition) {
520 ArmOperandConverter i(this, instr);
523 // Materialize a full 32-bit 1 or 0 value. The result register is always the
524 // last output of the instruction.
526 DCHECK_NE(0, instr->OutputCount());
527 Register reg = i.OutputRegister(instr->OutputCount() - 1);
528 Condition cc = kNoCondition;
530 case kUnorderedEqual:
532 __ mov(reg, Operand(0));
538 case kUnorderedNotEqual:
540 __ mov(reg, Operand(1));
546 case kSignedLessThan:
549 case kSignedGreaterThanOrEqual:
552 case kSignedLessThanOrEqual:
555 case kSignedGreaterThan:
558 case kUnorderedLessThan:
560 __ mov(reg, Operand(0));
563 case kUnsignedLessThan:
566 case kUnorderedGreaterThanOrEqual:
568 __ mov(reg, Operand(1));
571 case kUnsignedGreaterThanOrEqual:
574 case kUnorderedLessThanOrEqual:
576 __ mov(reg, Operand(0));
579 case kUnsignedLessThanOrEqual:
582 case kUnorderedGreaterThan:
584 __ mov(reg, Operand(1));
587 case kUnsignedGreaterThan:
// Default the result to 0, then conditionally overwrite with 1 under cc.
598 __ mov(reg, Operand(0));
599 __ mov(reg, Operand(1), LeaveCC, cc);
// Emits a call to the lazy deoptimization entry for |deoptimization_id|.
604 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
605 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
606 isolate(), deoptimization_id, Deoptimizer::LAZY);
607 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
// Builds the stack frame according to the incoming call descriptor:
// C-address calls save callee-saved registers, JS calls use the standard
// JS prologue, and all frames reserve space for spill slots at the end.
// NOTE(review): the declaration of |saved_pp| and several braces are
// elided in this excerpt.
611 void CodeGenerator::AssemblePrologue() {
612 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
613 if (descriptor->kind() == CallDescriptor::kCallAddress) {
615 if (FLAG_enable_ool_constant_pool) {
617 // Adjust FP to point to saved FP.
618 __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
625 const RegList saves = descriptor->CalleeSavedRegisters();
626 if (saves != 0 || saved_pp) {
627 // Save callee-saved registers.
// Count the registers in the save mask to size the save area;
// saved_pp (out-of-line constant pool pointer) adds one extra slot.
628 int register_save_area_size = saved_pp ? kPointerSize : 0;
629 for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
630 if (!((1 << i) & saves)) continue;
631 register_save_area_size += kPointerSize;
633 frame()->SetRegisterSaveAreaSize(register_save_area_size);
// Store-multiple, decrement-before with writeback: push all saves.
634 __ stm(db_w, sp, saves);
636 } else if (descriptor->IsJSFunctionCall()) {
637 CompilationInfo* info = linkage()->info();
638 __ Prologue(info->IsCodePreAgingActive());
639 frame()->SetRegisterSaveAreaSize(
640 StandardFrameConstants::kFixedFrameSizeFromFp);
642 // Sloppy mode functions and builtins need to replace the receiver with the
643 // global proxy when called as functions (without an explicit receiver
645 // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
646 if (info->strict_mode() == SLOPPY && !info->is_native()) {
648 // +2 for return address and saved frame pointer.
649 int receiver_slot = info->scope()->num_parameters() + 2;
650 __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
// If the receiver is undefined, substitute the global proxy.
651 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
653 __ ldr(r2, GlobalObjectOperand());
654 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
655 __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
661 frame()->SetRegisterSaveAreaSize(
662 StandardFrameConstants::kFixedFrameSizeFromFp);
// Reserve stack space for this frame's spill slots.
664 int stack_slots = frame()->GetSpillSlotCount();
665 if (stack_slots > 0) {
666 __ sub(sp, sp, Operand(stack_slots * kPointerSize));
// Tears down the frame built by AssemblePrologue and returns: C-address
// frames pop spill slots and restore callee-saved registers; JS frames
// additionally pop the JS parameters. NOTE(review): the trailing Ret()
// and several braces are elided in this excerpt.
671 void CodeGenerator::AssembleReturn() {
672 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
673 if (descriptor->kind() == CallDescriptor::kCallAddress) {
674 if (frame()->GetRegisterSaveAreaSize() > 0) {
675 // Remove this frame's spill slots first.
676 int stack_slots = frame()->GetSpillSlotCount();
677 if (stack_slots > 0) {
678 __ add(sp, sp, Operand(stack_slots * kPointerSize));
680 // Restore registers.
681 const RegList saves = descriptor->CalleeSavedRegisters();
// Load-multiple, increment-after with writeback: mirror of stm(db_w).
683 __ ldm(ia_w, sp, saves);
686 __ LeaveFrame(StackFrame::MANUAL);
689 __ LeaveFrame(StackFrame::MANUAL);
// JS calls also pop the receiver/arguments pushed by the caller.
690 int pop_count = descriptor->IsJSFunctionCall()
691 ? static_cast<int>(descriptor->JSParameterCount())
// Emits a single move for the gap resolver, dispatching on the kinds of
// source and destination operands (register, stack slot, constant, double
// register, double stack slot). Scratch registers bridge memory-to-memory
// moves. NOTE(review): some else-branches and braces are elided in this
// excerpt.
699 void CodeGenerator::AssembleMove(InstructionOperand* source,
700 InstructionOperand* destination) {
701 ArmOperandConverter g(this, NULL);
702 // Dispatch on the source and destination operand kinds. Not all
703 // combinations are possible.
704 if (source->IsRegister()) {
705 DCHECK(destination->IsRegister() || destination->IsStackSlot());
706 Register src = g.ToRegister(source);
707 if (destination->IsRegister()) {
708 __ mov(g.ToRegister(destination), src);
710 __ str(src, g.ToMemOperand(destination));
712 } else if (source->IsStackSlot()) {
713 DCHECK(destination->IsRegister() || destination->IsStackSlot());
714 MemOperand src = g.ToMemOperand(source);
715 if (destination->IsRegister()) {
716 __ ldr(g.ToRegister(destination), src);
// Slot-to-slot: stage through the scratch register.
718 Register temp = kScratchReg;
720 __ str(temp, g.ToMemOperand(destination));
722 } else if (source->IsConstant()) {
723 if (destination->IsRegister() || destination->IsStackSlot()) {
// Materialize into the real destination register, or into scratch if
// the destination is a stack slot (stored below).
725 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
726 Constant src = g.ToConstant(source);
727 switch (src.type()) {
728 case Constant::kInt32:
729 __ mov(dst, Operand(src.ToInt32()));
731 case Constant::kInt64:
// Float64 constants become tenured heap numbers.
734 case Constant::kFloat64:
736 isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
738 case Constant::kExternalReference:
739 __ mov(dst, Operand(src.ToExternalReference()));
741 case Constant::kHeapObject:
742 __ Move(dst, src.ToHeapObject());
745 if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
746 } else if (destination->IsDoubleRegister()) {
747 DwVfpRegister result = g.ToDoubleRegister(destination);
748 __ vmov(result, g.ToDouble(source));
750 DCHECK(destination->IsDoubleStackSlot());
// Double constant to stack slot: stage through the scratch double reg.
751 DwVfpRegister temp = kScratchDoubleReg;
752 __ vmov(temp, g.ToDouble(source));
753 __ vstr(temp, g.ToMemOperand(destination));
755 } else if (source->IsDoubleRegister()) {
756 DwVfpRegister src = g.ToDoubleRegister(source);
757 if (destination->IsDoubleRegister()) {
758 DwVfpRegister dst = g.ToDoubleRegister(destination);
761 DCHECK(destination->IsDoubleStackSlot());
762 __ vstr(src, g.ToMemOperand(destination));
764 } else if (source->IsDoubleStackSlot()) {
765 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
766 MemOperand src = g.ToMemOperand(source);
767 if (destination->IsDoubleRegister()) {
768 __ vldr(g.ToDoubleRegister(destination), src);
// Double slot-to-slot: stage through the scratch double register.
770 DwVfpRegister temp = kScratchDoubleReg;
772 __ vstr(temp, g.ToMemOperand(destination));
// Emits a swap of two operands for the gap resolver, using the scratch
// core and double registers as temporaries. NOTE(review): several swap
// statements and braces are elided in this excerpt.
780 void CodeGenerator::AssembleSwap(InstructionOperand* source,
781 InstructionOperand* destination) {
782 ArmOperandConverter g(this, NULL);
783 // Dispatch on the source and destination operand kinds. Not all
784 // combinations are possible.
785 if (source->IsRegister()) {
786 // Register-register.
787 Register temp = kScratchReg;
788 Register src = g.ToRegister(source);
789 if (destination->IsRegister()) {
790 Register dst = g.ToRegister(destination);
795 DCHECK(destination->IsStackSlot());
796 MemOperand dst = g.ToMemOperand(destination);
801 } else if (source->IsStackSlot()) {
802 DCHECK(destination->IsStackSlot());
// Slot-slot swap: one word goes through the core scratch register, the
// other through the low half of the scratch double register.
803 Register temp_0 = kScratchReg;
804 SwVfpRegister temp_1 = kScratchDoubleReg.low();
805 MemOperand src = g.ToMemOperand(source);
806 MemOperand dst = g.ToMemOperand(destination);
808 __ vldr(temp_1, dst);
810 __ vstr(temp_1, src);
811 } else if (source->IsDoubleRegister()) {
812 DwVfpRegister temp = kScratchDoubleReg;
813 DwVfpRegister src = g.ToDoubleRegister(source);
814 if (destination->IsDoubleRegister()) {
815 DwVfpRegister dst = g.ToDoubleRegister(destination);
820 DCHECK(destination->IsDoubleStackSlot());
821 MemOperand dst = g.ToMemOperand(destination);
826 } else if (source->IsDoubleStackSlot()) {
827 DCHECK(destination->IsDoubleStackSlot());
// Double-slot swap: hold the destination in the scratch double register
// while the source is copied word-by-word through the core scratch.
828 Register temp_0 = kScratchReg;
829 DwVfpRegister temp_1 = kScratchDoubleReg;
830 MemOperand src0 = g.ToMemOperand(source);
831 MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
832 MemOperand dst0 = g.ToMemOperand(destination);
833 MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
834 __ vldr(temp_1, dst0); // Save destination in temp_1.
835 __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
836 __ str(temp_0, dst0);
837 __ ldr(temp_0, src1);
838 __ str(temp_0, dst1);
839 __ vstr(temp_1, src0);
841 // No other combinations are possible.
// Architecture-specific hook for Smi-code patching; intentionally empty
// on this target.
847 void CodeGenerator::AddNopForSmiCodeInlining() {
848 // On 32-bit ARM we do not insert nops for inlined Smi code.
// Pads the instruction stream so the previous lazy-bailout point has at
// least Deoptimizer::patch_size() bytes of patchable code after it.
// NOTE(review): the padding instruction emitted inside the loop (line 864)
// is elided in this excerpt — presumably a nop; confirm in the full file.
852 void CodeGenerator::EnsureSpaceForLazyDeopt() {
853 int space_needed = Deoptimizer::patch_size();
// Stubs are never lazily deoptimized, so no padding is required for them.
854 if (!linkage()->info()->IsStub()) {
855 // Ensure that we have enough space after the previous lazy-bailout
856 // instruction for patching the code here.
857 int current_pc = masm()->pc_offset();
858 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
859 // Block literal pool emission for duration of padding.
860 v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
861 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
// Padding must be a whole number of ARM instructions.
862 DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
863 while (padding_size > 0) {
865 padding_size -= v8::internal::Assembler::kInstrSize;
874 } // namespace compiler
875 } // namespace internal