1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/code-generator.h"
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties-inl.h"
11 #include "src/ia32/assembler-ia32.h"
12 #include "src/ia32/macro-assembler-ia32.h"
13 #include "src/scopes.h"
22 // Adds IA-32 specific methods for decoding operands.
// NOTE(review): this excerpt carries embedded original line numbers and has
// elided lines (access specifiers, `break`s, closing braces — visible as gaps
// in the numbering). Code tokens below are preserved verbatim; confirm any
// structural questions against the upstream file.
23 class IA32OperandConverter : public InstructionOperandConverter {
25 IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
26 : InstructionOperandConverter(gen, instr) {}
// Decodes instruction input |index| into an assembler Operand.
28 Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
// Decodes instruction input |index| into an Immediate (input must be a
// constant operand).
30 Immediate InputImmediate(int index) {
31 return ToImmediate(instr_->InputAt(index));
// Decodes the instruction's output into an assembler Operand.
34 Operand OutputOperand() { return ToOperand(instr_->Output()); }
// Maps an allocated operand (register or spill slot) to an ia32 Operand.
// |extra| is an extra byte offset applied only to stack-slot operands
// (used by HighOperand below to address the upper word).
36 Operand ToOperand(InstructionOperand* op, int extra = 0) {
37 if (op->IsRegister()) {
39 return Operand(ToRegister(op));
40 } else if (op->IsDoubleRegister()) {
42 return Operand(ToDoubleRegister(op));
44 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
45 // The linkage computes where all spill slots are located.
46 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
47 return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
// Returns an Operand addressing the upper word of a double-width stack slot.
50 Operand HighOperand(InstructionOperand* op) {
51 DCHECK(op->IsDoubleStackSlot());
52 return ToOperand(op, kPointerSize);
// Materializes a Constant as an Immediate. Float constants are boxed as
// tenured heap numbers; the kInt64 arm's body is elided in this excerpt
// (presumably unreachable on ia32 — TODO confirm upstream).
55 Immediate ToImmediate(InstructionOperand* operand) {
56 Constant constant = ToConstant(operand);
57 switch (constant.type()) {
58 case Constant::kInt32:
59 return Immediate(constant.ToInt32());
60 case Constant::kFloat32:
62 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
63 case Constant::kFloat64:
65 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
66 case Constant::kExternalReference:
67 return Immediate(constant.ToExternalReference());
68 case Constant::kHeapObject:
69 return Immediate(constant.ToHeapObject());
70 case Constant::kInt64:
// Returns the current input offset and advances it by one slot (body elided
// in this excerpt).
77 static int NextOffset(int* offset) {
// Maps an addressing mode onto a hardware scale factor. Relies on the
// kMode_* enumerators for a family being declared in times_1..times_8
// order, which the static asserts below pin down.
83 static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
84 STATIC_ASSERT(0 == static_cast<int>(times_1));
85 STATIC_ASSERT(1 == static_cast<int>(times_2));
86 STATIC_ASSERT(2 == static_cast<int>(times_4));
87 STATIC_ASSERT(3 == static_cast<int>(times_8));
88 int scale = static_cast<int>(mode - one);
89 DCHECK(scale >= 0 && scale < 4);
90 return static_cast<ScaleFactor>(scale);
// Decodes the memory operand encoded in the instruction's inputs starting
// at *offset, according to the AddressingMode packed into the opcode.
// The case labels separating the forms are elided in this excerpt.
93 Operand MemoryOperand(int* offset) {
94 AddressingMode mode = AddressingModeField::decode(instr_->opcode());
// [base] / [base + disp] forms.
97 Register base = InputRegister(NextOffset(offset));
99 return Operand(base, disp);
102 Register base = InputRegister(NextOffset(offset));
103 int32_t disp = InputInt32(NextOffset(offset));
104 return Operand(base, disp);
// [base + index*scale] / [base + index*scale + disp] forms.
110 Register base = InputRegister(NextOffset(offset));
111 Register index = InputRegister(NextOffset(offset));
112 ScaleFactor scale = ScaleFor(kMode_MR1, mode);
114 return Operand(base, index, scale, disp);
120 Register base = InputRegister(NextOffset(offset));
121 Register index = InputRegister(NextOffset(offset));
122 ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
123 int32_t disp = InputInt32(NextOffset(offset));
124 return Operand(base, index, scale, disp);
// [index*scale] / [index*scale + disp] forms (no base register).
130 Register index = InputRegister(NextOffset(offset));
131 ScaleFactor scale = ScaleFor(kMode_M1, mode);
133 return Operand(index, scale, disp);
139 Register index = InputRegister(NextOffset(offset));
140 ScaleFactor scale = ScaleFor(kMode_M1I, mode);
141 int32_t disp = InputInt32(NextOffset(offset));
142 return Operand(index, scale, disp);
// Absolute [disp] form.
145 int32_t disp = InputInt32(NextOffset(offset));
146 return Operand(Immediate(disp));
// Unreachable fallbacks for kMode_None / default.
150 return Operand(no_reg, 0);
153 return Operand(no_reg, 0);
// Convenience overload: decode starting from the first input.
156 Operand MemoryOperand() {
158 return MemoryOperand(&first_input);
// Returns true if input |index| of |instr| is an immediate operand.
// (Closing brace elided in this excerpt.)
163 static bool HasImmediateInput(Instruction* instr, int index) {
164 return instr->InputAt(index)->IsImmediate();
168 // Assembles an instruction after register allocation, producing machine code.
// NOTE(review): excerpt with elided lines (most `case` labels, `break`s and
// closing braces are missing — visible as gaps in the embedded numbering).
// Code tokens are preserved verbatim.
169 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
170 IA32OperandConverter i(this, instr);
172 switch (ArchOpcodeField::decode(instr->opcode())) {
// Call a code object: either a constant Handle<Code>, or a register holding
// the code object (call past the header to the entry point).
173 case kArchCallCodeObject: {
174 EnsureSpaceForLazyDeopt();
175 if (HasImmediateInput(instr, 0)) {
176 Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
177 __ call(code, RelocInfo::CODE_TARGET);
179 Register reg = i.InputRegister(0);
180 __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
182 AddSafepointAndDeopt(instr);
// Call a JSFunction held in a register via its code-entry field; in debug
// builds first check that esi matches the function's context.
185 case kArchCallJSFunction: {
186 EnsureSpaceForLazyDeopt();
187 Register func = i.InputRegister(0);
188 if (FLAG_debug_code) {
189 // Check the function's context matches the context argument.
190 __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
191 __ Assert(equal, kWrongFunctionContext);
193 __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
194 AddSafepointAndDeopt(instr);
// Unconditional jump to the block whose RPO number is input 0.
198 __ jmp(code()->GetLabel(i.InputRpo(0)));
201 // don't emit code for nops.
206 case kArchStackPointer:
207 __ mov(i.OutputRegister(), esp);
209 case kArchTruncateDoubleToI:
210 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
// Integer ALU ops below: the second input may be an immediate or a
// register/memory operand.
213 if (HasImmediateInput(instr, 1)) {
214 __ add(i.InputOperand(0), i.InputImmediate(1));
216 __ add(i.InputRegister(0), i.InputOperand(1));
220 if (HasImmediateInput(instr, 1)) {
221 __ and_(i.InputOperand(0), i.InputImmediate(1));
223 __ and_(i.InputRegister(0), i.InputOperand(1));
227 if (HasImmediateInput(instr, 1)) {
228 __ cmp(i.InputOperand(0), i.InputImmediate(1));
230 __ cmp(i.InputRegister(0), i.InputOperand(1));
234 if (HasImmediateInput(instr, 1)) {
235 __ test(i.InputOperand(0), i.InputImmediate(1));
237 __ test(i.InputRegister(0), i.InputOperand(1));
241 if (HasImmediateInput(instr, 1)) {
242 __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
244 __ imul(i.OutputRegister(), i.InputOperand(1));
// One-operand multiply/divide forms implicitly use eax/edx.
248 __ imul(i.InputRegister(1));
251 __ mul(i.InputRegister(1));
255 __ idiv(i.InputOperand(1));
// Unsigned divide: zero edx (high half of the dividend) first.
258 __ Move(edx, Immediate(0));
259 __ div(i.InputOperand(1));
262 __ not_(i.OutputOperand());
265 __ neg(i.OutputOperand());
268 if (HasImmediateInput(instr, 1)) {
269 __ or_(i.InputOperand(0), i.InputImmediate(1));
271 __ or_(i.InputRegister(0), i.InputOperand(1));
275 if (HasImmediateInput(instr, 1)) {
276 __ xor_(i.InputOperand(0), i.InputImmediate(1));
278 __ xor_(i.InputRegister(0), i.InputOperand(1));
282 if (HasImmediateInput(instr, 1)) {
283 __ sub(i.InputOperand(0), i.InputImmediate(1));
285 __ sub(i.InputRegister(0), i.InputOperand(1));
// Shifts/rotate: immediate count (5 bits) or count in cl.
289 if (HasImmediateInput(instr, 1)) {
290 __ shl(i.OutputOperand(), i.InputInt5(1));
292 __ shl_cl(i.OutputOperand());
296 if (HasImmediateInput(instr, 1)) {
297 __ shr(i.OutputOperand(), i.InputInt5(1));
299 __ shr_cl(i.OutputOperand());
303 if (HasImmediateInput(instr, 1)) {
304 __ sar(i.OutputOperand(), i.InputInt5(1));
306 __ sar_cl(i.OutputOperand());
310 if (HasImmediateInput(instr, 1)) {
311 __ ror(i.OutputOperand(), i.InputInt5(1));
313 __ ror_cl(i.OutputOperand());
// SSE2 double ops. ucomisd sets PF on unordered (NaN) operands; the
// kUnordered* handling in AssembleArchBranch/AssembleArchBoolean tests
// parity for exactly that reason.
317 __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
320 __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
323 __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
326 __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
329 __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
// fmod via x87 fprem: spill both operands through the stack, loop until
// the partial remainder is complete, then read the result back into SSE.
331 case kSSEFloat64Mod: {
332 // TODO(dcarney): alignment is wrong.
333 __ sub(esp, Immediate(kDoubleSize));
334 // Move values to st(0) and st(1).
335 __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
336 __ fld_d(Operand(esp, 0));
337 __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
338 __ fld_d(Operand(esp, 0));
339 // Loop while fprem isn't done.
342 // This instruction traps on all kinds of inputs, but we are assuming the
343 // floating point control word is set to ignore them all.
345 // The following 2 instructions implicitly use eax.
348 __ j(parity_even, &mod_loop);
349 // Move output to stack and clean up.
351 __ fstp_d(Operand(esp, 0));
352 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
353 __ add(esp, Immediate(kDoubleSize));
356 case kSSEFloat64Sqrt:
357 __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
// SSE4.1 rounding variants.
359 case kSSEFloat64Floor: {
360 CpuFeatureScope sse_scope(masm(), SSE4_1);
361 __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
362 v8::internal::Assembler::kRoundDown);
365 case kSSEFloat64Ceil: {
366 CpuFeatureScope sse_scope(masm(), SSE4_1);
367 __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
368 v8::internal::Assembler::kRoundUp);
371 case kSSEFloat64RoundTruncate: {
372 CpuFeatureScope sse_scope(masm(), SSE4_1);
373 __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
374 v8::internal::Assembler::kRoundToZero);
378 __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
381 __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
383 case kSSEFloat64ToInt32:
384 __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
// double -> uint32: bias by -2^31 so values in [2^31, 2^32) fit the signed
// cvttsd2si, then undo the bias by adding 0x80000000.
386 case kSSEFloat64ToUint32: {
387 XMMRegister scratch = xmm0;
388 __ Move(scratch, -2147483648.0);
389 __ addsd(scratch, i.InputOperand(0));
390 __ cvttsd2si(i.OutputRegister(), scratch);
391 __ add(i.OutputRegister(), Immediate(0x80000000));
394 case kSSEInt32ToFloat64:
395 __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
397 case kSSEUint32ToFloat64:
398 __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
// Memory loads/stores. Sign/zero-extending byte loads:
401 __ movsx_b(i.OutputRegister(), i.MemoryOperand());
404 __ movzx_b(i.OutputRegister(), i.MemoryOperand());
// Byte store: value follows the memory-operand inputs at |index|.
408 Operand operand = i.MemoryOperand(&index);
409 if (HasImmediateInput(instr, index)) {
410 __ mov_b(operand, i.InputInt8(index));
412 __ mov_b(operand, i.InputRegister(index));
// Sign/zero-extending word loads, then word store.
417 __ movsx_w(i.OutputRegister(), i.MemoryOperand());
420 __ movzx_w(i.OutputRegister(), i.MemoryOperand());
424 Operand operand = i.MemoryOperand(&index);
425 if (HasImmediateInput(instr, index)) {
426 __ mov_w(operand, i.InputInt16(index));
428 __ mov_w(operand, i.InputRegister(index));
// 32-bit move: load form when the instruction has an output, else store.
433 if (instr->HasOutput()) {
434 __ mov(i.OutputRegister(), i.MemoryOperand());
437 Operand operand = i.MemoryOperand(&index);
438 if (HasImmediateInput(instr, index)) {
439 __ mov(operand, i.InputImmediate(index));
441 __ mov(operand, i.InputRegister(index));
// Double (movsd) and single (movss) load/store, same output-vs-store split.
446 if (instr->HasOutput()) {
447 __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
450 Operand operand = i.MemoryOperand(&index);
451 __ movsd(operand, i.InputDoubleRegister(index));
455 if (instr->HasOutput()) {
456 __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
459 Operand operand = i.MemoryOperand(&index);
460 __ movss(operand, i.InputDoubleRegister(index));
// Effective address computation.
464 __ lea(i.OutputRegister(), i.MemoryOperand());
467 if (HasImmediateInput(instr, 0)) {
468 __ push(i.InputImmediate(0));
470 __ push(i.InputOperand(0));
// Store with write barrier: perform the store, then clobber |index| with
// the slot address so RecordWrite can use it.
473 case kIA32StoreWriteBarrier: {
474 Register object = i.InputRegister(0);
475 Register index = i.InputRegister(1);
476 Register value = i.InputRegister(2);
477 __ mov(Operand(object, index, times_1, 0), value);
478 __ lea(index, Operand(object, index, times_1, 0));
479 SaveFPRegsMode mode =
480 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
481 __ RecordWrite(object, index, value, mode);
488 // Assembles branches after an instruction.
// NOTE(review): excerpt with elided lines (some case labels, `break`s and the
// switch header are missing). Code tokens preserved verbatim.
489 void CodeGenerator::AssembleArchBranch(Instruction* instr,
490 FlagsCondition condition) {
491 IA32OperandConverter i(this, instr);
494 // Emit a branch. The true and false targets are always the last two inputs
495 // to the instruction.
496 BasicBlock::RpoNumber tblock =
497 i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
498 BasicBlock::RpoNumber fblock =
499 i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
// If the false block immediately follows in assembly order we can fall
// through instead of emitting a second jump.
500 bool fallthru = IsNextInAssemblyOrder(fblock);
501 Label* tlabel = code()->GetLabel(tblock);
502 Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
503 Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
// kUnordered* conditions first dispatch on the parity flag, which ucomisd
// sets when either operand is NaN: unordered-equal treats NaN as false
// (jump to flabel), unordered-not-equal treats NaN as true (jump to tlabel).
505 case kUnorderedEqual:
506 __ j(parity_even, flabel, flabel_distance);
511 case kUnorderedNotEqual:
512 __ j(parity_even, tlabel);
515 __ j(not_equal, tlabel);
517 case kSignedLessThan:
520 case kSignedGreaterThanOrEqual:
521 __ j(greater_equal, tlabel);
523 case kSignedLessThanOrEqual:
524 __ j(less_equal, tlabel);
526 case kSignedGreaterThan:
527 __ j(greater, tlabel);
529 case kUnorderedLessThan:
530 __ j(parity_even, flabel, flabel_distance);
532 case kUnsignedLessThan:
535 case kUnorderedGreaterThanOrEqual:
536 __ j(parity_even, tlabel);
538 case kUnsignedGreaterThanOrEqual:
539 __ j(above_equal, tlabel);
541 case kUnorderedLessThanOrEqual:
542 __ j(parity_even, flabel, flabel_distance);
544 case kUnsignedLessThanOrEqual:
545 __ j(below_equal, tlabel);
547 case kUnorderedGreaterThan:
548 __ j(parity_even, tlabel);
550 case kUnsignedGreaterThan:
// Overflow conditions (case labels elided in this excerpt).
554 __ j(overflow, tlabel);
557 __ j(no_overflow, tlabel);
560 if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
565 // Assembles boolean materializations after an instruction.
// NOTE(review): excerpt with elided lines (switch header, some case bodies,
// labels `check`/`set`/`done` bindings). Code tokens preserved verbatim.
566 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
567 FlagsCondition condition) {
568 IA32OperandConverter i(this, instr);
571 // Materialize a full 32-bit 1 or 0 value. The result register is always the
572 // last output of the instruction.
574 DCHECK_NE(0, instr->OutputCount());
575 Register reg = i.OutputRegister(instr->OutputCount() - 1);
576 Condition cc = no_condition;
// kUnordered* cases handle the NaN (parity-even) path up front: when the
// compare was unordered, store the fixed 0/1 answer and skip the generic
// setcc path; otherwise fall through to `check` with `cc` set.
578 case kUnorderedEqual:
579 __ j(parity_odd, &check, Label::kNear);
580 __ Move(reg, Immediate(0));
581 __ jmp(&done, Label::kNear);
586 case kUnorderedNotEqual:
587 __ j(parity_odd, &check, Label::kNear);
588 __ mov(reg, Immediate(1));
589 __ jmp(&done, Label::kNear);
594 case kSignedLessThan:
597 case kSignedGreaterThanOrEqual:
600 case kSignedLessThanOrEqual:
603 case kSignedGreaterThan:
606 case kUnorderedLessThan:
607 __ j(parity_odd, &check, Label::kNear);
608 __ Move(reg, Immediate(0));
609 __ jmp(&done, Label::kNear);
611 case kUnsignedLessThan:
614 case kUnorderedGreaterThanOrEqual:
615 __ j(parity_odd, &check, Label::kNear);
616 __ mov(reg, Immediate(1));
617 __ jmp(&done, Label::kNear);
619 case kUnsignedGreaterThanOrEqual:
622 case kUnorderedLessThanOrEqual:
623 __ j(parity_odd, &check, Label::kNear);
624 __ Move(reg, Immediate(0));
625 __ jmp(&done, Label::kNear);
627 case kUnsignedLessThanOrEqual:
630 case kUnorderedGreaterThan:
631 __ j(parity_odd, &check, Label::kNear);
632 __ mov(reg, Immediate(1));
633 __ jmp(&done, Label::kNear);
635 case kUnsignedGreaterThan:
// Fast path: byte registers can use setcc directly then zero-extend.
646 if (reg.is_byte_register()) {
647 // setcc for byte registers (al, bl, cl, dl).
649 __ movzx_b(reg, reg);
// Slow path for non-byte registers:
651 // Emit a branch to set a register to either 1 or 0.
653 __ j(cc, &set, Label::kNear);
654 __ Move(reg, Immediate(0));
655 __ jmp(&done, Label::kNear);
657 __ mov(reg, Immediate(1));
// Emits a call into the lazy deoptimization entry for |deoptimization_id|.
// (Closing brace elided in this excerpt.)
663 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
664 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
665 isolate(), deoptimization_id, Deoptimizer::LAZY);
666 __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
670 // The calling convention for JSFunctions on IA32 passes arguments on the
671 // stack and the JSFunction and context in EDI and ESI, respectively, thus
672 // the steps of the call look as follows:
674 // --{ before the call instruction }--------------------------------------------
678 // --{ push arguments and setup ESI, EDI }--------------------------------------
679 // | args + receiver | caller frame |
681 // [edi = JSFunction, esi = context]
683 // --{ call [edi + kCodeEntryOffset] }------------------------------------------
684 // | RET | args + receiver | caller frame |
687 // =={ prologue of called function }============================================
688 // --{ push ebp }---------------------------------------------------------------
689 // | FP | RET | args + receiver | caller frame |
692 // --{ mov ebp, esp }-----------------------------------------------------------
693 // | FP | RET | args + receiver | caller frame |
696 // --{ push esi }---------------------------------------------------------------
697 // | CTX | FP | RET | args + receiver | caller frame |
700 // --{ push edi }---------------------------------------------------------------
701 // | FNC | CTX | FP | RET | args + receiver | caller frame |
704 // --{ subi esp, #N }-----------------------------------------------------------
705 // | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
708 // =={ body of called function }================================================
710 // =={ epilogue of called function }============================================
711 // --{ mov esp, ebp }-----------------------------------------------------------
712 // | FP | RET | args + receiver | caller frame |
715 // --{ pop ebp }-----------------------------------------------------------
716 // | | RET | args + receiver | caller frame |
719 // --{ ret #A+1 }-----------------------------------------------------------
720 // | | caller frame |
724 // Runtime function calls are accomplished by doing a stub call to the
725 // CEntryStub (a real code object). On IA32 it passes arguments on the
726 // stack, the number of arguments in EAX, the address of the runtime function
727 // in EBX, and the context in ESI.
729 // --{ before the call instruction }--------------------------------------------
733 // --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
734 // | args + receiver | caller frame |
736 // [eax = #args, ebx = runtime function, esi = context]
738 // --{ call #CEntryStub }-------------------------------------------------------
739 // | RET | args + receiver | caller frame |
742 // =={ body of runtime function }===============================================
744 // --{ runtime returns }--------------------------------------------------------
748 // Other custom linkages (e.g. for calling directly into and out of C++) may
749 // need to save callee-saved registers on the stack, which is done in the
750 // function prologue of generated code.
752 // --{ before the call instruction }--------------------------------------------
756 // --{ set up arguments in registers on stack }---------------------------------
757 // | args | caller frame |
759 // [r0 = arg0, r1 = arg1, ...]
761 // --{ call code }--------------------------------------------------------------
762 // | RET | args | caller frame |
765 // =={ prologue of called function }============================================
766 // --{ push ebp }---------------------------------------------------------------
767 // | FP | RET | args | caller frame |
770 // --{ mov ebp, esp }-----------------------------------------------------------
771 // | FP | RET | args | caller frame |
774 // --{ save registers }---------------------------------------------------------
775 // | regs | FP | RET | args | caller frame |
778 // --{ subi esp, #N }-----------------------------------------------------------
779 // | callee frame | regs | FP | RET | args | caller frame |
782 // =={ body of called function }================================================
784 // =={ epilogue of called function }============================================
785 // --{ restore registers }------------------------------------------------------
786 // | regs | FP | RET | args | caller frame |
789 // --{ mov esp, ebp }-----------------------------------------------------------
790 // | FP | RET | args | caller frame |
793 // --{ pop ebp }----------------------------------------------------------------
794 // | RET | args | caller frame |
// Emits the frame-setup code for the current function according to the
// incoming call descriptor (see the stack-layout commentary above).
// NOTE(review): excerpt with elided lines (closing braces, `Label ok`
// declaration, the final `else` header). Code tokens preserved verbatim.
798 void CodeGenerator::AssemblePrologue() {
799 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
800 Frame* frame = this->frame();
801 int stack_slots = frame->GetSpillSlotCount();
802 if (descriptor->kind() == CallDescriptor::kCallAddress) {
803 // Assemble a prologue similar to the cdecl calling convention.
806 const RegList saves = descriptor->CalleeSavedRegisters();
807 if (saves != 0) { // Save callee-saved registers.
// Push in descending code order so the save area layout is deterministic.
808 int register_save_area_size = 0;
809 for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
810 if (!((1 << i) & saves)) continue;
811 __ push(Register::from_code(i));
812 register_save_area_size += kPointerSize;
814 frame->SetRegisterSaveAreaSize(register_save_area_size);
816 } else if (descriptor->IsJSFunctionCall()) {
817 CompilationInfo* info = this->info();
818 __ Prologue(info->IsCodePreAgingActive());
819 frame->SetRegisterSaveAreaSize(
820 StandardFrameConstants::kFixedFrameSizeFromFp);
822 // Sloppy mode functions and builtins need to replace the receiver with the
823 // global proxy when called as functions (without an explicit receiver
825 // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
826 if (info->strict_mode() == SLOPPY && !info->is_native()) {
828 // +2 for return address and saved frame pointer.
829 int receiver_slot = info->scope()->num_parameters() + 2;
830 __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
831 __ cmp(ecx, isolate()->factory()->undefined_value());
832 __ j(not_equal, &ok, Label::kNear);
833 __ mov(ecx, GlobalObjectOperand());
834 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
835 __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
// Fallback arm (header elided): a standard stub frame.
841 frame->SetRegisterSaveAreaSize(
842 StandardFrameConstants::kFixedFrameSizeFromFp);
// Finally reserve the spill-slot area.
844 if (stack_slots > 0) {
845 __ sub(esp, Immediate(stack_slots * kPointerSize));
// Emits frame teardown and the return instruction; mirrors AssemblePrologue.
// NOTE(review): excerpt with elided lines (closing braces, the `: 0` arm of
// the pop_count conditional). Code tokens preserved verbatim.
850 void CodeGenerator::AssembleReturn() {
851 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
852 if (descriptor->kind() == CallDescriptor::kCallAddress) {
853 const RegList saves = descriptor->CalleeSavedRegisters();
854 if (frame()->GetRegisterSaveAreaSize() > 0) {
855 // Remove this frame's spill slots first.
856 int stack_slots = frame()->GetSpillSlotCount();
857 if (stack_slots > 0) {
858 __ add(esp, Immediate(stack_slots * kPointerSize));
860 // Restore registers.
// Pop in ascending code order — the reverse of the prologue's pushes.
862 for (int i = 0; i < Register::kNumRegisters; i++) {
863 if (!((1 << i) & saves)) continue;
864 __ pop(Register::from_code(i));
867 __ pop(ebp); // Pop caller's frame pointer.
870 // No saved registers.
871 __ mov(esp, ebp); // Move stack pointer back to frame pointer.
872 __ pop(ebp); // Pop caller's frame pointer.
// Non-kCallAddress frames: standard teardown, then pop the JS arguments
// (including the receiver) off the caller's stack when returning from a
// JSFunction call.
876 __ mov(esp, ebp); // Move stack pointer back to frame pointer.
877 __ pop(ebp); // Pop caller's frame pointer.
878 int pop_count = descriptor->IsJSFunctionCall()
879 ? static_cast<int>(descriptor->JSParameterCount())
881 __ ret(pop_count * kPointerSize);
// Emits a move between two allocated operands for the gap resolver.
// NOTE(review): excerpt with elided lines (several `__ mov`/`__ movsd`
// emission lines and closing braces). Code tokens preserved verbatim.
886 void CodeGenerator::AssembleMove(InstructionOperand* source,
887 InstructionOperand* destination) {
888 IA32OperandConverter g(this, NULL);
889 // Dispatch on the source and destination operand kinds. Not all
890 // combinations are possible.
891 if (source->IsRegister()) {
892 DCHECK(destination->IsRegister() || destination->IsStackSlot());
893 Register src = g.ToRegister(source);
894 Operand dst = g.ToOperand(destination);
896 } else if (source->IsStackSlot()) {
897 DCHECK(destination->IsRegister() || destination->IsStackSlot());
898 Operand src = g.ToOperand(source);
899 if (destination->IsRegister()) {
900 Register dst = g.ToRegister(destination);
// Slot-to-slot case (move body elided; presumably bounced through a
// scratch — TODO confirm upstream).
903 Operand dst = g.ToOperand(destination);
907 } else if (source->IsConstant()) {
908 Constant src_constant = g.ToConstant(source);
// Heap-object constants need relocatable loads rather than raw immediates.
909 if (src_constant.type() == Constant::kHeapObject) {
910 Handle<HeapObject> src = src_constant.ToHeapObject();
911 if (destination->IsRegister()) {
912 Register dst = g.ToRegister(destination);
913 __ LoadHeapObject(dst, src);
915 DCHECK(destination->IsStackSlot());
916 Operand dst = g.ToOperand(destination);
917 AllowDeferredHandleDereference embedding_raw_address;
// New-space objects may move, so go through push/pop instead of
// embedding the raw address in the instruction stream.
918 if (isolate()->heap()->InNewSpace(*src)) {
919 __ PushHeapObject(src);
// Other scalar constants materialize as immediates.
925 } else if (destination->IsRegister()) {
926 Register dst = g.ToRegister(destination);
927 __ Move(dst, g.ToImmediate(source));
928 } else if (destination->IsStackSlot()) {
929 Operand dst = g.ToOperand(destination);
930 __ Move(dst, g.ToImmediate(source));
931 } else if (src_constant.type() == Constant::kFloat32) {
932 // TODO(turbofan): Can we do better here?
// Reinterpret the float bits as an integer so they can be stored via
// ordinary integer moves.
933 uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
934 if (destination->IsDoubleRegister()) {
935 XMMRegister dst = g.ToDoubleRegister(destination);
938 DCHECK(destination->IsDoubleStackSlot());
939 Operand dst = g.ToOperand(destination);
940 __ Move(dst, Immediate(src));
943 DCHECK_EQ(Constant::kFloat64, src_constant.type());
// Split the 64-bit pattern into two 32-bit halves for ia32.
944 uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
945 uint32_t lower = static_cast<uint32_t>(src);
946 uint32_t upper = static_cast<uint32_t>(src >> 32);
947 if (destination->IsDoubleRegister()) {
948 XMMRegister dst = g.ToDoubleRegister(destination);
951 DCHECK(destination->IsDoubleStackSlot());
952 Operand dst0 = g.ToOperand(destination);
953 Operand dst1 = g.HighOperand(destination);
954 __ Move(dst0, Immediate(lower));
955 __ Move(dst1, Immediate(upper));
958 } else if (source->IsDoubleRegister()) {
959 XMMRegister src = g.ToDoubleRegister(source);
960 if (destination->IsDoubleRegister()) {
961 XMMRegister dst = g.ToDoubleRegister(destination);
964 DCHECK(destination->IsDoubleStackSlot());
965 Operand dst = g.ToOperand(destination);
968 } else if (source->IsDoubleStackSlot()) {
969 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
970 Operand src = g.ToOperand(source);
971 if (destination->IsDoubleRegister()) {
972 XMMRegister dst = g.ToDoubleRegister(destination);
975 // We rely on having xmm0 available as a fixed scratch register.
976 Operand dst = g.ToOperand(destination);
// Emits a swap of two allocated operands for the gap resolver.
// NOTE(review): excerpt with elided lines (the register-register xchg, parts
// of the slot-slot sequence, the UNREACHABLE arm). Code tokens verbatim.
986 void CodeGenerator::AssembleSwap(InstructionOperand* source,
987 InstructionOperand* destination) {
988 IA32OperandConverter g(this, NULL);
989 // Dispatch on the source and destination operand kinds. Not all
990 // combinations are possible.
991 if (source->IsRegister() && destination->IsRegister()) {
992 // Register-register.
993 Register src = g.ToRegister(source);
994 Register dst = g.ToRegister(destination);
996 } else if (source->IsRegister() && destination->IsStackSlot()) {
// xchg works register<->memory directly; no scratch needed.
998 __ xchg(g.ToRegister(source), g.ToOperand(destination));
999 } else if (source->IsStackSlot() && destination->IsStackSlot()) {
1001 Operand src = g.ToOperand(source);
1002 Operand dst = g.ToOperand(destination);
1007 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
1008 // XMM register-register swap. We rely on having xmm0
1009 // available as a fixed scratch register.
1010 XMMRegister src = g.ToDoubleRegister(source);
1011 XMMRegister dst = g.ToDoubleRegister(destination);
1012 __ movaps(xmm0, src);
1013 __ movaps(src, dst);
1014 __ movaps(dst, xmm0);
1015 } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
1016 // XMM register-memory swap. We rely on having xmm0
1017 // available as a fixed scratch register.
1018 XMMRegister reg = g.ToDoubleRegister(source);
1019 Operand other = g.ToOperand(destination);
1020 __ movsd(xmm0, other);
1021 __ movsd(other, reg);
1022 __ movaps(reg, xmm0);
1023 } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
1024 // Double-width memory-to-memory.
// Save destination in xmm0, copy source word-by-word via push/pop, then
// write the saved destination bits over the source (middle lines elided).
1025 Operand src0 = g.ToOperand(source);
1026 Operand src1 = g.HighOperand(source);
1027 Operand dst0 = g.ToOperand(destination);
1028 Operand dst1 = g.HighOperand(destination);
1029 __ movsd(xmm0, dst0); // Save destination in xmm0.
1030 __ push(src0); // Then use stack to copy source to destination.
1034 __ movsd(src0, xmm0);
1036 // No other combinations are possible.
// Emits a single nop as a patchable site for inlined smi code.
1042 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
// Pads the instruction stream so the previous lazy-deopt site can later be
// patched with a Deoptimizer::patch_size()-byte call without overwriting the
// code that follows. (Closing braces elided in this excerpt.)
1045 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1046 int space_needed = Deoptimizer::patch_size();
1047 if (!info()->IsStub()) {
1048 // Ensure that we have enough space after the previous lazy-bailout
1049 // instruction for patching the code here.
1050 int current_pc = masm()->pc_offset();
1051 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1052 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1053 __ Nop(padding_size);
1056 MarkLazyDeoptSite();
1061 } // namespace compiler
1062 } // namespace internal