Update To 11.40.268.0
[platform/framework/web/crosswalk.git] / src / v8 / src / compiler / x64 / instruction-selector-x64.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/generic-node-inl.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8
9 namespace v8 {
10 namespace internal {
11 namespace compiler {
12
13 // Adds X64-specific methods for generating operands.
14 class X64OperandGenerator FINAL : public OperandGenerator {
15  public:
16   explicit X64OperandGenerator(InstructionSelector* selector)
17       : OperandGenerator(selector) {}
18
19   InstructionOperand* TempRegister(Register reg) {
20     return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
21                                            Register::ToAllocationIndex(reg));
22   }
23
24   bool CanBeImmediate(Node* node) {
25     switch (node->opcode()) {
26       case IrOpcode::kInt32Constant:
27         return true;
28       case IrOpcode::kInt64Constant: {
29         const int64_t value = OpParameter<int64_t>(node);
30         return value == static_cast<int64_t>(static_cast<int32_t>(value));
31       }
32       default:
33         return false;
34     }
35   }
36
37   bool CanBeBetterLeftOperand(Node* node) const {
38     return !selector()->IsLive(node);
39   }
40 };
41
42
// Selects a load instruction.  The opcode is chosen from the load
// representation (size/float-ness) and its type (signed vs. unsigned
// sub-word extension); the addressing mode prefers folding a constant
// base or index into an immediate displacement.
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  X64OperandGenerator g(this);
  Node* const base = node->InputAt(0);
  Node* const index = node->InputAt(1);

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      // Sign-extend for signed loads, zero-extend otherwise.
      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    // load [#base + %index]
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
  } else if (g.CanBeImmediate(index)) {
    // load [%base + #index]
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // load [%base + %index*1]
    Emit(opcode | AddressingModeField::encode(kMode_MR1),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}
90
91
92 void InstructionSelector::VisitStore(Node* node) {
93   X64OperandGenerator g(this);
94   Node* base = node->InputAt(0);
95   Node* index = node->InputAt(1);
96   Node* value = node->InputAt(2);
97
98   StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
99   MachineType rep = RepresentationOf(store_rep.machine_type());
100   if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
101     DCHECK(rep == kRepTagged);
102     // TODO(dcarney): refactor RecordWrite function to take temp registers
103     //                and pass them here instead of using fixed regs
104     // TODO(dcarney): handle immediate indices.
105     InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
106     Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
107          g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
108          temps);
109     return;
110   }
111   DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
112   ArchOpcode opcode;
113   switch (rep) {
114     case kRepFloat32:
115       opcode = kX64Movss;
116       break;
117     case kRepFloat64:
118       opcode = kX64Movsd;
119       break;
120     case kRepBit:  // Fall through.
121     case kRepWord8:
122       opcode = kX64Movb;
123       break;
124     case kRepWord16:
125       opcode = kX64Movw;
126       break;
127     case kRepWord32:
128       opcode = kX64Movl;
129       break;
130     case kRepTagged:  // Fall through.
131     case kRepWord64:
132       opcode = kX64Movq;
133       break;
134     default:
135       UNREACHABLE();
136       return;
137   }
138   InstructionOperand* value_operand =
139       g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
140   if (g.CanBeImmediate(base)) {
141     // store [#base + %index], %|#value
142     Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
143          g.UseRegister(index), g.UseImmediate(base), value_operand);
144   } else if (g.CanBeImmediate(index)) {
145     // store [%base + #index], %|#value
146     Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
147          g.UseRegister(base), g.UseImmediate(index), value_operand);
148   } else {
149     // store [%base + %index*1], %|#value
150     Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
151          g.UseRegister(base), g.UseRegister(index), value_operand);
152   }
153 }
154
155
156 // Shared routine for multiple binary operations.
// Shared routine for multiple binary operations.  Builds the operand lists
// for a two-address x64 binop (result tied to the first input) and emits it
// together with the flags continuation (branch labels or a boolean result).
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand* const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    // For commutative operations, prefer a dead node on the left so its
    // register can be reused for the (tied) result.
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  // A branch continuation consumes its target labels as extra inputs.
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, static_cast<int>(input_count));
  DCHECK_NE(0, static_cast<int>(output_count));
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}
211
212
213 // Shared routine for multiple binary operations.
214 static void VisitBinop(InstructionSelector* selector, Node* node,
215                        InstructionCode opcode) {
216   FlagsContinuation cont;
217   VisitBinop(selector, node, opcode, &cont);
218 }
219
220
// 32-bit bitwise AND via the shared two-address binop routine.
void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kX64And32);
}
224
225
// 64-bit bitwise AND via the shared two-address binop routine.
void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}
229
230
// 32-bit bitwise OR via the shared two-address binop routine.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}
234
235
// 64-bit bitwise OR via the shared two-address binop routine.
void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}
239
240
241 void InstructionSelector::VisitWord32Xor(Node* node) {
242   X64OperandGenerator g(this);
243   Uint32BinopMatcher m(node);
244   if (m.right().Is(-1)) {
245     Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
246   } else {
247     VisitBinop(this, node, kX64Xor32);
248   }
249 }
250
251
252 void InstructionSelector::VisitWord64Xor(Node* node) {
253   X64OperandGenerator g(this);
254   Uint64BinopMatcher m(node);
255   if (m.right().Is(-1)) {
256     Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
257   } else {
258     VisitBinop(this, node, kX64Xor);
259   }
260 }
261
262
263 namespace {
264
265 // Shared routine for multiple 32-bit shift operations.
266 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
void VisitWord32Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    // x64 masks 32-bit shift counts to 5 bits, so an explicit "count & 0x1F"
    // is redundant and the masking AND can be stripped.
    if (m.right().IsWord32And()) {
      Int32BinopMatcher mright(right);
      if (mright.right().Is(0x1F)) {
        right = mright.left().node();
      }
    }
    // Variable shift counts must be in rcx (cl).
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}
288
289
290 // Shared routine for multiple 64-bit shift operations.
291 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
void VisitWord64Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    // x64 masks 64-bit shift counts to 6 bits, so an explicit "count & 0x3F"
    // is redundant and the masking AND can be stripped.
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    // Variable shift counts must be in rcx (cl).
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}
313
314 }  // namespace
315
316
// 32-bit shift left.
void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitWord32Shift(this, node, kX64Shl32);
}
320
321
// 64-bit shift left.
void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitWord64Shift(this, node, kX64Shl);
}
325
326
// 32-bit logical shift right.
void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}
330
331
// 64-bit logical shift right.
void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}
335
336
// 32-bit arithmetic shift right.
void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitWord32Shift(this, node, kX64Sar32);
}
340
341
// 64-bit arithmetic shift right.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitWord64Shift(this, node, kX64Sar);
}
345
346
// 32-bit rotate right.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}
350
351
// 64-bit rotate right.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}
355
356
// 32-bit integer addition via the shared two-address binop routine.
void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kX64Add32);
}
360
361
// 64-bit integer addition via the shared two-address binop routine.
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}
365
366
367 void InstructionSelector::VisitInt32Sub(Node* node) {
368   X64OperandGenerator g(this);
369   Int32BinopMatcher m(node);
370   if (m.left().Is(0)) {
371     Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
372   } else {
373     VisitBinop(this, node, kX64Sub32);
374   }
375 }
376
377
378 void InstructionSelector::VisitInt64Sub(Node* node) {
379   X64OperandGenerator g(this);
380   Int64BinopMatcher m(node);
381   if (m.left().Is(0)) {
382     Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
383   } else {
384     VisitBinop(this, node, kX64Sub);
385   }
386 }
387
388
389 namespace {
390
// Shared routine for integer multiplication.  An immediate right operand
// uses the three-operand imul form (result in its own register); otherwise
// the two-operand form is used with the result tied to the left input.
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    // Multiplication is commutative: prefer a dead node on the left so its
    // register can be reused for the (tied) result.
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}
407
408
// Shared routine for the high word of a widening multiply: one operand is
// fixed in rax and the result (high half of the product) is taken from rdx.
void VisitMulHigh(InstructionSelector* selector, Node* node,
                  ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  // Prefer a dead node in the rax-fixed slot; swapping is safe since
  // the high-half multiply treats both operands symmetrically.
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  }
  // TODO(turbofan): We use UseUniqueRegister here to improve register
  // allocation.
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right));
}
422
423
// Shared routine for division: dividend fixed in rax, quotient produced in
// rax; rdx is clobbered by the instruction, so it is reserved as a temp.
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
431
432
// Shared routine for modulus: dividend fixed in rax, remainder taken
// from rdx.
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsFixed(node, rdx),
                 g.UseFixed(node->InputAt(0), rax),
                 g.UseUniqueRegister(node->InputAt(1)));
}
439
440 }  // namespace
441
442
// 32-bit signed multiplication.
void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitMul(this, node, kX64Imul32);
}
446
447
// 64-bit signed multiplication.
void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}
451
452
// High 32 bits of the 64-bit signed product.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64ImulHigh32);
}
456
457
// 32-bit signed division.
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}
461
462
// 64-bit signed division.
void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}
466
467
// 32-bit unsigned division.
void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}
471
472
// 64-bit unsigned division.
void InstructionSelector::VisitUint64Div(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}
476
477
// 32-bit signed modulus (remainder of idiv).
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}
481
482
// 64-bit signed modulus (remainder of idiv).
void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}
486
487
// 32-bit unsigned modulus (remainder of div).
void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}
491
492
// 64-bit unsigned modulus (remainder of div).
void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}
496
497
// High 32 bits of the 64-bit unsigned product.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64UmulHigh32);
}
501
502
// float32 -> float64 conversion (cvtss2sd).
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
507
508
// int32 -> float64 conversion.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
513
514
// uint32 -> float64 conversion.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
519
520
// float64 -> int32 conversion.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
525
526
// float64 -> uint32 conversion.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
531
532
// int32 -> int64 sign extension (movsxlq).
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
537
538
// uint32 -> uint64 zero extension.  If the input is one of the listed
// 32-bit operations the extension is free (see comment below) and only a
// no-op is emitted; otherwise an explicit 32-bit mov clears the upper half.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
574
575
// float64 -> float32 truncation (cvtsd2ss).
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
580
581
// int64 -> int32 truncation.  If the truncated value is a 64-bit shift by
// exactly 32 that this node covers, the truncation is folded into a single
// 64-bit logical shift (the low 32 bits of the result are what's wanted
// either way); otherwise a 32-bit mov discards the upper half.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shr: {
        Int64BinopMatcher m(value);
        if (m.right().Is(32)) {
          Emit(kX64Shr, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()), g.TempImmediate(32));
          return;
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
603
604
// float64 addition; result tied to the first input (two-address SSE form).
void InstructionSelector::VisitFloat64Add(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
}
610
611
// float64 subtraction; result tied to the first input (two-address SSE form).
void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
}
617
618
// float64 multiplication; result tied to the first input (two-address SSE
// form).
void InstructionSelector::VisitFloat64Mul(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
}
624
625
// float64 division; result tied to the first input (two-address SSE form).
void InstructionSelector::VisitFloat64Div(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
}
631
632
633 void InstructionSelector::VisitFloat64Mod(Node* node) {
634   X64OperandGenerator g(this);
635   InstructionOperand* temps[] = {g.TempRegister(rax)};
636   Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
637        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
638        temps);
639 }
640
641
// float64 square root.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
646
647
648 namespace {
649
// Shared routine for unary float64 operations with a register input and a
// register output.
void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}
656
657 }  // namespace
658
659
// float64 floor; only selected when SSE4.1 (roundsd) is available.
void InstructionSelector::VisitFloat64Floor(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Floor, node);
}
664
665
// float64 ceiling; only selected when SSE4.1 (roundsd) is available.
void InstructionSelector::VisitFloat64Ceil(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Ceil, node);
}
670
671
// float64 round-toward-zero; only selected when SSE4.1 is available.
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
}
676
677
// Round-ties-away is not provided on this backend.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}
681
682
683 void InstructionSelector::VisitCall(Node* node) {
684   X64OperandGenerator g(this);
685   CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
686
687   FrameStateDescriptor* frame_state_descriptor = NULL;
688   if (descriptor->NeedsFrameState()) {
689     frame_state_descriptor = GetFrameStateDescriptor(
690         node->InputAt(static_cast<int>(descriptor->InputCount())));
691   }
692
693   CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
694
695   // Compute InstructionOperands for inputs and outputs.
696   InitializeCallBuffer(node, &buffer, true, true);
697
698   // Push any stack arguments.
699   for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
700        input != buffer.pushed_nodes.rend(); input++) {
701     // TODO(titzer): handle pushing double parameters.
702     Emit(kX64Push, NULL,
703          g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
704   }
705
706   // Select the appropriate opcode based on the call type.
707   InstructionCode opcode;
708   switch (descriptor->kind()) {
709     case CallDescriptor::kCallCodeObject: {
710       opcode = kArchCallCodeObject;
711       break;
712     }
713     case CallDescriptor::kCallJSFunction:
714       opcode = kArchCallJSFunction;
715       break;
716     default:
717       UNREACHABLE();
718       return;
719   }
720   opcode |= MiscField::encode(descriptor->flags());
721
722   // Emit the call instruction.
723   InstructionOperand** first_output =
724       buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
725   Instruction* call_instr =
726       Emit(opcode, buffer.outputs.size(), first_output,
727            buffer.instruction_args.size(), &buffer.instruction_args.front());
728   call_instr->MarkAsCall();
729 }
730
731
732 // Shared routine for multiple compare operations.
733 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
734                          InstructionOperand* left, InstructionOperand* right,
735                          FlagsContinuation* cont) {
736   X64OperandGenerator g(selector);
737   opcode = cont->Encode(opcode);
738   if (cont->IsBranch()) {
739     selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
740                    g.Label(cont->false_block()))->MarkAsControl();
741   } else {
742     DCHECK(cont->IsSet());
743     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
744   }
745 }
746
747
748 // Shared routine for multiple compare operations.
749 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
750                          Node* left, Node* right, FlagsContinuation* cont,
751                          bool commutative) {
752   X64OperandGenerator g(selector);
753   if (commutative && g.CanBeBetterLeftOperand(right)) {
754     std::swap(left, right);
755   }
756   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
757 }
758
759
760 // Shared routine for multiple word compare operations.
761 static void VisitWordCompare(InstructionSelector* selector, Node* node,
762                              InstructionCode opcode, FlagsContinuation* cont) {
763   X64OperandGenerator g(selector);
764   Node* const left = node->InputAt(0);
765   Node* const right = node->InputAt(1);
766
767   // Match immediates on left or right side of comparison.
768   if (g.CanBeImmediate(right)) {
769     VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
770   } else if (g.CanBeImmediate(left)) {
771     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
772     VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
773   } else {
774     VisitCompare(selector, opcode, left, right, cont,
775                  node->op()->HasProperty(Operator::kCommutative));
776   }
777 }
778
779
780 // Shared routine for comparison with zero.
781 static void VisitCompareZero(InstructionSelector* selector, Node* node,
782                              InstructionCode opcode, FlagsContinuation* cont) {
783   X64OperandGenerator g(selector);
784   VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
785 }
786
787
788 // Shared routine for multiple float64 compare operations.
789 static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
790                                 FlagsContinuation* cont) {
791   VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
792                cont, node->op()->HasProperty(Operator::kCommutative));
793 }
794
795
// Selects instructions for a branch.  Attempts, in order, to (1) fall
// through to the true block by inverting the condition, (2) absorb chains
// of "x == 0" by flipping the condition each time, and (3) fuse the branch
// with the comparison, flag-setting arithmetic, or overflow projection that
// produces its input.  Falls back to an explicit compare against zero.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  X64OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // If we can fall through to the true block, invert the branch.
  if (IsNextInAssemblyOrder(tbranch)) {
    cont.Negate();
    cont.SwapBlocks();
  }

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kUint64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (OpParameter<size_t>(value) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* node = value->InputAt(0);
          Node* result = node->FindProjection(0);
          if (result == NULL || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Add32, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Sub32, &cont);
              default:
                break;
            }
          }
        }
        break;
      // sub and and set the flags the branch needs, so emit them as the
      // compare/test directly.
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt64Sub:
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(this, value, kX64Test32, &cont);
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kX64Test, &cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitCompareZero(this, value, kX64Cmp32, &cont);
}
915
916
// 32-bit equality.  A comparison against zero tries to absorb nested
// "x == 0" chains (flipping the condition each time) and to fuse with a
// flag-setting sub or and, mirroring the combining done in VisitBranch.
void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* value = m.left().node();

    // Try to combine with comparisons against 0 by simply inverting the branch.
    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    }

    // Try to combine the branch with a comparison.
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kX64Cmp32, &cont);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kX64Test32, &cont);
        default:
          break;
      }
    }
    return VisitCompareZero(this, value, kX64Cmp32, &cont);
  }
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}
951
952
953 void InstructionSelector::VisitInt32LessThan(Node* node) {
954   FlagsContinuation cont(kSignedLessThan, node);
955   VisitWordCompare(this, node, kX64Cmp32, &cont);
956 }
957
958
959 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
960   FlagsContinuation cont(kSignedLessThanOrEqual, node);
961   VisitWordCompare(this, node, kX64Cmp32, &cont);
962 }
963
964
965 void InstructionSelector::VisitUint32LessThan(Node* node) {
966   FlagsContinuation cont(kUnsignedLessThan, node);
967   VisitWordCompare(this, node, kX64Cmp32, &cont);
968 }
969
970
971 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
972   FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
973   VisitWordCompare(this, node, kX64Cmp32, &cont);
974 }
975
976
977 void InstructionSelector::VisitWord64Equal(Node* const node) {
978   Node* user = node;
979   FlagsContinuation cont(kEqual, node);
980   Int64BinopMatcher m(user);
981   if (m.right().Is(0)) {
982     Node* value = m.left().node();
983
984     // Try to combine with comparisons against 0 by simply inverting the branch.
985     while (CanCover(user, value) && value->opcode() == IrOpcode::kWord64Equal) {
986       Int64BinopMatcher m(value);
987       if (m.right().Is(0)) {
988         user = value;
989         value = m.left().node();
990         cont.Negate();
991       } else {
992         break;
993       }
994     }
995
996     // Try to combine the branch with a comparison.
997     if (CanCover(user, value)) {
998       switch (value->opcode()) {
999         case IrOpcode::kInt64Sub:
1000           return VisitWordCompare(this, value, kX64Cmp, &cont);
1001         case IrOpcode::kWord64And:
1002           return VisitWordCompare(this, value, kX64Test, &cont);
1003         default:
1004           break;
1005       }
1006     }
1007     return VisitCompareZero(this, value, kX64Cmp, &cont);
1008   }
1009   VisitWordCompare(this, node, kX64Cmp, &cont);
1010 }
1011
1012
1013 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1014   if (Node* ovf = node->FindProjection(1)) {
1015     FlagsContinuation cont(kOverflow, ovf);
1016     VisitBinop(this, node, kX64Add32, &cont);
1017   }
1018   FlagsContinuation cont;
1019   VisitBinop(this, node, kX64Add32, &cont);
1020 }
1021
1022
1023 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1024   if (Node* ovf = node->FindProjection(1)) {
1025     FlagsContinuation cont(kOverflow, ovf);
1026     return VisitBinop(this, node, kX64Sub32, &cont);
1027   }
1028   FlagsContinuation cont;
1029   VisitBinop(this, node, kX64Sub32, &cont);
1030 }
1031
1032
1033 void InstructionSelector::VisitInt64LessThan(Node* node) {
1034   FlagsContinuation cont(kSignedLessThan, node);
1035   VisitWordCompare(this, node, kX64Cmp, &cont);
1036 }
1037
1038
1039 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
1040   FlagsContinuation cont(kSignedLessThanOrEqual, node);
1041   VisitWordCompare(this, node, kX64Cmp, &cont);
1042 }
1043
1044
1045 void InstructionSelector::VisitUint64LessThan(Node* node) {
1046   FlagsContinuation cont(kUnsignedLessThan, node);
1047   VisitWordCompare(this, node, kX64Cmp, &cont);
1048 }
1049
1050
1051 void InstructionSelector::VisitFloat64Equal(Node* node) {
1052   FlagsContinuation cont(kUnorderedEqual, node);
1053   VisitFloat64Compare(this, node, &cont);
1054 }
1055
1056
1057 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1058   FlagsContinuation cont(kUnorderedLessThan, node);
1059   VisitFloat64Compare(this, node, &cont);
1060 }
1061
1062
1063 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1064   FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
1065   VisitFloat64Compare(this, node, &cont);
1066 }
1067
1068
1069 // static
1070 MachineOperatorBuilder::Flags
1071 InstructionSelector::SupportedMachineOperatorFlags() {
1072   if (CpuFeatures::IsSupported(SSE4_1)) {
1073     return MachineOperatorBuilder::kFloat64Floor |
1074            MachineOperatorBuilder::kFloat64Ceil |
1075            MachineOperatorBuilder::kFloat64RoundTruncate;
1076   }
1077   return MachineOperatorBuilder::kNoFlags;
1078 }
1079 }  // namespace compiler
1080 }  // namespace internal
1081 }  // namespace v8