1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h"
7 #include "src/compiler/node-properties.h"
13 // Adds IA32-specific methods for generating operands.
// NOTE(review): interior lines of this class are missing from this view
// (case labels, else branches, closing braces); visible code is kept
// byte-identical below and comments hedge anything that spans a gap.
14 class IA32OperandGenerator FINAL : public OperandGenerator {
16 explicit IA32OperandGenerator(InstructionSelector* selector)
17 : OperandGenerator(selector) {}
// Use constraint for operands that must live in a byte-addressable register;
// currently pinned to edx (see TODO below).
19 InstructionOperand UseByteRegister(Node* node) {
20 // TODO(titzer): encode byte register use constraints.
21 return UseFixed(node, edx);
// Define constraint for byte-register outputs; currently any register.
24 InstructionOperand DefineAsByteRegister(Node* node) {
25 // TODO(titzer): encode byte register def constraints.
26 return DefineAsRegister(node);
// Returns true if |node| can be encoded as an x86 immediate operand.
29 bool CanBeImmediate(Node* node) {
30 switch (node->opcode()) {
31 case IrOpcode::kInt32Constant:
32 case IrOpcode::kNumberConstant:
33 case IrOpcode::kExternalConstant:
35 case IrOpcode::kHeapConstant: {
36 // Constants in new space cannot be used as immediates in V8 because
37 // the GC does not scan code objects when collecting the new generation.
38 Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
39 Isolate* isolate = value.handle()->GetIsolate();
40 return !isolate->heap()->InNewSpace(*value.handle());
// Fills |inputs|/|input_count| for a [base + index*2^scale + displacement]
// memory operand and returns the matching AddressingMode. Any of
// base/index/displacement_node may be NULL (handled by branches, some of
// which are outside this view).
47 AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
48 Node* displacement_node,
49 InstructionOperand inputs[],
50 size_t* input_count) {
51 AddressingMode mode = kMode_MRI;
52 int32_t displacement = (displacement_node == NULL)
54 : OpParameter<int32_t>(displacement_node);
// A constant base is folded into the displacement instead of using a register.
56 if (base->opcode() == IrOpcode::kInt32Constant) {
57 displacement += OpParameter<int32_t>(base);
62 inputs[(*input_count)++] = UseRegister(base);
// Base + scaled index (scale is the power-of-two shift, 0..3).
64 DCHECK(scale >= 0 && scale <= 3);
65 inputs[(*input_count)++] = UseRegister(index);
66 if (displacement != 0) {
67 inputs[(*input_count)++] = TempImmediate(displacement);
// Mode tables are indexed by |scale|: MR{1,2,4,8}(I) = base+index*n(+disp).
68 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
69 kMode_MR4I, kMode_MR8I};
70 mode = kMRnI_modes[scale];
72 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
73 kMode_MR4, kMode_MR8};
74 mode = kMRn_modes[scale];
77 if (displacement == 0) {
80 inputs[(*input_count)++] = TempImmediate(displacement);
// No base register: scaled index (+ displacement) only.
85 DCHECK(scale >= 0 && scale <= 3);
87 inputs[(*input_count)++] = UseRegister(index);
88 if (displacement != 0) {
89 inputs[(*input_count)++] = TempImmediate(displacement);
90 static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
91 kMode_M4I, kMode_M8I};
92 mode = kMnI_modes[scale];
94 static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
96 mode = kMn_modes[scale];
99 inputs[(*input_count)++] = TempImmediate(displacement);
// Matches |node| as base+index*scale+displacement and emits the operand
// inputs; falls back to a plain two-register [r1+r2] form when the matched
// displacement cannot be an immediate.
106 AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
107 InstructionOperand inputs[],
108 size_t* input_count) {
109 BaseWithIndexAndDisplacement32Matcher m(node, true);
111 if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
112 return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
113 m.displacement(), inputs, input_count);
115 inputs[(*input_count)++] = UseRegister(node->InputAt(0));
116 inputs[(*input_count)++] = UseRegister(node->InputAt(1));
// A node with no remaining live uses is a better left (clobbered) operand
// for two-address instructions, since overwriting it is free.
121 bool CanBeBetterLeftOperand(Node* node) const {
122 return !selector()->IsLive(node);
// Emits a unary float64 instruction: register output, one register input.
127 static void VisitRRFloat64(InstructionSelector* selector,
128 InstructionCode opcode, Node* node) {
129 IA32OperandGenerator g(selector);
130 selector->Emit(opcode, g.DefineAsRegister(node),
131 g.UseRegister(node->InputAt(0)));
// Selects an IA32 load instruction for |node| based on the load's
// representation and type. NOTE(review): several switch cases of the
// representation dispatch are missing from this view.
135 void InstructionSelector::VisitLoad(Node* node) {
136 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
137 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
147 case kRepBit: // Fall through.
// Sign-extend for signed int32 loads, zero-extend otherwise.
149 opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
152 opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
154 case kRepTagged: // Fall through.
163 IA32OperandGenerator g(this);
164 InstructionOperand outputs[1];
165 outputs[0] = g.DefineAsRegister(node);
166 InstructionOperand inputs[3];
167 size_t input_count = 0;
// Fold the address computation into the load's addressing mode.
168 AddressingMode mode =
169 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
170 InstructionCode code = opcode | AddressingModeField::encode(mode);
171 Emit(code, 1, outputs, input_count, inputs);
// Selects an IA32 store instruction. Stores of tagged values with a full
// write barrier go through kIA32StoreWriteBarrier; all others pick a plain
// mov variant by representation (some switch cases missing from this view).
175 void InstructionSelector::VisitStore(Node* node) {
176 IA32OperandGenerator g(this);
177 Node* base = node->InputAt(0);
178 Node* index = node->InputAt(1);
179 Node* value = node->InputAt(2);
181 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
182 MachineType rep = RepresentationOf(store_rep.machine_type());
183 if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
184 DCHECK_EQ(kRepTagged, rep);
185 // TODO(dcarney): refactor RecordWrite function to take temp registers
186 // and pass them here instead of using fixed regs
187 // TODO(dcarney): handle immediate indices.
// Fixed registers (ebx/ecx/edx) match the RecordWrite stub's expectations
// per the TODO above.
188 InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
189 Emit(kIA32StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
190 g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
194 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
204 case kRepBit: // Fall through.
211 case kRepTagged: // Fall through.
220 InstructionOperand val;
221 if (g.CanBeImmediate(value)) {
222 val = g.UseImmediate(value);
// Byte stores need a byte-addressable source register.
223 } else if (rep == kRepWord8 || rep == kRepBit) {
224 val = g.UseByteRegister(value);
226 val = g.UseRegister(value);
229 InstructionOperand inputs[4];
230 size_t input_count = 0;
231 AddressingMode mode =
232 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
233 InstructionCode code = opcode | AddressingModeField::encode(mode);
// The value operand goes last, after the address operands.
234 inputs[input_count++] = val;
235 Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
// Selects a bounds-checked load: inputs are (buffer, offset, length).
// NOTE(review): the representation switch's case labels are partially
// missing from this view.
239 void InstructionSelector::VisitCheckedLoad(Node* node) {
240 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
241 MachineType typ = TypeOf(OpParameter<MachineType>(node));
242 IA32OperandGenerator g(this);
243 Node* const buffer = node->InputAt(0);
244 Node* const offset = node->InputAt(1);
245 Node* const length = node->InputAt(2);
249 opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
252 opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
255 opcode = kCheckedLoadWord32;
258 opcode = kCheckedLoadFloat32;
261 opcode = kCheckedLoadFloat64;
267 InstructionOperand offset_operand = g.UseRegister(offset);
268 InstructionOperand length_operand =
269 g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
// Constant buffer: address is offset+imm (MRI); otherwise buffer reg +
// offset reg (MR1). Operand order: offset, length, then address operands.
270 if (g.CanBeImmediate(buffer)) {
271 Emit(opcode | AddressingModeField::encode(kMode_MRI),
272 g.DefineAsRegister(node), offset_operand, length_operand,
273 offset_operand, g.UseImmediate(buffer));
275 Emit(opcode | AddressingModeField::encode(kMode_MR1),
276 g.DefineAsRegister(node), offset_operand, length_operand,
277 g.UseRegister(buffer), offset_operand);
// Selects a bounds-checked store: inputs are (buffer, offset, length, value).
// Mirrors VisitCheckedLoad's addressing; some switch case labels are missing
// from this view.
282 void InstructionSelector::VisitCheckedStore(Node* node) {
283 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
284 IA32OperandGenerator g(this);
285 Node* const buffer = node->InputAt(0);
286 Node* const offset = node->InputAt(1);
287 Node* const length = node->InputAt(2);
288 Node* const value = node->InputAt(3);
292 opcode = kCheckedStoreWord8;
295 opcode = kCheckedStoreWord16;
298 opcode = kCheckedStoreWord32;
301 opcode = kCheckedStoreFloat32;
304 opcode = kCheckedStoreFloat64;
// Value: immediate if encodable; byte-register for 8-bit stores; else any
// register.
310 InstructionOperand value_operand =
311 g.CanBeImmediate(value)
312 ? g.UseImmediate(value)
313 : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
314 : g.UseRegister(value));
315 InstructionOperand offset_operand = g.UseRegister(offset);
316 InstructionOperand length_operand =
317 g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
318 if (g.CanBeImmediate(buffer)) {
319 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
320 offset_operand, length_operand, value_operand, offset_operand,
321 g.UseImmediate(buffer));
323 Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
324 offset_operand, length_operand, value_operand, g.UseRegister(buffer),
330 // Shared routine for multiple binary operations.
// Builds operands for a two-address IA32 binop (output aliases the first
// input) and threads the FlagsContinuation (branch targets or a boolean
// materialization) through the emitted instruction.
331 static void VisitBinop(InstructionSelector* selector, Node* node,
332 InstructionCode opcode, FlagsContinuation* cont) {
333 IA32OperandGenerator g(selector);
334 Int32BinopMatcher m(node);
335 Node* left = m.left().node();
336 Node* right = m.right().node();
337 InstructionOperand inputs[4];
338 size_t input_count = 0;
339 InstructionOperand outputs[2];
340 size_t output_count = 0;
342 // TODO(turbofan): match complex addressing modes.
344 // If both inputs refer to the same operand, enforce allocating a register
345 // for both of them to ensure that we don't end up generating code like
348 // mov eax, [ebp-0x10]
349 // add eax, [ebp-0x10]
// NOTE(review): the `left == right` condition guarding this branch is on a
// line missing from this view.
351 InstructionOperand const input = g.UseRegister(left);
352 inputs[input_count++] = input;
353 inputs[input_count++] = input;
354 } else if (g.CanBeImmediate(right)) {
355 inputs[input_count++] = g.UseRegister(left);
356 inputs[input_count++] = g.UseImmediate(right);
// Commutative ops prefer a dead node on the left so it can be clobbered.
358 if (node->op()->HasProperty(Operator::kCommutative) &&
359 g.CanBeBetterLeftOperand(right)) {
360 std::swap(left, right);
362 inputs[input_count++] = g.UseRegister(left);
363 inputs[input_count++] = g.Use(right);
// Branch continuations append the true/false block labels as inputs.
366 if (cont->IsBranch()) {
367 inputs[input_count++] = g.Label(cont->true_block());
368 inputs[input_count++] = g.Label(cont->false_block());
// Two-address form: result must be the same register as the first input.
371 outputs[output_count++] = g.DefineSameAsFirst(node);
// Set continuations additionally produce a byte-register boolean.
373 outputs[output_count++] = g.DefineAsByteRegister(cont->result());
376 DCHECK_NE(0u, input_count);
377 DCHECK_NE(0u, output_count);
378 DCHECK_GE(arraysize(inputs), input_count);
379 DCHECK_GE(arraysize(outputs), output_count);
381 selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
386 // Shared routine for multiple binary operations.
// Convenience overload: no flags output, uses a default (empty) continuation.
387 static void VisitBinop(InstructionSelector* selector, Node* node,
388 InstructionCode opcode) {
389 FlagsContinuation cont;
390 VisitBinop(selector, node, opcode, &cont);
// Word32 bitwise AND: plain two-address binop.
394 void InstructionSelector::VisitWord32And(Node* node) {
395 VisitBinop(this, node, kIA32And);
// Word32 bitwise OR: plain two-address binop.
399 void InstructionSelector::VisitWord32Or(Node* node) {
400 VisitBinop(this, node, kIA32Or);
// Word32 XOR; x ^ -1 is strength-reduced to a single NOT instruction.
404 void InstructionSelector::VisitWord32Xor(Node* node) {
405 IA32OperandGenerator g(this);
406 Int32BinopMatcher m(node);
407 if (m.right().Is(-1)) {
408 Emit(kIA32Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
410 VisitBinop(this, node, kIA32Xor);
415 // Shared routine for multiple shift operations.
// Shift amount is either an immediate or pinned to ecx (the only register
// x86 shift instructions accept for a variable count).
416 static inline void VisitShift(InstructionSelector* selector, Node* node,
418 IA32OperandGenerator g(selector);
419 Node* left = node->InputAt(0);
420 Node* right = node->InputAt(1);
422 if (g.CanBeImmediate(right)) {
423 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
424 g.UseImmediate(right));
426 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
427 g.UseFixed(right, ecx));
// High 32 bits of a 32x32 multiply: x86 [i]mul writes edx:eax, so the first
// input is fixed to eax and the result (high word) is defined in edx.
434 void VisitMulHigh(InstructionSelector* selector, Node* node,
436 IA32OperandGenerator g(selector);
437 selector->Emit(opcode, g.DefineAsFixed(node, edx),
438 g.UseFixed(node->InputAt(0), eax),
439 g.UseUniqueRegister(node->InputAt(1)));
// Division: x86 [i]div takes the dividend in edx:eax and writes the quotient
// to eax; edx is clobbered, hence the temp.
443 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
444 IA32OperandGenerator g(selector);
445 InstructionOperand temps[] = {g.TempRegister(edx)};
446 selector->Emit(opcode, g.DefineAsFixed(node, eax),
447 g.UseFixed(node->InputAt(0), eax),
448 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
// Modulus: same [i]div, but the remainder lands in edx.
452 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
453 IA32OperandGenerator g(selector);
454 selector->Emit(opcode, g.DefineAsFixed(node, edx),
455 g.UseFixed(node->InputAt(0), eax),
456 g.UseUnique(node->InputAt(1)));
// Emits an lea computing index*2^scale + base + displacement into |result|,
// reusing the shared memory-operand builder for the addressing mode.
459 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
460 int scale, Node* base, Node* displacement) {
461 IA32OperandGenerator g(selector);
462 InstructionOperand inputs[4];
463 size_t input_count = 0;
464 AddressingMode mode = g.GenerateMemoryOperandInputs(
465 index, scale, base, displacement, inputs, &input_count);
467 DCHECK_NE(0u, input_count);
468 DCHECK_GE(arraysize(inputs), input_count);
470 InstructionOperand outputs[1];
471 outputs[0] = g.DefineAsRegister(result);
473 InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
475 selector->Emit(opcode, 1, outputs, input_count, inputs);
// Shl by a small constant (or *2^n(+1) pattern) becomes an lea; otherwise a
// regular shl. NOTE(review): the matcher-success guard line is missing from
// this view.
481 void InstructionSelector::VisitWord32Shl(Node* node) {
482 Int32ScaleMatcher m(node, true);
484 Node* index = node->InputAt(0);
485 Node* base = m.power_of_two_plus_one() ? index : NULL;
486 EmitLea(this, node, index, m.scale(), base, NULL);
489 VisitShift(this, node, kIA32Shl);
// Logical right shift.
493 void InstructionSelector::VisitWord32Shr(Node* node) {
494 VisitShift(this, node, kIA32Shr);
// Arithmetic right shift.
498 void InstructionSelector::VisitWord32Sar(Node* node) {
499 VisitShift(this, node, kIA32Sar);
// Rotate right.
503 void InstructionSelector::VisitWord32Ror(Node* node) {
504 VisitShift(this, node, kIA32Ror);
// Count leading zeros via lzcnt.
508 void InstructionSelector::VisitWord32Clz(Node* node) {
509 IA32OperandGenerator g(this);
510 Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Int32 addition: prefer a single lea when the add matches a
// base+index*scale+displacement pattern; otherwise a two-address add.
514 void InstructionSelector::VisitInt32Add(Node* node) {
515 IA32OperandGenerator g(this);
517 // Try to match the Add to a lea pattern
518 BaseWithIndexAndDisplacement32Matcher m(node);
// NOTE(review): the first half of this condition (matcher success) is on a
// line missing from this view.
520 (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
521 InstructionOperand inputs[4];
522 size_t input_count = 0;
523 AddressingMode mode = g.GenerateMemoryOperandInputs(
524 m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
526 DCHECK_NE(0u, input_count);
527 DCHECK_GE(arraysize(inputs), input_count);
529 InstructionOperand outputs[1];
530 outputs[0] = g.DefineAsRegister(node);
532 InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
533 Emit(opcode, 1, outputs, input_count, inputs);
537 // No lea pattern match, use add
538 VisitBinop(this, node, kIA32Add);
// Int32 subtraction; 0 - x is strength-reduced to a single neg instruction.
542 void InstructionSelector::VisitInt32Sub(Node* node) {
543 IA32OperandGenerator g(this);
544 Int32BinopMatcher m(node);
545 if (m.left().Is(0)) {
546 Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
548 VisitBinop(this, node, kIA32Sub);
// Int32 multiply: *2^n(+1) patterns become an lea; a constant right operand
// uses the three-operand imul-with-immediate form; otherwise two-address
// imul with the dead operand preferred on the left.
553 void InstructionSelector::VisitInt32Mul(Node* node) {
554 Int32ScaleMatcher m(node, true);
// NOTE(review): the matcher-success guard line is missing from this view.
556 Node* index = node->InputAt(0);
557 Node* base = m.power_of_two_plus_one() ? index : NULL;
558 EmitLea(this, node, index, m.scale(), base, NULL);
561 IA32OperandGenerator g(this);
562 Node* left = node->InputAt(0);
563 Node* right = node->InputAt(1);
564 if (g.CanBeImmediate(right)) {
// imul r, r/m, imm does not clobber its source, so the result can be any reg.
565 Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
566 g.UseImmediate(right));
568 if (g.CanBeBetterLeftOperand(right)) {
569 std::swap(left, right);
571 Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
// Thin wrappers dispatching to the shared mul-high/div/mod emitters with the
// signed or unsigned IA32 opcode.
577 void InstructionSelector::VisitInt32MulHigh(Node* node) {
578 VisitMulHigh(this, node, kIA32ImulHigh);
582 void InstructionSelector::VisitUint32MulHigh(Node* node) {
583 VisitMulHigh(this, node, kIA32UmulHigh);
587 void InstructionSelector::VisitInt32Div(Node* node) {
588 VisitDiv(this, node, kIA32Idiv);
592 void InstructionSelector::VisitUint32Div(Node* node) {
593 VisitDiv(this, node, kIA32Udiv);
597 void InstructionSelector::VisitInt32Mod(Node* node) {
598 VisitMod(this, node, kIA32Idiv);
602 void InstructionSelector::VisitUint32Mod(Node* node) {
603 VisitMod(this, node, kIA32Udiv);
// Numeric conversion visitors: each emits a single SSE conversion with a
// register output and an unconstrained (register-or-memory) input.
607 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
608 IA32OperandGenerator g(this);
609 Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
613 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
614 IA32OperandGenerator g(this);
615 Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
619 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
620 IA32OperandGenerator g(this);
621 Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
625 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
626 IA32OperandGenerator g(this);
627 Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
631 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
632 IA32OperandGenerator g(this);
633 Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
637 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
638 IA32OperandGenerator g(this);
639 Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Float64 add: AVX's three-operand form allows a free output register;
// two-operand SSE requires the output to alias the first input.
643 void InstructionSelector::VisitFloat64Add(Node* node) {
644 IA32OperandGenerator g(this);
645 if (IsSupported(AVX)) {
646 Emit(kAVXFloat64Add, g.DefineAsRegister(node),
647 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
649 Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
650 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
// Float64 subtract. First tries to recognize -0.0 - RoundDown(-0.0 - x)
// (the Math.ceil lowering) and collapse it into a single round-up
// instruction; otherwise emits AVX or SSE subtract.
655 void InstructionSelector::VisitFloat64Sub(Node* node) {
656 IA32OperandGenerator g(this);
657 Float64BinopMatcher m(node);
658 if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
659 CanCover(m.node(), m.right().node())) {
660 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
661 CanCover(m.right().node(), m.right().InputAt(0))) {
662 Float64BinopMatcher mright0(m.right().InputAt(0));
663 if (mright0.left().IsMinusZero()) {
// -(-x rounded down) == x rounded up.
664 Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
665 g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
670 if (IsSupported(AVX)) {
671 Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
672 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
674 Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
675 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
// Float64 multiply/divide: AVX three-operand form when available, otherwise
// two-operand SSE with the output aliasing the first input.
680 void InstructionSelector::VisitFloat64Mul(Node* node) {
681 IA32OperandGenerator g(this);
682 if (IsSupported(AVX)) {
683 Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
684 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
686 Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
687 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
692 void InstructionSelector::VisitFloat64Div(Node* node) {
693 IA32OperandGenerator g(this);
694 if (IsSupported(AVX)) {
695 Emit(kAVXFloat64Div, g.DefineAsRegister(node),
696 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
698 Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
699 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
// Float64 modulus; the code generator's fprem-based sequence needs eax as a
// scratch register, hence the fixed temp.
704 void InstructionSelector::VisitFloat64Mod(Node* node) {
705 IA32OperandGenerator g(this);
706 InstructionOperand temps[] = {g.TempRegister(eax)};
707 Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
708 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
// Float64 max/min: AVX three-operand form when available, otherwise
// two-operand SSE with the output aliasing the first input.
713 void InstructionSelector::VisitFloat64Max(Node* node) {
714 IA32OperandGenerator g(this);
715 if (IsSupported(AVX)) {
716 Emit(kAVXFloat64Max, g.DefineAsRegister(node),
717 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
719 Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
720 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
725 void InstructionSelector::VisitFloat64Min(Node* node) {
726 IA32OperandGenerator g(this);
727 if (IsSupported(AVX)) {
728 Emit(kAVXFloat64Min, g.DefineAsRegister(node),
729 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
731 Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
732 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
// Float64 square root via sqrtsd.
737 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
738 IA32OperandGenerator g(this);
739 Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Rounding visitors encode the round mode into MiscField of the shared
// kSSEFloat64Round opcode.
743 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
744 VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundDown), node);
748 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
749 VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundToZero),
// Round-ties-away body is missing from this view (presumably UNREACHABLE on
// ia32 — TODO confirm against the full file).
754 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
// Lowers a call node: sets up the call buffer, pushes stack arguments,
// threads an optional exception-handler label, and emits the call opcode
// chosen by the call descriptor's kind.
759 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
760 IA32OperandGenerator g(this);
761 const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
763 FrameStateDescriptor* frame_state_descriptor = NULL;
765 if (descriptor->NeedsFrameState()) {
766 frame_state_descriptor =
767 GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
770 CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
772 // Compute InstructionOperands for inputs and outputs.
773 InitializeCallBuffer(node, &buffer, true, true);
775 // Push any stack arguments.
// Iterated in reverse so the first argument ends up lowest on the stack.
776 for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
778 // TODO(titzer): handle pushing double parameters.
// NOTE(review): the ATOM-specific operand choice (the ':' arm of this
// conditional) is on lines missing from this view.
779 InstructionOperand value =
780 g.CanBeImmediate(*i) ? g.UseImmediate(*i) : IsSupported(ATOM)
783 Emit(kIA32Push, g.NoOutput(), value);
786 // Pass label of exception handler block.
787 CallDescriptor::Flags flags = descriptor->flags();
788 if (handler != nullptr) {
789 flags |= CallDescriptor::kHasExceptionHandler;
790 buffer.instruction_args.push_back(g.Label(handler));
793 // Select the appropriate opcode based on the call type.
794 InstructionCode opcode;
795 switch (descriptor->kind()) {
796 case CallDescriptor::kCallCodeObject: {
797 opcode = kArchCallCodeObject;
800 case CallDescriptor::kCallJSFunction:
801 opcode = kArchCallJSFunction;
807 opcode |= MiscField::encode(flags);
809 // Emit the call instruction.
810 InstructionOperand* first_output =
811 buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
812 Instruction* call_instr =
813 Emit(opcode, buffer.outputs.size(), first_output,
814 buffer.instruction_args.size(), &buffer.instruction_args.front());
815 call_instr->MarkAsCall();
821 // Shared routine for multiple compare operations.
// Emits a flags-setting compare and materializes the continuation: branch
// continuations get the two block labels; set continuations produce a
// boolean in a byte register.
822 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
823 InstructionOperand left, InstructionOperand right,
824 FlagsContinuation* cont) {
825 IA32OperandGenerator g(selector);
826 if (cont->IsBranch()) {
827 selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
828 g.Label(cont->true_block()), g.Label(cont->false_block()));
830 DCHECK(cont->IsSet());
831 selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
837 // Shared routine for multiple compare operations.
// Node-level overload: optionally swaps commutative operands so the
// preferable (dead) node lands on the register side, then delegates to the
// operand-level VisitCompare.
838 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
839 Node* left, Node* right, FlagsContinuation* cont,
841 IA32OperandGenerator g(selector);
842 if (commutative && g.CanBeBetterLeftOperand(right)) {
843 std::swap(left, right);
845 VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
849 // Shared routine for multiple float64 compare operations (inputs commuted).
// Note the deliberate right/left swap: the chosen flag conditions (see the
// Float64 visitors below) are stated in terms of the commuted comparison.
850 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
851 FlagsContinuation* cont) {
852 Node* const left = node->InputAt(0);
853 Node* const right = node->InputAt(1);
854 VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
858 // Shared routine for multiple word compare operations.
// Prefers an immediate operand on either side; when the immediate is on the
// left, the continuation's condition is commuted to compensate.
859 void VisitWordCompare(InstructionSelector* selector, Node* node,
860 InstructionCode opcode, FlagsContinuation* cont) {
861 IA32OperandGenerator g(selector);
862 Node* const left = node->InputAt(0);
863 Node* const right = node->InputAt(1);
865 // Match immediates on left or right side of comparison.
866 if (g.CanBeImmediate(right)) {
867 VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
868 } else if (g.CanBeImmediate(left)) {
869 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
870 VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
872 VisitCompare(selector, opcode, left, right, cont,
873 node->op()->HasProperty(Operator::kCommutative));
// Default word compare. Special-cases the interpreter stack check pattern,
// Compare(Load(js_stack_limit), LoadStackPointer), collapsing it into the
// dedicated kIA32StackCheck instruction; everything else is a plain cmp.
878 void VisitWordCompare(InstructionSelector* selector, Node* node,
879 FlagsContinuation* cont) {
880 IA32OperandGenerator g(selector);
881 Int32BinopMatcher m(node);
882 if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
883 LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
884 ExternalReference js_stack_limit =
885 ExternalReference::address_of_stack_limit(selector->isolate());
886 if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
887 // Compare(Load(js_stack_limit), LoadStackPointer)
// The pattern has the limit on the left, so the condition is commuted
// unless the comparison is symmetric anyway.
888 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
889 InstructionCode opcode = cont->Encode(kIA32StackCheck);
890 if (cont->IsBranch()) {
891 selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
892 g.Label(cont->false_block()));
894 DCHECK(cont->IsSet());
895 selector->Emit(opcode, g.DefineAsRegister(cont->result()));
900 VisitWordCompare(selector, node, kIA32Cmp, cont);
904 // Shared routine for word comparison with zero.
// Walks down through coverable value nodes, fusing the branch/set with a
// comparison, float compare, test, sub, or <Op>WithOverflow projection when
// possible; falls back to cmp against an explicit 0 immediate.
905 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
906 Node* value, FlagsContinuation* cont) {
907 // Try to combine the branch with a comparison.
908 while (selector->CanCover(user, value)) {
909 switch (value->opcode()) {
910 case IrOpcode::kWord32Equal: {
911 // Try to combine with comparisons against 0 by simply inverting the
// (x == 0) is folded by negating the continuation and descending into x.
913 Int32BinopMatcher m(value);
914 if (m.right().Is(0)) {
916 value = m.left().node();
920 cont->OverwriteAndNegateIfEqual(kEqual);
921 return VisitWordCompare(selector, value, cont);
923 case IrOpcode::kInt32LessThan:
924 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
925 return VisitWordCompare(selector, value, cont);
926 case IrOpcode::kInt32LessThanOrEqual:
927 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
928 return VisitWordCompare(selector, value, cont);
929 case IrOpcode::kUint32LessThan:
930 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
931 return VisitWordCompare(selector, value, cont);
932 case IrOpcode::kUint32LessThanOrEqual:
933 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
934 return VisitWordCompare(selector, value, cont);
// Float conditions use the commuted encodings matching VisitFloat64Compare.
935 case IrOpcode::kFloat64Equal:
936 cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
937 return VisitFloat64Compare(selector, value, cont);
938 case IrOpcode::kFloat64LessThan:
939 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
940 return VisitFloat64Compare(selector, value, cont);
941 case IrOpcode::kFloat64LessThanOrEqual:
942 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
943 return VisitFloat64Compare(selector, value, cont);
944 case IrOpcode::kProjection:
945 // Check if this is the overflow output projection of an
946 // <Operation>WithOverflow node.
947 if (ProjectionIndexOf(value->op()) == 1u) {
948 // We cannot combine the <Operation>WithOverflow with this branch
949 // unless the 0th projection (the use of the actual value of the
950 // <Operation> is either NULL, which means there's no use of the
951 // actual value, or was already defined, which means it is scheduled
952 // *AFTER* this branch).
953 Node* const node = value->InputAt(0);
954 Node* const result = NodeProperties::FindProjection(node, 0);
955 if (result == NULL || selector->IsDefined(result)) {
956 switch (node->opcode()) {
957 case IrOpcode::kInt32AddWithOverflow:
958 cont->OverwriteAndNegateIfEqual(kOverflow);
959 return VisitBinop(selector, node, kIA32Add, cont);
960 case IrOpcode::kInt32SubWithOverflow:
961 cont->OverwriteAndNegateIfEqual(kOverflow);
962 return VisitBinop(selector, node, kIA32Sub, cont);
// A sub's flags already encode the comparison; a bitwise-and becomes test.
969 case IrOpcode::kInt32Sub:
970 return VisitWordCompare(selector, value, cont);
971 case IrOpcode::kWord32And:
972 return VisitWordCompare(selector, value, kIA32Test, cont);
979 // Continuation could not be combined with a compare, emit compare against 0.
980 IA32OperandGenerator g(selector);
981 VisitCompare(selector, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
// Lowers a branch: "branch if input != 0", attempting compare fusion via
// VisitWordCompareZero.
987 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
988 BasicBlock* fbranch) {
989 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
990 VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
// Lowers a switch: picks a jump table when the case count and value range
// make it cheaper (4 bytes/entry vs 2 instructions/case, weighted 3:1 for
// time); otherwise a chain of conditional jumps.
994 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
995 IA32OperandGenerator g(this);
996 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
998 // Emit either ArchTableSwitch or ArchLookupSwitch.
999 size_t table_space_cost = 4 + sw.value_range;
1000 size_t table_time_cost = 3;
1001 size_t lookup_space_cost = 3 + 2 * sw.case_count;
1002 size_t lookup_time_cost = sw.case_count;
1003 if (sw.case_count > 4 &&
1004 table_space_cost + 3 * table_time_cost <=
1005 lookup_space_cost + 3 * lookup_time_cost &&
1006 sw.min_value > std::numeric_limits<int32_t>::min()) {
1007 InstructionOperand index_operand = value_operand;
// Bias the value down to a zero-based table index when min_value != 0.
// NOTE(review): the `if (sw.min_value)` guard line is missing from this view.
1009 index_operand = g.TempRegister();
1010 Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
1011 value_operand, g.TempImmediate(-sw.min_value));
1013 // Generate a table lookup.
1014 return EmitTableSwitch(sw, index_operand);
1017 // Generate a sequence of conditional jumps.
1018 return EmitLookupSwitch(sw, value_operand);
// Word32 equality; x == 0 routes through the compare-with-zero combiner so
// it can fuse with the flag-producing node.
1022 void InstructionSelector::VisitWord32Equal(Node* const node) {
1023 FlagsContinuation cont(kEqual, node);
1024 Int32BinopMatcher m(node);
1025 if (m.right().Is(0)) {
1026 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1028 VisitWordCompare(this, node, &cont);
// Signed/unsigned relational comparisons: each sets the matching flag
// condition and defers to the shared word compare.
1032 void InstructionSelector::VisitInt32LessThan(Node* node) {
1033 FlagsContinuation cont(kSignedLessThan, node);
1034 VisitWordCompare(this, node, &cont);
1038 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1039 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1040 VisitWordCompare(this, node, &cont);
1044 void InstructionSelector::VisitUint32LessThan(Node* node) {
1045 FlagsContinuation cont(kUnsignedLessThan, node);
1046 VisitWordCompare(this, node, &cont);
1050 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1051 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1052 VisitWordCompare(this, node, &cont);
// Add/sub with overflow: if the overflow projection is used, set the
// overflow flag into it; otherwise emit the plain binop (flags unused).
1056 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1057 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1058 FlagsContinuation cont(kOverflow, ovf);
1059 return VisitBinop(this, node, kIA32Add, &cont);
1061 FlagsContinuation cont;
1062 VisitBinop(this, node, kIA32Add, &cont);
1066 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1067 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1068 FlagsContinuation cont(kOverflow, ovf);
1069 return VisitBinop(this, node, kIA32Sub, &cont);
1071 FlagsContinuation cont;
1072 VisitBinop(this, node, kIA32Sub, &cont);
// Float64 comparisons. The flag conditions are stated for the commuted
// operand order used by VisitFloat64Compare (right compared against left),
// which is why "less than" maps to an unsigned greater-than condition.
1076 void InstructionSelector::VisitFloat64Equal(Node* node) {
1077 FlagsContinuation cont(kUnorderedEqual, node);
1078 VisitFloat64Compare(this, node, &cont);
1082 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1083 FlagsContinuation cont(kUnsignedGreaterThan, node);
1084 VisitFloat64Compare(this, node, &cont);
1088 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1089 FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
1090 VisitFloat64Compare(this, node, &cont);
// Extract the low/high 32-bit word of a float64 into a general register.
1094 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1095 IA32OperandGenerator g(this);
1096 Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
1097 g.Use(node->InputAt(0)));
1101 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1102 IA32OperandGenerator g(this);
1103 Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
1104 g.Use(node->InputAt(0)));
// Replace the low 32-bit word of a float64. When the existing value is a
// constant whose upper word is zero, a plain 32-bit load into the xmm
// register suffices (it zeroes the high word).
1108 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1109 IA32OperandGenerator g(this);
1110 Node* left = node->InputAt(0);
1111 Node* right = node->InputAt(1);
1112 Float64Matcher mleft(left);
1113 if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
1114 Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
1117 Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1118 g.UseRegister(left), g.Use(right));
// Replace the high 32-bit word of a float64 (two-address: output aliases
// the float input).
1122 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1123 IA32OperandGenerator g(this);
1124 Node* left = node->InputAt(0);
1125 Node* right = node->InputAt(1);
1126 Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1127 g.UseRegister(left), g.Use(right));
// Advertises which optional machine operators this backend supports;
// float64 rounding requires SSE4.1 (roundsd).
1132 MachineOperatorBuilder::Flags
1133 InstructionSelector::SupportedMachineOperatorFlags() {
1134 MachineOperatorBuilder::Flags flags =
1135 MachineOperatorBuilder::kFloat64Max |
1136 MachineOperatorBuilder::kFloat64Min |
1137 MachineOperatorBuilder::kWord32ShiftIsSafe;
1138 if (CpuFeatures::IsSupported(SSE4_1)) {
1139 flags |= MachineOperatorBuilder::kFloat64RoundDown |
1140 MachineOperatorBuilder::kFloat64RoundTruncate;
1145 } // namespace compiler
1146 } // namespace internal