1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h"
7 #include "src/compiler/node-properties-inl.h"
13 // Adds IA32-specific methods for generating operands.
// IA32-specific operand generator: adds helpers for byte-addressable
// registers, immediate-operand eligibility, and an operand-order heuristic
// on top of the generic OperandGenerator.
// NOTE(review): this listing is sampled — original line numbers are embedded
// at the start of each line and several lines (closing braces, some switch
// cases) are missing from this view.
14 class IA32OperandGenerator FINAL : public OperandGenerator {
16 explicit IA32OperandGenerator(InstructionSelector* selector)
17 : OperandGenerator(selector) {}
// Allocate |node| in a register usable as a byte operand; currently pinned
// to edx (see TODO below).
19 InstructionOperand* UseByteRegister(Node* node) {
20 // TODO(dcarney): relax constraint.
21 return UseFixed(node, edx);
// Returns true when |node| is a constant that can be encoded directly as an
// x86 immediate operand.
24 bool CanBeImmediate(Node* node) {
25 switch (node->opcode()) {
26 case IrOpcode::kInt32Constant:
27 case IrOpcode::kNumberConstant:
28 case IrOpcode::kExternalConstant:
30 case IrOpcode::kHeapConstant: {
31 // Constants in new space cannot be used as immediates in V8 because
32 // the GC does not scan code objects when collecting the new generation.
33 Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
34 return !isolate()->heap()->InNewSpace(*value.handle());
// Prefer a node that is no longer live as the (clobbered) left operand of a
// two-address instruction, so its register can be reused.
41 bool CanBeBetterLeftOperand(Node* node) const {
42 return !selector()->IsLive(node);
47 // Get the AddressingMode of scale factor N from the AddressingMode of scale factor 1.
// Converts an addressing mode with scale factor 1 into the corresponding
// mode with scale factor 2^power. Relies on the AddressingMode enum laying
// out the x1/x2/x4/x8 variants of each mode contiguously, in that order.
// NOTE(review): the parameter list line (original line 50, presumably
// "int power") is missing from this sampled view.
49 static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
51 DCHECK(0 <= power && power < 4);
52 return static_cast<AddressingMode>(static_cast<int>(base_mode) + power);
56 // Fairly Intel-specific node matcher used for matching scale factors in
58 // Matches nodes of form [x * N] for N in {1,2,4,8}
// Matcher declaration: recognizes [x * N] for N in {1, 2, 4, 8} so the
// multiply can be folded into a scaled-index addressing mode.
// NOTE(review): accessors such as Left()/Power() (used by callers below)
// fall in lines missing from this sampled view.
59 class ScaleFactorMatcher : public NodeMatcher {
61 static const int kMatchedFactors[4];
63 explicit ScaleFactorMatcher(Node* node);
// Non-NULL left_ is the "matched" sentinel set by the constructor.
65 bool Matches() const { return left_ != NULL; }
81 // Fairly Intel-specific node matcher used for matching index and displacement
82 // operands in addressing modes.
83 // Matches nodes of form:
87 // [x] -- fallback case
88 // for N in {1,2,4,8} and K int32_t
// Matcher declaration: decomposes an address computation into
// (index_node * 2^power + displacement), falling back to [x] when no
// richer form matches. Always "matches" — there is no Matches() accessor.
89 class IndexAndDisplacementMatcher : public NodeMatcher {
91 explicit IndexAndDisplacementMatcher(Node* node);
93 Node* index_node() const { return index_node_; }
94 int displacement() const { return displacement_; }
95 int power() const { return power_; }
104 // Fairly Intel-specific node matcher used for matching multiplies that can be
105 // transformed to lea instructions.
106 // Matches nodes of form:
108 // for N in {1,2,3,4,5,8,9}
// Matcher declaration: recognizes multiplies by {1,2,3,4,5,8,9} that can be
// lowered to a single LEA (scale in {1,2,4,8}, optionally plus the base
// register once, encoded here as Displacement() == 1).
109 class LeaMultiplyMatcher : public NodeMatcher {
111 static const int kMatchedFactors[7];
113 explicit LeaMultiplyMatcher(Node* node);
// Non-NULL left_ is the "matched" sentinel set by the constructor.
115 bool Matches() const { return left_ != NULL; }
124 // Displacement will be either 0 or 1.
125 int32_t Displacement() const {
127 return displacement_;
// Scale factors that map directly to an x86 SIB scale (1, 2, 4, 8).
137 const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
// Matches Int32Mul(x, K) for K in kMatchedFactors; on success records the
// non-constant operand in left_ and log2(K) in power_.
// NOTE(review): the switch over |value| (original lines 147-158) is mostly
// missing from this sampled view — only the fall-through increments remain.
140 ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
141 : NodeMatcher(node), left_(NULL), power_(0) {
142 if (opcode() != IrOpcode::kInt32Mul) return;
143 // TODO(dcarney): should test 64 bit ints as well.
144 Int32BinopMatcher m(this->node());
145 if (!m.right().HasValue()) return;
146 int32_t value = m.right().Value();
149 power_++; // Fall through.
151 power_++; // Fall through.
153 power_++; // Fall through.
159 left_ = m.left().node();
// Peels an optional "+ constant" off an Int32Add to obtain the displacement,
// then runs ScaleFactorMatcher on the remaining index expression to extract
// the scale. Defaults (index_node_ = node, displacement_ = 0, power_ = 0)
// implement the [x] fallback when nothing matches.
163 IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
164 : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
165 if (opcode() == IrOpcode::kInt32Add) {
166 Int32BinopMatcher m(this->node());
167 if (m.right().HasValue()) {
168 displacement_ = m.right().Value();
169 index_node_ = m.left().node();
172 // Test scale factor.
173 ScaleFactorMatcher scale_matcher(index_node_);
174 if (scale_matcher.Matches()) {
175 index_node_ = scale_matcher.Left();
176 power_ = scale_matcher.Power();
// Factors expressible as a single LEA: scale {1,2,4,8}, or scale+base for
// 3 (=2+1), 5 (=4+1) and 9 (=8+1).
181 const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
// Matches Int32Mul/Int64Mul by one of kMatchedFactors; records the
// non-constant operand, the scale exponent, and whether the base register
// must be added once (displacement_ == 1).
// NOTE(review): the declarations of |value|/|left|, the else-branch
// structure, and the switch on |value| (original lines 187-191, 196,
// 201-220) are missing from this sampled view; the Int32/Int64 matcher
// locals both named |m| imply they sit in separate if/else scopes.
184 LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
185 : NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
186 if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
192 Int32BinopMatcher m(this->node());
193 if (m.right().HasValue()) {
194 value = m.right().Value();
195 left = m.left().node();
197 Int64BinopMatcher m(this->node());
198 if (m.right().HasValue()) {
199 value = m.right().Value();
200 left = m.left().node();
209 power_++; // Fall through.
212 power_++; // Fall through.
215 power_++; // Fall through.
// Non-power-of-two factors (3, 5, 9) require the extra "+ base" term.
221 if (!base::bits::IsPowerOfTwo64(value)) {
// Computes the best IA32 addressing mode (mode_) and the corresponding
// operands (base/index/displacement) for a base+index pair, folding
// constants into the displacement and scale factors into the mode.
228 class AddressingModeMatcher {
230 AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index)
231 : base_operand_(NULL),
232 index_operand_(NULL),
233 displacement_operand_(NULL),
// Fast path: index is a constant, so the whole index folds into the
// displacement and no scaled-index operand is needed.
235 Int32Matcher index_imm(index);
236 if (index_imm.HasValue()) {
237 int32_t displacement = index_imm.Value();
238 // Compute base operand and fold base immediate into displacement.
239 Int32Matcher base_imm(base);
240 if (!base_imm.HasValue()) {
241 base_operand_ = g->UseRegister(base);
243 displacement += base_imm.Value();
245 if (displacement != 0 || base_operand_ == NULL) {
246 displacement_operand_ = g->TempImmediate(displacement);
248 if (base_operand_ == NULL) {
251 if (displacement == 0) {
258 // Compute index and displacement.
259 IndexAndDisplacementMatcher matcher(index);
260 index_operand_ = g->UseRegister(matcher.index_node());
261 int32_t displacement = matcher.displacement();
262 // Compute base operand and fold base immediate into displacement.
263 Int32Matcher base_imm(base);
264 if (!base_imm.HasValue()) {
265 base_operand_ = g->UseRegister(base);
267 displacement += base_imm.Value();
269 // Compute displacement operand.
270 if (displacement != 0) {
271 displacement_operand_ = g->TempImmediate(displacement);
273 // Compute mode with scale factor one.
274 if (base_operand_ == NULL) {
275 if (displacement_operand_ == NULL) {
281 if (displacement_operand_ == NULL) {
287 // Adjust mode to actual scale factor.
288 mode_ = AdjustAddressingMode(mode_, matcher.power());
290 DCHECK_NE(kMode_None, mode_);
// Writes the non-NULL operands into |inputs| in base/index/displacement
// order and returns how many were written (1..kMaxInputCount).
293 size_t SetInputs(InstructionOperand** inputs) {
294 size_t input_count = 0;
295 // Compute inputs_ and input_count.
296 if (base_operand_ != NULL) {
297 inputs[input_count++] = base_operand_;
299 if (index_operand_ != NULL) {
300 inputs[input_count++] = index_operand_;
302 if (displacement_operand_ != NULL) {
303 inputs[input_count++] = displacement_operand_;
// NOTE(review): argument order is (actual, expected) here but
// (expected, actual) at original line 476 — worth unifying.
305 DCHECK_NE(input_count, 0);
309 static const int kMaxInputCount = 3;
310 InstructionOperand* base_operand_;
311 InstructionOperand* index_operand_;
312 InstructionOperand* displacement_operand_;
313 AddressingMode mode_;
// Shared helper: emit a register-to-register float64 unary op.
// NOTE(review): the second parameter-list line (original line 318,
// presumably "Node* node") is missing from this sampled view.
317 static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
319 IA32OperandGenerator g(selector);
320 selector->Emit(opcode, g.DefineAsRegister(node),
321 g.UseRegister(node->InputAt(0)));
// Selects a load instruction: picks a movsx/movzx/mov variant from the
// load representation, then folds base+index into an addressing mode.
// NOTE(review): the switch header and several cases (word32, float64,
// default) fall in lines missing from this sampled view.
325 void InstructionSelector::VisitLoad(Node* node) {
326 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
327 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
328 Node* base = node->InputAt(0);
329 Node* index = node->InputAt(1);
332 // TODO(titzer): signed/unsigned small loads
340 case kRepBit: // Fall through.
342 opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
345 opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
347 case kRepTagged: // Fall through.
356 IA32OperandGenerator g(this);
357 AddressingModeMatcher matcher(&g, base, index);
358 InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
359 InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
360 InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
361 size_t input_count = matcher.SetInputs(inputs);
362 Emit(code, 1, outputs, input_count, inputs);
// Selects a store instruction. Tagged stores needing a full write barrier
// go through kIA32StoreWriteBarrier with fixed registers (ebx/ecx/edx);
// all other stores pick a mov variant and an addressing mode, appending
// the value as the final input.
// NOTE(review): the opcode-selection switch (original lines 386-410) is
// mostly missing from this sampled view.
366 void InstructionSelector::VisitStore(Node* node) {
367 IA32OperandGenerator g(this);
368 Node* base = node->InputAt(0);
369 Node* index = node->InputAt(1);
370 Node* value = node->InputAt(2);
372 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
373 MachineType rep = RepresentationOf(store_rep.machine_type());
374 if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
375 DCHECK_EQ(kRepTagged, rep);
376 // TODO(dcarney): refactor RecordWrite function to take temp registers
377 // and pass them here instead of using fixed regs
378 // TODO(dcarney): handle immediate indices.
379 InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
380 Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
381 g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
385 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
395 case kRepBit: // Fall through.
402 case kRepTagged: // Fall through.
// Byte stores need a byte-addressable register for the value (eax-edx).
411 InstructionOperand* val;
412 if (g.CanBeImmediate(value)) {
413 val = g.UseImmediate(value);
414 } else if (rep == kRepWord8 || rep == kRepBit) {
415 val = g.UseByteRegister(value);
417 val = g.UseRegister(value);
420 AddressingModeMatcher matcher(&g, base, index);
421 InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
422 InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
423 size_t input_count = matcher.SetInputs(inputs);
424 inputs[input_count++] = val;
425 Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
429 // Shared routine for multiple binary operations.
// Selects a two-address binop (dst same as left). Operand strategy:
// same-node inputs get one shared register; immediates go on the right;
// otherwise commutative ops may swap to put a dead node on the clobbered
// left side. Branch continuations append the true/false labels as inputs;
// set continuations add a second (flag materialization) output.
430 static void VisitBinop(InstructionSelector* selector, Node* node,
431 InstructionCode opcode, FlagsContinuation* cont) {
432 IA32OperandGenerator g(selector);
433 Int32BinopMatcher m(node);
434 Node* left = m.left().node();
435 Node* right = m.right().node();
436 InstructionOperand* inputs[4];
437 size_t input_count = 0;
438 InstructionOperand* outputs[2];
439 size_t output_count = 0;
441 // TODO(turbofan): match complex addressing modes.
443 // If both inputs refer to the same operand, enforce allocating a register
444 // for both of them to ensure that we don't end up generating code like
447 // mov eax, [ebp-0x10]
448 // add eax, [ebp-0x10]
450 InstructionOperand* const input = g.UseRegister(left);
451 inputs[input_count++] = input;
452 inputs[input_count++] = input;
453 } else if (g.CanBeImmediate(right)) {
454 inputs[input_count++] = g.UseRegister(left);
455 inputs[input_count++] = g.UseImmediate(right);
457 if (node->op()->HasProperty(Operator::kCommutative) &&
458 g.CanBeBetterLeftOperand(right)) {
459 std::swap(left, right);
461 inputs[input_count++] = g.UseRegister(left);
462 inputs[input_count++] = g.Use(right);
465 if (cont->IsBranch()) {
466 inputs[input_count++] = g.Label(cont->true_block());
467 inputs[input_count++] = g.Label(cont->false_block());
470 outputs[output_count++] = g.DefineSameAsFirst(node);
472 // TODO(turbofan): Use byte register here.
473 outputs[output_count++] = g.DefineAsRegister(cont->result());
476 DCHECK_NE(0, input_count);
477 DCHECK_NE(0, output_count);
478 DCHECK_GE(arraysize(inputs), input_count);
479 DCHECK_GE(arraysize(outputs), output_count);
481 Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
482 outputs, input_count, inputs);
483 if (cont->IsBranch()) instr->MarkAsControl();
487 // Shared routine for multiple binary operations.
// Convenience overload: binop with no flags continuation.
488 static void VisitBinop(InstructionSelector* selector, Node* node,
489 InstructionCode opcode) {
490 FlagsContinuation cont;
491 VisitBinop(selector, node, opcode, &cont);
// Bitwise word32 operations map directly onto the shared binop selector.
495 void InstructionSelector::VisitWord32And(Node* node) {
496 VisitBinop(this, node, kIA32And);
500 void InstructionSelector::VisitWord32Or(Node* node) {
501 VisitBinop(this, node, kIA32Or);
// Xor with -1 is bitwise negation — emit the single-operand NOT instead.
505 void InstructionSelector::VisitWord32Xor(Node* node) {
506 IA32OperandGenerator g(this);
507 Int32BinopMatcher m(node);
508 if (m.right().Is(-1)) {
509 Emit(kIA32Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
511 VisitBinop(this, node, kIA32Xor);
516 // Shared routine for multiple shift operations.
// Constant shift amounts are emitted as immediates; otherwise the amount
// goes in ecx (the only register x86 shifts accept), after stripping a
// redundant "& 0x1F" mask since the hardware masks to 5 bits anyway.
517 static inline void VisitShift(InstructionSelector* selector, Node* node,
519 IA32OperandGenerator g(selector);
520 Node* left = node->InputAt(0);
521 Node* right = node->InputAt(1);
523 if (g.CanBeImmediate(right)) {
524 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
525 g.UseImmediate(right));
527 Int32BinopMatcher m(node);
528 if (m.right().IsWord32And()) {
529 Int32BinopMatcher mright(right);
530 if (mright.right().Is(0x1F)) {
531 right = mright.left().node();
534 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
535 g.UseFixed(right, ecx));
// Thin per-opcode wrappers over the shared shift selector.
540 void InstructionSelector::VisitWord32Shl(Node* node) {
541 VisitShift(this, node, kIA32Shl);
545 void InstructionSelector::VisitWord32Shr(Node* node) {
546 VisitShift(this, node, kIA32Shr);
550 void InstructionSelector::VisitWord32Sar(Node* node) {
551 VisitShift(this, node, kIA32Sar);
555 void InstructionSelector::VisitWord32Ror(Node* node) {
556 VisitShift(this, node, kIA32Ror);
// Tries to lower (x * N) + K into a single LEA (scaled index plus
// displacement). Returns false if the multiply does not match an
// LEA-encodable factor.
// NOTE(review): the addressing-mode setup and input_count logic (original
// lines 567-568, 574-580) are missing from this sampled view.
560 static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node) {
561 Int32BinopMatcher m(node);
562 if (!m.right().HasValue()) return false;
563 int32_t displacement_value = m.right().Value();
564 Node* left = m.left().node();
565 LeaMultiplyMatcher lmm(left);
566 if (!lmm.Matches()) return false;
569 IA32OperandGenerator g(selector);
570 InstructionOperand* index = g.UseRegister(lmm.Left());
571 InstructionOperand* displacement = g.TempImmediate(displacement_value);
// Third slot repeats |displacement| for the mode that also adds the base.
572 InstructionOperand* inputs[] = {index, displacement, displacement};
573 if (lmm.Displacement() != 0) {
581 mode = AdjustAddressingMode(mode, lmm.Power());
582 InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
583 selector->Emit(kIA32Lea | AddressingModeField::encode(mode), 1, outputs,
584 input_count, inputs);
// Add: prefer the LEA form, otherwise fall back to the generic binop.
589 void InstructionSelector::VisitInt32Add(Node* node) {
590 if (TryEmitLeaMultAdd(this, node)) return;
591 VisitBinop(this, node, kIA32Add);
// Sub: 0 - x is negation — emit the single-operand NEG.
595 void InstructionSelector::VisitInt32Sub(Node* node) {
596 IA32OperandGenerator g(this);
597 Int32BinopMatcher m(node);
598 if (m.left().Is(0)) {
599 Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
601 VisitBinop(this, node, kIA32Sub);
// Tries to lower x * N (N in {1,2,3,4,5,8,9}) into a single LEA.
606 static bool TryEmitLeaMult(InstructionSelector* selector, Node* node) {
607 LeaMultiplyMatcher lea(node);
609 if (!lea.Matches()) return false;
612 IA32OperandGenerator g(selector);
613 InstructionOperand* left = g.UseRegister(lea.Left());
// Second slot repeats |left| for the base+scaled-index mode (e.g. x*3).
614 InstructionOperand* inputs[] = {left, left};
615 if (lea.Displacement() != 0) {
622 mode = AdjustAddressingMode(mode, lea.Power());
623 InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
624 selector->Emit(kIA32Lea | AddressingModeField::encode(mode), 1, outputs,
625 input_count, inputs);
// Mul: LEA form first; immediate multiplier uses three-operand IMUL
// (separate dst); otherwise two-address IMUL, swapping operands when the
// right one is dead so its register can be clobbered.
630 void InstructionSelector::VisitInt32Mul(Node* node) {
631 if (TryEmitLeaMult(this, node)) return;
632 IA32OperandGenerator g(this);
633 Int32BinopMatcher m(node);
634 Node* left = m.left().node();
635 Node* right = m.right().node();
636 if (g.CanBeImmediate(right)) {
637 Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
638 g.UseImmediate(right));
640 if (g.CanBeBetterLeftOperand(right)) {
641 std::swap(left, right);
643 Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
// High-half multiply: x86 widening MUL/IMUL reads eax and writes the high
// half to edx, so inputs/outputs are pinned accordingly.
651 void VisitMulHigh(InstructionSelector* selector, Node* node,
653 IA32OperandGenerator g(selector);
654 selector->Emit(opcode, g.DefineAsFixed(node, edx),
655 g.UseFixed(node->InputAt(0), eax),
656 g.UseUniqueRegister(node->InputAt(1)));
// Division: DIV/IDIV reads eax (with edx as the high half) and writes the
// quotient to eax; edx is clobbered, hence the temp.
660 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
661 IA32OperandGenerator g(selector);
662 InstructionOperand* temps[] = {g.TempRegister(edx)};
663 selector->Emit(opcode, g.DefineAsFixed(node, eax),
664 g.UseFixed(node->InputAt(0), eax),
665 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
// Modulus: same instruction as division, but the result is the remainder
// left in edx.
669 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
670 IA32OperandGenerator g(selector);
671 selector->Emit(opcode, g.DefineAsFixed(node, edx),
672 g.UseFixed(node->InputAt(0), eax),
673 g.UseUnique(node->InputAt(1)));
// Thin per-opcode wrappers over the shared helpers above.
679 void InstructionSelector::VisitInt32MulHigh(Node* node) {
680 VisitMulHigh(this, node, kIA32ImulHigh);
684 void InstructionSelector::VisitUint32MulHigh(Node* node) {
685 VisitMulHigh(this, node, kIA32UmulHigh);
689 void InstructionSelector::VisitInt32Div(Node* node) {
690 VisitDiv(this, node, kIA32Idiv);
694 void InstructionSelector::VisitUint32Div(Node* node) {
695 VisitDiv(this, node, kIA32Udiv);
699 void InstructionSelector::VisitInt32Mod(Node* node) {
700 VisitMod(this, node, kIA32Idiv);
704 void InstructionSelector::VisitUint32Mod(Node* node) {
705 VisitMod(this, node, kIA32Udiv);
// Numeric conversions: each lowers to a single SSE instruction with a
// register (or memory) operand.
709 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
710 IA32OperandGenerator g(this);
711 Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
715 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
716 IA32OperandGenerator g(this);
717 Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
721 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
722 IA32OperandGenerator g(this);
723 Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
727 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
728 IA32OperandGenerator g(this);
729 Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
733 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
734 IA32OperandGenerator g(this);
735 Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
739 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
740 IA32OperandGenerator g(this);
741 Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Float64 arithmetic: two-address SSE ops, so the destination is the same
// register as the first input.
745 void InstructionSelector::VisitFloat64Add(Node* node) {
746 IA32OperandGenerator g(this);
747 Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
748 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
752 void InstructionSelector::VisitFloat64Sub(Node* node) {
753 IA32OperandGenerator g(this);
754 Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
755 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
759 void InstructionSelector::VisitFloat64Mul(Node* node) {
760 IA32OperandGenerator g(this);
761 Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
762 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
766 void InstructionSelector::VisitFloat64Div(Node* node) {
767 IA32OperandGenerator g(this);
768 Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
769 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
// Mod is implemented via a runtime/stub path that clobbers eax.
773 void InstructionSelector::VisitFloat64Mod(Node* node) {
774 IA32OperandGenerator g(this);
775 InstructionOperand* temps[] = {g.TempRegister(eax)};
776 Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
777 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
782 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
783 IA32OperandGenerator g(this);
784 Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Rounding ops require SSE4.1 (ROUNDSD); guarded by the flags advertised
// in SupportedMachineOperatorFlags() below.
788 void InstructionSelector::VisitFloat64Floor(Node* node) {
789 DCHECK(CpuFeatures::IsSupported(SSE4_1));
790 VisitRRFloat64(this, kSSEFloat64Floor, node);
794 void InstructionSelector::VisitFloat64Ceil(Node* node) {
795 DCHECK(CpuFeatures::IsSupported(SSE4_1));
796 VisitRRFloat64(this, kSSEFloat64Ceil, node);
800 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
801 DCHECK(CpuFeatures::IsSupported(SSE4_1));
802 VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
// Round-ties-away is not supported on IA32 (body missing in this sampled
// view; presumably UNREACHABLE() — confirm against the full file).
806 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
// Selects a call: builds the call buffer, pushes stack arguments in
// reverse order, picks the opcode from the call kind, and emits the call
// instruction carrying the descriptor flags.
811 void InstructionSelector::VisitCall(Node* node) {
812 IA32OperandGenerator g(this);
813 CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
815 FrameStateDescriptor* frame_state_descriptor = NULL;
817 if (descriptor->NeedsFrameState()) {
818 frame_state_descriptor =
819 GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
822 CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
824 // Compute InstructionOperands for inputs and outputs.
825 InitializeCallBuffer(node, &buffer, true, true);
827 // Push any stack arguments.
828 for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
829 input != buffer.pushed_nodes.rend(); input++) {
830 // TODO(titzer): handle pushing double parameters.
831 Emit(kIA32Push, NULL,
832 g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
835 // Select the appropriate opcode based on the call type.
836 InstructionCode opcode;
837 switch (descriptor->kind()) {
838 case CallDescriptor::kCallCodeObject: {
839 opcode = kArchCallCodeObject;
842 case CallDescriptor::kCallJSFunction:
843 opcode = kArchCallJSFunction;
849 opcode |= MiscField::encode(descriptor->flags());
851 // Emit the call instruction.
852 InstructionOperand** first_output =
853 buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
854 Instruction* call_instr =
855 Emit(opcode, buffer.outputs.size(), first_output,
856 buffer.instruction_args.size(), &buffer.instruction_args.front());
857 call_instr->MarkAsCall();
863 // Shared routine for multiple compare operations.
// Lowest-level compare emitter: branch continuations attach the two block
// labels and mark the instruction as control; set continuations define a
// boolean result register instead.
864 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
865 InstructionOperand* left, InstructionOperand* right,
866 FlagsContinuation* cont) {
867 IA32OperandGenerator g(selector);
868 if (cont->IsBranch()) {
869 selector->Emit(cont->Encode(opcode), NULL, left, right,
870 g.Label(cont->true_block()),
871 g.Label(cont->false_block()))->MarkAsControl();
873 DCHECK(cont->IsSet());
874 // TODO(titzer): Needs byte register.
875 selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
881 // Shared routine for multiple compare operations.
// Node-level overload: optionally swaps commutative operands so a dead
// node lands on the register-allocated left side.
882 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
883 Node* left, Node* right, FlagsContinuation* cont,
885 IA32OperandGenerator g(selector);
886 if (commutative && g.CanBeBetterLeftOperand(right)) {
887 std::swap(left, right);
889 VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
893 // Shared routine for multiple float compare operations.
894 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
895 FlagsContinuation* cont) {
896 VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
897 cont, node->op()->HasProperty(Operator::kCommutative));
901 // Shared routine for multiple word compare operations.
// Puts an immediate on the right when possible; an immediate on the left
// is handled by swapping and commuting the continuation's condition.
902 void VisitWordCompare(InstructionSelector* selector, Node* node,
903 InstructionCode opcode, FlagsContinuation* cont) {
904 IA32OperandGenerator g(selector);
905 Node* const left = node->InputAt(0);
906 Node* const right = node->InputAt(1);
908 // Match immediates on left or right side of comparison.
909 if (g.CanBeImmediate(right)) {
910 VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
911 } else if (g.CanBeImmediate(left)) {
912 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
913 VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
915 VisitCompare(selector, opcode, left, right, cont,
916 node->op()->HasProperty(Operator::kCommutative));
// Default word compare uses CMP.
921 void VisitWordCompare(InstructionSelector* selector, Node* node,
922 FlagsContinuation* cont) {
923 VisitWordCompare(selector, node, kIA32Cmp, cont);
927 // Shared routine for word comparison with zero.
// Walks down through value nodes that this branch fully covers, fusing the
// branch with a comparison, an overflow projection, or a flag-setting op
// where possible; otherwise falls back to an explicit compare against 0.
928 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
929 Node* value, FlagsContinuation* cont) {
930 // Try to combine the branch with a comparison.
931 while (selector->CanCover(user, value)) {
932 switch (value->opcode()) {
933 case IrOpcode::kWord32Equal: {
934 // Try to combine with comparisons against 0 by simply inverting the
936 Int32BinopMatcher m(value);
937 if (m.right().Is(0)) {
939 value = m.left().node();
943 cont->OverwriteAndNegateIfEqual(kEqual);
944 return VisitWordCompare(selector, value, cont);
946 case IrOpcode::kInt32LessThan:
947 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
948 return VisitWordCompare(selector, value, cont);
949 case IrOpcode::kInt32LessThanOrEqual:
950 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
951 return VisitWordCompare(selector, value, cont);
952 case IrOpcode::kUint32LessThan:
953 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
954 return VisitWordCompare(selector, value, cont);
955 case IrOpcode::kUint32LessThanOrEqual:
956 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
957 return VisitWordCompare(selector, value, cont);
958 case IrOpcode::kFloat64Equal:
959 cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
960 return VisitFloat64Compare(selector, value, cont);
961 case IrOpcode::kFloat64LessThan:
962 cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
963 return VisitFloat64Compare(selector, value, cont);
964 case IrOpcode::kFloat64LessThanOrEqual:
965 cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
966 return VisitFloat64Compare(selector, value, cont);
967 case IrOpcode::kProjection:
968 // Check if this is the overflow output projection of an
969 // <Operation>WithOverflow node.
970 if (OpParameter<size_t>(value) == 1u) {
971 // We cannot combine the <Operation>WithOverflow with this branch
972 // unless the 0th projection (the use of the actual value of the
973 // <Operation> is either NULL, which means there's no use of the
974 // actual value, or was already defined, which means it is scheduled
975 // *AFTER* this branch).
976 Node* node = value->InputAt(0);
977 Node* result = node->FindProjection(0);
978 if (result == NULL || selector->IsDefined(result)) {
979 switch (node->opcode()) {
980 case IrOpcode::kInt32AddWithOverflow:
981 cont->OverwriteAndNegateIfEqual(kOverflow);
982 return VisitBinop(selector, node, kIA32Add, cont);
983 case IrOpcode::kInt32SubWithOverflow:
984 cont->OverwriteAndNegateIfEqual(kOverflow);
985 return VisitBinop(selector, node, kIA32Sub, cont);
// Sub already sets flags like CMP; And is fused via TEST.
992 case IrOpcode::kInt32Sub:
993 return VisitWordCompare(selector, value, cont);
994 case IrOpcode::kWord32And:
995 return VisitWordCompare(selector, value, kIA32Test, cont);
1002 // Continuation could not be combined with a compare, emit compare against 0.
1003 IA32OperandGenerator g(selector);
1004 VisitCompare(selector, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
// Branch selection: condition starts as kNotEqual and is negated when the
// true block is the fall-through; fusion is delegated to
// VisitWordCompareZero.
1010 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1011 BasicBlock* fbranch) {
1012 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1013 if (IsNextInAssemblyOrder(tbranch)) { // We can fallthru to the true block.
1017 VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
// Equality against zero reuses the zero-compare fusion path.
1021 void InstructionSelector::VisitWord32Equal(Node* const node) {
1022 FlagsContinuation cont(kEqual, node);
1023 Int32BinopMatcher m(node);
1024 if (m.right().Is(0)) {
1025 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1027 VisitWordCompare(this, node, &cont);
// Relational compares: materialize the condition into a register via a
// "set" continuation.
1031 void InstructionSelector::VisitInt32LessThan(Node* node) {
1032 FlagsContinuation cont(kSignedLessThan, node);
1033 VisitWordCompare(this, node, &cont);
1037 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1038 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1039 VisitWordCompare(this, node, &cont);
1043 void InstructionSelector::VisitUint32LessThan(Node* node) {
1044 FlagsContinuation cont(kUnsignedLessThan, node);
1045 VisitWordCompare(this, node, &cont);
1049 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1050 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1051 VisitWordCompare(this, node, &cont);
// Overflow-checked add/sub: set the overflow flag into the projection if
// it is used, otherwise emit the plain binop.
1055 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1056 if (Node* ovf = node->FindProjection(1)) {
1057 FlagsContinuation cont(kOverflow, ovf);
1058 return VisitBinop(this, node, kIA32Add, &cont);
1060 FlagsContinuation cont;
1061 VisitBinop(this, node, kIA32Add, &cont);
1065 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1066 if (Node* ovf = node->FindProjection(1)) {
1067 FlagsContinuation cont(kOverflow, ovf);
1068 return VisitBinop(this, node, kIA32Sub, &cont);
1070 FlagsContinuation cont;
1071 VisitBinop(this, node, kIA32Sub, &cont);
// Float compares use the "unordered" conditions to get NaN semantics right.
1075 void InstructionSelector::VisitFloat64Equal(Node* node) {
1076 FlagsContinuation cont(kUnorderedEqual, node);
1077 VisitFloat64Compare(this, node, &cont);
1081 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1082 FlagsContinuation cont(kUnorderedLessThan, node);
1083 VisitFloat64Compare(this, node, &cont);
1087 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1088 FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
1089 VisitFloat64Compare(this, node, &cont);
// Advertises the rounding operators only when SSE4.1 (ROUNDSD) is
// available; the corresponding visitors DCHECK the same feature.
1094 MachineOperatorBuilder::Flags
1095 InstructionSelector::SupportedMachineOperatorFlags() {
1096 if (CpuFeatures::IsSupported(SSE4_1)) {
1097 return MachineOperatorBuilder::kFloat64Floor |
1098 MachineOperatorBuilder::kFloat64Ceil |
1099 MachineOperatorBuilder::kFloat64RoundTruncate;
1101 return MachineOperatorBuilder::Flag::kNoFlags;
1103 } // namespace compiler
1104 } // namespace internal