1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/generic-node-inl.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
13 // Adds X64-specific methods for generating operands.
// NOTE(review): this dump elides lines (original numbering is non-contiguous);
// access specifiers and several closing braces are not visible here.
14 class X64OperandGenerator FINAL : public OperandGenerator {
16 explicit X64OperandGenerator(InstructionSelector* selector)
17 : OperandGenerator(selector) {}
// Allocates (in the instruction zone) a temp operand pinned to the given
// hardware register.
19 InstructionOperand* TempRegister(Register reg) {
20 return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
21 Register::ToAllocationIndex(reg));
// Returns true if |node| is a constant usable as an x64 immediate: any
// Int32Constant, or an Int64Constant whose value sign-extends from 32 bits
// (x64 immediates are at most 32 bits, sign-extended).
24 bool CanBeImmediate(Node* node) {
25 switch (node->opcode()) {
26 case IrOpcode::kInt32Constant:
28 case IrOpcode::kInt64Constant: {
29 const int64_t value = OpParameter<int64_t>(node);
30 return value == static_cast<int64_t>(static_cast<int32_t>(value));
// Prefer as the (clobbered) left operand a node that is not live after this
// use, so two-address x64 instructions don't force an extra register copy.
37 bool CanBeBetterLeftOperand(Node* node) const {
38 return !selector()->IsLive(node);
// Selects a load instruction: picks the opcode from the load representation,
// then chooses an addressing mode depending on which of base/index fits in
// an immediate. NOTE(review): the opcode switch header and several cases are
// elided in this view.
43 void InstructionSelector::VisitLoad(Node* node) {
44 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
45 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
46 X64OperandGenerator g(this);
47 Node* const base = node->InputAt(0);
48 Node* const index = node->InputAt(1);
58 case kRepBit: // Fall through.
// Byte/word loads sign- or zero-extend depending on the value type.
60 opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
63 opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
68 case kRepTagged: // Fall through.
// A constant base becomes the displacement, with the index in a register.
76 if (g.CanBeImmediate(base)) {
77 // load [#base + %index]
78 Emit(opcode | AddressingModeField::encode(kMode_MRI),
79 g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
80 } else if (g.CanBeImmediate(index)) {
81 // load [%base + #index]
82 Emit(opcode | AddressingModeField::encode(kMode_MRI),
83 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
// Neither side is an immediate: register base plus register index.
85 // load [%base + %index*1]
86 Emit(opcode | AddressingModeField::encode(kMode_MR1),
87 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
// Selects a store instruction. Tagged stores needing a full write barrier go
// through kX64StoreWriteBarrier with fixed registers; otherwise the opcode
// switch (partially elided in this view) picks a plain mov of the right width
// and the addressing-mode selection mirrors VisitLoad.
92 void InstructionSelector::VisitStore(Node* node) {
93 X64OperandGenerator g(this);
94 Node* base = node->InputAt(0);
95 Node* index = node->InputAt(1);
96 Node* value = node->InputAt(2);
98 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
99 MachineType rep = RepresentationOf(store_rep.machine_type());
100 if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
101 DCHECK(rep == kRepTagged);
102 // TODO(dcarney): refactor RecordWrite function to take temp registers
103 // and pass them here instead of using fixed regs
104 // TODO(dcarney): handle immediate indices.
// The write-barrier stub expects base/index/value in rbx/rcx/rdx; rcx and
// rdx are also clobbered as temps.
105 InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
106 Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
107 g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
111 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
120 case kRepBit: // Fall through.
130 case kRepTagged: // Fall through.
// Stores can take the value either as an immediate or in a register.
138 InstructionOperand* value_operand =
139 g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
140 if (g.CanBeImmediate(base)) {
141 // store [#base + %index], %|#value
142 Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
143 g.UseRegister(index), g.UseImmediate(base), value_operand);
144 } else if (g.CanBeImmediate(index)) {
145 // store [%base + #index], %|#value
146 Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
147 g.UseRegister(base), g.UseImmediate(index), value_operand);
// Neither side is an immediate: register base plus register index.
149 // store [%base + %index*1], %|#value
150 Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
151 g.UseRegister(base), g.UseRegister(index), value_operand);
156 // Shared routine for multiple binary operations.
// Builds operand lists for a two-address x64 binop, optionally fused with a
// branch/set continuation. NOTE(review): some lines (e.g. the left==right
// guard and an else arm) are elided in this view.
157 static void VisitBinop(InstructionSelector* selector, Node* node,
158 InstructionCode opcode, FlagsContinuation* cont) {
159 X64OperandGenerator g(selector);
160 Int32BinopMatcher m(node);
161 Node* left = m.left().node();
162 Node* right = m.right().node();
// Up to 2 value inputs + 2 branch labels, and up to 2 outputs (result +
// flags materialization).
163 InstructionOperand* inputs[4];
164 size_t input_count = 0;
165 InstructionOperand* outputs[2];
166 size_t output_count = 0;
168 // TODO(turbofan): match complex addressing modes.
170 // If both inputs refer to the same operand, enforce allocating a register
171 // for both of them to ensure that we don't end up generating code like
174 // mov rax, [rbp-0x10]
175 // add rax, [rbp-0x10]
177 InstructionOperand* const input = g.UseRegister(left);
178 inputs[input_count++] = input;
179 inputs[input_count++] = input;
180 } else if (g.CanBeImmediate(right)) {
181 inputs[input_count++] = g.UseRegister(left);
182 inputs[input_count++] = g.UseImmediate(right);
// For commutative ops, put the operand that dies here on the left, since
// the left input is clobbered by the two-address instruction.
184 if (node->op()->HasProperty(Operator::kCommutative) &&
185 g.CanBeBetterLeftOperand(right)) {
186 std::swap(left, right);
188 inputs[input_count++] = g.UseRegister(left);
189 inputs[input_count++] = g.Use(right);
// A branch continuation appends the true/false block labels as inputs.
192 if (cont->IsBranch()) {
193 inputs[input_count++] = g.Label(cont->true_block());
194 inputs[input_count++] = g.Label(cont->false_block());
197 outputs[output_count++] = g.DefineSameAsFirst(node);
// A "set" continuation additionally defines the boolean flags result.
199 outputs[output_count++] = g.DefineAsRegister(cont->result());
202 DCHECK_NE(0, static_cast<int>(input_count));
203 DCHECK_NE(0, static_cast<int>(output_count));
204 DCHECK_GE(arraysize(inputs), input_count);
205 DCHECK_GE(arraysize(outputs), output_count);
207 Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
208 outputs, input_count, inputs);
209 if (cont->IsBranch()) instr->MarkAsControl();
213 // Shared routine for multiple binary operations.
// Convenience overload: no fused comparison, so use an empty continuation.
214 static void VisitBinop(InstructionSelector* selector, Node* node,
215 InstructionCode opcode) {
216 FlagsContinuation cont;
217 VisitBinop(selector, node, opcode, &cont);
// Bitwise and/or visitors: all delegate to the shared binop routine with the
// width-appropriate x64 opcode.
221 void InstructionSelector::VisitWord32And(Node* node) {
222 VisitBinop(this, node, kX64And32);
226 void InstructionSelector::VisitWord64And(Node* node) {
227 VisitBinop(this, node, kX64And);
231 void InstructionSelector::VisitWord32Or(Node* node) {
232 VisitBinop(this, node, kX64Or32);
236 void InstructionSelector::VisitWord64Or(Node* node) {
237 VisitBinop(this, node, kX64Or);
// x ^ -1 is bitwise negation: emit a single not32 instead of an xor.
241 void InstructionSelector::VisitWord32Xor(Node* node) {
242 X64OperandGenerator g(this);
243 Uint32BinopMatcher m(node);
244 if (m.right().Is(-1)) {
245 Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
// Otherwise fall back to the generic binop path (else arm elided here).
247 VisitBinop(this, node, kX64Xor32);
// 64-bit analogue of VisitWord32Xor: x ^ -1 becomes a single not.
252 void InstructionSelector::VisitWord64Xor(Node* node) {
253 X64OperandGenerator g(this);
254 Uint64BinopMatcher m(node);
255 if (m.right().Is(-1)) {
256 Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
// Otherwise fall back to the generic binop path (else arm elided here).
258 VisitBinop(this, node, kX64Xor);
265 // Shared routine for multiple 32-bit shift operations.
266 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
267 void VisitWord32Shift(InstructionSelector* selector, Node* node,
269 X64OperandGenerator g(selector);
270 Int32BinopMatcher m(node);
271 Node* left = m.left().node();
272 Node* right = m.right().node();
// Constant shift counts are encoded directly in the instruction.
274 if (g.CanBeImmediate(right)) {
275 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
276 g.UseImmediate(right));
// The hardware masks 32-bit shift counts to 5 bits, so an explicit
// (count & 0x1F) is redundant and can be stripped.
278 if (m.right().IsWord32And()) {
279 Int32BinopMatcher mright(right);
280 if (mright.right().Is(0x1F)) {
281 right = mright.left().node();
// Variable counts must live in cl, i.e. rcx.
284 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
285 g.UseFixed(right, rcx));
290 // Shared routine for multiple 64-bit shift operations.
291 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
292 void VisitWord64Shift(InstructionSelector* selector, Node* node,
294 X64OperandGenerator g(selector);
295 Int64BinopMatcher m(node);
296 Node* left = m.left().node();
297 Node* right = m.right().node();
// Constant shift counts are encoded directly in the instruction.
299 if (g.CanBeImmediate(right)) {
300 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
301 g.UseImmediate(right));
// The hardware masks 64-bit shift counts to 6 bits, so an explicit
// (count & 0x3F) is redundant and can be stripped.
303 if (m.right().IsWord64And()) {
304 Int64BinopMatcher mright(right);
305 if (mright.right().Is(0x3F)) {
306 right = mright.left().node();
// Variable counts must live in cl, i.e. rcx.
309 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
310 g.UseFixed(right, rcx));
// Shift/rotate visitors: each forwards to the shared width-specific shift
// routine with the matching x64 opcode.
317 void InstructionSelector::VisitWord32Shl(Node* node) {
318 VisitWord32Shift(this, node, kX64Shl32);
322 void InstructionSelector::VisitWord64Shl(Node* node) {
323 VisitWord64Shift(this, node, kX64Shl);
327 void InstructionSelector::VisitWord32Shr(Node* node) {
328 VisitWord32Shift(this, node, kX64Shr32);
332 void InstructionSelector::VisitWord64Shr(Node* node) {
333 VisitWord64Shift(this, node, kX64Shr);
337 void InstructionSelector::VisitWord32Sar(Node* node) {
338 VisitWord32Shift(this, node, kX64Sar32);
342 void InstructionSelector::VisitWord64Sar(Node* node) {
343 VisitWord64Shift(this, node, kX64Sar);
347 void InstructionSelector::VisitWord32Ror(Node* node) {
348 VisitWord32Shift(this, node, kX64Ror32);
352 void InstructionSelector::VisitWord64Ror(Node* node) {
353 VisitWord64Shift(this, node, kX64Ror);
// Integer addition: delegates to the shared binop routine.
357 void InstructionSelector::VisitInt32Add(Node* node) {
358 VisitBinop(this, node, kX64Add32);
362 void InstructionSelector::VisitInt64Add(Node* node) {
363 VisitBinop(this, node, kX64Add);
// 0 - x is negation: emit a single neg32 instead of a subtract.
367 void InstructionSelector::VisitInt32Sub(Node* node) {
368 X64OperandGenerator g(this);
369 Int32BinopMatcher m(node);
370 if (m.left().Is(0)) {
371 Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
// Otherwise fall back to the generic binop path (else arm elided here).
373 VisitBinop(this, node, kX64Sub32);
// 64-bit analogue of VisitInt32Sub: 0 - x becomes a single neg.
378 void InstructionSelector::VisitInt64Sub(Node* node) {
379 X64OperandGenerator g(this);
380 Int64BinopMatcher m(node);
381 if (m.left().Is(0)) {
382 Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
// Otherwise fall back to the generic binop path (else arm elided here).
384 VisitBinop(this, node, kX64Sub);
// Shared routine for integer multiplication. With an immediate right operand
// the three-operand imul form is used (result in a fresh register); otherwise
// the two-address form clobbers the left operand.
391 void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
392 X64OperandGenerator g(selector);
393 Int32BinopMatcher m(node);
394 Node* left = m.left().node();
395 Node* right = m.right().node();
396 if (g.CanBeImmediate(right)) {
397 selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
398 g.UseImmediate(right));
// Multiplication is commutative: prefer clobbering an operand that dies here.
400 if (g.CanBeBetterLeftOperand(right)) {
401 std::swap(left, right);
403 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
// Shared routine for the high word of a widening multiply: the left operand
// goes in rax and the high result is produced in rdx (mul/imul semantics).
409 void VisitMulHigh(InstructionSelector* selector, Node* node,
411 X64OperandGenerator g(selector);
412 Node* left = node->InputAt(0);
413 Node* right = node->InputAt(1);
// Commutative: put the operand that stays live on the non-fixed side.
414 if (selector->IsLive(left) && !selector->IsLive(right)) {
415 std::swap(left, right);
417 // TODO(turbofan): We use UseUniqueRegister here to improve register
419 selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
420 g.UseUniqueRegister(right));
// Shared routine for integer division: dividend in rax, quotient in rax;
// rdx is reserved as a temp because div/idiv clobbers it.
424 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
425 X64OperandGenerator g(selector);
426 InstructionOperand* temps[] = {g.TempRegister(rdx)};
428 opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
429 g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
// Shared routine for integer modulus: same div/idiv instruction as VisitDiv,
// but the result of interest is the remainder, which lands in rdx.
433 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
434 X64OperandGenerator g(selector);
435 selector->Emit(opcode, g.DefineAsFixed(node, rdx),
436 g.UseFixed(node->InputAt(0), rax),
437 g.UseUniqueRegister(node->InputAt(1)));
// Multiplication/division/modulus visitors: each forwards to the matching
// shared routine with the width- and signedness-appropriate opcode. Note that
// Mod reuses the Div opcodes — the instruction is the same, only the result
// register differs (rax vs. rdx).
443 void InstructionSelector::VisitInt32Mul(Node* node) {
444 VisitMul(this, node, kX64Imul32);
448 void InstructionSelector::VisitInt64Mul(Node* node) {
449 VisitMul(this, node, kX64Imul);
453 void InstructionSelector::VisitInt32MulHigh(Node* node) {
454 VisitMulHigh(this, node, kX64ImulHigh32);
458 void InstructionSelector::VisitInt32Div(Node* node) {
459 VisitDiv(this, node, kX64Idiv32);
463 void InstructionSelector::VisitInt64Div(Node* node) {
464 VisitDiv(this, node, kX64Idiv);
468 void InstructionSelector::VisitUint32Div(Node* node) {
469 VisitDiv(this, node, kX64Udiv32);
473 void InstructionSelector::VisitUint64Div(Node* node) {
474 VisitDiv(this, node, kX64Udiv);
478 void InstructionSelector::VisitInt32Mod(Node* node) {
479 VisitMod(this, node, kX64Idiv32);
483 void InstructionSelector::VisitInt64Mod(Node* node) {
484 VisitMod(this, node, kX64Idiv);
488 void InstructionSelector::VisitUint32Mod(Node* node) {
489 VisitMod(this, node, kX64Udiv32);
493 void InstructionSelector::VisitUint64Mod(Node* node) {
494 VisitMod(this, node, kX64Udiv);
498 void InstructionSelector::VisitUint32MulHigh(Node* node) {
499 VisitMulHigh(this, node, kX64UmulHigh32);
// Representation-change visitors: each emits a single SSE conversion (or a
// movsx for int32->int64) into a fresh register.
503 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
504 X64OperandGenerator g(this);
505 Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
509 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
510 X64OperandGenerator g(this);
511 Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
515 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
516 X64OperandGenerator g(this);
517 Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
521 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
522 X64OperandGenerator g(this);
523 Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
527 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
528 X64OperandGenerator g(this);
529 Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Sign extension needs an actual instruction (movsxlq), unlike zero
// extension below.
533 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
534 X64OperandGenerator g(this);
535 Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Zero extension uint32->uint64. When the input is one of the listed 32-bit
// operations, its 32-bit result already has the upper half zeroed, so no
// instruction is needed; otherwise an explicit movl performs the extension.
539 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
540 X64OperandGenerator g(this);
541 Node* value = node->InputAt(0);
542 switch (value->opcode()) {
543 case IrOpcode::kWord32And:
544 case IrOpcode::kWord32Or:
545 case IrOpcode::kWord32Xor:
546 case IrOpcode::kWord32Shl:
547 case IrOpcode::kWord32Shr:
548 case IrOpcode::kWord32Sar:
549 case IrOpcode::kWord32Ror:
550 case IrOpcode::kWord32Equal:
551 case IrOpcode::kInt32Add:
552 case IrOpcode::kInt32Sub:
553 case IrOpcode::kInt32Mul:
554 case IrOpcode::kInt32MulHigh:
555 case IrOpcode::kInt32Div:
556 case IrOpcode::kInt32LessThan:
557 case IrOpcode::kInt32LessThanOrEqual:
558 case IrOpcode::kInt32Mod:
559 case IrOpcode::kUint32Div:
560 case IrOpcode::kUint32LessThan:
561 case IrOpcode::kUint32LessThanOrEqual:
562 case IrOpcode::kUint32Mod:
563 case IrOpcode::kUint32MulHigh: {
564 // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
565 // zero-extension is a no-op.
566 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
// Default case (switch close elided here): explicit zero-extending 32-bit
// move.
572 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
// Narrows a float64 to float32 with a single SSE cvtsd2ss.
576 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
577 X64OperandGenerator g(this);
578 Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Truncates int64 to int32. If the input is a 64-bit shift right by exactly
// 32 that this node covers, the truncation is folded into the shift (the low
// 32 bits of the shifted value are exactly the upper half); otherwise a movl
// keeps only the low 32 bits.
582 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
583 X64OperandGenerator g(this);
584 Node* value = node->InputAt(0);
585 if (CanCover(node, value)) {
586 switch (value->opcode()) {
587 case IrOpcode::kWord64Sar:
588 case IrOpcode::kWord64Shr: {
589 Int64BinopMatcher m(value);
590 if (m.right().Is(32)) {
591 Emit(kX64Shr, g.DefineSameAsFirst(node),
592 g.UseRegister(m.left().node()), g.TempImmediate(32));
// Fallback (switch/if closes elided here): plain 32-bit move.
601 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
// Float64 arithmetic visitors: two-address SSE ops define the result same as
// the first input. Float64Mod additionally reserves rax as a temp, and Sqrt
// is a one-input op writing a fresh register.
605 void InstructionSelector::VisitFloat64Add(Node* node) {
606 X64OperandGenerator g(this);
607 Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
608 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
612 void InstructionSelector::VisitFloat64Sub(Node* node) {
613 X64OperandGenerator g(this);
614 Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
615 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
619 void InstructionSelector::VisitFloat64Mul(Node* node) {
620 X64OperandGenerator g(this);
621 Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
622 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
626 void InstructionSelector::VisitFloat64Div(Node* node) {
627 X64OperandGenerator g(this);
628 Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
629 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
633 void InstructionSelector::VisitFloat64Mod(Node* node) {
634 X64OperandGenerator g(this);
// The Float64Mod code sequence clobbers rax, so reserve it as a temp.
635 InstructionOperand* temps[] = {g.TempRegister(rax)};
636 Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
637 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
642 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
643 X64OperandGenerator g(this);
644 Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// Shared helper for one-input, one-output float64 instructions
// (register -> register).
650 void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
652 X64OperandGenerator g(selector);
653 selector->Emit(opcode, g.DefineAsRegister(node),
654 g.UseRegister(node->InputAt(0)));
// Float64 rounding visitors. These require SSE4.1 (roundsd); the selector
// only advertises them via SupportedMachineOperatorFlags when SSE4.1 is
// available, hence the DCHECKs. NOTE(review): the body of
// VisitFloat64RoundTiesAway is elided in this view.
660 void InstructionSelector::VisitFloat64Floor(Node* node) {
661 DCHECK(CpuFeatures::IsSupported(SSE4_1));
662 VisitRRFloat64(this, kSSEFloat64Floor, node);
666 void InstructionSelector::VisitFloat64Ceil(Node* node) {
667 DCHECK(CpuFeatures::IsSupported(SSE4_1));
668 VisitRRFloat64(this, kSSEFloat64Ceil, node);
672 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
673 DCHECK(CpuFeatures::IsSupported(SSE4_1));
674 VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
678 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
// Selects instructions for a call: builds the call buffer, pushes stack
// arguments in reverse order, picks the opcode from the call kind, and emits
// the call with its frame-state/argument operands.
683 void InstructionSelector::VisitCall(Node* node) {
684 X64OperandGenerator g(this);
685 CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
687 FrameStateDescriptor* frame_state_descriptor = NULL;
// The frame state (for deopt) is the last input after the value inputs.
688 if (descriptor->NeedsFrameState()) {
689 frame_state_descriptor = GetFrameStateDescriptor(
690 node->InputAt(static_cast<int>(descriptor->InputCount())));
693 CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
695 // Compute InstructionOperands for inputs and outputs.
696 InitializeCallBuffer(node, &buffer, true, true);
698 // Push any stack arguments.
// Reverse iteration: the first pushed node must end up deepest on the stack.
699 for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
700 input != buffer.pushed_nodes.rend(); input++) {
701 // TODO(titzer): handle pushing double parameters.
703 g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
706 // Select the appropriate opcode based on the call type.
707 InstructionCode opcode;
708 switch (descriptor->kind()) {
709 case CallDescriptor::kCallCodeObject: {
710 opcode = kArchCallCodeObject;
713 case CallDescriptor::kCallJSFunction:
714 opcode = kArchCallJSFunction;
// Descriptor flags (e.g. deopt handling) travel in the misc field.
720 opcode |= MiscField::encode(descriptor->flags());
722 // Emit the call instruction.
723 InstructionOperand** first_output =
724 buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
725 Instruction* call_instr =
726 Emit(opcode, buffer.outputs.size(), first_output,
727 buffer.instruction_args.size(), &buffer.instruction_args.front());
728 call_instr->MarkAsCall();
732 // Shared routine for multiple compare operations.
// Operand-level form: emits the compare either fused into a branch (with
// block labels as extra inputs, marked as control) or as a flag-setting
// instruction that materializes a boolean.
733 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
734 InstructionOperand* left, InstructionOperand* right,
735 FlagsContinuation* cont) {
736 X64OperandGenerator g(selector);
737 opcode = cont->Encode(opcode);
738 if (cont->IsBranch()) {
739 selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
740 g.Label(cont->false_block()))->MarkAsControl();
742 DCHECK(cont->IsSet());
743 selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
748 // Shared routine for multiple compare operations.
// Node-level form: for commutative compares, prefer on the left an operand
// that dies here, then delegate to the operand-level routine.
749 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
750 Node* left, Node* right, FlagsContinuation* cont,
752 X64OperandGenerator g(selector);
753 if (commutative && g.CanBeBetterLeftOperand(right)) {
754 std::swap(left, right);
756 VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
760 // Shared routine for multiple word compare operations.
// Prefers an immediate operand on the right; an immediate on the left of a
// non-commutative compare requires commuting the continuation's condition.
761 static void VisitWordCompare(InstructionSelector* selector, Node* node,
762 InstructionCode opcode, FlagsContinuation* cont) {
763 X64OperandGenerator g(selector);
764 Node* const left = node->InputAt(0);
765 Node* const right = node->InputAt(1);
767 // Match immediates on left or right side of comparison.
768 if (g.CanBeImmediate(right)) {
769 VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
770 } else if (g.CanBeImmediate(left)) {
771 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
772 VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
// No immediate on either side: register/operand compare.
774 VisitCompare(selector, opcode, left, right, cont,
775 node->op()->HasProperty(Operator::kCommutative));
780 // Shared routine for comparison with zero.
781 static void VisitCompareZero(InstructionSelector* selector, Node* node,
782 InstructionCode opcode, FlagsContinuation* cont) {
783 X64OperandGenerator g(selector);
784 VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
788 // Shared routine for multiple float64 compare operations.
789 static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
790 FlagsContinuation* cont) {
791 VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
792 cont, node->op()->HasProperty(Operator::kCommutative));
// Selects instructions for a branch. First peels off covered Word{32,64}Equal
// comparisons against zero (each negates the continuation), then tries to
// fuse the branch with the comparison or flag-producing node that feeds it;
// if nothing fuses, falls back to an explicit compare against zero.
// NOTE(review): several control-flow lines (negation calls, else arms, switch
// defaults) are elided in this view.
796 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
797 BasicBlock* fbranch) {
798 X64OperandGenerator g(this);
800 Node* value = branch->InputAt(0);
802 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
804 // If we can fall through to the true block, invert the branch.
805 if (IsNextInAssemblyOrder(tbranch)) {
810 // Try to combine with comparisons against 0 by simply inverting the branch.
811 while (CanCover(user, value)) {
812 if (value->opcode() == IrOpcode::kWord32Equal) {
813 Int32BinopMatcher m(value);
814 if (m.right().Is(0)) {
// Equal-to-zero is a negation: descend into its input and flip cont.
816 value = m.left().node();
821 } else if (value->opcode() == IrOpcode::kWord64Equal) {
822 Int64BinopMatcher m(value);
823 if (m.right().Is(0)) {
825 value = m.left().node();
835 // Try to combine the branch with a comparison.
836 if (CanCover(user, value)) {
837 switch (value->opcode()) {
838 case IrOpcode::kWord32Equal:
839 cont.OverwriteAndNegateIfEqual(kEqual);
840 return VisitWordCompare(this, value, kX64Cmp32, &cont);
841 case IrOpcode::kInt32LessThan:
842 cont.OverwriteAndNegateIfEqual(kSignedLessThan);
843 return VisitWordCompare(this, value, kX64Cmp32, &cont);
844 case IrOpcode::kInt32LessThanOrEqual:
845 cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
846 return VisitWordCompare(this, value, kX64Cmp32, &cont);
847 case IrOpcode::kUint32LessThan:
848 cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
849 return VisitWordCompare(this, value, kX64Cmp32, &cont);
850 case IrOpcode::kUint32LessThanOrEqual:
851 cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
852 return VisitWordCompare(this, value, kX64Cmp32, &cont);
853 case IrOpcode::kWord64Equal:
854 cont.OverwriteAndNegateIfEqual(kEqual);
855 return VisitWordCompare(this, value, kX64Cmp, &cont);
856 case IrOpcode::kInt64LessThan:
857 cont.OverwriteAndNegateIfEqual(kSignedLessThan);
858 return VisitWordCompare(this, value, kX64Cmp, &cont);
859 case IrOpcode::kInt64LessThanOrEqual:
860 cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
861 return VisitWordCompare(this, value, kX64Cmp, &cont);
862 case IrOpcode::kUint64LessThan:
863 cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
864 return VisitWordCompare(this, value, kX64Cmp, &cont);
865 case IrOpcode::kFloat64Equal:
866 cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
867 return VisitFloat64Compare(this, value, &cont);
868 case IrOpcode::kFloat64LessThan:
869 cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
870 return VisitFloat64Compare(this, value, &cont);
871 case IrOpcode::kFloat64LessThanOrEqual:
872 cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
873 return VisitFloat64Compare(this, value, &cont);
874 case IrOpcode::kProjection:
875 // Check if this is the overflow output projection of an
876 // <Operation>WithOverflow node.
877 if (OpParameter<size_t>(value) == 1u) {
878 // We cannot combine the <Operation>WithOverflow with this branch
879 // unless the 0th projection (the use of the actual value of the
880 // <Operation> is either NULL, which means there's no use of the
881 // actual value, or was already defined, which means it is scheduled
882 // *AFTER* this branch).
883 Node* node = value->InputAt(0);
884 Node* result = node->FindProjection(0);
885 if (result == NULL || IsDefined(result)) {
886 switch (node->opcode()) {
887 case IrOpcode::kInt32AddWithOverflow:
888 cont.OverwriteAndNegateIfEqual(kOverflow);
889 return VisitBinop(this, node, kX64Add32, &cont);
890 case IrOpcode::kInt32SubWithOverflow:
891 cont.OverwriteAndNegateIfEqual(kOverflow);
892 return VisitBinop(this, node, kX64Sub32, &cont);
// Sub and And already set the flags the branch needs, so reuse them as
// cmp/test respectively.
899 case IrOpcode::kInt32Sub:
900 return VisitWordCompare(this, value, kX64Cmp32, &cont);
901 case IrOpcode::kInt64Sub:
902 return VisitWordCompare(this, value, kX64Cmp, &cont);
903 case IrOpcode::kWord32And:
904 return VisitWordCompare(this, value, kX64Test32, &cont);
905 case IrOpcode::kWord64And:
906 return VisitWordCompare(this, value, kX64Test, &cont);
912 // Branch could not be combined with a compare, emit compare against 0.
913 VisitCompareZero(this, value, kX64Cmp32, &cont);
// Word32Equal. A comparison against zero tries the same fusions as
// VisitBranch (peeling nested equal-to-zero negations, reusing sub/and
// flags); otherwise a plain 32-bit compare is emitted.
// NOTE(review): negation calls and closing braces are elided in this view.
917 void InstructionSelector::VisitWord32Equal(Node* const node) {
919 FlagsContinuation cont(kEqual, node);
920 Int32BinopMatcher m(user);
921 if (m.right().Is(0)) {
922 Node* value = m.left().node();
924 // Try to combine with comparisons against 0 by simply inverting the branch.
925 while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
926 Int32BinopMatcher m(value);
927 if (m.right().Is(0)) {
929 value = m.left().node();
936 // Try to combine the branch with a comparison.
937 if (CanCover(user, value)) {
938 switch (value->opcode()) {
939 case IrOpcode::kInt32Sub:
940 return VisitWordCompare(this, value, kX64Cmp32, &cont);
941 case IrOpcode::kWord32And:
942 return VisitWordCompare(this, value, kX64Test32, &cont);
947 return VisitCompareZero(this, value, kX64Cmp32, &cont);
949 VisitWordCompare(this, node, kX64Cmp32, &cont);
// 32-bit relational comparison visitors: each sets up a flags continuation
// with the matching condition and emits a 32-bit compare.
953 void InstructionSelector::VisitInt32LessThan(Node* node) {
954 FlagsContinuation cont(kSignedLessThan, node);
955 VisitWordCompare(this, node, kX64Cmp32, &cont);
959 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
960 FlagsContinuation cont(kSignedLessThanOrEqual, node);
961 VisitWordCompare(this, node, kX64Cmp32, &cont);
965 void InstructionSelector::VisitUint32LessThan(Node* node) {
966 FlagsContinuation cont(kUnsignedLessThan, node);
967 VisitWordCompare(this, node, kX64Cmp32, &cont);
971 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
972 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
973 VisitWordCompare(this, node, kX64Cmp32, &cont);
// Word64Equal: 64-bit analogue of VisitWord32Equal, with the same
// zero-comparison fusions using the 64-bit cmp/test opcodes.
// NOTE(review): negation calls and closing braces are elided in this view.
977 void InstructionSelector::VisitWord64Equal(Node* const node) {
979 FlagsContinuation cont(kEqual, node);
980 Int64BinopMatcher m(user);
981 if (m.right().Is(0)) {
982 Node* value = m.left().node();
984 // Try to combine with comparisons against 0 by simply inverting the branch.
985 while (CanCover(user, value) && value->opcode() == IrOpcode::kWord64Equal) {
986 Int64BinopMatcher m(value);
987 if (m.right().Is(0)) {
989 value = m.left().node();
996 // Try to combine the branch with a comparison.
997 if (CanCover(user, value)) {
998 switch (value->opcode()) {
999 case IrOpcode::kInt64Sub:
1000 return VisitWordCompare(this, value, kX64Cmp, &cont);
1001 case IrOpcode::kWord64And:
1002 return VisitWordCompare(this, value, kX64Test, &cont);
1007 return VisitCompareZero(this, value, kX64Cmp, &cont);
1009 VisitWordCompare(this, node, kX64Cmp, &cont);
// Overflow-checked 32-bit add/sub. If the overflow projection is used, the
// binop is fused with a kOverflow continuation targeting it; otherwise the
// flags result is unused and an empty continuation is passed.
// NOTE(review): the add variant's call lacks the `return` the sub variant
// has; elided lines likely close the if — stylistic inconsistency to confirm
// against the full file.
1013 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1014 if (Node* ovf = node->FindProjection(1)) {
1015 FlagsContinuation cont(kOverflow, ovf);
1016 VisitBinop(this, node, kX64Add32, &cont);
1018 FlagsContinuation cont;
1019 VisitBinop(this, node, kX64Add32, &cont);
1023 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1024 if (Node* ovf = node->FindProjection(1)) {
1025 FlagsContinuation cont(kOverflow, ovf);
1026 return VisitBinop(this, node, kX64Sub32, &cont);
1028 FlagsContinuation cont;
1029 VisitBinop(this, node, kX64Sub32, &cont);
// 64-bit relational and float64 comparison visitors: each sets up a flags
// continuation with the matching condition and delegates to the shared
// compare routine. Float conditions use the unordered variants to account
// for NaN operands.
1033 void InstructionSelector::VisitInt64LessThan(Node* node) {
1034 FlagsContinuation cont(kSignedLessThan, node);
1035 VisitWordCompare(this, node, kX64Cmp, &cont);
1039 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
1040 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1041 VisitWordCompare(this, node, kX64Cmp, &cont);
1045 void InstructionSelector::VisitUint64LessThan(Node* node) {
1046 FlagsContinuation cont(kUnsignedLessThan, node);
1047 VisitWordCompare(this, node, kX64Cmp, &cont);
1051 void InstructionSelector::VisitFloat64Equal(Node* node) {
1052 FlagsContinuation cont(kUnorderedEqual, node);
1053 VisitFloat64Compare(this, node, &cont);
1057 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1058 FlagsContinuation cont(kUnorderedLessThan, node);
1059 VisitFloat64Compare(this, node, &cont);
1063 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1064 FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
1065 VisitFloat64Compare(this, node, &cont);
// Advertises optional machine operators: the float64 rounding operators are
// only offered when SSE4.1 (roundsd) is available on this CPU.
1070 MachineOperatorBuilder::Flags
1071 InstructionSelector::SupportedMachineOperatorFlags() {
1072 if (CpuFeatures::IsSupported(SSE4_1)) {
1073 return MachineOperatorBuilder::kFloat64Floor |
1074 MachineOperatorBuilder::kFloat64Ceil |
1075 MachineOperatorBuilder::kFloat64RoundTruncate;
1077 return MachineOperatorBuilder::kNoFlags;
1079 } // namespace compiler
1080 } // namespace internal