// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      case IrOpcode::kInt64Constant: {
        // A 64-bit constant can only be used as an immediate if it fits into
        // the 32 bits that x64 instruction encodings sign-extend.
        const int64_t value = OpParameter<int64_t>(node);
        return value == static_cast<int64_t>(static_cast<int32_t>(value));
      }
      default:
        return false;
    }
  }
  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                             Node* base, Node* displacement,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    if (base != NULL) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != NULL) {
        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != NULL) {
          inputs[(*input_count)++] = UseImmediate(displacement);
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale_exponent];
        } else {
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale_exponent];
        }
      } else {
        if (displacement == NULL) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = UseImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      DCHECK(index != NULL);
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
      inputs[(*input_count)++] = UseRegister(index);
      if (displacement != NULL) {
        inputs[(*input_count)++] = UseImmediate(displacement);
        static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                    kMode_M4I, kMode_M8I};
        mode = kMnI_modes[scale_exponent];
      } else {
        static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
                                                   kMode_M4, kMode_M8};
        mode = kMn_modes[scale_exponent];
        if (mode == kMode_MR1) {
          // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
          inputs[(*input_count)++] = UseRegister(index);
        }
      }
    }
    return mode;
  }
  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                  InstructionOperand inputs[],
                                                  size_t* input_count) {
    BaseWithIndexAndDisplacement64Matcher m(operand, true);
    DCHECK(m.matches());
    if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
                                         m.displacement(), inputs, input_count);
    } else {
      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
      return kMode_MR1;
    }
  }

  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};
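
// Example: for an effective address of the form base + index*4 + 8,
// GenerateMemoryOperandInputs pushes {base, index, disp} into {inputs} and
// returns kMode_MR4I, which the code generator renders as the x64 memory
// operand [base + index*4 + 8].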


void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  X64OperandGenerator g(this);

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }

  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  Emit(code, 1, outputs, input_count, inputs);
}
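
// For instance, a kRepWord8 load typed kTypeInt32 selects kX64Movsxbl
// (sign-extending byte load) while kTypeUint32 selects kX64Movzxbl; the
// matched addressing mode is folded into the same instruction.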


void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
    Emit(kX64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, rbx),
         g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kX64Movb;
      break;
    case kRepWord16:
      opcode = kX64Movw;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }
  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  inputs[input_count++] = value_operand;
  Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}


void InstructionSelector::VisitCheckedLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode;
  switch (rep) {
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case kRepWord32:
      opcode = kCheckedLoadWord32;
      break;
    case kRepFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case kRepFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  // If the offset is a constant-index addition and the length is a constant
  // no smaller than that index, fold the index into an immediate operand.
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
      return;
    }
  }
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.TempImmediate(0), length_operand);
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode;
  switch (rep) {
    case kRepWord8:
      opcode = kCheckedStoreWord8;
      break;
    case kRepWord16:
      opcode = kCheckedStoreWord16;
      break;
    case kRepWord32:
      opcode = kCheckedStoreWord32;
      break;
    case kRepFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case kRepFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
           value_operand);
      return;
    }
  }
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.TempImmediate(0), length_operand, value_operand);
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, static_cast<int>(input_count));
  DCHECK_NE(0, static_cast<int>(output_count));
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}
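
// Schematically, an Int32Add of two register values becomes the two-address
// form "addl reg1, reg2" with the result constrained to reg1; the
// DefineSameAsFirst output constraint above encodes exactly that x64
// destination-is-first-source requirement.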


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitWord32And(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(0xff)) {
    Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else if (m.right().Is(0xffff)) {
    Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64And32);
  }
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}


// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
void VisitWord32Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
void VisitWord64Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}
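
// The 0x3F match above relies on x64 shift semantics: the hardware masks a
// %cl shift count to 6 bits for 64-bit operands anyway, so an explicit
// "count & 0x3F" node is redundant and can be skipped.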


void EmitLea(InstructionSelector* selector, InstructionCode opcode,
             Node* result, Node* index, int scale, Node* base,
             Node* displacement) {
  X64OperandGenerator g(selector);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode mode = g.GenerateMemoryOperandInputs(
      index, scale, base, displacement, inputs, &input_count);

  DCHECK_NE(0, static_cast<int>(input_count));
  DCHECK_GE(arraysize(inputs), input_count);

  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(result);

  opcode = AddressingModeField::encode(mode) | opcode;

  selector->Emit(opcode, 1, outputs, input_count, inputs);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : NULL;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
    return;
  }
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kX64Shl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // (x << 16) >> 16 is a sign-extending 16-bit load of x, and likewise for
  // 24-bit shifts and bytes, so select movsxwl/movsxbl directly.
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    }
  }
  VisitWord32Shift(this, node, kX64Sar32);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leal pattern.
  BaseWithIndexAndDisplacement32Matcher m(node);
  if (m.matches() &&
      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
            m.displacement());
    return;
  }

  // No leal pattern match, use addl.
  VisitBinop(this, node, kX64Add32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
      // Turn subtractions of constant values into immediate "leal"
      // instructions by negating the value.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(-m.right().Value()));
      return;
    }
    VisitBinop(this, node, kX64Sub32);
  }
}
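
// E.g. "x - 8" becomes "leal dst, [x-8]": lea accepts a negative
// displacement, leaves the flags untouched, and lets the register allocator
// choose a destination different from x.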


void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub);
  }
}


void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    // The three-operand imul form allows a free choice of destination here.
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


void VisitMulHigh(InstructionSelector* selector, Node* node,
                  ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  }
  // TODO(turbofan): We use UseUniqueRegister here to improve register
  // allocation.
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right));
}
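
// The one-operand imull computes edx:eax = eax * src, leaving the high half
// of the product in rdx; hence the input fixed to rax and the result defined
// as fixed rdx above.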


void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
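
// idivl/divl divide the implicit rdx:rax pair, writing the quotient to rax
// and the remainder to rdx, so rdx is claimed as a temp above to keep the
// register allocator from placing a live value there.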


void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsFixed(node, rdx),
                 g.UseFixed(node->InputAt(0), rax),
                 g.UseUniqueRegister(node->InputAt(1)));
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : NULL;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
    return;
  }
  VisitMul(this, node, kX64Imul32);
}
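
// E.g. "x * 9" matches with scale 8 plus power_of_two_plus_one, so it is
// emitted as "leal dst, [x+x*8]" instead of an imull.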


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64ImulHigh32);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64UmulHigh32);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
      // the zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
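
// On x64 any 32-bit operation (e.g. "addl edi, esi") clears the upper 32
// bits of its destination register, so the explicit movl above is only
// needed for producers outside the list of known zero-extending operations.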


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shr: {
        Int64BinopMatcher m(value);
        if (m.right().Is(32)) {
          Emit(kX64Shr, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()), g.TempImmediate(32));
          return;
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
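
// E.g. TruncateInt64ToInt32(Word64Shr(x, 32)) collapses into a single
// "shrq x, 32", whose low 32 bits are exactly the truncated value.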


void InstructionSelector::VisitFloat64Add(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  } else {
    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  }
}
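
// The AVX form "vaddsd dst, src1, src2" is a three-operand encoding, so the
// result can be defined as any register, while the SSE form "addsd dst, src"
// overwrites its first operand; hence DefineSameAsFirst on the SSE path.
// The same asymmetry applies to the Sub/Mul/Div visitors below.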


void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  } else {
    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  }
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  } else {
    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  }
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  } else {
    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  }
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Floor(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Floor, node);
}


void InstructionSelector::VisitFloat64Ceil(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Ceil, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  // Round-half-away-from-zero has no roundsd encoding, so this operation is
  // not advertised in SupportedMachineOperatorFlags and must not reach here.
  UNREACHABLE();
}


void InstructionSelector::VisitCall(Node* node) {
  X64OperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor = GetFrameStateDescriptor(
        node->InputAt(static_cast<int>(descriptor->InputCount())));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(node, &buffer, true, true);

  // Push any stack arguments.
  for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
       ++i) {
    // TODO(titzer): handle pushing double parameters.
    // On Atom, pushing from a register is preferred over pushing directly
    // from memory.
    InstructionOperand value =
        g.CanBeImmediate(*i)
            ? g.UseImmediate(*i)
            : IsSupported(ATOM) ? g.UseRegister(*i) : g.Use(*i);
    Emit(kX64Push, g.NoOutput(), value);
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  InstructionOperand* first_output =
      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), first_output,
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  call_instr->MarkAsCall();
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         Node* left, Node* right, FlagsContinuation* cont,
                         bool commutative) {
  X64OperandGenerator g(selector);
  if (commutative && g.CanBeBetterLeftOperand(right)) {
    std::swap(left, right);
  }
  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, left, right, cont,
                 node->op()->HasProperty(Operator::kCommutative));
  }
}
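
// x64 cmp/test encodings only take an immediate as the second operand, so a
// constant on the left is handled by swapping the operands and commuting the
// condition, e.g. "5 < x" is tested as "x > 5".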


// Shared routine for comparison with zero.
static void VisitCompareZero(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
}


// Shared routine for multiple float64 compare operations (inputs commuted).
static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                                FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
}
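
// Swapping the operands here, combined with the kUnsignedGreaterThan(OrEqual)
// conditions used by the callers, ensures that unordered (NaN) operands,
// which make ucomisd set ZF and CF, always fail the "above"-style checks.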


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  X64OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kUint64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == NULL || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Add32, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Sub32, &cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt64Sub:
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(this, value, kX64Test32, &cont);
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kX64Test, &cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitCompareZero(this, value, kX64Cmp32, &cont);
}


void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
                                      BasicBlock** case_branches,
                                      int32_t* case_values, size_t case_count,
                                      int32_t min_value, int32_t max_value) {
  X64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
  InstructionOperand default_operand = g.Label(default_branch);

  // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
  // is 2^31-1, so don't assume that it's non-zero below.
  size_t value_range =
      1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);

  // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
  // instruction.
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
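  // Worked example of the trade-off below: with 5 cases spanning a value
  // range of 10, the table side costs (4 + 10) + 3 * 3 = 23 against the
  // lookup side's (3 + 2 * 5) + 3 * 5 = 28, so the jump table is chosen.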
  if (case_count > 4 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = g.TempRegister();
    if (min_value) {
      // The leal automatically zero extends, so result is a valid 64-bit
      // index.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
           value_operand, g.TempImmediate(-min_value));
    } else {
      // Zero extend, because we use it as 64-bit index into the jump table.
      Emit(kX64Movl, index_operand, value_operand);
    }
    size_t input_count = 2 + value_range;
    auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
    inputs[0] = index_operand;
    std::fill(&inputs[1], &inputs[input_count], default_operand);
    for (size_t index = 0; index < case_count; ++index) {
      size_t value = case_values[index] - min_value;
      BasicBlock* branch = case_branches[index];
      DCHECK_LE(0u, value);
      DCHECK_LT(value + 2, input_count);
      inputs[value + 2] = g.Label(branch);
    }
    Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
        ->MarkAsControl();
    return;
  }

  // Generate a sequence of conditional jumps.
  size_t input_count = 2 + case_count * 2;
  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
  inputs[0] = value_operand;
  inputs[1] = default_operand;
  for (size_t index = 0; index < case_count; ++index) {
    int32_t value = case_values[index];
    BasicBlock* branch = case_branches[index];
    inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
    inputs[index * 2 + 2 + 1] = g.Label(branch);
  }
  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
      ->MarkAsControl();
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* value = m.left().node();

    // Try to combine with comparisons against 0 by inverting the condition.
    while (CanCover(user, value) &&
           value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    }

    // Try to combine the comparison directly with a flag-setting operation.
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kX64Cmp32, &cont);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kX64Test32, &cont);
        default:
          break;
      }
    }
    return VisitCompareZero(this, value, kX64Cmp32, &cont);
  }
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* value = m.left().node();

    // Try to combine with comparisons against 0 by inverting the condition.
    while (CanCover(user, value) &&
           value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    }

    // Try to combine the comparison directly with a flag-setting operation.
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt64Sub:
          return VisitWordCompare(this, value, kX64Cmp, &cont);
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kX64Test, &cont);
        default:
          break;
      }
    }
    return VisitCompareZero(this, value, kX64Cmp, &cont);
  }
  VisitWordCompare(this, node, kX64Cmp, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add32, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub32, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp, &cont);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedGreaterThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    return MachineOperatorBuilder::kFloat64Floor |
           MachineOperatorBuilder::kFloat64Ceil |
           MachineOperatorBuilder::kFloat64RoundTruncate |
           MachineOperatorBuilder::kWord32ShiftIsSafe;
  }
  return MachineOperatorBuilder::kNoFlags;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8