1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/compiler/instruction-selector-impl.h"
8 #include "src/compiler/node-matchers.h"
9 #include "src/compiler/node-properties.h"
// Adds X64-specific methods for generating operands.
// NOTE(review): this listing is elided — access specifiers, several switch
// cases, else-branches and closing braces from the original file are not
// shown; code below is kept verbatim.
class X64OperandGenerator FINAL : public OperandGenerator {
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns true if |node| is a constant that can be used as a 32-bit
  // sign-extended immediate of an x64 instruction. A 64-bit constant
  // qualifies only when it round-trips through int32.
  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kInt64Constant: {
        const int64_t value = OpParameter<int64_t>(node);
        // Must survive truncation to 32 bits and sign re-extension.
        return value == static_cast<int64_t>(static_cast<int32_t>(value));

  // Fills |inputs|/|input_count| with operands describing
  // base + index * 2^scale_exponent + displacement and returns the matching
  // addressing mode. Any of base/index/displacement may be NULL.
  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                             Node* base, Node* displacement,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    inputs[(*input_count)++] = UseRegister(base);
    DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
    inputs[(*input_count)++] = UseRegister(index);
    if (displacement != NULL) {
      inputs[(*input_count)++] = UseImmediate(displacement);
      // Base + scaled index + displacement.
      static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                   kMode_MR4I, kMode_MR8I};
      mode = kMRnI_modes[scale_exponent];
      // Base + scaled index, no displacement.
      static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                  kMode_MR4, kMode_MR8};
      mode = kMRn_modes[scale_exponent];
    if (displacement == NULL) {
      inputs[(*input_count)++] = UseImmediate(displacement);
    // No base: scaled index (+ optional displacement) only.
    DCHECK(index != NULL);
    DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
    inputs[(*input_count)++] = UseRegister(index);
    if (displacement != NULL) {
      inputs[(*input_count)++] = UseImmediate(displacement);
      static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                  kMode_M4I, kMode_M8I};
      mode = kMnI_modes[scale_exponent];
      static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
      mode = kMn_modes[scale_exponent];
      if (mode == kMode_MR1) {
        // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
        inputs[(*input_count)++] = UseRegister(index);

  // Folds the address computation feeding |operand| (a load/store) into
  // addressing-mode operands. Falls back to a plain base+index form when the
  // displacement cannot be encoded as an immediate.
  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                  InstructionOperand inputs[],
                                                  size_t* input_count) {
    BaseWithIndexAndDisplacement64Matcher m(operand, true);
    if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
                                         m.displacement(), inputs, input_count);
    inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
    inputs[(*input_count)++] = UseRegister(operand->InputAt(1));

  // A node that is no longer live is profitable as the left (clobbered)
  // operand of a destructive binop.
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
// Selects the load instruction for |node|: the opcode is derived from the
// load representation/type and the address computation is folded into the
// instruction's addressing mode.
// NOTE(review): the switch header, several cases and closing braces are
// elided in this listing.
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  X64OperandGenerator g(this);
    case kRepBit:  // Fall through.
      // 8-bit load: sign- or zero-extend depending on the value type.
      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
      // 16-bit load: sign- or zero-extend depending on the value type.
      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
    case kRepTagged:  // Fall through.
  // One register output; up to three inputs (base, index, displacement).
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  Emit(code, 1, outputs, input_count, inputs);
// Selects the store instruction for |node|. Stores needing a full write
// barrier are lowered to kX64StoreWriteBarrier with fixed registers; plain
// stores fold the address into the addressing mode and take the value as
// the last input (immediate when possible).
// NOTE(review): the opcode switch and several closing braces are elided in
// this listing.
void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    // Write barriers only apply to tagged values.
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
    Emit(kX64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, rbx),
         g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
    case kRepBit:  // Fall through.
    case kRepTagged:  // Fall through.
  // Up to three address inputs plus the value operand.
  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  inputs[input_count++] = value_operand;
  // Stores produce no outputs.
  Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
// Selects a bounds-checked load. When the offset is an Int32Add with a
// non-negative constant summand no larger than the constant length, the
// constant part is folded into the instruction's immediate.
// NOTE(review): the representation switch, early return and closing braces
// are elided in this listing.
void InstructionSelector::VisitCheckedLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
      opcode = kCheckedLoadWord32;
      opcode = kCheckedLoadFloat32;
      opcode = kCheckedLoadFloat64;
  // Fold a constant summand of the offset into the immediate, provided the
  // bound is constant and the summand keeps the access in bounds.
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
  // Generic path: zero immediate displacement, length as immediate if
  // encodable.
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.TempImmediate(0), length_operand);
// Selects a bounds-checked store; mirrors VisitCheckedLoad but emits no
// output and appends the value operand.
// NOTE(review): the representation switch, early return and closing braces
// are elided in this listing.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
      opcode = kCheckedStoreWord8;
      opcode = kCheckedStoreWord16;
      opcode = kCheckedStoreWord32;
      opcode = kCheckedStoreFloat32;
      opcode = kCheckedStoreFloat64;
  // Value is an immediate when encodable, else a register.
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  // Fold a constant summand of the offset into the immediate, provided the
  // bound is constant and the summand keeps the access in bounds.
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
  // Generic path: zero immediate displacement.
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.TempImmediate(0), length_operand, value_operand);
// Shared routine for multiple binary operations. Picks operand forms
// (register/immediate), appends branch labels for branch continuations and
// emits a destructive (output-same-as-first) binop.
// NOTE(review): several else-branches and closing braces are elided in this
// listing.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  // If both inputs refer to the same operand, enforce allocating a register
  // for both of them to ensure that we don't end up generating code like
  // this:
  //   mov rax, [rbp-0x10]
  //   add rax, [rbp-0x10]
  InstructionOperand const input = g.UseRegister(left);
  inputs[input_count++] = input;
  inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
    // For commutative operations, prefer the operand that dies here on the
    // left since it will be clobbered.
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  // Branch continuations carry the true/false labels as extra inputs.
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  // x64 binops are destructive: the output aliases the first input.
  outputs[output_count++] = g.DefineSameAsFirst(node);
    outputs[output_count++] = g.DefineAsRegister(cont->result());

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
359 // Shared routine for multiple binary operations.
360 static void VisitBinop(InstructionSelector* selector, Node* node,
361 InstructionCode opcode) {
362 FlagsContinuation cont;
363 VisitBinop(selector, node, opcode, &cont);
367 void InstructionSelector::VisitWord32And(Node* node) {
368 X64OperandGenerator g(this);
369 Uint32BinopMatcher m(node);
370 if (m.right().Is(0xff)) {
371 Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
372 } else if (m.right().Is(0xffff)) {
373 Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
375 VisitBinop(this, node, kX64And32);
380 void InstructionSelector::VisitWord64And(Node* node) {
381 VisitBinop(this, node, kX64And);
385 void InstructionSelector::VisitWord32Or(Node* node) {
386 VisitBinop(this, node, kX64Or32);
390 void InstructionSelector::VisitWord64Or(Node* node) {
391 VisitBinop(this, node, kX64Or);
395 void InstructionSelector::VisitWord32Xor(Node* node) {
396 X64OperandGenerator g(this);
397 Uint32BinopMatcher m(node);
398 if (m.right().Is(-1)) {
399 Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
401 VisitBinop(this, node, kX64Xor32);
406 void InstructionSelector::VisitWord64Xor(Node* node) {
407 X64OperandGenerator g(this);
408 Uint64BinopMatcher m(node);
409 if (m.right().Is(-1)) {
410 Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
412 VisitBinop(this, node, kX64Xor);
// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
// NOTE(review): the second signature line (the opcode parameter) and some
// braces are elided in this listing.
void VisitWord32Shift(InstructionSelector* selector, Node* node,
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    // Constant shift count encodes as an immediate.
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
    // Variable shift count must live in rcx (cl).
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
// NOTE(review): the opcode parameter line and some braces are elided in
// this listing.
void VisitWord64Shift(InstructionSelector* selector, Node* node,
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    // Constant shift count encodes as an immediate.
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
    // A (count & 0x3F) mask is dropped as redundant for a 64-bit shift —
    // use the unmasked count directly.
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
    // Variable shift count must live in rcx (cl).
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
463 void EmitLea(InstructionSelector* selector, InstructionCode opcode,
464 Node* result, Node* index, int scale, Node* base,
465 Node* displacement) {
466 X64OperandGenerator g(selector);
468 InstructionOperand inputs[4];
469 size_t input_count = 0;
470 AddressingMode mode = g.GenerateMemoryOperandInputs(
471 index, scale, base, displacement, inputs, &input_count);
473 DCHECK_NE(0u, input_count);
474 DCHECK_GE(arraysize(inputs), input_count);
476 InstructionOperand outputs[1];
477 outputs[0] = g.DefineAsRegister(result);
479 opcode = AddressingModeField::encode(mode) | opcode;
481 selector->Emit(opcode, 1, outputs, input_count, inputs);
// 32-bit left shift. Small constant shifts that match a scale factor are
// turned into a leal; anything else takes the generic shift path.
// NOTE(review): the matcher guard and early return are elided in this
// listing.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32ScaleMatcher m(node, true);
  Node* index = node->InputAt(0);
  // For 2^k + 1 factors the input is also the lea base.
  Node* base = m.power_of_two_plus_one() ? index : NULL;
  EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
  VisitWord32Shift(this, node, kX64Shl32);
// 64-bit left shift. When the input is a 32->64 extension and the count is
// in [32, 63], the extended bits are shifted out anyway, so the extension
// is skipped and the unextended input shifted directly.
// NOTE(review): early return and closing braces are elided in this listing.
void InstructionSelector::VisitWord64Shl(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    Emit(kX64Shl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
  VisitWord64Shift(this, node, kX64Shl);
515 void InstructionSelector::VisitWord32Shr(Node* node) {
516 VisitWord32Shift(this, node, kX64Shr32);
520 void InstructionSelector::VisitWord64Shr(Node* node) {
521 VisitWord64Shift(this, node, kX64Shr);
// 32-bit arithmetic shift right. (x << 16) >> 16 and (x << 24) >> 24 are
// sign-extension idioms and are lowered to movsxwl / movsxbl.
// NOTE(review): early returns and closing braces are elided in this
// listing.
void InstructionSelector::VisitWord32Sar(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      // Sign-extend the low 16 bits.
      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      // Sign-extend the low 8 bits.
      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
  // Fallback: generic 32-bit shift.
  VisitWord32Shift(this, node, kX64Sar32);
542 void InstructionSelector::VisitWord64Sar(Node* node) {
543 VisitWord64Shift(this, node, kX64Sar);
547 void InstructionSelector::VisitWord32Ror(Node* node) {
548 VisitWord32Shift(this, node, kX64Ror32);
552 void InstructionSelector::VisitWord64Ror(Node* node) {
553 VisitWord64Shift(this, node, kX64Ror);
557 void InstructionSelector::VisitWord32Clz(Node* node) {
558 X64OperandGenerator g(this);
559 Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// 32-bit add: prefer folding into a leal when the operands form a
// base/index/displacement address expression.
// NOTE(review): the matcher guard (m.matches() && ...), the trailing call
// argument and the early return are elided in this listing.
void InstructionSelector::VisitInt32Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leal pattern
  BaseWithIndexAndDisplacement32Matcher m(node);
      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),

  // No leal pattern match, use addl
  VisitBinop(this, node, kX64Add32);
580 void InstructionSelector::VisitInt64Add(Node* node) {
581 VisitBinop(this, node, kX64Add);
585 void InstructionSelector::VisitInt32Sub(Node* node) {
586 X64OperandGenerator g(this);
587 Int32BinopMatcher m(node);
588 if (m.left().Is(0)) {
589 Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
591 if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
592 // Turn subtractions of constant values into immediate "leal" instructions
593 // by negating the value.
594 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
595 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
596 g.TempImmediate(-m.right().Value()));
599 VisitBinop(this, node, kX64Sub32);
604 void InstructionSelector::VisitInt64Sub(Node* node) {
605 X64OperandGenerator g(this);
606 Int64BinopMatcher m(node);
607 if (m.left().Is(0)) {
608 Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
610 VisitBinop(this, node, kX64Sub);
// Shared routine for multiplications. A constant factor uses the
// non-destructive three-operand imm form; otherwise the destructive
// two-operand form, with operands swapped when the right one dies here.
// NOTE(review): an else-brace and the trailing call argument are elided in
// this listing.
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    // imul dst, src, imm does not require dst == src.
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
    // Prefer clobbering the operand that is no longer live.
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
// Shared routine for *MulHigh: widening multiply keeping the high 32 bits.
// One operand is fixed in rax and the result is produced in rdx.
// NOTE(review): the opcode parameter line, a closing brace and part of a
// comment are elided in this listing.
void VisitMulHigh(InstructionSelector* selector, Node* node,
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  // Swap so that the operand that is no longer live ends up in rax.
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  // TODO(turbofan): We use UseUniqueRegister here to improve register
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right));
650 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
651 X64OperandGenerator g(selector);
652 InstructionOperand temps[] = {g.TempRegister(rdx)};
654 opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
655 g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
659 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
660 X64OperandGenerator g(selector);
661 selector->Emit(opcode, g.DefineAsFixed(node, rdx),
662 g.UseFixed(node->InputAt(0), rax),
663 g.UseUniqueRegister(node->InputAt(1)));
// 32-bit multiply: factors that are powers of two or 2^k + 1 can become a
// single leal via the scale matcher.
// NOTE(review): the matcher guard and early return are elided in this
// listing.
void InstructionSelector::VisitInt32Mul(Node* node) {
  Int32ScaleMatcher m(node, true);
  Node* index = node->InputAt(0);
  // For 2^k + 1 factors the input is also the lea base.
  Node* base = m.power_of_two_plus_one() ? index : NULL;
  EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
  VisitMul(this, node, kX64Imul32);
681 void InstructionSelector::VisitInt64Mul(Node* node) {
682 VisitMul(this, node, kX64Imul);
686 void InstructionSelector::VisitInt32MulHigh(Node* node) {
687 VisitMulHigh(this, node, kX64ImulHigh32);
691 void InstructionSelector::VisitInt32Div(Node* node) {
692 VisitDiv(this, node, kX64Idiv32);
696 void InstructionSelector::VisitInt64Div(Node* node) {
697 VisitDiv(this, node, kX64Idiv);
701 void InstructionSelector::VisitUint32Div(Node* node) {
702 VisitDiv(this, node, kX64Udiv32);
706 void InstructionSelector::VisitUint64Div(Node* node) {
707 VisitDiv(this, node, kX64Udiv);
711 void InstructionSelector::VisitInt32Mod(Node* node) {
712 VisitMod(this, node, kX64Idiv32);
716 void InstructionSelector::VisitInt64Mod(Node* node) {
717 VisitMod(this, node, kX64Idiv);
721 void InstructionSelector::VisitUint32Mod(Node* node) {
722 VisitMod(this, node, kX64Udiv32);
726 void InstructionSelector::VisitUint64Mod(Node* node) {
727 VisitMod(this, node, kX64Udiv);
731 void InstructionSelector::VisitUint32MulHigh(Node* node) {
732 VisitMulHigh(this, node, kX64UmulHigh32);
736 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
737 X64OperandGenerator g(this);
738 Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
742 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
743 X64OperandGenerator g(this);
744 Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
748 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
749 X64OperandGenerator g(this);
750 Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
754 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
755 X64OperandGenerator g(this);
756 Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
760 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
761 X64OperandGenerator g(this);
762 Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
766 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
767 X64OperandGenerator g(this);
768 Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// uint32 -> uint64 zero extension. 32-bit operations on x64 already clear
// the upper 32 bits of the destination, so for those producers the
// extension is a no-op; otherwise an explicit 32-bit move is emitted.
// NOTE(review): the return, default case and closing braces of the switch
// are elided in this listing.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
  // Fallback: an explicit 32-bit move zero-extends.
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
809 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
810 X64OperandGenerator g(this);
811 Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
// int64 -> int32 truncation. A covered 64-bit shift right by 32 is
// combined into a single shift whose low 32 bits are the result; otherwise
// a 32-bit move performs the truncation.
// NOTE(review): returns, the default case and closing braces are elided in
// this listing.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shr: {
        Int64BinopMatcher m(value);
        if (m.right().Is(32)) {
          // (x >> 32) truncated is the upper half: shift in place.
          Emit(kX64Shr, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()), g.TempImmediate(32));
  // Fallback: a 32-bit move performs the truncation.
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
838 void InstructionSelector::VisitFloat64Add(Node* node) {
839 X64OperandGenerator g(this);
840 if (IsSupported(AVX)) {
841 Emit(kAVXFloat64Add, g.DefineAsRegister(node),
842 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
844 Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
845 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
// Float64 subtraction. The pattern -0.0 - RoundDown(-0.0 - x) is the
// ceil(x) idiom and is lowered to a single round-up instruction; otherwise
// plain AVX/SSE subtract.
// NOTE(review): an early return, else-brace and closing braces are elided
// in this listing.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
      CanCover(m.node(), m.right().node())) {
    if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
        CanCover(m.right().node(), m.right().InputAt(0))) {
      Float64BinopMatcher mright0(m.right().InputAt(0));
      if (mright0.left().IsMinusZero()) {
        // -Floor(-x) == Ceil(x): emit a round-up.
        Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
             g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
875 void InstructionSelector::VisitFloat64Mul(Node* node) {
876 X64OperandGenerator g(this);
877 if (IsSupported(AVX)) {
878 Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
879 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
881 Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
882 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
887 void InstructionSelector::VisitFloat64Div(Node* node) {
888 X64OperandGenerator g(this);
889 if (IsSupported(AVX)) {
890 Emit(kAVXFloat64Div, g.DefineAsRegister(node),
891 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
893 Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
894 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
899 void InstructionSelector::VisitFloat64Mod(Node* node) {
900 X64OperandGenerator g(this);
901 InstructionOperand temps[] = {g.TempRegister(rax)};
902 Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
903 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
908 void InstructionSelector::VisitFloat64Max(Node* node) {
909 X64OperandGenerator g(this);
910 if (IsSupported(AVX)) {
911 Emit(kAVXFloat64Max, g.DefineAsRegister(node),
912 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
914 Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
915 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
920 void InstructionSelector::VisitFloat64Min(Node* node) {
921 X64OperandGenerator g(this);
922 if (IsSupported(AVX)) {
923 Emit(kAVXFloat64Min, g.DefineAsRegister(node),
924 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
926 Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
927 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
932 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
933 X64OperandGenerator g(this);
934 Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
940 void VisitRRFloat64(InstructionSelector* selector, InstructionCode opcode,
942 X64OperandGenerator g(selector);
943 selector->Emit(opcode, g.DefineAsRegister(node),
944 g.UseRegister(node->InputAt(0)));
950 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
951 VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundDown), node);
955 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
956 VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundToZero),
// NOTE(review): the body is not visible in this listing — round-ties-away
// appears to have no lowering here; confirm against the original file.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
// Lowers a call node: builds the call buffer, pushes stack arguments in
// reverse order, threads through the exception handler label (if any) and
// emits the call instruction.
// NOTE(review): loop bodies, some case labels/breaks and closing braces
// are elided in this listing.
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  X64OperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    // The frame state input follows the regular call inputs.
    frame_state_descriptor = GetFrameStateDescriptor(
        node->InputAt(static_cast<int>(descriptor->InputCount())));

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(node, &buffer, true, true);

  // Push any stack arguments.
  for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
    // TODO(titzer): handle pushing double parameters.
    InstructionOperand value =
        g.CanBeImmediate(*i) ? g.UseImmediate(*i) : IsSupported(ATOM)
    Emit(kX64Push, g.NoOutput(), value);

  // Pass label of exception handler block.
  CallDescriptor::Flags flags = descriptor->flags();
  if (handler != nullptr) {
    flags |= CallDescriptor::kHasExceptionHandler;
    buffer.instruction_args.push_back(g.Label(handler));

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
  opcode |= MiscField::encode(flags);

  // Emit the call instruction.
  InstructionOperand* first_output =
      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), first_output,
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  call_instr->MarkAsCall();
1027 // Shared routine for multiple compare operations.
1028 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1029 InstructionOperand left, InstructionOperand right,
1030 FlagsContinuation* cont) {
1031 X64OperandGenerator g(selector);
1032 opcode = cont->Encode(opcode);
1033 if (cont->IsBranch()) {
1034 selector->Emit(opcode, g.NoOutput(), left, right,
1035 g.Label(cont->true_block()), g.Label(cont->false_block()));
1037 DCHECK(cont->IsSet());
1038 selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
1043 // Shared routine for multiple compare operations.
1044 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1045 Node* left, Node* right, FlagsContinuation* cont,
1047 X64OperandGenerator g(selector);
1048 if (commutative && g.CanBeBetterLeftOperand(right)) {
1049 std::swap(left, right);
1051 VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
1055 // Shared routine for multiple word compare operations.
1056 void VisitWordCompare(InstructionSelector* selector, Node* node,
1057 InstructionCode opcode, FlagsContinuation* cont) {
1058 X64OperandGenerator g(selector);
1059 Node* const left = node->InputAt(0);
1060 Node* const right = node->InputAt(1);
1062 // Match immediates on left or right side of comparison.
1063 if (g.CanBeImmediate(right)) {
1064 VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
1065 } else if (g.CanBeImmediate(left)) {
1066 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1067 VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
1069 VisitCompare(selector, opcode, left, right, cont,
1070 node->op()->HasProperty(Operator::kCommutative));
// Shared routine for 64-bit word comparison operations. The pattern
// Compare(Load(js_stack_limit), LoadStackPointer) is recognized as the
// stack check and lowered to kX64StackCheck; anything else is a plain cmp.
// NOTE(review): an else-branch, early return and closing braces are elided
// in this listing.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
    LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
    ExternalReference js_stack_limit =
        ExternalReference::address_of_stack_limit(selector->isolate());
    if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
      // Compare(Load(js_stack_limit), LoadStackPointer)
      // Operands arrive swapped relative to a plain cmp, so commute the
      // condition unless the comparison is commutative.
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode = cont->Encode(kX64StackCheck);
      if (cont->IsBranch()) {
        selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
        DCHECK(cont->IsSet());
        selector->Emit(opcode, g.DefineAsRegister(cont->result()));
  // Fallback: generic 64-bit compare.
  VisitWordCompare(selector, node, kX64Cmp, cont);
1102 // Shared routine for comparison with zero.
1103 void VisitCompareZero(InstructionSelector* selector, Node* node,
1104 InstructionCode opcode, FlagsContinuation* cont) {
1105 X64OperandGenerator g(selector);
1106 VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
1110 // Shared routine for multiple float64 compare operations (inputs commuted).
1111 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1112 FlagsContinuation* cont) {
1113 Node* const left = node->InputAt(0);
1114 Node* const right = node->InputAt(1);
1115 VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
1121 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1122 BasicBlock* fbranch) {
1123 X64OperandGenerator g(this);
1124 Node* user = branch;
1125 Node* value = branch->InputAt(0);
1127 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1129 // Try to combine with comparisons against 0 by simply inverting the branch.
1130 while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
1131 Int32BinopMatcher m(value);
1132 if (m.right().Is(0)) {
1134 value = m.left().node();
1141 // Try to combine the branch with a comparison.
1142 if (CanCover(user, value)) {
1143 switch (value->opcode()) {
1144 case IrOpcode::kWord32Equal:
1145 cont.OverwriteAndNegateIfEqual(kEqual);
1146 return VisitWordCompare(this, value, kX64Cmp32, &cont);
1147 case IrOpcode::kInt32LessThan:
1148 cont.OverwriteAndNegateIfEqual(kSignedLessThan);
1149 return VisitWordCompare(this, value, kX64Cmp32, &cont);
1150 case IrOpcode::kInt32LessThanOrEqual:
1151 cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1152 return VisitWordCompare(this, value, kX64Cmp32, &cont);
1153 case IrOpcode::kUint32LessThan:
1154 cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
1155 return VisitWordCompare(this, value, kX64Cmp32, &cont);
1156 case IrOpcode::kUint32LessThanOrEqual:
1157 cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1158 return VisitWordCompare(this, value, kX64Cmp32, &cont);
1159 case IrOpcode::kWord64Equal:
1160 cont.OverwriteAndNegateIfEqual(kEqual);
1161 return VisitWord64Compare(this, value, &cont);
1162 case IrOpcode::kInt64LessThan:
1163 cont.OverwriteAndNegateIfEqual(kSignedLessThan);
1164 return VisitWord64Compare(this, value, &cont);
1165 case IrOpcode::kInt64LessThanOrEqual:
1166 cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1167 return VisitWord64Compare(this, value, &cont);
1168 case IrOpcode::kUint64LessThan:
1169 cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
1170 return VisitWord64Compare(this, value, &cont);
1171 case IrOpcode::kFloat64Equal:
1172 cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
1173 return VisitFloat64Compare(this, value, &cont);
1174 case IrOpcode::kFloat64LessThan:
1175 cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
1176 return VisitFloat64Compare(this, value, &cont);
1177 case IrOpcode::kFloat64LessThanOrEqual:
1178 cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
1179 return VisitFloat64Compare(this, value, &cont);
1180 case IrOpcode::kProjection:
1181 // Check if this is the overflow output projection of an
1182 // <Operation>WithOverflow node.
1183 if (ProjectionIndexOf(value->op()) == 1u) {
1184 // We cannot combine the <Operation>WithOverflow with this branch
1185 // unless the 0th projection (the use of the actual value of the
1186 // <Operation> is either NULL, which means there's no use of the
1187 // actual value, or was already defined, which means it is scheduled
1188 // *AFTER* this branch).
1189 Node* const node = value->InputAt(0);
1190 Node* const result = NodeProperties::FindProjection(node, 0);
1191 if (result == NULL || IsDefined(result)) {
1192 switch (node->opcode()) {
1193 case IrOpcode::kInt32AddWithOverflow:
1194 cont.OverwriteAndNegateIfEqual(kOverflow);
1195 return VisitBinop(this, node, kX64Add32, &cont);
1196 case IrOpcode::kInt32SubWithOverflow:
1197 cont.OverwriteAndNegateIfEqual(kOverflow);
1198 return VisitBinop(this, node, kX64Sub32, &cont);
1205 case IrOpcode::kInt32Sub:
1206 return VisitWordCompare(this, value, kX64Cmp32, &cont);
1207 case IrOpcode::kInt64Sub:
1208 return VisitWord64Compare(this, value, &cont);
1209 case IrOpcode::kWord32And:
1210 return VisitWordCompare(this, value, kX64Test32, &cont);
1211 case IrOpcode::kWord64And:
1212 return VisitWordCompare(this, value, kX64Test, &cont);
1218 // Branch could not be combined with a compare, emit compare against 0.
1219 VisitCompareZero(this, value, kX64Cmp32, &cont);
1223 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1224 X64OperandGenerator g(this);
1225 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1227 // Emit either ArchTableSwitch or ArchLookupSwitch.
1228 size_t table_space_cost = 4 + sw.value_range;
1229 size_t table_time_cost = 3;
1230 size_t lookup_space_cost = 3 + 2 * sw.case_count;
1231 size_t lookup_time_cost = sw.case_count;
1232 if (sw.case_count > 4 &&
1233 table_space_cost + 3 * table_time_cost <=
1234 lookup_space_cost + 3 * lookup_time_cost &&
1235 sw.min_value > std::numeric_limits<int32_t>::min()) {
1236 InstructionOperand index_operand = g.TempRegister();
1238 // The leal automatically zero extends, so result is a valid 64-bit index.
1239 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
1240 value_operand, g.TempImmediate(-sw.min_value));
1242 // Zero extend, because we use it as 64-bit index into the jump table.
1243 Emit(kX64Movl, index_operand, value_operand);
1245 // Generate a table lookup.
1246 return EmitTableSwitch(sw, index_operand);
1249 // Generate a sequence of conditional jumps.
1250 return EmitLookupSwitch(sw, value_operand);
1254 void InstructionSelector::VisitWord32Equal(Node* const node) {
1256 FlagsContinuation cont(kEqual, node);
1257 Int32BinopMatcher m(user);
1258 if (m.right().Is(0)) {
1259 Node* value = m.left().node();
1261 // Try to combine with comparisons against 0 by simply inverting the branch.
1262 while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
1263 Int32BinopMatcher m(value);
1264 if (m.right().Is(0)) {
1266 value = m.left().node();
1273 // Try to combine the branch with a comparison.
1274 if (CanCover(user, value)) {
1275 switch (value->opcode()) {
1276 case IrOpcode::kInt32Sub:
1277 return VisitWordCompare(this, value, kX64Cmp32, &cont);
1278 case IrOpcode::kWord32And:
1279 return VisitWordCompare(this, value, kX64Test32, &cont);
1284 return VisitCompareZero(this, value, kX64Cmp32, &cont);
1286 VisitWordCompare(this, node, kX64Cmp32, &cont);
1290 void InstructionSelector::VisitInt32LessThan(Node* node) {
1291 FlagsContinuation cont(kSignedLessThan, node);
1292 VisitWordCompare(this, node, kX64Cmp32, &cont);
1296 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1297 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1298 VisitWordCompare(this, node, kX64Cmp32, &cont);
1302 void InstructionSelector::VisitUint32LessThan(Node* node) {
1303 FlagsContinuation cont(kUnsignedLessThan, node);
1304 VisitWordCompare(this, node, kX64Cmp32, &cont);
1308 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1309 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1310 VisitWordCompare(this, node, kX64Cmp32, &cont);
1314 void InstructionSelector::VisitWord64Equal(Node* const node) {
1316 FlagsContinuation cont(kEqual, node);
1317 Int64BinopMatcher m(user);
1318 if (m.right().Is(0)) {
1319 Node* value = m.left().node();
1321 // Try to combine with comparisons against 0 by simply inverting the branch.
1322 while (CanCover(user, value) && value->opcode() == IrOpcode::kWord64Equal) {
1323 Int64BinopMatcher m(value);
1324 if (m.right().Is(0)) {
1326 value = m.left().node();
1333 // Try to combine the branch with a comparison.
1334 if (CanCover(user, value)) {
1335 switch (value->opcode()) {
1336 case IrOpcode::kInt64Sub:
1337 return VisitWord64Compare(this, value, &cont);
1338 case IrOpcode::kWord64And:
1339 return VisitWordCompare(this, value, kX64Test, &cont);
1344 return VisitCompareZero(this, value, kX64Cmp, &cont);
1346 VisitWord64Compare(this, node, &cont);
1350 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1351 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1352 FlagsContinuation cont(kOverflow, ovf);
1353 VisitBinop(this, node, kX64Add32, &cont);
1355 FlagsContinuation cont;
1356 VisitBinop(this, node, kX64Add32, &cont);
1360 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1361 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1362 FlagsContinuation cont(kOverflow, ovf);
1363 return VisitBinop(this, node, kX64Sub32, &cont);
1365 FlagsContinuation cont;
1366 VisitBinop(this, node, kX64Sub32, &cont);
1370 void InstructionSelector::VisitInt64LessThan(Node* node) {
1371 FlagsContinuation cont(kSignedLessThan, node);
1372 VisitWord64Compare(this, node, &cont);
1376 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
1377 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1378 VisitWord64Compare(this, node, &cont);
1382 void InstructionSelector::VisitUint64LessThan(Node* node) {
1383 FlagsContinuation cont(kUnsignedLessThan, node);
1384 VisitWord64Compare(this, node, &cont);
1388 void InstructionSelector::VisitFloat64Equal(Node* node) {
1389 FlagsContinuation cont(kUnorderedEqual, node);
1390 VisitFloat64Compare(this, node, &cont);
1394 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1395 FlagsContinuation cont(kUnsignedGreaterThan, node);
1396 VisitFloat64Compare(this, node, &cont);
1400 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1401 FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
1402 VisitFloat64Compare(this, node, &cont);
1406 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1407 X64OperandGenerator g(this);
1408 Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
1409 g.Use(node->InputAt(0)));
1413 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1414 X64OperandGenerator g(this);
1415 Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
1416 g.Use(node->InputAt(0)));
1420 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1421 X64OperandGenerator g(this);
1422 Node* left = node->InputAt(0);
1423 Node* right = node->InputAt(1);
1424 Float64Matcher mleft(left);
1425 if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
1426 Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
1429 Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1430 g.UseRegister(left), g.Use(right));
1434 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1435 X64OperandGenerator g(this);
1436 Node* left = node->InputAt(0);
1437 Node* right = node->InputAt(1);
1438 Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1439 g.UseRegister(left), g.Use(right));
1444 MachineOperatorBuilder::Flags
1445 InstructionSelector::SupportedMachineOperatorFlags() {
1446 MachineOperatorBuilder::Flags flags =
1447 MachineOperatorBuilder::kFloat64Max |
1448 MachineOperatorBuilder::kFloat64Min |
1449 MachineOperatorBuilder::kWord32ShiftIsSafe;
1450 if (CpuFeatures::IsSupported(SSE4_1)) {
1451 flags |= MachineOperatorBuilder::kFloat64RoundDown |
1452 MachineOperatorBuilder::kFloat64RoundTruncate;
1457 } // namespace compiler
1458 } // namespace internal