1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h"
7 #include "src/compiler/node-properties.h"
17 kInt16Imm_4ByteAligned,
24 // Adds PPC-specific methods for generating operands.
25 class PPCOperandGenerator FINAL : public OperandGenerator {
27 explicit PPCOperandGenerator(InstructionSelector* selector)
28 : OperandGenerator(selector) {}
30 InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
31 if (CanBeImmediate(node, mode)) {
32 return UseImmediate(node);
34 return UseRegister(node);
37 bool CanBeImmediate(Node* node, ImmediateMode mode) {
39 if (node->opcode() == IrOpcode::kInt32Constant)
40 value = OpParameter<int32_t>(node);
41 else if (node->opcode() == IrOpcode::kInt64Constant)
42 value = OpParameter<int64_t>(node);
45 return CanBeImmediate(value, mode);
48 bool CanBeImmediate(int64_t value, ImmediateMode mode) {
51 return is_int16(value);
52 case kInt16Imm_Unsigned:
53 return is_uint16(value);
54 case kInt16Imm_Negate:
55 return is_int16(-value);
56 case kInt16Imm_4ByteAligned:
57 return is_int16(value) && !(value & 3);
59 return 0 <= value && value < 32;
61 return 0 <= value && value < 64;
70 static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
72 PPCOperandGenerator g(selector);
73 selector->Emit(opcode, g.DefineAsRegister(node),
74 g.UseRegister(node->InputAt(0)));
78 static void VisitRRR(InstructionSelector* selector, Node* node,
80 PPCOperandGenerator g(selector);
81 selector->Emit(opcode, g.DefineAsRegister(node),
82 g.UseRegister(node->InputAt(0)),
83 g.UseRegister(node->InputAt(1)));
87 static void VisitRRRFloat64(InstructionSelector* selector, Node* node,
89 PPCOperandGenerator g(selector);
90 selector->Emit(opcode, g.DefineAsRegister(node),
91 g.UseRegister(node->InputAt(0)),
92 g.UseRegister(node->InputAt(1)));
96 static void VisitRRO(InstructionSelector* selector, Node* node,
97 ArchOpcode opcode, ImmediateMode operand_mode) {
98 PPCOperandGenerator g(selector);
99 selector->Emit(opcode, g.DefineAsRegister(node),
100 g.UseRegister(node->InputAt(0)),
101 g.UseOperand(node->InputAt(1), operand_mode));
105 // Shared routine for multiple binary operations.
106 template <typename Matcher>
107 static void VisitBinop(InstructionSelector* selector, Node* node,
108 InstructionCode opcode, ImmediateMode operand_mode,
109 FlagsContinuation* cont) {
110 PPCOperandGenerator g(selector);
112 InstructionOperand inputs[4];
113 size_t input_count = 0;
114 InstructionOperand outputs[2];
115 size_t output_count = 0;
117 inputs[input_count++] = g.UseRegister(m.left().node());
118 inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
120 if (cont->IsBranch()) {
121 inputs[input_count++] = g.Label(cont->true_block());
122 inputs[input_count++] = g.Label(cont->false_block());
125 outputs[output_count++] = g.DefineAsRegister(node);
127 outputs[output_count++] = g.DefineAsRegister(cont->result());
130 DCHECK_NE(0u, input_count);
131 DCHECK_NE(0u, output_count);
132 DCHECK_GE(arraysize(inputs), input_count);
133 DCHECK_GE(arraysize(outputs), output_count);
135 selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
140 // Shared routine for multiple binary operations.
141 template <typename Matcher>
142 static void VisitBinop(InstructionSelector* selector, Node* node,
143 ArchOpcode opcode, ImmediateMode operand_mode) {
144 FlagsContinuation cont;
145 VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
149 void InstructionSelector::VisitLoad(Node* node) {
150 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
151 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
152 PPCOperandGenerator g(this);
153 Node* base = node->InputAt(0);
154 Node* offset = node->InputAt(1);
157 ImmediateMode mode = kInt16Imm;
160 opcode = kPPC_LoadFloat32;
163 opcode = kPPC_LoadFloat64;
165 case kRepBit: // Fall through.
167 opcode = (typ == kTypeInt32) ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
170 opcode = (typ == kTypeInt32) ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
172 #if !V8_TARGET_ARCH_PPC64
173 case kRepTagged: // Fall through.
176 opcode = kPPC_LoadWordS32;
177 #if V8_TARGET_ARCH_PPC64
178 // TODO(mbrandy): this applies to signed loads only (lwa)
179 mode = kInt16Imm_4ByteAligned;
182 #if V8_TARGET_ARCH_PPC64
183 case kRepTagged: // Fall through.
185 opcode = kPPC_LoadWord64;
186 mode = kInt16Imm_4ByteAligned;
193 if (g.CanBeImmediate(offset, mode)) {
194 Emit(opcode | AddressingModeField::encode(kMode_MRI),
195 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
196 } else if (g.CanBeImmediate(base, mode)) {
197 Emit(opcode | AddressingModeField::encode(kMode_MRI),
198 g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
200 Emit(opcode | AddressingModeField::encode(kMode_MRR),
201 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
206 void InstructionSelector::VisitStore(Node* node) {
207 PPCOperandGenerator g(this);
208 Node* base = node->InputAt(0);
209 Node* offset = node->InputAt(1);
210 Node* value = node->InputAt(2);
212 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
213 MachineType rep = RepresentationOf(store_rep.machine_type());
214 if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
215 DCHECK(rep == kRepTagged);
216 // TODO(dcarney): refactor RecordWrite function to take temp registers
217 // and pass them here instead of using fixed regs
218 // TODO(dcarney): handle immediate indices.
219 InstructionOperand temps[] = {g.TempRegister(r8), g.TempRegister(r9)};
220 Emit(kPPC_StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r7),
221 g.UseFixed(offset, r8), g.UseFixed(value, r9), arraysize(temps),
225 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
227 ImmediateMode mode = kInt16Imm;
230 opcode = kPPC_StoreFloat32;
233 opcode = kPPC_StoreFloat64;
235 case kRepBit: // Fall through.
237 opcode = kPPC_StoreWord8;
240 opcode = kPPC_StoreWord16;
242 #if !V8_TARGET_ARCH_PPC64
243 case kRepTagged: // Fall through.
246 opcode = kPPC_StoreWord32;
248 #if V8_TARGET_ARCH_PPC64
249 case kRepTagged: // Fall through.
251 opcode = kPPC_StoreWord64;
252 mode = kInt16Imm_4ByteAligned;
259 if (g.CanBeImmediate(offset, mode)) {
260 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
261 g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
262 } else if (g.CanBeImmediate(base, mode)) {
263 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
264 g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
266 Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
267 g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
272 void InstructionSelector::VisitCheckedLoad(Node* node) {
273 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
274 MachineType typ = TypeOf(OpParameter<MachineType>(node));
275 PPCOperandGenerator g(this);
276 Node* const base = node->InputAt(0);
277 Node* const offset = node->InputAt(1);
278 Node* const length = node->InputAt(2);
282 opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
285 opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
288 opcode = kCheckedLoadWord32;
291 opcode = kCheckedLoadFloat32;
294 opcode = kCheckedLoadFloat64;
300 AddressingMode addressingMode = kMode_MRR;
301 Emit(opcode | AddressingModeField::encode(addressingMode),
302 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
303 g.UseOperand(length, kInt16Imm_Unsigned));
307 void InstructionSelector::VisitCheckedStore(Node* node) {
308 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
309 PPCOperandGenerator g(this);
310 Node* const base = node->InputAt(0);
311 Node* const offset = node->InputAt(1);
312 Node* const length = node->InputAt(2);
313 Node* const value = node->InputAt(3);
317 opcode = kCheckedStoreWord8;
320 opcode = kCheckedStoreWord16;
323 opcode = kCheckedStoreWord32;
326 opcode = kCheckedStoreFloat32;
329 opcode = kCheckedStoreFloat64;
335 AddressingMode addressingMode = kMode_MRR;
336 Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
337 g.UseRegister(base), g.UseRegister(offset),
338 g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
342 template <typename Matcher>
343 static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
344 ArchOpcode opcode, bool left_can_cover,
345 bool right_can_cover, ImmediateMode imm_mode) {
346 PPCOperandGenerator g(selector);
348 // Map instruction to equivalent operation with inverted right input.
349 ArchOpcode inv_opcode = opcode;
352 inv_opcode = kPPC_AndComplement32;
355 inv_opcode = kPPC_AndComplement64;
358 inv_opcode = kPPC_OrComplement32;
361 inv_opcode = kPPC_OrComplement64;
367 // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
368 if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
369 Matcher mleft(m->left().node());
370 if (mleft.right().Is(-1)) {
371 selector->Emit(inv_opcode, g.DefineAsRegister(node),
372 g.UseRegister(m->right().node()),
373 g.UseRegister(mleft.left().node()));
378 // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
379 if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
381 Matcher mright(m->right().node());
382 if (mright.right().Is(-1)) {
383 // TODO(all): support shifted operand on right.
384 selector->Emit(inv_opcode, g.DefineAsRegister(node),
385 g.UseRegister(m->left().node()),
386 g.UseRegister(mright.left().node()));
391 VisitBinop<Matcher>(selector, node, opcode, imm_mode);
395 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
396 int mask_width = base::bits::CountPopulation32(value);
397 int mask_msb = base::bits::CountLeadingZeros32(value);
398 int mask_lsb = base::bits::CountTrailingZeros32(value);
399 if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
401 *mb = mask_lsb + mask_width - 1;
#if V8_TARGET_ARCH_PPC64
// 64-bit analogue of IsContiguousMask32 (rldic-style mask bounds).
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation64(value);
  int mask_msb = base::bits::CountLeadingZeros64(value);
  int mask_lsb = base::bits::CountTrailingZeros64(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}
#endif
421 // TODO(mbrandy): Absorb rotate-right into rlwinm?
422 void InstructionSelector::VisitWord32And(Node* node) {
423 PPCOperandGenerator g(this);
424 Int32BinopMatcher m(node);
427 if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
429 Node* left = m.left().node();
430 if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
431 CanCover(node, left)) {
432 // Try to absorb left/right shift into rlwinm
433 Int32BinopMatcher mleft(m.left().node());
434 if (mleft.right().IsInRange(0, 31)) {
435 left = mleft.left().node();
436 sh = mleft.right().Value();
437 if (m.left().IsWord32Shr()) {
438 // Adjust the mask such that it doesn't include any rotated bits.
439 if (mb > 31 - sh) mb = 31 - sh;
440 sh = (32 - sh) & 0x1f;
442 // Adjust the mask such that it doesn't include any rotated bits.
443 if (me < sh) me = sh;
448 Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
449 g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
453 VisitLogical<Int32BinopMatcher>(
454 this, node, &m, kPPC_And32, CanCover(node, m.left().node()),
455 CanCover(node, m.right().node()), kInt16Imm_Unsigned);
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb rotate-right into rldic?
// 64-bit analogue of VisitWord32And: fuses mask-and-shift patterns into one
// of the rotate-left-and-clear instruction forms when the mask shape allows.
void InstructionSelector::VisitWord64And(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
        CanCover(node, left)) {
      // Try to absorb left/right shift into rldic
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord64Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          sh = (64 - sh) & 0x3f;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        match = true;
        opcode = kPPC_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        match = true;
        opcode = kPPC_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
        match = true;
        opcode = kPPC_RotLeftAndClear64;
        mask = mb;
      }
      if (match) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kPPC_And64, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif
517 void InstructionSelector::VisitWord32Or(Node* node) {
518 Int32BinopMatcher m(node);
519 VisitLogical<Int32BinopMatcher>(
520 this, node, &m, kPPC_Or32, CanCover(node, m.left().node()),
521 CanCover(node, m.right().node()), kInt16Imm_Unsigned);
#if V8_TARGET_ARCH_PPC64
// 64-bit Or; VisitLogical handles complement fusion.
void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kPPC_Or64, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif
535 void InstructionSelector::VisitWord32Xor(Node* node) {
536 PPCOperandGenerator g(this);
537 Int32BinopMatcher m(node);
538 if (m.right().Is(-1)) {
539 Emit(kPPC_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
541 VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor32, kInt16Imm_Unsigned);
#if V8_TARGET_ARCH_PPC64
// Xor with -1 is a bitwise not; otherwise a plain xor.
void InstructionSelector::VisitWord64Xor(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kPPC_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor64, kInt16Imm_Unsigned);
  }
}
#endif
559 void InstructionSelector::VisitWord32Shl(Node* node) {
560 PPCOperandGenerator g(this);
561 Int32BinopMatcher m(node);
562 if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
563 // Try to absorb logical-and into rlwinm
564 Int32BinopMatcher mleft(m.left().node());
565 int sh = m.right().Value();
568 if (mleft.right().HasValue() &&
569 IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
570 // Adjust the mask such that it doesn't include any rotated bits.
571 if (me < sh) me = sh;
573 Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
574 g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
575 g.TempImmediate(mb), g.TempImmediate(me));
580 VisitRRO(this, node, kPPC_ShiftLeft32, kShift32Imm);
#if V8_TARGET_ARCH_PPC64
// Fuses Shl(And(x, mask), imm) into a rotate-left-and-clear form when the
// shifted mask is contiguous and matches one of the rldic shapes.
void InstructionSelector::VisitWord64Shl(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    // Try to absorb logical-and into rldic
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          match = true;
          opcode = kPPC_RotLeftAndClear64;
          mask = mb;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, node, kPPC_ShiftLeft64, kShift64Imm);
}
#endif
630 void InstructionSelector::VisitWord32Shr(Node* node) {
631 PPCOperandGenerator g(this);
632 Int32BinopMatcher m(node);
633 if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
634 // Try to absorb logical-and into rlwinm
635 Int32BinopMatcher mleft(m.left().node());
636 int sh = m.right().Value();
639 if (mleft.right().HasValue() &&
640 IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
641 // Adjust the mask such that it doesn't include any rotated bits.
642 if (mb > 31 - sh) mb = 31 - sh;
643 sh = (32 - sh) & 0x1f;
645 Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
646 g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
647 g.TempImmediate(mb), g.TempImmediate(me));
652 VisitRRO(this, node, kPPC_ShiftRight32, kShift32Imm);
#if V8_TARGET_ARCH_PPC64
// Fuses Shr(And(x, mask), imm) into a rotate-left-and-clear form when the
// shifted mask is contiguous and clears only one end of the word.
void InstructionSelector::VisitWord64Shr(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    // Try to absorb logical-and into rldic
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      sh = (64 - sh) & 0x3f;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, node, kPPC_ShiftRight64, kShift64Imm);
}
#endif
698 void InstructionSelector::VisitWord32Sar(Node* node) {
699 PPCOperandGenerator g(this);
700 Int32BinopMatcher m(node);
701 // Replace with sign extension for (x << K) >> K where K is 16 or 24.
702 if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
703 Int32BinopMatcher mleft(m.left().node());
704 if (mleft.right().Is(16) && m.right().Is(16)) {
705 Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
706 g.UseRegister(mleft.left().node()));
708 } else if (mleft.right().Is(24) && m.right().Is(24)) {
709 Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
710 g.UseRegister(mleft.left().node()));
714 VisitRRO(this, node, kPPC_ShiftRightAlg32, kShift32Imm);
#if V8_TARGET_ARCH_PPC64
// 64-bit arithmetic right shift.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, node, kPPC_ShiftRightAlg64, kShift64Imm);
}
#endif
725 // TODO(mbrandy): Absorb logical-and into rlwinm?
726 void InstructionSelector::VisitWord32Ror(Node* node) {
727 VisitRRO(this, node, kPPC_RotRight32, kShift32Imm);
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb logical-and into rldic?
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, node, kPPC_RotRight64, kShift64Imm);
}
#endif
739 void InstructionSelector::VisitWord32Clz(Node* node) {
740 PPCOperandGenerator g(this);
741 Emit(kPPC_Cntlz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
745 void InstructionSelector::VisitInt32Add(Node* node) {
746 VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
}
#endif
757 void InstructionSelector::VisitInt32Sub(Node* node) {
758 PPCOperandGenerator g(this);
759 Int32BinopMatcher m(node);
760 if (m.left().Is(0)) {
761 Emit(kPPC_Neg32, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
763 VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub32, kInt16Imm_Negate);
#if V8_TARGET_ARCH_PPC64
// 0 - x is selected as a negate instruction.
void InstructionSelector::VisitInt64Sub(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kPPC_Neg64, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub64, kInt16Imm_Negate);
  }
}
#endif
781 void InstructionSelector::VisitInt32Mul(Node* node) {
782 VisitRRR(this, node, kPPC_Mul32);
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitRRR(this, node, kPPC_Mul64);
}
#endif
793 void InstructionSelector::VisitInt32MulHigh(Node* node) {
794 PPCOperandGenerator g(this);
795 Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
796 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
800 void InstructionSelector::VisitUint32MulHigh(Node* node) {
801 PPCOperandGenerator g(this);
802 Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
803 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
807 void InstructionSelector::VisitInt32Div(Node* node) {
808 VisitRRR(this, node, kPPC_Div32);
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, node, kPPC_Div64);
}
#endif
819 void InstructionSelector::VisitUint32Div(Node* node) {
820 VisitRRR(this, node, kPPC_DivU32);
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, node, kPPC_DivU64);
}
#endif
831 void InstructionSelector::VisitInt32Mod(Node* node) {
832 VisitRRR(this, node, kPPC_Mod32);
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, node, kPPC_Mod64);
}
#endif
843 void InstructionSelector::VisitUint32Mod(Node* node) {
844 VisitRRR(this, node, kPPC_ModU32);
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, node, kPPC_ModU64);
}
#endif
855 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
856 PPCOperandGenerator g(this);
857 Emit(kPPC_Float32ToFloat64, g.DefineAsRegister(node),
858 g.UseRegister(node->InputAt(0)));
862 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
863 PPCOperandGenerator g(this);
864 Emit(kPPC_Int32ToFloat64, g.DefineAsRegister(node),
865 g.UseRegister(node->InputAt(0)));
869 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
870 PPCOperandGenerator g(this);
871 Emit(kPPC_Uint32ToFloat64, g.DefineAsRegister(node),
872 g.UseRegister(node->InputAt(0)));
876 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
877 PPCOperandGenerator g(this);
878 Emit(kPPC_Float64ToInt32, g.DefineAsRegister(node),
879 g.UseRegister(node->InputAt(0)));
883 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
884 PPCOperandGenerator g(this);
885 Emit(kPPC_Float64ToUint32, g.DefineAsRegister(node),
886 g.UseRegister(node->InputAt(0)));
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  PPCOperandGenerator g(this);
  Emit(kPPC_ExtendSignWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  PPCOperandGenerator g(this);
  Emit(kPPC_Uint32ToUint64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif
908 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
909 PPCOperandGenerator g(this);
910 Emit(kPPC_Float64ToFloat32, g.DefineAsRegister(node),
911 g.UseRegister(node->InputAt(0)));
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  PPCOperandGenerator g(this);
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  Emit(kPPC_Int64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif
925 void InstructionSelector::VisitFloat64Add(Node* node) {
926 // TODO(mbrandy): detect multiply-add
927 VisitRRRFloat64(this, node, kPPC_AddFloat64);
931 void InstructionSelector::VisitFloat64Sub(Node* node) {
932 // TODO(mbrandy): detect multiply-subtract
933 PPCOperandGenerator g(this);
934 Float64BinopMatcher m(node);
935 if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
936 CanCover(m.node(), m.right().node())) {
937 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
938 CanCover(m.right().node(), m.right().InputAt(0))) {
939 Float64BinopMatcher mright0(m.right().InputAt(0));
940 if (mright0.left().IsMinusZero()) {
941 // -floor(-x) = ceil(x)
942 Emit(kPPC_CeilFloat64, g.DefineAsRegister(node),
943 g.UseRegister(mright0.right().node()));
948 VisitRRRFloat64(this, node, kPPC_SubFloat64);
952 void InstructionSelector::VisitFloat64Mul(Node* node) {
953 // TODO(mbrandy): detect negate
954 VisitRRRFloat64(this, node, kPPC_MulFloat64);
958 void InstructionSelector::VisitFloat64Div(Node* node) {
959 VisitRRRFloat64(this, node, kPPC_DivFloat64);
963 void InstructionSelector::VisitFloat64Mod(Node* node) {
964 PPCOperandGenerator g(this);
965 Emit(kPPC_ModFloat64, g.DefineAsFixed(node, d1),
966 g.UseFixed(node->InputAt(0), d1),
967 g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
971 void InstructionSelector::VisitFloat64Max(Node* node) {
972 VisitRRRFloat64(this, node, kPPC_MaxFloat64);
976 void InstructionSelector::VisitFloat64Min(Node* node) {
977 VisitRRRFloat64(this, node, kPPC_MinFloat64);
981 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
982 VisitRRFloat64(this, kPPC_SqrtFloat64, node);
986 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
987 VisitRRFloat64(this, kPPC_FloorFloat64, node);
991 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
992 VisitRRFloat64(this, kPPC_TruncateFloat64, node);
996 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
997 VisitRRFloat64(this, kPPC_RoundFloat64, node);
1001 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1002 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1003 FlagsContinuation cont(kOverflow, ovf);
1004 return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
1007 FlagsContinuation cont;
1008 VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, kInt16Imm,
1013 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1014 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1015 FlagsContinuation cont(kOverflow, ovf);
1016 return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
1017 kInt16Imm_Negate, &cont);
1019 FlagsContinuation cont;
1020 VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
1021 kInt16Imm_Negate, &cont);
1025 static bool CompareLogical(FlagsContinuation* cont) {
1026 switch (cont->condition()) {
1027 case kUnsignedLessThan:
1028 case kUnsignedGreaterThanOrEqual:
1029 case kUnsignedLessThanOrEqual:
1030 case kUnsignedGreaterThan:
1040 // Shared routine for multiple compare operations.
1041 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1042 InstructionOperand left, InstructionOperand right,
1043 FlagsContinuation* cont) {
1044 PPCOperandGenerator g(selector);
1045 opcode = cont->Encode(opcode);
1046 if (cont->IsBranch()) {
1047 selector->Emit(opcode, g.NoOutput(), left, right,
1048 g.Label(cont->true_block()), g.Label(cont->false_block()));
1050 DCHECK(cont->IsSet());
1051 selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
1056 // Shared routine for multiple word compare operations.
1057 static void VisitWordCompare(InstructionSelector* selector, Node* node,
1058 InstructionCode opcode, FlagsContinuation* cont,
1059 bool commutative, ImmediateMode immediate_mode) {
1060 PPCOperandGenerator g(selector);
1061 Node* left = node->InputAt(0);
1062 Node* right = node->InputAt(1);
1064 // Match immediates on left or right side of comparison.
1065 if (g.CanBeImmediate(right, immediate_mode)) {
1066 VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
1068 } else if (g.CanBeImmediate(left, immediate_mode)) {
1069 if (!commutative) cont->Commute();
1070 VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
1073 VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
1079 static void VisitWord32Compare(InstructionSelector* selector, Node* node,
1080 FlagsContinuation* cont) {
1081 ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
1082 VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
#if V8_TARGET_ARCH_PPC64
// 64-bit word compare; unsigned conditions use the unsigned immediate mode.
static void VisitWord64Compare(InstructionSelector* selector, Node* node,
                               FlagsContinuation* cont) {
  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
  VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
}
#endif
1095 // Shared routine for multiple float compare operations.
1096 static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1097 FlagsContinuation* cont) {
1098 PPCOperandGenerator g(selector);
1099 Node* left = node->InputAt(0);
1100 Node* right = node->InputAt(1);
1101 VisitCompare(selector, kPPC_CmpFloat64, g.UseRegister(left),
1102 g.UseRegister(right), cont);
1106 // Shared routine for word comparisons against zero.
1107 static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
1108 Node* value, InstructionCode opcode,
1109 FlagsContinuation* cont) {
1110 while (selector->CanCover(user, value)) {
1111 switch (value->opcode()) {
1112 case IrOpcode::kWord32Equal: {
1113 // Combine with comparisons against 0 by simply inverting the
1115 Int32BinopMatcher m(value);
1116 if (m.right().Is(0)) {
1118 value = m.left().node();
1122 cont->OverwriteAndNegateIfEqual(kEqual);
1123 return VisitWord32Compare(selector, value, cont);
1125 case IrOpcode::kInt32LessThan:
1126 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1127 return VisitWord32Compare(selector, value, cont);
1128 case IrOpcode::kInt32LessThanOrEqual:
1129 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1130 return VisitWord32Compare(selector, value, cont);
1131 case IrOpcode::kUint32LessThan:
1132 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1133 return VisitWord32Compare(selector, value, cont);
1134 case IrOpcode::kUint32LessThanOrEqual:
1135 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1136 return VisitWord32Compare(selector, value, cont);
1137 #if V8_TARGET_ARCH_PPC64
1138 case IrOpcode::kWord64Equal:
1139 cont->OverwriteAndNegateIfEqual(kEqual);
1140 return VisitWord64Compare(selector, value, cont);
1141 case IrOpcode::kInt64LessThan:
1142 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1143 return VisitWord64Compare(selector, value, cont);
1144 case IrOpcode::kInt64LessThanOrEqual:
1145 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1146 return VisitWord64Compare(selector, value, cont);
1147 case IrOpcode::kUint64LessThan:
1148 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1149 return VisitWord64Compare(selector, value, cont);
1151 case IrOpcode::kFloat64Equal:
1152 cont->OverwriteAndNegateIfEqual(kEqual);
1153 return VisitFloat64Compare(selector, value, cont);
1154 case IrOpcode::kFloat64LessThan:
1155 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1156 return VisitFloat64Compare(selector, value, cont);
1157 case IrOpcode::kFloat64LessThanOrEqual:
1158 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1159 return VisitFloat64Compare(selector, value, cont);
1160 case IrOpcode::kProjection:
1161 // Check if this is the overflow output projection of an
1162 // <Operation>WithOverflow node.
1163 if (ProjectionIndexOf(value->op()) == 1u) {
1164 // We cannot combine the <Operation>WithOverflow with this branch
1165 // unless the 0th projection (the use of the actual value of the
1166 // <Operation> is either NULL, which means there's no use of the
1167 // actual value, or was already defined, which means it is scheduled
1168 // *AFTER* this branch).
1169 Node* const node = value->InputAt(0);
1170 Node* const result = NodeProperties::FindProjection(node, 0);
1171 if (result == NULL || selector->IsDefined(result)) {
1172 switch (node->opcode()) {
1173 case IrOpcode::kInt32AddWithOverflow:
1174 cont->OverwriteAndNegateIfEqual(kOverflow);
1175 return VisitBinop<Int32BinopMatcher>(
1176 selector, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
1177 case IrOpcode::kInt32SubWithOverflow:
1178 cont->OverwriteAndNegateIfEqual(kOverflow);
1179 return VisitBinop<Int32BinopMatcher>(selector, node,
1180 kPPC_SubWithOverflow32,
1181 kInt16Imm_Negate, cont);
1188 case IrOpcode::kInt32Sub:
1189 return VisitWord32Compare(selector, value, cont);
1190 case IrOpcode::kWord32And:
1191 // TODO(mbandy): opportunity for rlwinm?
1192 return VisitWordCompare(selector, value, kPPC_Tst32, cont, true,
1193 kInt16Imm_Unsigned);
1194 // TODO(mbrandy): Handle?
1195 // case IrOpcode::kInt32Add:
1196 // case IrOpcode::kWord32Or:
1197 // case IrOpcode::kWord32Xor:
1198 // case IrOpcode::kWord32Sar:
1199 // case IrOpcode::kWord32Shl:
1200 // case IrOpcode::kWord32Shr:
1201 // case IrOpcode::kWord32Ror:
1202 #if V8_TARGET_ARCH_PPC64
1203 case IrOpcode::kInt64Sub:
1204 return VisitWord64Compare(selector, value, cont);
1205 case IrOpcode::kWord64And:
1206 // TODO(mbandy): opportunity for rldic?
1207 return VisitWordCompare(selector, value, kPPC_Tst64, cont, true,
1208 kInt16Imm_Unsigned);
1209 // TODO(mbrandy): Handle?
1210 // case IrOpcode::kInt64Add:
1211 // case IrOpcode::kWord64Or:
1212 // case IrOpcode::kWord64Xor:
1213 // case IrOpcode::kWord64Sar:
1214 // case IrOpcode::kWord64Shl:
1215 // case IrOpcode::kWord64Shr:
1216 // case IrOpcode::kWord64Ror:
1224 // Branch could not be combined with a compare, emit compare against 0.
1225 PPCOperandGenerator g(selector);
1226 VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
// Emits a 32-bit comparison of |value| against zero, routing the result
// through |cont| (branch or materialized boolean).  |user| is the node that
// consumes the comparison.
static void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
                                   Node* value, FlagsContinuation* cont) {
  VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont);
1237 #if V8_TARGET_ARCH_PPC64
// 64-bit counterpart of VisitWord32CompareZero: compares |value| against
// zero with kPPC_Cmp64 and hands the condition to |cont|.
static void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
                                   Node* value, FlagsContinuation* cont) {
  VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont);
1245 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1246 BasicBlock* fbranch) {
1247 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1248 VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
// Lowers a Switch node to either a jump table (ArchTableSwitch) or a chain of
// conditional branches (ArchLookupSwitch), chosen by a size/speed heuristic.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  PPCOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  // Abstract cost units: a table dispatch costs ~4 setup units plus one slot
  // per value in the covered range; a lookup chain costs ~2 units per case.
  // Time cost is weighted 3x against space cost in the comparison below.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      // Excluding INT32_MIN keeps the bias subtraction below from overflowing.
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    // Bias the switch value by min_value so the table index is zero-based.
    // NOTE(review): other backends guard this subtraction with
    // `if (sw.min_value)` to skip the bias when it is zero — confirm whether
    // that guard was lost here.
    index_operand = g.TempRegister();
    Emit(kPPC_Sub32, index_operand, value_operand,
         g.TempImmediate(sw.min_value));
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
1280 void InstructionSelector::VisitWord32Equal(Node* const node) {
1281 FlagsContinuation cont(kEqual, node);
1282 Int32BinopMatcher m(node);
1283 if (m.right().Is(0)) {
1284 return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
1286 VisitWord32Compare(this, node, &cont);
1290 void InstructionSelector::VisitInt32LessThan(Node* node) {
1291 FlagsContinuation cont(kSignedLessThan, node);
1292 VisitWord32Compare(this, node, &cont);
1296 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1297 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1298 VisitWord32Compare(this, node, &cont);
1302 void InstructionSelector::VisitUint32LessThan(Node* node) {
1303 FlagsContinuation cont(kUnsignedLessThan, node);
1304 VisitWord32Compare(this, node, &cont);
1308 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1309 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1310 VisitWord32Compare(this, node, &cont);
1314 #if V8_TARGET_ARCH_PPC64
1315 void InstructionSelector::VisitWord64Equal(Node* const node) {
1316 FlagsContinuation cont(kEqual, node);
1317 Int64BinopMatcher m(node);
1318 if (m.right().Is(0)) {
1319 return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
1321 VisitWord64Compare(this, node, &cont);
1325 void InstructionSelector::VisitInt64LessThan(Node* node) {
1326 FlagsContinuation cont(kSignedLessThan, node);
1327 VisitWord64Compare(this, node, &cont);
1331 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
1332 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1333 VisitWord64Compare(this, node, &cont);
1337 void InstructionSelector::VisitUint64LessThan(Node* node) {
1338 FlagsContinuation cont(kUnsignedLessThan, node);
1339 VisitWord64Compare(this, node, &cont);
1344 void InstructionSelector::VisitFloat64Equal(Node* node) {
1345 FlagsContinuation cont(kEqual, node);
1346 VisitFloat64Compare(this, node, &cont);
1350 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1351 FlagsContinuation cont(kUnsignedLessThan, node);
1352 VisitFloat64Compare(this, node, &cont);
1356 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1357 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1358 VisitFloat64Compare(this, node, &cont);
// Lowers a call node into a kArchCall* instruction, preceded by kPPC_Push
// instructions for stack-passed arguments.  |handler| is the exception
// handler block, or nullptr if the call has no local handler.
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  PPCOperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);

  // If a frame state is required, it sits immediately after the descriptor's
  // regular inputs on the node.
  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on PPC it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(node, &buffer, true, false);

  // Push any stack arguments.
  // TODO(mbrandy): reverse order and use push only for first
  for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
    Emit(kPPC_Push, g.NoOutput(), g.UseRegister(*i));

  // Pass label of exception handler block.
  CallDescriptor::Flags flags = descriptor->flags();
  if (handler != nullptr) {
    flags |= CallDescriptor::kHasExceptionHandler;
    buffer.instruction_args.push_back(g.Label(handler));

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
  // Fold the descriptor flags into the opcode's misc field.
  opcode |= MiscField::encode(flags);

  // Emit the call instruction.
  InstructionOperand* first_output =
      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), first_output,
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  // Flag the emitted instruction as a call for downstream phases.
  call_instr->MarkAsCall();
1420 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1421 PPCOperandGenerator g(this);
1422 Emit(kPPC_Float64ExtractLowWord32, g.DefineAsRegister(node),
1423 g.UseRegister(node->InputAt(0)));
1427 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1428 PPCOperandGenerator g(this);
1429 Emit(kPPC_Float64ExtractHighWord32, g.DefineAsRegister(node),
1430 g.UseRegister(node->InputAt(0)));
// Combines Float64InsertLowWord32(Float64InsertHighWord32(x, hi), lo) into a
// single kPPC_Float64Construct(hi, lo) when this node can cover the inner
// insert; the original double x is fully overwritten and thus unused.
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  PPCOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    // Use the high word supplied to the inner insert instead of the inner
    // insert's result.
    left = left->InputAt(1);
    Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(left),
         g.UseRegister(right));
  // Fallback: replace only the low word of the input double in place.
  Emit(kPPC_Float64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
// Mirror of VisitFloat64InsertLowWord32: combines
// Float64InsertHighWord32(Float64InsertLowWord32(x, lo), hi) into a single
// kPPC_Float64Construct(hi, lo) when this node can cover the inner insert.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  PPCOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    // Use the low word supplied to the inner insert; note the operand order
    // is swapped relative to the low-word case (high word first).
    left = left->InputAt(1);
    Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(right),
         g.UseRegister(left));
  // Fallback: replace only the high word of the input double in place.
  Emit(kPPC_Float64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
1467 MachineOperatorBuilder::Flags
1468 InstructionSelector::SupportedMachineOperatorFlags() {
1469 return MachineOperatorBuilder::kFloat64Max |
1470 MachineOperatorBuilder::kFloat64Min |
1471 MachineOperatorBuilder::kFloat64RoundDown |
1472 MachineOperatorBuilder::kFloat64RoundTruncate |
1473 MachineOperatorBuilder::kFloat64RoundTiesAway;
1474 // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
1477 } // namespace compiler
1478 } // namespace internal