1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/base/bits.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
// Debug-tracing helpers: print the selector function and source line
// (TRACE_UNIMPL for unimplemented lowering paths, TRACE for general use).
14 #define TRACE_UNIMPL() \
15 PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
// General-purpose tracing variant of the macro above.
17 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
20 // Adds Mips-specific methods for generating InstructionOperands.
// NOTE(review): interior lines of this class are elided in this listing
// (the original line numbers jump) -- e.g. access labels, the declaration
// of `value`, the switch case labels, and the closing braces.
21 class Mips64OperandGenerator FINAL : public OperandGenerator {
23 explicit Mips64OperandGenerator(InstructionSelector* selector)
24 : OperandGenerator(selector) {}
// Returns an immediate operand when the constant node fits the opcode's
// immediate field, otherwise a register operand.
26 InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
27 if (CanBeImmediate(node, opcode)) {
28 return UseImmediate(node);
30 return UseRegister(node);
// Checks whether an Int32/Int64 constant node fits the immediate field
// of the given arch opcode.  NOTE(review): the case labels of the switch
// are elided here; the width checks below presumably distinguish
// 5/6-bit shift amounts from 16-bit memory offsets -- verify upstream.
33 bool CanBeImmediate(Node* node, InstructionCode opcode) {
35 if (node->opcode() == IrOpcode::kInt32Constant)
36 value = OpParameter<int32_t>(node);
37 else if (node->opcode() == IrOpcode::kInt64Constant)
38 value = OpParameter<int64_t>(node);
41 switch (ArchOpcodeField::decode(opcode)) {
45 return is_uint5(value);
49 return is_uint6(value);
51 return is_uint16(value);
// Offset plus one word must still fit the signed 16-bit field.
54 return is_int16(value + kIntSize);
56 return is_int16(value);
// NOTE(review): body elided in this listing.
61 bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
// Emits a one-input, one-output instruction with both the result and the
// input in registers.  NOTE(review): the `Node* node` parameter line and
// the closing brace are elided in this listing.
68 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
70 Mips64OperandGenerator g(selector);
71 selector->Emit(opcode, g.DefineAsRegister(node),
72 g.UseRegister(node->InputAt(0)));
// Emits a two-input, one-output instruction with both inputs in
// registers.  NOTE(review): the `Node* node` parameter line is elided.
76 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
78 Mips64OperandGenerator g(selector);
79 selector->Emit(opcode, g.DefineAsRegister(node),
80 g.UseRegister(node->InputAt(0)),
81 g.UseRegister(node->InputAt(1)));
// Emits a register/operand instruction: the second input may be selected
// as an immediate when it fits the opcode (see UseOperand).
85 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
87 Mips64OperandGenerator g(selector);
88 selector->Emit(opcode, g.DefineAsRegister(node),
89 g.UseRegister(node->InputAt(0)),
90 g.UseOperand(node->InputAt(1), opcode));
// Shared lowering for binary operations with a flags continuation:
// collects the value inputs (left in a register, right possibly as an
// immediate), appends branch labels when the continuation is a branch,
// and defines outputs for the value and, if present, the materialized
// flag result.
94 static void VisitBinop(InstructionSelector* selector, Node* node,
95 InstructionCode opcode, FlagsContinuation* cont) {
96 Mips64OperandGenerator g(selector);
97 Int32BinopMatcher m(node);
98 InstructionOperand inputs[4];
99 size_t input_count = 0;
100 InstructionOperand outputs[2];
101 size_t output_count = 0;
103 inputs[input_count++] = g.UseRegister(m.left().node());
104 inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
106 if (cont->IsBranch()) {
107 inputs[input_count++] = g.Label(cont->true_block());
108 inputs[input_count++] = g.Label(cont->false_block());
111 outputs[output_count++] = g.DefineAsRegister(node);
// A set-style continuation materializes the condition into its own
// register.  NOTE(review): the guarding `if (cont->IsSet())` line is
// elided in this listing.
113 outputs[output_count++] = g.DefineAsRegister(cont->result());
116 DCHECK_NE(0u, input_count);
117 DCHECK_NE(0u, output_count);
118 DCHECK_GE(arraysize(inputs), input_count);
119 DCHECK_GE(arraysize(outputs), output_count);
121 selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
126 static void VisitBinop(InstructionSelector* selector, Node* node,
127 InstructionCode opcode) {
128 FlagsContinuation cont;
129 VisitBinop(selector, node, opcode, &cont);
// Lowers a load: picks the machine opcode from the load representation,
// then emits either [base + immediate-index] directly or computes
// base+index into a temporary register first.  NOTE(review): most switch
// case labels and the variable declarations are elided in this listing.
133 void InstructionSelector::VisitLoad(Node* node) {
134 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
135 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
136 Mips64OperandGenerator g(this);
137 Node* base = node->InputAt(0);
138 Node* index = node->InputAt(1);
143 opcode = kMips64Lwc1;
146 opcode = kMips64Ldc1;
148 case kRepBit: // Fall through.
// Unsigned types select the zero-extending load variants (Lbu/Lhu).
150 opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
153 opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
158 case kRepTagged: // Fall through.
167 if (g.CanBeImmediate(index, opcode)) {
168 Emit(opcode | AddressingModeField::encode(kMode_MRI),
169 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
// Index does not fit the immediate field: materialize the effective
// address with a 64-bit add, then load from [addr_reg + 0].
171 InstructionOperand addr_reg = g.TempRegister();
172 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
173 g.UseRegister(index), g.UseRegister(base));
174 // Emit desired load opcode, using temp addr_reg.
175 Emit(opcode | AddressingModeField::encode(kMode_MRI),
176 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
// Lowers a store: tagged stores that need a full write barrier go
// through kMips64StoreWriteBarrier with fixed registers; otherwise the
// store opcode is chosen from the representation and the address is
// formed like in VisitLoad.
181 void InstructionSelector::VisitStore(Node* node) {
182 Mips64OperandGenerator g(this);
183 Node* base = node->InputAt(0);
184 Node* index = node->InputAt(1);
185 Node* value = node->InputAt(2);
187 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
188 MachineType rep = RepresentationOf(store_rep.machine_type());
189 if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
190 DCHECK(rep == kRepTagged);
191 // TODO(dcarney): refactor RecordWrite function to take temp registers
192 // and pass them here instead of using fixed regs
193 // TODO(dcarney): handle immediate indices.
194 InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
195 Emit(kMips64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
196 g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
199 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
// NOTE(review): the switch over `rep` is elided in this listing except
// for the float cases and two fall-through labels.
204 opcode = kMips64Swc1;
207 opcode = kMips64Sdc1;
209 case kRepBit: // Fall through.
219 case kRepTagged: // Fall through.
228 if (g.CanBeImmediate(index, opcode)) {
229 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
230 g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
// Non-immediate index: compute base+index first, then store at offset 0.
232 InstructionOperand addr_reg = g.TempRegister();
233 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
234 g.UseRegister(index), g.UseRegister(base));
235 // Emit desired store opcode, using temp addr_reg.
236 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
237 addr_reg, g.TempImmediate(0), g.UseRegister(value));
// 32-bit bitwise AND; shares the kMips64And opcode with the 64-bit op.
242 void InstructionSelector::VisitWord32And(Node* node) {
243 VisitBinop(this, node, kMips64And);
// 64-bit bitwise AND.
247 void InstructionSelector::VisitWord64And(Node* node) {
248 VisitBinop(this, node, kMips64And);
// 32-bit bitwise OR; shares the kMips64Or opcode with the 64-bit op.
252 void InstructionSelector::VisitWord32Or(Node* node) {
253 VisitBinop(this, node, kMips64Or);
// 64-bit bitwise OR.
257 void InstructionSelector::VisitWord64Or(Node* node) {
258 VisitBinop(this, node, kMips64Or);
// 32-bit bitwise XOR; shares the kMips64Xor opcode with the 64-bit op.
262 void InstructionSelector::VisitWord32Xor(Node* node) {
263 VisitBinop(this, node, kMips64Xor);
// 64-bit bitwise XOR.
267 void InstructionSelector::VisitWord64Xor(Node* node) {
268 VisitBinop(this, node, kMips64Xor);
// 32-bit shift left; the shift amount may be an immediate (VisitRRO).
272 void InstructionSelector::VisitWord32Shl(Node* node) {
273 VisitRRO(this, kMips64Shl, node);
// 32-bit logical shift right.
277 void InstructionSelector::VisitWord32Shr(Node* node) {
278 VisitRRO(this, kMips64Shr, node);
// 32-bit arithmetic shift right.
282 void InstructionSelector::VisitWord32Sar(Node* node) {
283 VisitRRO(this, kMips64Sar, node);
// 64-bit shift left.
287 void InstructionSelector::VisitWord64Shl(Node* node) {
288 VisitRRO(this, kMips64Dshl, node);
// 64-bit logical shift right.
292 void InstructionSelector::VisitWord64Shr(Node* node) {
293 VisitRRO(this, kMips64Dshr, node);
// 64-bit arithmetic shift right.
297 void InstructionSelector::VisitWord64Sar(Node* node) {
298 VisitRRO(this, kMips64Dsar, node);
// 32-bit rotate right.
302 void InstructionSelector::VisitWord32Ror(Node* node) {
303 VisitRRO(this, kMips64Ror, node);
307 void InstructionSelector::VisitWord32Clz(Node* node) {
308 Mips64OperandGenerator g(this);
309 Emit(kMips64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
// 64-bit rotate right.
313 void InstructionSelector::VisitWord64Ror(Node* node) {
314 VisitRRO(this, kMips64Dror, node);
318 void InstructionSelector::VisitInt32Add(Node* node) {
319 Mips64OperandGenerator g(this);
320 // TODO(plind): Consider multiply & add optimization from arm port.
321 VisitBinop(this, node, kMips64Add);
325 void InstructionSelector::VisitInt64Add(Node* node) {
326 Mips64OperandGenerator g(this);
327 // TODO(plind): Consider multiply & add optimization from arm port.
328 VisitBinop(this, node, kMips64Dadd);
// 32-bit integer subtraction.
332 void InstructionSelector::VisitInt32Sub(Node* node) {
333 VisitBinop(this, node, kMips64Sub);
// 64-bit integer subtraction.
337 void InstructionSelector::VisitInt64Sub(Node* node) {
338 VisitBinop(this, node, kMips64Dsub);
// Strength-reduces multiplication by positive constants: a power of two
// becomes a shift, 2^k+1 becomes shift+add, 2^k-1 becomes shift+sub;
// everything else emits a plain kMips64Mul.  NOTE(review): the early
// `return`s and closing braces of the constant paths are elided in this
// listing.
342 void InstructionSelector::VisitInt32Mul(Node* node) {
343 Mips64OperandGenerator g(this);
344 Int32BinopMatcher m(node);
345 if (m.right().HasValue() && m.right().Value() > 0) {
346 int32_t value = m.right().Value();
347 if (base::bits::IsPowerOfTwo32(value)) {
348 Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
349 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
350 g.TempImmediate(WhichPowerOf2(value)));
// value == 2^k + 1  =>  (left << k) + left.
353 if (base::bits::IsPowerOfTwo32(value - 1)) {
354 InstructionOperand temp = g.TempRegister();
355 Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
356 g.UseRegister(m.left().node()),
357 g.TempImmediate(WhichPowerOf2(value - 1)));
358 Emit(kMips64Add | AddressingModeField::encode(kMode_None),
359 g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
// value == 2^k - 1  =>  (left << k) - left.
362 if (base::bits::IsPowerOfTwo32(value + 1)) {
363 InstructionOperand temp = g.TempRegister();
364 Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
365 g.UseRegister(m.left().node()),
366 g.TempImmediate(WhichPowerOf2(value + 1)));
367 Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
368 g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
// General case: plain multiply.
372 Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
373 g.UseRegister(m.right().node()));
377 void InstructionSelector::VisitInt32MulHigh(Node* node) {
378 Mips64OperandGenerator g(this);
379 Emit(kMips64MulHigh, g.DefineAsRegister(node),
380 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
384 void InstructionSelector::VisitUint32MulHigh(Node* node) {
385 Mips64OperandGenerator g(this);
386 InstructionOperand const dmul_operand = g.TempRegister();
387 Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
388 g.UseRegister(node->InputAt(1)));
389 Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
390 g.TempImmediate(32));
// Same strength reduction as VisitInt32Mul, using the 64-bit opcodes.
// NOTE(review): IsPowerOfTwo32 truncates the 64-bit constant to 32 bits,
// so values such as 2^32+1 could wrongly match -- verify; the TODO below
// suggests shifts larger than 32 are knowingly not handled yet.
// NOTE(review): the early `return`s and closing braces are elided.
394 void InstructionSelector::VisitInt64Mul(Node* node) {
395 Mips64OperandGenerator g(this);
396 Int64BinopMatcher m(node);
397 // TODO(dusmil): Add optimization for shifts larger than 32.
398 if (m.right().HasValue() && m.right().Value() > 0) {
399 int64_t value = m.right().Value();
400 if (base::bits::IsPowerOfTwo32(value)) {
401 Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
402 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
403 g.TempImmediate(WhichPowerOf2(value)));
// value == 2^k + 1  =>  (left << k) + left.
406 if (base::bits::IsPowerOfTwo32(value - 1)) {
407 InstructionOperand temp = g.TempRegister();
408 Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
409 g.UseRegister(m.left().node()),
410 g.TempImmediate(WhichPowerOf2(value - 1)));
411 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
412 g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
// value == 2^k - 1  =>  (left << k) - left.
415 if (base::bits::IsPowerOfTwo32(value + 1)) {
416 InstructionOperand temp = g.TempRegister();
417 Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
418 g.UseRegister(m.left().node()),
419 g.TempImmediate(WhichPowerOf2(value + 1)));
420 Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
421 g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
// General case: plain 64-bit multiply.
425 Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
426 g.UseRegister(m.right().node()));
430 void InstructionSelector::VisitInt32Div(Node* node) {
431 Mips64OperandGenerator g(this);
432 Int32BinopMatcher m(node);
433 Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
434 g.UseRegister(m.right().node()));
438 void InstructionSelector::VisitUint32Div(Node* node) {
439 Mips64OperandGenerator g(this);
440 Int32BinopMatcher m(node);
441 Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
442 g.UseRegister(m.right().node()));
446 void InstructionSelector::VisitInt32Mod(Node* node) {
447 Mips64OperandGenerator g(this);
448 Int32BinopMatcher m(node);
449 Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
450 g.UseRegister(m.right().node()));
454 void InstructionSelector::VisitUint32Mod(Node* node) {
455 Mips64OperandGenerator g(this);
456 Int32BinopMatcher m(node);
457 Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
458 g.UseRegister(m.right().node()));
462 void InstructionSelector::VisitInt64Div(Node* node) {
463 Mips64OperandGenerator g(this);
464 Int64BinopMatcher m(node);
465 Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
466 g.UseRegister(m.right().node()));
470 void InstructionSelector::VisitUint64Div(Node* node) {
471 Mips64OperandGenerator g(this);
472 Int64BinopMatcher m(node);
473 Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
474 g.UseRegister(m.right().node()));
478 void InstructionSelector::VisitInt64Mod(Node* node) {
479 Mips64OperandGenerator g(this);
480 Int64BinopMatcher m(node);
481 Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
482 g.UseRegister(m.right().node()));
486 void InstructionSelector::VisitUint64Mod(Node* node) {
487 Mips64OperandGenerator g(this);
488 Int64BinopMatcher m(node);
489 Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
490 g.UseRegister(m.right().node()));
494 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
495 Mips64OperandGenerator g(this);
496 Emit(kMips64CvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
500 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
501 Mips64OperandGenerator g(this);
502 Emit(kMips64CvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
506 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
507 Mips64OperandGenerator g(this);
508 Emit(kMips64CvtDUw, g.DefineAsRegister(node),
509 g.UseRegister(node->InputAt(0)));
513 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
514 Mips64OperandGenerator g(this);
515 Emit(kMips64TruncWD, g.DefineAsRegister(node),
516 g.UseRegister(node->InputAt(0)));
520 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
521 Mips64OperandGenerator g(this);
522 Emit(kMips64TruncUwD, g.DefineAsRegister(node),
523 g.UseRegister(node->InputAt(0)));
// Sign-extends int32 to int64 via a 32-bit shift.  NOTE(review): the
// shift-amount operand line and closing brace are elided in this
// listing; presumably a zero-amount kMips64Shl is used so the 32-bit
// result is sign-extended -- verify against upstream.
527 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
528 Mips64OperandGenerator g(this);
529 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
534 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
535 Mips64OperandGenerator g(this);
536 Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
537 g.TempImmediate(0), g.TempImmediate(32));
541 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
542 Mips64OperandGenerator g(this);
543 Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
544 g.TempImmediate(0), g.TempImmediate(32));
548 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
549 Mips64OperandGenerator g(this);
550 Emit(kMips64CvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
// Double-precision addition; plain three-register lowering.
554 void InstructionSelector::VisitFloat64Add(Node* node) {
555 VisitRRR(this, kMips64AddD, node);
// Lowers f64 subtraction; the pattern -0.0 - RoundDown(-0.0 - x) is
// recognized and folded into a single round-up operation.
// NOTE(review): the early `return` and closing braces of the pattern
// match are elided in this listing.
559 void InstructionSelector::VisitFloat64Sub(Node* node) {
560 Mips64OperandGenerator g(this);
561 Float64BinopMatcher m(node);
562 if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
563 CanCover(m.node(), m.right().node())) {
564 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
565 CanCover(m.right().node(), m.right().InputAt(0))) {
566 Float64BinopMatcher mright0(m.right().InputAt(0));
567 if (mright0.left().IsMinusZero()) {
// Fold -0.0 - RoundDown(-0.0 - x)  =>  RoundUp(x).
568 Emit(kMips64Float64RoundUp, g.DefineAsRegister(node),
569 g.UseRegister(mright0.right().node()));
// Default: plain double subtraction.
574 VisitRRR(this, kMips64SubD, node);
// Double-precision multiplication.
578 void InstructionSelector::VisitFloat64Mul(Node* node) {
579 VisitRRR(this, kMips64MulD, node);
// Double-precision division.
583 void InstructionSelector::VisitFloat64Div(Node* node) {
584 VisitRRR(this, kMips64DivD, node);
588 void InstructionSelector::VisitFloat64Mod(Node* node) {
589 Mips64OperandGenerator g(this);
590 Emit(kMips64ModD, g.DefineAsFixed(node, f0),
591 g.UseFixed(node->InputAt(0), f12),
592 g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
// Unreachable: Float64Max is not advertised in
// SupportedMachineOperatorFlags, so the selector never sees this node.
596 void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
// Unreachable: Float64Min is not advertised in
// SupportedMachineOperatorFlags, so the selector never sees this node.
599 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
602 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
603 Mips64OperandGenerator g(this);
604 Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
// Round toward -infinity (advertised via kFloat64RoundDown).
608 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
609 VisitRR(this, kMips64Float64RoundDown, node);
// Round toward zero (advertised via kFloat64RoundTruncate).
613 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
614 VisitRR(this, kMips64Float64RoundTruncate, node);
// NOTE(review): body elided in this listing.  Round-ties-away is not in
// the advertised machine-operator flags, so this presumably aborts.
618 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
// Lowers a call: builds the call buffer, claims stack space and stores
// the pushed arguments, appends the exception-handler label when one is
// present, then emits the selected call opcode and marks the instruction
// as a call.  NOTE(review): several lines (loop header tail, switch
// braces/default) are elided in this listing.
623 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
624 Mips64OperandGenerator g(this);
625 const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
627 FrameStateDescriptor* frame_state_descriptor = NULL;
628 if (descriptor->NeedsFrameState()) {
629 frame_state_descriptor =
630 GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
633 CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
635 // Compute InstructionOperands for inputs and outputs.
636 InitializeCallBuffer(node, &buffer, true, false);
// NOTE(review): size() is size_t; the implicit narrowing to int here is
// benign only while argument counts stay small.
638 int push_count = buffer.pushed_nodes.size();
639 if (push_count > 0) {
640 Emit(kMips64StackClaim | MiscField::encode(push_count), g.NoOutput());
642 int slot = buffer.pushed_nodes.size() - 1;
643 for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
645 Emit(kMips64StoreToStackSlot | MiscField::encode(slot), g.NoOutput(),
650 // Pass label of exception handler block.
651 CallDescriptor::Flags flags = descriptor->flags();
652 if (handler != nullptr) {
653 flags |= CallDescriptor::kHasExceptionHandler;
654 buffer.instruction_args.push_back(g.Label(handler));
657 // Select the appropriate opcode based on the call type.
658 InstructionCode opcode;
659 switch (descriptor->kind()) {
660 case CallDescriptor::kCallCodeObject: {
661 opcode = kArchCallCodeObject;
664 case CallDescriptor::kCallJSFunction:
665 opcode = kArchCallJSFunction;
671 opcode |= MiscField::encode(flags);
673 // Emit the call instruction.
674 Instruction* call_instr =
675 Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
676 buffer.instruction_args.size(), &buffer.instruction_args.front());
678 call_instr->MarkAsCall();
// Lowers a bounds-checked load: opcode chosen from the representation,
// offset selected as an immediate when possible.  NOTE(review): the
// switch case labels and default are elided in this listing.
682 void InstructionSelector::VisitCheckedLoad(Node* node) {
683 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
684 MachineType typ = TypeOf(OpParameter<MachineType>(node));
685 Mips64OperandGenerator g(this);
686 Node* const buffer = node->InputAt(0);
687 Node* const offset = node->InputAt(1);
688 Node* const length = node->InputAt(2);
692 opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
695 opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
698 opcode = kCheckedLoadWord32;
701 opcode = kCheckedLoadFloat32;
704 opcode = kCheckedLoadFloat64;
710 InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
711 ? g.UseImmediate(offset)
712 : g.UseRegister(offset);
// The length is only allowed to be an immediate when the offset is NOT
// one; otherwise it is forced into a register.
714 InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
715 ? g.CanBeImmediate(length, opcode)
716 ? g.UseImmediate(length)
717 : g.UseRegister(length)
718 : g.UseRegister(length);
720 Emit(opcode | AddressingModeField::encode(kMode_MRI),
721 g.DefineAsRegister(node), offset_operand, length_operand,
722 g.UseRegister(buffer));
// Lowers a bounds-checked store; mirrors VisitCheckedLoad but produces
// no output and passes the stored value as an extra register input.
// NOTE(review): the switch case labels and default are elided here.
726 void InstructionSelector::VisitCheckedStore(Node* node) {
727 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
728 Mips64OperandGenerator g(this);
729 Node* const buffer = node->InputAt(0);
730 Node* const offset = node->InputAt(1);
731 Node* const length = node->InputAt(2);
732 Node* const value = node->InputAt(3);
736 opcode = kCheckedStoreWord8;
739 opcode = kCheckedStoreWord16;
742 opcode = kCheckedStoreWord32;
745 opcode = kCheckedStoreFloat32;
748 opcode = kCheckedStoreFloat64;
754 InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
755 ? g.UseImmediate(offset)
756 : g.UseRegister(offset);
// The length is only allowed to be an immediate when the offset is NOT
// one; otherwise it is forced into a register.
758 InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
759 ? g.CanBeImmediate(length, opcode)
760 ? g.UseImmediate(length)
761 : g.UseRegister(length)
762 : g.UseRegister(length);
764 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
765 offset_operand, length_operand, g.UseRegister(value),
766 g.UseRegister(buffer));
772 // Shared routine for multiple compare operations.
// Emits the flag-encoded compare either as a branch (with true/false
// labels) or as a materialized boolean into the continuation's result
// register.  NOTE(review): the `} else {` line between the two arms is
// elided in this listing.
773 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
774 InstructionOperand left, InstructionOperand right,
775 FlagsContinuation* cont) {
776 Mips64OperandGenerator g(selector);
777 opcode = cont->Encode(opcode);
778 if (cont->IsBranch()) {
779 selector->Emit(opcode, g.NoOutput(), left, right,
780 g.Label(cont->true_block()), g.Label(cont->false_block()));
782 DCHECK(cont->IsSet());
783 selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
788 // Shared routine for multiple float compare operations.
// Both operands always live in FP registers.  NOTE(review): the final
// `cont)` argument line and closing brace are elided in this listing.
789 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
790 FlagsContinuation* cont) {
791 Mips64OperandGenerator g(selector);
792 Node* left = node->InputAt(0);
793 Node* right = node->InputAt(1);
794 VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
799 // Shared routine for multiple word compare operations.
// Prefers an immediate on either side; when the immediate ends up on
// the left, the continuation's condition is commuted unless the opcode
// is commutative.  NOTE(review): the `bool commutative` parameter line,
// the trailing `cont)` argument lines and closing braces are elided.
800 void VisitWordCompare(InstructionSelector* selector, Node* node,
801 InstructionCode opcode, FlagsContinuation* cont,
803 Mips64OperandGenerator g(selector);
804 Node* left = node->InputAt(0);
805 Node* right = node->InputAt(1);
807 // Match immediates on left or right side of comparison.
808 if (g.CanBeImmediate(right, opcode)) {
809 VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
811 } else if (g.CanBeImmediate(left, opcode)) {
812 if (!commutative) cont->Commute();
813 VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
816 VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
// 32-bit word compare; treated as non-commutative.
822 void VisitWord32Compare(InstructionSelector* selector, Node* node,
823 FlagsContinuation* cont) {
824 VisitWordCompare(selector, node, kMips64Cmp, cont, false);
// 64-bit word compare; shares kMips64Cmp with the 32-bit variant.
828 void VisitWord64Compare(InstructionSelector* selector, Node* node,
829 FlagsContinuation* cont) {
830 VisitWordCompare(selector, node, kMips64Cmp, cont, false);
// Compares `value` against zero, either branching or materializing the
// flag into the continuation's result.  NOTE(review): the `} else {`
// line and trailing lines are elided in this listing.
836 void EmitWordCompareZero(InstructionSelector* selector, Node* value,
837 FlagsContinuation* cont) {
838 Mips64OperandGenerator g(selector);
839 InstructionCode opcode = cont->Encode(kMips64Cmp);
840 InstructionOperand const value_operand = g.UseRegister(value);
841 if (cont->IsBranch()) {
842 selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
843 g.Label(cont->true_block()), g.Label(cont->false_block()));
845 selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
851 // Shared routine for word comparisons against zero.
// Walks covered producer chains so comparisons (including x == 0
// inversions and overflow projections) fuse directly into the
// continuation; falls back to an explicit compare against zero.
// NOTE(review): many `break`s, closing braces and the negation lines
// are elided in this listing.
852 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
853 Node* value, FlagsContinuation* cont) {
854 while (selector->CanCover(user, value)) {
855 switch (value->opcode()) {
856 case IrOpcode::kWord32Equal: {
857 // Combine with comparisons against 0 by simply inverting the
859 Int32BinopMatcher m(value);
860 if (m.right().Is(0)) {
862 value = m.left().node();
866 cont->OverwriteAndNegateIfEqual(kEqual);
867 return VisitWord32Compare(selector, value, cont);
869 case IrOpcode::kInt32LessThan:
870 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
871 return VisitWord32Compare(selector, value, cont);
872 case IrOpcode::kInt32LessThanOrEqual:
873 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
874 return VisitWord32Compare(selector, value, cont);
875 case IrOpcode::kUint32LessThan:
876 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
877 return VisitWord32Compare(selector, value, cont);
878 case IrOpcode::kUint32LessThanOrEqual:
879 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
880 return VisitWord32Compare(selector, value, cont);
881 case IrOpcode::kWord64Equal: {
882 // Combine with comparisons against 0 by simply inverting the
884 Int64BinopMatcher m(value);
885 if (m.right().Is(0)) {
887 value = m.left().node();
891 cont->OverwriteAndNegateIfEqual(kEqual);
892 return VisitWord64Compare(selector, value, cont);
894 case IrOpcode::kInt64LessThan:
895 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
896 return VisitWord64Compare(selector, value, cont);
897 case IrOpcode::kInt64LessThanOrEqual:
898 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
899 return VisitWord64Compare(selector, value, cont);
900 case IrOpcode::kUint64LessThan:
901 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
902 return VisitWord64Compare(selector, value, cont);
// Float compares use the unsigned condition codes.
903 case IrOpcode::kFloat64Equal:
904 cont->OverwriteAndNegateIfEqual(kEqual);
905 return VisitFloat64Compare(selector, value, cont);
906 case IrOpcode::kFloat64LessThan:
907 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
908 return VisitFloat64Compare(selector, value, cont);
909 case IrOpcode::kFloat64LessThanOrEqual:
910 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
911 return VisitFloat64Compare(selector, value, cont);
912 case IrOpcode::kProjection:
913 // Check if this is the overflow output projection of an
914 // <Operation>WithOverflow node.
915 if (ProjectionIndexOf(value->op()) == 1u) {
916 // We cannot combine the <Operation>WithOverflow with this branch
917 // unless the 0th projection (the use of the actual value of the
918 // <Operation> is either NULL, which means there's no use of the
919 // actual value, or was already defined, which means it is scheduled
920 // *AFTER* this branch).
921 Node* const node = value->InputAt(0);
922 Node* const result = NodeProperties::FindProjection(node, 0);
923 if (result == NULL || selector->IsDefined(result)) {
924 switch (node->opcode()) {
// 32-bit overflow ops use the 64-bit add/sub opcodes here --
// presumably overflow is detected from the widened result; verify.
925 case IrOpcode::kInt32AddWithOverflow:
926 cont->OverwriteAndNegateIfEqual(kOverflow);
927 return VisitBinop(selector, node, kMips64Dadd, cont);
928 case IrOpcode::kInt32SubWithOverflow:
929 cont->OverwriteAndNegateIfEqual(kOverflow);
930 return VisitBinop(selector, node, kMips64Dsub, cont);
// (x & mask) compared against zero maps onto a test instruction.
937 case IrOpcode::kWord32And:
938 case IrOpcode::kWord64And:
939 return VisitWordCompare(selector, value, kMips64Tst, cont, true);
946 // Continuation could not be combined with a compare, emit compare against 0.
947 EmitWordCompareZero(selector, value, cont);
// Branch lowering: tries to fuse the condition computation into the
// branch via VisitWordCompareZero (kNotEqual: take the true block when
// the condition value is non-zero).
951 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
952 BasicBlock* fbranch) {
953 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
954 VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
// Chooses between a jump table (ArchTableSwitch) and a linear chain of
// conditional jumps (ArchLookupSwitch) using simple space/time cost
// estimates.  NOTE(review): the guard around the min-value bias
// subtraction and some closing braces are elided in this listing.
958 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
959 Mips64OperandGenerator g(this);
960 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
962 // Emit either ArchTableSwitch or ArchLookupSwitch.
963 size_t table_space_cost = 10 + 2 * sw.value_range;
964 size_t table_time_cost = 3;
965 size_t lookup_space_cost = 2 + 2 * sw.case_count;
966 size_t lookup_time_cost = sw.case_count;
967 if (sw.case_count > 0 &&
968 table_space_cost + 3 * table_time_cost <=
969 lookup_space_cost + 3 * lookup_time_cost &&
970 sw.min_value > std::numeric_limits<int32_t>::min()) {
971 InstructionOperand index_operand = value_operand;
// Bias the switch value so the table is indexed from zero.
973 index_operand = g.TempRegister();
974 Emit(kMips64Sub, index_operand, value_operand,
975 g.TempImmediate(sw.min_value));
977 // Generate a table lookup.
978 return EmitTableSwitch(sw, index_operand);
981 // Generate a sequence of conditional jumps.
982 return EmitLookupSwitch(sw, value_operand);
// Equality against 0 is routed through VisitWordCompareZero so it can
// fuse with the compared value's producer.
986 void InstructionSelector::VisitWord32Equal(Node* const node) {
987 FlagsContinuation cont(kEqual, node);
988 Int32BinopMatcher m(node);
989 if (m.right().Is(0)) {
990 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
993 VisitWord32Compare(this, node, &cont);
// Signed 32-bit less-than, materialized into `node`.
997 void InstructionSelector::VisitInt32LessThan(Node* node) {
998 FlagsContinuation cont(kSignedLessThan, node);
999 VisitWord32Compare(this, node, &cont);
// Signed 32-bit less-than-or-equal.
1003 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1004 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1005 VisitWord32Compare(this, node, &cont);
// Unsigned 32-bit less-than.
1009 void InstructionSelector::VisitUint32LessThan(Node* node) {
1010 FlagsContinuation cont(kUnsignedLessThan, node);
1011 VisitWord32Compare(this, node, &cont);
// Unsigned 32-bit less-than-or-equal.
1015 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1016 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1017 VisitWord32Compare(this, node, &cont);
// 32-bit add with overflow: fuses with the overflow projection when it
// exists, otherwise still computes the value with an empty continuation.
// Uses the 64-bit add opcode -- presumably overflow is derived from the
// widened result; verify against the code generator.
1021 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1022 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1023 FlagsContinuation cont(kOverflow, ovf);
1024 return VisitBinop(this, node, kMips64Dadd, &cont);
1026 FlagsContinuation cont;
1027 VisitBinop(this, node, kMips64Dadd, &cont);
// 32-bit subtract with overflow; mirrors VisitInt32AddWithOverflow but
// with the 64-bit subtract opcode.
1031 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1032 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1033 FlagsContinuation cont(kOverflow, ovf);
1034 return VisitBinop(this, node, kMips64Dsub, &cont);
1036 FlagsContinuation cont;
1037 VisitBinop(this, node, kMips64Dsub, &cont);
// 64-bit equality; comparison against 0 fuses via VisitWordCompareZero.
1041 void InstructionSelector::VisitWord64Equal(Node* const node) {
1042 FlagsContinuation cont(kEqual, node);
1043 Int64BinopMatcher m(node);
1044 if (m.right().Is(0)) {
1045 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1048 VisitWord64Compare(this, node, &cont);
// Signed 64-bit less-than.
1052 void InstructionSelector::VisitInt64LessThan(Node* node) {
1053 FlagsContinuation cont(kSignedLessThan, node);
1054 VisitWord64Compare(this, node, &cont);
// Signed 64-bit less-than-or-equal.
1058 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
1059 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1060 VisitWord64Compare(this, node, &cont);
// Unsigned 64-bit less-than.
1064 void InstructionSelector::VisitUint64LessThan(Node* node) {
1065 FlagsContinuation cont(kUnsignedLessThan, node);
1066 VisitWord64Compare(this, node, &cont);
// Float64 equality (kEqual condition).
1070 void InstructionSelector::VisitFloat64Equal(Node* node) {
1071 FlagsContinuation cont(kEqual, node);
1072 VisitFloat64Compare(this, node, &cont);
// Float64 less-than; FP compares use the unsigned condition codes.
1076 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1077 FlagsContinuation cont(kUnsignedLessThan, node);
1078 VisitFloat64Compare(this, node, &cont);
// Float64 less-than-or-equal.
1082 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1083 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1084 VisitFloat64Compare(this, node, &cont);
1088 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1089 Mips64OperandGenerator g(this);
1090 Emit(kMips64Float64ExtractLowWord32, g.DefineAsRegister(node),
1091 g.UseRegister(node->InputAt(0)));
1095 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1096 Mips64OperandGenerator g(this);
1097 Emit(kMips64Float64ExtractHighWord32, g.DefineAsRegister(node),
1098 g.UseRegister(node->InputAt(0)));
1102 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1103 Mips64OperandGenerator g(this);
1104 Node* left = node->InputAt(0);
1105 Node* right = node->InputAt(1);
1106 Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
1107 g.UseRegister(left), g.UseRegister(right));
1111 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1112 Mips64OperandGenerator g(this);
1113 Node* left = node->InputAt(0);
1114 Node* right = node->InputAt(1);
1115 Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
1116 g.UseRegister(left), g.UseRegister(right));
// Advertises which optional machine operators this backend implements:
// only round-down and round-truncate (hence the Float64Max/Min and
// round-ties-away visitors above are never reached).
1121 MachineOperatorBuilder::Flags
1122 InstructionSelector::SupportedMachineOperatorFlags() {
1123 return MachineOperatorBuilder::kFloat64RoundDown |
1124 MachineOperatorBuilder::kFloat64RoundTruncate;
1127 } // namespace compiler
1128 } // namespace internal