1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/base/bits.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
14 #define TRACE_UNIMPL() \
15 PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
17 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
// Adds Mips-specific methods for generating InstructionOperands.
class Mips64OperandGenerator FINAL : public OperandGenerator {
  // Wraps the selector so the operand helpers below can allocate
  // virtual registers and immediates.
  explicit Mips64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Uses |node| as an immediate operand when |opcode| accepts it as such,
  // otherwise falls back to a register operand.
  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    return UseRegister(node);

  // Returns true if |node| is an integer constant that fits the immediate
  // field of |opcode|.
  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node);
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node);
    switch (ArchOpcodeField::decode(opcode)) {
        // 32-bit shift amounts: 5-bit unsigned immediate.
        return is_uint5(value);
        // 64-bit shift amounts: 6-bit unsigned immediate.
        return is_uint6(value);
        // Logical immediates are zero-extended 16-bit fields.
        return is_uint16(value);
        // NOTE(review): offset biased by kIntSize — presumably for an access
        // that also touches the adjacent word; confirm in the code generator.
        return is_int16(value + kIntSize);
        // Default MIPS I-type format: signed 16-bit immediate.
        return is_int16(value);

  // Overload that additionally takes the comparison condition into account:
  // unsigned 32-bit compares must not sign-extend their immediate.
  bool CanBeImmediate(Node* node, InstructionCode opcode,
                      FlagsContinuation* cont) {
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node);
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node);
    switch (ArchOpcodeField::decode(opcode)) {
        switch (cont->condition()) {
          case kUnsignedLessThan:
          case kUnsignedGreaterThanOrEqual:
          case kUnsignedLessThanOrEqual:
          case kUnsignedGreaterThan:
            // Immediate operands for unsigned 32-bit compare operations
            // should not be sign-extended.
            return is_uint15(value);
            // Signed compares accept the full signed 16-bit range.
            return is_int16(value);

  // NOTE(review): body not visible in this chunk; name suggests an ARM-style
  // helper — confirm whether it is used at all on MIPS64.
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
97 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
99 Mips64OperandGenerator g(selector);
100 selector->Emit(opcode, g.DefineAsRegister(node),
101 g.UseRegister(node->InputAt(0)));
105 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
107 Mips64OperandGenerator g(selector);
108 selector->Emit(opcode, g.DefineAsRegister(node),
109 g.UseRegister(node->InputAt(0)),
110 g.UseRegister(node->InputAt(1)));
114 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
116 Mips64OperandGenerator g(selector);
117 selector->Emit(opcode, g.DefineAsRegister(node),
118 g.UseRegister(node->InputAt(0)),
119 g.UseOperand(node->InputAt(1), opcode));
// Shared routine for binary operations, optionally fused with a flags
// continuation (branch or materialized boolean).
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // Left operand must be a register; the right operand may be an immediate
  // when |opcode| supports one.
  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);

  if (cont->IsBranch()) {
    // Branch continuations take both target labels as extra inputs.
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());

  outputs[output_count++] = g.DefineAsRegister(node);
  // NOTE(review): the two output pushes appear unconditional here — the
  // guard selecting between value output and flag-result output is not
  // visible in this chunk; confirm against upstream.
  outputs[output_count++] = g.DefineAsRegister(cont->result());

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  // Branch-fused instructions terminate their basic block.
  if (cont->IsBranch()) instr->MarkAsControl();
156 static void VisitBinop(InstructionSelector* selector, Node* node,
157 InstructionCode opcode) {
158 FlagsContinuation cont;
159 VisitBinop(selector, node, opcode, &cont);
// Selects a load instruction from the load representation, then picks
// base+immediate addressing when the index fits, otherwise materializes the
// effective address in a temp register.
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
      // 32-bit float load.
      opcode = kMips64Lwc1;
      // 64-bit float load.
      opcode = kMips64Ldc1;
    case kRepBit:  // Fall through.
      // Byte load, zero- or sign-extending depending on the type.
      opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
      // Halfword load, zero- or sign-extending depending on the type.
      opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
    case kRepTagged:  // Fall through.
  if (g.CanBeImmediate(index, opcode)) {
    // Base register + immediate offset.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
    // Index does not fit the immediate field: compute base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
// Selects a store instruction. Tagged stores that need a full write barrier
// are routed through the RecordWrite stub with fixed registers; all other
// stores pick a plain store opcode and the same addressing strategy as loads.
void InstructionSelector::VisitStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    // Write barriers only make sense for tagged values.
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
    Emit(kMips64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
         g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
      // 32-bit float store.
      opcode = kMips64Swc1;
      // 64-bit float store.
      opcode = kMips64Sdc1;
    case kRepBit:  // Fall through.
    case kRepTagged:  // Fall through.
  if (g.CanBeImmediate(index, opcode)) {
    // Base register + immediate offset.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
    // Index does not fit the immediate field: compute base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegister(value));
272 void InstructionSelector::VisitWord32And(Node* node) {
273 VisitBinop(this, node, kMips64And);
277 void InstructionSelector::VisitWord64And(Node* node) {
278 VisitBinop(this, node, kMips64And);
282 void InstructionSelector::VisitWord32Or(Node* node) {
283 VisitBinop(this, node, kMips64Or);
287 void InstructionSelector::VisitWord64Or(Node* node) {
288 VisitBinop(this, node, kMips64Or);
292 void InstructionSelector::VisitWord32Xor(Node* node) {
293 VisitBinop(this, node, kMips64Xor);
297 void InstructionSelector::VisitWord64Xor(Node* node) {
298 VisitBinop(this, node, kMips64Xor);
302 void InstructionSelector::VisitWord32Shl(Node* node) {
303 VisitRRO(this, kMips64Shl, node);
307 void InstructionSelector::VisitWord32Shr(Node* node) {
308 VisitRRO(this, kMips64Shr, node);
312 void InstructionSelector::VisitWord32Sar(Node* node) {
313 VisitRRO(this, kMips64Sar, node);
317 void InstructionSelector::VisitWord64Shl(Node* node) {
318 VisitRRO(this, kMips64Dshl, node);
322 void InstructionSelector::VisitWord64Shr(Node* node) {
323 VisitRRO(this, kMips64Dshr, node);
327 void InstructionSelector::VisitWord64Sar(Node* node) {
328 VisitRRO(this, kMips64Dsar, node);
332 void InstructionSelector::VisitWord32Ror(Node* node) {
333 VisitRRO(this, kMips64Ror, node);
337 void InstructionSelector::VisitWord64Ror(Node* node) {
338 VisitRRO(this, kMips64Dror, node);
342 void InstructionSelector::VisitInt32Add(Node* node) {
343 Mips64OperandGenerator g(this);
344 // TODO(plind): Consider multiply & add optimization from arm port.
345 VisitBinop(this, node, kMips64Add);
349 void InstructionSelector::VisitInt64Add(Node* node) {
350 Mips64OperandGenerator g(this);
351 // TODO(plind): Consider multiply & add optimization from arm port.
352 VisitBinop(this, node, kMips64Dadd);
356 void InstructionSelector::VisitInt32Sub(Node* node) {
357 VisitBinop(this, node, kMips64Sub);
361 void InstructionSelector::VisitInt64Sub(Node* node) {
362 VisitBinop(this, node, kMips64Dsub);
// 32-bit multiply with strength reduction: multiplications by 2^k, 2^k + 1
// and 2^k - 1 become a shift, shift+add, or shift+sub respectively.
void InstructionSelector::VisitInt32Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value)) {
      // x * 2^k => x << k
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      // x * (2^k + 1) => (x << k) + x
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      Emit(kMips64Add | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      // x * (2^k - 1) => (x << k) - x
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
  // General case: plain multiply with both operands in registers.
  Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
401 void InstructionSelector::VisitInt32MulHigh(Node* node) {
402 Mips64OperandGenerator g(this);
403 Emit(kMips64MulHigh, g.DefineAsRegister(node),
404 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
408 void InstructionSelector::VisitUint32MulHigh(Node* node) {
409 Mips64OperandGenerator g(this);
410 InstructionOperand const dmul_operand = g.TempRegister();
411 Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
412 g.UseRegister(node->InputAt(1)));
413 Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
414 g.TempImmediate(32));
// 64-bit multiply with the same strength reductions as the 32-bit variant.
void InstructionSelector::VisitInt64Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(dusmil): Add optimization for shifts larger than 32.
  if (m.right().HasValue() && m.right().Value() > 0) {
    // NOTE(review): |value| is int64_t but is tested with IsPowerOfTwo32
    // below, so powers of two >= 2^32 are never strength-reduced (they fall
    // through to the generic Dmul) — a missed optimization, not a miscompile;
    // consider IsPowerOfTwo64 if available.
    int64_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value)) {
      // x * 2^k => x << k
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      // x * (2^k + 1) => (x << k) + x
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      // x * (2^k - 1) => (x << k) - x
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
  // General case: plain 64-bit multiply.
  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
454 void InstructionSelector::VisitInt32Div(Node* node) {
455 Mips64OperandGenerator g(this);
456 Int32BinopMatcher m(node);
457 Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
458 g.UseRegister(m.right().node()));
462 void InstructionSelector::VisitUint32Div(Node* node) {
463 Mips64OperandGenerator g(this);
464 Int32BinopMatcher m(node);
465 Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
466 g.UseRegister(m.right().node()));
470 void InstructionSelector::VisitInt32Mod(Node* node) {
471 Mips64OperandGenerator g(this);
472 Int32BinopMatcher m(node);
473 Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
474 g.UseRegister(m.right().node()));
478 void InstructionSelector::VisitUint32Mod(Node* node) {
479 Mips64OperandGenerator g(this);
480 Int32BinopMatcher m(node);
481 Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
482 g.UseRegister(m.right().node()));
486 void InstructionSelector::VisitInt64Div(Node* node) {
487 Mips64OperandGenerator g(this);
488 Int64BinopMatcher m(node);
489 Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
490 g.UseRegister(m.right().node()));
494 void InstructionSelector::VisitUint64Div(Node* node) {
495 Mips64OperandGenerator g(this);
496 Int64BinopMatcher m(node);
497 Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
498 g.UseRegister(m.right().node()));
502 void InstructionSelector::VisitInt64Mod(Node* node) {
503 Mips64OperandGenerator g(this);
504 Int64BinopMatcher m(node);
505 Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
506 g.UseRegister(m.right().node()));
510 void InstructionSelector::VisitUint64Mod(Node* node) {
511 Mips64OperandGenerator g(this);
512 Int64BinopMatcher m(node);
513 Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
514 g.UseRegister(m.right().node()));
518 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
519 Mips64OperandGenerator g(this);
520 Emit(kMips64CvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
524 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
525 Mips64OperandGenerator g(this);
526 Emit(kMips64CvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
530 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
531 Mips64OperandGenerator g(this);
532 Emit(kMips64CvtDUw, g.DefineAsRegister(node),
533 g.UseRegister(node->InputAt(0)));
537 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
538 Mips64OperandGenerator g(this);
539 Emit(kMips64TruncWD, g.DefineAsRegister(node),
540 g.UseRegister(node->InputAt(0)));
544 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
545 Mips64OperandGenerator g(this);
546 Emit(kMips64TruncUwD, g.DefineAsRegister(node),
547 g.UseRegister(node->InputAt(0)));
// Sign-extends a 32-bit value to 64 bits using a word shift.
// NOTE(review): the Emit call is truncated in this chunk — the final shift
// amount argument (presumably g.TempImmediate(0); a 32-bit shl by 0
// sign-extends on MIPS64) is not visible. Confirm against upstream.
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
558 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
559 Mips64OperandGenerator g(this);
560 Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
561 g.TempImmediate(0), g.TempImmediate(32));
565 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
566 Mips64OperandGenerator g(this);
567 Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
568 g.TempImmediate(0), g.TempImmediate(32));
572 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
573 Mips64OperandGenerator g(this);
574 Emit(kMips64CvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
578 void InstructionSelector::VisitFloat64Add(Node* node) {
579 VisitRRR(this, kMips64AddD, node);
583 void InstructionSelector::VisitFloat64Sub(Node* node) {
584 VisitRRR(this, kMips64SubD, node);
588 void InstructionSelector::VisitFloat64Mul(Node* node) {
589 VisitRRR(this, kMips64MulD, node);
593 void InstructionSelector::VisitFloat64Div(Node* node) {
594 VisitRRR(this, kMips64DivD, node);
598 void InstructionSelector::VisitFloat64Mod(Node* node) {
599 Mips64OperandGenerator g(this);
600 Emit(kMips64ModD, g.DefineAsFixed(node, f0),
601 g.UseFixed(node->InputAt(0), f12),
602 g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
606 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
607 Mips64OperandGenerator g(this);
608 Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
612 void InstructionSelector::VisitFloat64Floor(Node* node) {
613 VisitRR(this, kMips64Float64Floor, node);
617 void InstructionSelector::VisitFloat64Ceil(Node* node) {
618 VisitRR(this, kMips64Float64Ceil, node);
622 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
623 VisitRR(this, kMips64Float64RoundTruncate, node);
// Round-ties-away is not advertised in SupportedMachineOperatorFlags for
// this backend. NOTE(review): body not visible in this chunk — presumably
// UNREACHABLE(); confirm against upstream.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
// Lowers a call node: pushes arguments onto the stack, selects the call
// opcode from the descriptor kind, and emits the call instruction.
void InstructionSelector::VisitCall(Node* node) {
  Mips64OperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    // The frame state input follows the regular call inputs.
    frame_state_descriptor =
        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(node, &buffer, true, false);

  // Claim stack space once, then store each pushed argument to its slot
  // (iterating in reverse so slot indices count down).
  int push_count = buffer.pushed_nodes.size();
  if (push_count > 0) {
    Emit(kMips64StackClaim | MiscField::encode(push_count), g.NoOutput());
  int slot = buffer.pushed_nodes.size() - 1;
  for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
    Emit(kMips64StoreToStackSlot | MiscField::encode(slot), g.NoOutput(),

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
  // Fold the descriptor flags into the misc field.
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  call_instr->MarkAsCall();
// Bounds-checked load: opcode encodes both the element representation and
// the check; offset/length become immediates when they fit.
void InstructionSelector::VisitCheckedLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  Mips64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
      // Signedness picks the extending variant for sub-word loads.
      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
      opcode = kCheckedLoadWord32;
      opcode = kCheckedLoadFloat32;
      opcode = kCheckedLoadFloat64;
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);
  // Length is only used as an immediate when the offset is NOT an immediate;
  // note both remaining arms below collapse to UseRegister(length).
  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);
  Emit(opcode | AddressingModeField::encode(kMode_MRI),
       g.DefineAsRegister(node), offset_operand, length_operand,
       g.UseRegister(buffer));
// Bounds-checked store: mirrors VisitCheckedLoad but has no output and takes
// the stored value as an extra register input.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  Mips64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
      opcode = kCheckedStoreWord8;
      opcode = kCheckedStoreWord16;
      opcode = kCheckedStoreWord32;
      opcode = kCheckedStoreFloat32;
      opcode = kCheckedStoreFloat64;
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);
  // Length may only be an immediate when the offset is not; the remaining
  // arms both collapse to UseRegister(length).
  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);
  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
       offset_operand, length_operand, g.UseRegister(value),
       g.UseRegister(buffer));
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  // Fold the continuation's condition into the opcode.
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    // Branch form: the two target labels ride along as extra inputs, and the
    // instruction terminates its block.
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
    DCHECK(cont->IsSet());
    // Set form: materialize the comparison result into a register.
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
// Shared routine for multiple float compare operations.
// NOTE(review): the VisitCompare call below is truncated in this chunk —
// the trailing |cont| argument is not visible.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
// Shared routine for multiple word compare operations.
// NOTE(review): the parameter list is truncated here — a trailing
// |bool commutative| parameter (used below) is not visible.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode, cont)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
  } else if (g.CanBeImmediate(left, opcode, cont)) {
    // Swapping operands requires commuting the condition unless the
    // operation itself is commutative.
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
    // Neither side fits an immediate: compare two registers.
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
825 void VisitWord32Compare(InstructionSelector* selector, Node* node,
826 FlagsContinuation* cont) {
827 VisitWordCompare(selector, node, kMips64Cmp32, cont, false);
831 void VisitWord64Compare(InstructionSelector* selector, Node* node,
832 FlagsContinuation* cont) {
833 VisitWordCompare(selector, node, kMips64Cmp, cont, false);
// Emits a compare of |value| against the immediate zero, honoring the
// branch/set continuation.
void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
                         Node* value, FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    // Branch form: compare against 0 and consume both target labels.
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
    // Set form: materialize the result. NOTE(review): this Emit call is
    // truncated in this chunk (trailing zero-immediate argument not visible).
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
// Shared routine for word comparisons against zero.
// Attempts to fuse the continuation with a covered comparison or
// overflow-projection node instead of emitting a separate compare.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Initially set comparison against 0 to be 64-bit variant for branches that
  InstructionCode opcode = kMips64Cmp;
  while (selector->CanCover(user, value)) {
    // A covering Word32Equal user narrows the zero compare to 32 bits.
    if (user->opcode() == IrOpcode::kWord32Equal) {
      opcode = kMips64Cmp32;
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          value = m.left().node();
          opcode = kMips64Cmp32;
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord64Equal: {
        // Combine with comparisons against 0 by simply inverting the
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          value = m.left().node();
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        // Float compares are mapped onto the unsigned condition codes.
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == NULL || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64Dadd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64Dsub, cont);
      case IrOpcode::kWord32And:
        // (x & mask) compared against zero maps to a test instruction.
        return VisitWordCompare(selector, value, kMips64Tst32, cont, true);
      case IrOpcode::kWord64And:
        return VisitWordCompare(selector, value, kMips64Tst, cont, true);

  // Continuation could not be combined with a compare, emit compare against 0.
  EmitWordCompareZero(selector, opcode, value, cont);
963 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
964 BasicBlock* fbranch) {
965 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
966 VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
// Lowers a switch to either a jump table (ArchTableSwitch) or a chain of
// conditional jumps (ArchLookupSwitch) based on a space/time cost heuristic.
void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
                                      BasicBlock** case_branches,
                                      int32_t* case_values, size_t case_count,
                                      int32_t min_value, int32_t max_value) {
  Mips64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
  InstructionOperand default_operand = g.Label(default_branch);

  // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
  // is 2^31-1, so don't assume that it's non-zero below.
      1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);

  // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
  size_t table_space_cost = 10 + 2 * value_range;
  size_t table_time_cost = 10;
  size_t lookup_space_cost = 2 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  if (case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
      // Rebase the dispatch index so the table starts at zero.
      index_operand = g.TempRegister();
      Emit(kMips64Sub, index_operand, value_operand,
           g.TempImmediate(min_value));
    // Table layout: [index, default, slot 0, slot 1, ...]; unpopulated slots
    // fall through to the default label.
    size_t input_count = 2 + value_range;
    auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
    inputs[0] = index_operand;
    std::fill(&inputs[1], &inputs[input_count], default_operand);
    for (size_t index = 0; index < case_count; ++index) {
      size_t value = case_values[index] - min_value;
      BasicBlock* branch = case_branches[index];
      DCHECK_LE(0u, value);
      DCHECK_LT(value + 2, input_count);
      inputs[value + 2] = g.Label(branch);
    Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)

  // Generate a sequence of conditional jumps.
  // Layout: [value, default, (case value, case label)...].
  size_t input_count = 2 + case_count * 2;
  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
  inputs[0] = value_operand;
  inputs[1] = default_operand;
  for (size_t index = 0; index < case_count; ++index) {
    int32_t value = case_values[index];
    BasicBlock* branch = case_branches[index];
    inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
    inputs[index * 2 + 2 + 1] = g.Label(branch);
  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
1031 void InstructionSelector::VisitWord32Equal(Node* const node) {
1032 FlagsContinuation cont(kEqual, node);
1033 Int32BinopMatcher m(node);
1034 if (m.right().Is(0)) {
1035 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1038 VisitWord32Compare(this, node, &cont);
1042 void InstructionSelector::VisitInt32LessThan(Node* node) {
1043 FlagsContinuation cont(kSignedLessThan, node);
1044 VisitWord32Compare(this, node, &cont);
1048 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1049 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1050 VisitWord32Compare(this, node, &cont);
1054 void InstructionSelector::VisitUint32LessThan(Node* node) {
1055 FlagsContinuation cont(kUnsignedLessThan, node);
1056 VisitWord32Compare(this, node, &cont);
1060 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1061 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1062 VisitWord32Compare(this, node, &cont);
1066 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1067 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1068 FlagsContinuation cont(kOverflow, ovf);
1069 return VisitBinop(this, node, kMips64Dadd, &cont);
1071 FlagsContinuation cont;
1072 VisitBinop(this, node, kMips64Dadd, &cont);
1076 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1077 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1078 FlagsContinuation cont(kOverflow, ovf);
1079 return VisitBinop(this, node, kMips64Dsub, &cont);
1081 FlagsContinuation cont;
1082 VisitBinop(this, node, kMips64Dsub, &cont);
1086 void InstructionSelector::VisitWord64Equal(Node* const node) {
1087 FlagsContinuation cont(kEqual, node);
1088 Int64BinopMatcher m(node);
1089 if (m.right().Is(0)) {
1090 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1093 VisitWord64Compare(this, node, &cont);
1097 void InstructionSelector::VisitInt64LessThan(Node* node) {
1098 FlagsContinuation cont(kSignedLessThan, node);
1099 VisitWord64Compare(this, node, &cont);
1103 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
1104 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1105 VisitWord64Compare(this, node, &cont);
1109 void InstructionSelector::VisitUint64LessThan(Node* node) {
1110 FlagsContinuation cont(kUnsignedLessThan, node);
1111 VisitWord64Compare(this, node, &cont);
1115 void InstructionSelector::VisitFloat64Equal(Node* node) {
1116 FlagsContinuation cont(kEqual, node);
1117 VisitFloat64Compare(this, node, &cont);
1121 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1122 FlagsContinuation cont(kUnsignedLessThan, node);
1123 VisitFloat64Compare(this, node, &cont);
1127 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1128 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1129 VisitFloat64Compare(this, node, &cont);
1134 MachineOperatorBuilder::Flags
1135 InstructionSelector::SupportedMachineOperatorFlags() {
1136 return MachineOperatorBuilder::kFloat64Floor |
1137 MachineOperatorBuilder::kFloat64Ceil |
1138 MachineOperatorBuilder::kFloat64RoundTruncate;
1141 } // namespace compiler
1142 } // namespace internal