1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/base/bits.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
// Debug helper: report that an unimplemented selection path was reached.
#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// Debug helper: trace entry into an instruction-selection routine.
#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
// Adds Mips-specific methods for generating InstructionOperands.
// NOTE(review): this class body appears truncated in this chunk (missing
// access specifiers, a matcher declaration before `m`, several case labels
// and closing braces) — compare against the full file before editing.
class MipsOperandGenerator FINAL : public OperandGenerator {
  explicit MipsOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns an immediate operand when |node| is a constant that fits the
  // immediate field of |opcode|; otherwise falls back to a register.
  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    return UseRegister(node);

  // Decides whether |node|'s constant value can be encoded as the
  // immediate of |opcode|.
  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    // NOTE(review): `m` is presumably an Int32Matcher declared on a line
    // missing from this chunk — confirm against the full file.
    if (!m.HasValue()) return false;
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
      // Shift amounts: unsigned 5-bit field.
      return is_uint5(value);
      // Zero-extended 16-bit immediate (logical ops).
      return is_uint16(value);
      case kCheckedLoadFloat32:
      case kCheckedLoadFloat64:
      case kCheckedStoreFloat32:
      case kCheckedStoreFloat64:
        // Doubles span two words: the second word's offset
        // (value + kIntSize) must also fit the signed 16-bit field.
        return is_int16(value + kIntSize);
      // Default memory operand: signed 16-bit offset.
      return is_int16(value);

  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
64 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
66 MipsOperandGenerator g(selector);
67 selector->Emit(opcode, g.DefineAsRegister(node),
68 g.UseRegister(node->InputAt(0)),
69 g.UseRegister(node->InputAt(1)));
73 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
75 MipsOperandGenerator g(selector);
76 selector->Emit(opcode, g.DefineAsRegister(node),
77 g.UseRegister(node->InputAt(0)));
81 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
83 MipsOperandGenerator g(selector);
84 selector->Emit(opcode, g.DefineAsRegister(node),
85 g.UseRegister(node->InputAt(0)),
86 g.UseOperand(node->InputAt(1), opcode));
90 static void VisitBinop(InstructionSelector* selector, Node* node,
91 InstructionCode opcode, FlagsContinuation* cont) {
92 MipsOperandGenerator g(selector);
93 Int32BinopMatcher m(node);
94 InstructionOperand inputs[4];
95 size_t input_count = 0;
96 InstructionOperand outputs[2];
97 size_t output_count = 0;
99 inputs[input_count++] = g.UseRegister(m.left().node());
100 inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
102 if (cont->IsBranch()) {
103 inputs[input_count++] = g.Label(cont->true_block());
104 inputs[input_count++] = g.Label(cont->false_block());
107 outputs[output_count++] = g.DefineAsRegister(node);
109 outputs[output_count++] = g.DefineAsRegister(cont->result());
112 DCHECK_NE(0u, input_count);
113 DCHECK_NE(0u, output_count);
114 DCHECK_GE(arraysize(inputs), input_count);
115 DCHECK_GE(arraysize(outputs), output_count);
117 selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
122 static void VisitBinop(InstructionSelector* selector, Node* node,
123 InstructionCode opcode) {
124 FlagsContinuation cont;
125 VisitBinop(selector, node, opcode, &cont);
// Selects a MIPS load instruction for a Load node, choosing the opcode from
// the load representation and folding small constant indices into the
// instruction's 16-bit offset field.
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  // NOTE(review): the representation->opcode switch appears truncated in
  // this chunk (the `opcode` declaration and several cases are missing) —
  // compare against the full file.
    case kRepBit:  // Fall through.
      // Byte loads: zero-extend (lbu) for unsigned, sign-extend (lb) else.
      opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
      // Half-word loads: lhu for unsigned, lh for signed.
      opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
    case kRepTagged:  // Fall through.

  if (g.CanBeImmediate(index, opcode)) {
    // [base + small immediate]: a single load suffices.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
    // Index does not fit the offset field: materialize base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
// Selects a MIPS store instruction for a Store node. Tagged stores that
// need a write barrier go through a dedicated stub with fixed registers;
// plain stores fold small constant indices into the offset field.
void InstructionSelector::VisitStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    // The write-barrier stub expects base/index/value in t0/t1/t2.
    InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
    Emit(kMipsStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
         g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());

  // NOTE(review): the representation->opcode switch appears truncated in
  // this chunk (only a few case labels remain) — compare against the full
  // file.
    case kRepBit:  // Fall through.
    case kRepTagged:  // Fall through.

  if (g.CanBeImmediate(index, opcode)) {
    // [base + small immediate]: a single store suffices.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
    // Index does not fit the offset field: materialize base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegister(value));
232 void InstructionSelector::VisitWord32And(Node* node) {
233 VisitBinop(this, node, kMipsAnd);
237 void InstructionSelector::VisitWord32Or(Node* node) {
238 VisitBinop(this, node, kMipsOr);
242 void InstructionSelector::VisitWord32Xor(Node* node) {
243 VisitBinop(this, node, kMipsXor);
247 void InstructionSelector::VisitWord32Shl(Node* node) {
248 VisitRRO(this, kMipsShl, node);
252 void InstructionSelector::VisitWord32Shr(Node* node) {
253 VisitRRO(this, kMipsShr, node);
257 void InstructionSelector::VisitWord32Sar(Node* node) {
258 VisitRRO(this, kMipsSar, node);
262 void InstructionSelector::VisitWord32Ror(Node* node) {
263 VisitRRO(this, kMipsRor, node);
267 void InstructionSelector::VisitWord32Clz(Node* node) {
268 MipsOperandGenerator g(this);
269 Emit(kMipsClz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
273 void InstructionSelector::VisitInt32Add(Node* node) {
274 MipsOperandGenerator g(this);
276 // TODO(plind): Consider multiply & add optimization from arm port.
277 VisitBinop(this, node, kMipsAdd);
281 void InstructionSelector::VisitInt32Sub(Node* node) {
282 VisitBinop(this, node, kMipsSub);
286 void InstructionSelector::VisitInt32Mul(Node* node) {
287 MipsOperandGenerator g(this);
288 Int32BinopMatcher m(node);
289 if (m.right().HasValue() && m.right().Value() > 0) {
290 int32_t value = m.right().Value();
291 if (base::bits::IsPowerOfTwo32(value)) {
292 Emit(kMipsShl | AddressingModeField::encode(kMode_None),
293 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
294 g.TempImmediate(WhichPowerOf2(value)));
297 if (base::bits::IsPowerOfTwo32(value - 1)) {
298 InstructionOperand temp = g.TempRegister();
299 Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
300 g.UseRegister(m.left().node()),
301 g.TempImmediate(WhichPowerOf2(value - 1)));
302 Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
303 g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
306 if (base::bits::IsPowerOfTwo32(value + 1)) {
307 InstructionOperand temp = g.TempRegister();
308 Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
309 g.UseRegister(m.left().node()),
310 g.TempImmediate(WhichPowerOf2(value + 1)));
311 Emit(kMipsSub | AddressingModeField::encode(kMode_None),
312 g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
316 Emit(kMipsMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
317 g.UseRegister(m.right().node()));
321 void InstructionSelector::VisitInt32MulHigh(Node* node) {
322 MipsOperandGenerator g(this);
323 Emit(kMipsMulHigh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
324 g.UseRegister(node->InputAt(1)));
328 void InstructionSelector::VisitUint32MulHigh(Node* node) {
329 MipsOperandGenerator g(this);
330 Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
331 g.UseRegister(node->InputAt(1)));
335 void InstructionSelector::VisitInt32Div(Node* node) {
336 MipsOperandGenerator g(this);
337 Int32BinopMatcher m(node);
338 Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
339 g.UseRegister(m.right().node()));
343 void InstructionSelector::VisitUint32Div(Node* node) {
344 MipsOperandGenerator g(this);
345 Int32BinopMatcher m(node);
346 Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
347 g.UseRegister(m.right().node()));
351 void InstructionSelector::VisitInt32Mod(Node* node) {
352 MipsOperandGenerator g(this);
353 Int32BinopMatcher m(node);
354 Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
355 g.UseRegister(m.right().node()));
359 void InstructionSelector::VisitUint32Mod(Node* node) {
360 MipsOperandGenerator g(this);
361 Int32BinopMatcher m(node);
362 Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
363 g.UseRegister(m.right().node()));
367 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
368 MipsOperandGenerator g(this);
369 Emit(kMipsCvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
373 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
374 MipsOperandGenerator g(this);
375 Emit(kMipsCvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
379 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
380 MipsOperandGenerator g(this);
381 Emit(kMipsCvtDUw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
385 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
386 MipsOperandGenerator g(this);
387 Emit(kMipsTruncWD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
391 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
392 MipsOperandGenerator g(this);
393 Emit(kMipsTruncUwD, g.DefineAsRegister(node),
394 g.UseRegister(node->InputAt(0)));
398 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
399 MipsOperandGenerator g(this);
400 Emit(kMipsCvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
404 void InstructionSelector::VisitFloat64Add(Node* node) {
405 VisitRRR(this, kMipsAddD, node);
409 void InstructionSelector::VisitFloat64Sub(Node* node) {
410 MipsOperandGenerator g(this);
411 Float64BinopMatcher m(node);
412 if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
413 CanCover(m.node(), m.right().node())) {
414 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
415 CanCover(m.right().node(), m.right().InputAt(0))) {
416 Float64BinopMatcher mright0(m.right().InputAt(0));
417 if (mright0.left().IsMinusZero()) {
418 Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
419 g.UseRegister(mright0.right().node()));
424 VisitRRR(this, kMipsSubD, node);
428 void InstructionSelector::VisitFloat64Mul(Node* node) {
429 VisitRRR(this, kMipsMulD, node);
433 void InstructionSelector::VisitFloat64Div(Node* node) {
434 VisitRRR(this, kMipsDivD, node);
438 void InstructionSelector::VisitFloat64Mod(Node* node) {
439 MipsOperandGenerator g(this);
440 Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
441 g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
445 void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
448 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
451 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
452 MipsOperandGenerator g(this);
453 Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
457 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
458 VisitRR(this, kMipsFloat64RoundDown, node);
462 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
463 VisitRR(this, kMipsFloat64RoundTruncate, node);
467 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
// Lowers a call node: claims stack space, pushes the arguments, threads an
// optional exception-handler label through the flags, and emits the call.
// NOTE(review): several statements of this function appear truncated in
// this chunk (loop header continuation, case closers, default case) —
// compare against the full file.
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  MipsOperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    // The frame state input follows the regular call inputs.
    frame_state_descriptor =
        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(node, &buffer, true, false);
  // Possibly align stack here for functions.
  int push_count = buffer.pushed_nodes.size();
  if (push_count > 0) {
    // Reserve all outgoing argument slots in one claim.
    Emit(kMipsStackClaim | MiscField::encode(push_count), g.NoOutput());
  // Store arguments into their slots from the highest slot downwards.
  int slot = buffer.pushed_nodes.size() - 1;
  for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
    Emit(kMipsStoreToStackSlot | MiscField::encode(slot), g.NoOutput(),

  // Pass label of exception handler block.
  CallDescriptor::Flags flags = descriptor->flags();
  if (handler != nullptr) {
    flags |= CallDescriptor::kHasExceptionHandler;
    buffer.instruction_args.push_back(g.Label(handler));

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
  opcode |= MiscField::encode(flags);

  // Emit the call instruction.
  InstructionOperand* first_output =
      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), first_output,
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  call_instr->MarkAsCall();
// Lowers a bounds-checked load (typed-array access): the generated code
// compares |offset| against |length| before loading from |buffer|.
void InstructionSelector::VisitCheckedLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  MipsOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  // NOTE(review): the switch over |rep| appears truncated in this chunk
  // (case labels and the `opcode` declaration are missing).
      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
      opcode = kCheckedLoadWord32;
      opcode = kCheckedLoadFloat32;
      opcode = kCheckedLoadFloat64;

  // Fold the offset into an immediate when it fits the opcode.
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);
  // The length may be an immediate only when the offset is NOT one;
  // with an immediate offset the bounds check needs the length in a
  // register.
  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);

  Emit(opcode | AddressingModeField::encode(kMode_MRI),
       g.DefineAsRegister(node), offset_operand, length_operand,
       g.UseRegister(buffer));
// Lowers a bounds-checked store (typed-array access): the generated code
// compares |offset| against |length| before storing |value| into |buffer|.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MipsOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  // NOTE(review): the switch over |rep| appears truncated in this chunk
  // (case labels and the `opcode` declaration are missing).
      opcode = kCheckedStoreWord8;
      opcode = kCheckedStoreWord16;
      opcode = kCheckedStoreWord32;
      opcode = kCheckedStoreFloat32;
      opcode = kCheckedStoreFloat64;

  // Fold the offset into an immediate when it fits the opcode.
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);
  // The length may be an immediate only when the offset is NOT one; with
  // an immediate offset the bounds check needs the length in a register.
  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
                                          ? g.CanBeImmediate(length, opcode)
                                                ? g.UseImmediate(length)
                                                : g.UseRegister(length)
                                          : g.UseRegister(length);

  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
       offset_operand, length_operand, g.UseRegister(value),
       g.UseRegister(buffer));
622 // Shared routine for multiple compare operations.
623 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
624 InstructionOperand left, InstructionOperand right,
625 FlagsContinuation* cont) {
626 MipsOperandGenerator g(selector);
627 opcode = cont->Encode(opcode);
628 if (cont->IsBranch()) {
629 selector->Emit(opcode, g.NoOutput(), left, right,
630 g.Label(cont->true_block()), g.Label(cont->false_block()));
632 DCHECK(cont->IsSet());
633 // TODO(plind): Revisit and test this path.
634 selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
639 // Shared routine for multiple float compare operations.
640 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
641 FlagsContinuation* cont) {
642 MipsOperandGenerator g(selector);
643 Node* left = node->InputAt(0);
644 Node* right = node->InputAt(1);
645 VisitCompare(selector, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
650 // Shared routine for multiple word compare operations.
651 void VisitWordCompare(InstructionSelector* selector, Node* node,
652 InstructionCode opcode, FlagsContinuation* cont,
654 MipsOperandGenerator g(selector);
655 Node* left = node->InputAt(0);
656 Node* right = node->InputAt(1);
658 // Match immediates on left or right side of comparison.
659 if (g.CanBeImmediate(right, opcode)) {
660 VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
662 } else if (g.CanBeImmediate(left, opcode)) {
663 if (!commutative) cont->Commute();
664 VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
667 VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
673 void VisitWordCompare(InstructionSelector* selector, Node* node,
674 FlagsContinuation* cont) {
675 VisitWordCompare(selector, node, kMipsCmp, cont, false);
// Shared routine for word comparisons against zero.
// Walks down the value being tested while it is covered by |user|, fusing
// the zero test with a producing comparison, float comparison, overflow
// projection or Word32And where possible; otherwise falls back to an
// explicit compare against zero.
// NOTE(review): several loop/switch closers and negation statements appear
// truncated in this chunk — compare against the full file.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation and descending into the compared value.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          value = m.left().node();
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        // Float less-than uses the unsigned condition encoding.
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsAddOvf, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsSubOvf, cont);
      case IrOpcode::kWord32And:
        // "tst"-style test: compare masked bits against zero; commutative.
        return VisitWordCompare(selector, value, kMipsTst, cont, true);

  // Continuation could not be combined with a compare, emit compare against 0.
  MipsOperandGenerator g(selector);
  InstructionCode const opcode = cont->Encode(kMipsCmp);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
767 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
768 BasicBlock* fbranch) {
769 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
770 VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
774 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
775 MipsOperandGenerator g(this);
776 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
778 // Emit either ArchTableSwitch or ArchLookupSwitch.
779 size_t table_space_cost = 9 + sw.value_range;
780 size_t table_time_cost = 3;
781 size_t lookup_space_cost = 2 + 2 * sw.case_count;
782 size_t lookup_time_cost = sw.case_count;
783 if (sw.case_count > 0 &&
784 table_space_cost + 3 * table_time_cost <=
785 lookup_space_cost + 3 * lookup_time_cost &&
786 sw.min_value > std::numeric_limits<int32_t>::min()) {
787 InstructionOperand index_operand = value_operand;
789 index_operand = g.TempRegister();
790 Emit(kMipsSub, index_operand, value_operand,
791 g.TempImmediate(sw.min_value));
793 // Generate a table lookup.
794 return EmitTableSwitch(sw, index_operand);
797 // Generate a sequence of conditional jumps.
798 return EmitLookupSwitch(sw, value_operand);
802 void InstructionSelector::VisitWord32Equal(Node* const node) {
803 FlagsContinuation cont(kEqual, node);
804 Int32BinopMatcher m(node);
805 if (m.right().Is(0)) {
806 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
808 VisitWordCompare(this, node, &cont);
812 void InstructionSelector::VisitInt32LessThan(Node* node) {
813 FlagsContinuation cont(kSignedLessThan, node);
814 VisitWordCompare(this, node, &cont);
818 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
819 FlagsContinuation cont(kSignedLessThanOrEqual, node);
820 VisitWordCompare(this, node, &cont);
824 void InstructionSelector::VisitUint32LessThan(Node* node) {
825 FlagsContinuation cont(kUnsignedLessThan, node);
826 VisitWordCompare(this, node, &cont);
830 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
831 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
832 VisitWordCompare(this, node, &cont);
836 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
837 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
838 FlagsContinuation cont(kOverflow, ovf);
839 return VisitBinop(this, node, kMipsAddOvf, &cont);
841 FlagsContinuation cont;
842 VisitBinop(this, node, kMipsAddOvf, &cont);
846 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
847 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
848 FlagsContinuation cont(kOverflow, ovf);
849 return VisitBinop(this, node, kMipsSubOvf, &cont);
851 FlagsContinuation cont;
852 VisitBinop(this, node, kMipsSubOvf, &cont);
856 void InstructionSelector::VisitFloat64Equal(Node* node) {
857 FlagsContinuation cont(kEqual, node);
858 VisitFloat64Compare(this, node, &cont);
862 void InstructionSelector::VisitFloat64LessThan(Node* node) {
863 FlagsContinuation cont(kUnsignedLessThan, node);
864 VisitFloat64Compare(this, node, &cont);
868 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
869 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
870 VisitFloat64Compare(this, node, &cont);
874 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
875 MipsOperandGenerator g(this);
876 Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
877 g.UseRegister(node->InputAt(0)));
881 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
882 MipsOperandGenerator g(this);
883 Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
884 g.UseRegister(node->InputAt(0)));
888 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
889 MipsOperandGenerator g(this);
890 Node* left = node->InputAt(0);
891 Node* right = node->InputAt(1);
892 Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
893 g.UseRegister(left), g.UseRegister(right));
897 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
898 MipsOperandGenerator g(this);
899 Node* left = node->InputAt(0);
900 Node* right = node->InputAt(1);
901 Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
902 g.UseRegister(left), g.UseRegister(right));
907 MachineOperatorBuilder::Flags
908 InstructionSelector::SupportedMachineOperatorFlags() {
909 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
910 return MachineOperatorBuilder::kFloat64RoundDown |
911 MachineOperatorBuilder::kFloat64RoundTruncate;
913 return MachineOperatorBuilder::kNoFlags;
916 } // namespace compiler
917 } // namespace internal