1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/base/bits.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
14 // Adds Arm-specific methods for generating InstructionOperands.
15 class ArmOperandGenerator : public OperandGenerator {
17 explicit ArmOperandGenerator(InstructionSelector* selector)
18 : OperandGenerator(selector) {}
// True if |value| can be encoded as an ARM addressing-mode-1 (data-processing)
// immediate, i.e. an 8-bit value rotated by an even amount.
20 bool CanBeImmediate(int32_t value) const {
21 return Assembler::ImmediateFitsAddrMode1Instruction(value);
// Unsigned variant: same bit pattern, checked via the signed overload.
24 bool CanBeImmediate(uint32_t value) const {
25 return CanBeImmediate(bit_cast<int32_t>(value));
// True if constant |node| can be used as an immediate operand of |opcode|.
// The admissible range depends on the opcode's addressing mode; several case
// labels of this switch are elided from this view.
28 bool CanBeImmediate(Node* node, InstructionCode opcode) {
30 if (!m.HasValue()) return false;
31 int32_t value = m.Value();
32 switch (ArchOpcodeField::decode(opcode)) {
// Logical-style opcodes: the value or its bitwise complement must encode
// (the complemented form can be selected as MVN/BIC).
37 return CanBeImmediate(value) || CanBeImmediate(~value);
// Add/sub-style opcodes: the value or its negation must encode.
43 return CanBeImmediate(value) || CanBeImmediate(-value);
50 return CanBeImmediate(value);
// VFP load/store offsets: multiples of 4 in [-1020, 1020].
56 return value >= -1020 && value <= 1020 && (value % 4) == 0;
// Word/byte load/store (and the write barrier): 12-bit offset range.
63 case kArmStoreWriteBarrier:
64 return value >= -4095 && value <= 4095;
// Halfword / signed-byte load/store: 8-bit offset range.
69 return value >= -255 && value <= 255;
// Shared routine: emits a one-input float64 instruction with the result and
// the input both in registers.
81 void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
83 ArmOperandGenerator g(selector);
84 selector->Emit(opcode, g.DefineAsRegister(node),
85 g.UseRegister(node->InputAt(0)));
// Shared routine: emits a two-input float64 instruction with all operands in
// registers.
89 void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
91 ArmOperandGenerator g(selector);
92 selector->Emit(opcode, g.DefineAsRegister(node),
93 g.UseRegister(node->InputAt(0)),
94 g.UseRegister(node->InputAt(1)));
// Tries to match |node| against the shift IR opcode kOpcode. On a match the
// shifted value goes to *value_return and the shift amount to *shift_return:
// as an immediate when it lies in [kImmMin, kImmMax] (addressing mode
// kImmMode), otherwise as a register (kRegMode). The chosen addressing mode
// is OR'ed into *opcode_return. The returns lie outside this view.
98 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
99 AddressingMode kImmMode, AddressingMode kRegMode>
100 bool TryMatchShift(InstructionSelector* selector,
101 InstructionCode* opcode_return, Node* node,
102 InstructionOperand* value_return,
103 InstructionOperand* shift_return) {
104 ArmOperandGenerator g(selector);
105 if (node->opcode() == kOpcode) {
106 Int32BinopMatcher m(node);
107 *value_return = g.UseRegister(m.left().node());
108 if (m.right().IsInRange(kImmMin, kImmMax)) {
109 *opcode_return |= AddressingModeField::encode(kImmMode);
110 *shift_return = g.UseImmediate(m.right().node());
// Shift amount outside the encodable immediate range: use a register.
112 *opcode_return |= AddressingModeField::encode(kRegMode);
113 *shift_return = g.UseRegister(m.right().node());
// Rotate-right: immediate rotate counts 1..31 are encodable.
121 bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
122 Node* node, InstructionOperand* value_return,
123 InstructionOperand* shift_return) {
124 return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
125 kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
126 value_return, shift_return);
// Arithmetic shift right: immediate shift amounts 1..32 are encodable.
130 bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
131 Node* node, InstructionOperand* value_return,
132 InstructionOperand* shift_return) {
133 return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
134 kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
135 value_return, shift_return);
// Logical shift left: immediate shift amounts 0..31 are encodable.
139 bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
140 Node* node, InstructionOperand* value_return,
141 InstructionOperand* shift_return) {
142 return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
143 kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
144 value_return, shift_return);
// Logical shift right: immediate shift amounts 1..32 are encodable.
148 bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
149 Node* node, InstructionOperand* value_return,
150 InstructionOperand* shift_return) {
151 return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
152 kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
153 value_return, shift_return);
// Tries each of the four shift forms in turn (ASR, LSL, LSR, ROR); the first
// that matches |node| wins.
157 bool TryMatchShift(InstructionSelector* selector,
158 InstructionCode* opcode_return, Node* node,
159 InstructionOperand* value_return,
160 InstructionOperand* shift_return) {
162 TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
163 TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
164 TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
165 TryMatchROR(selector, opcode_return, node, value_return, shift_return));
// Tries to use |node| as an ARM operand-2: first as an encodable immediate
// (one input slot used), then as a shifted register (two input slots: value
// and shift amount). On success, fills |inputs| and *input_count_return and
// OR's the addressing mode into *opcode_return.
169 bool TryMatchImmediateOrShift(InstructionSelector* selector,
170 InstructionCode* opcode_return, Node* node,
171 size_t* input_count_return,
172 InstructionOperand* inputs) {
173 ArmOperandGenerator g(selector);
174 if (g.CanBeImmediate(node, *opcode_return)) {
175 *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
176 inputs[0] = g.UseImmediate(node);
177 *input_count_return = 1;
180 if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
181 *input_count_return = 2;
// Shared routine for 32-bit binops with a flags continuation. |reverse_opcode|
// is the commuted form used when the immediate/shift pattern matches the LEFT
// operand instead of the right one (e.g. SUB vs RSB).
188 void VisitBinop(InstructionSelector* selector, Node* node,
189 InstructionCode opcode, InstructionCode reverse_opcode,
190 FlagsContinuation* cont) {
191 ArmOperandGenerator g(selector);
192 Int32BinopMatcher m(node);
193 InstructionOperand inputs[5];
194 size_t input_count = 0;
195 InstructionOperand outputs[2];
196 size_t output_count = 0;
198 if (m.left().node() == m.right().node()) {
199 // If both inputs refer to the same operand, enforce allocating a register
200 // for both of them to ensure that we don't end up generating code like
203 // mov r0, r1, asr #16
204 // adds r0, r0, r1, asr #16
206 InstructionOperand const input = g.UseRegister(m.left().node());
207 opcode |= AddressingModeField::encode(kMode_Operand2_R);
208 inputs[input_count++] = input;
209 inputs[input_count++] = input;
210 } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
211 &input_count, &inputs[1])) {
212 inputs[0] = g.UseRegister(m.left().node());
// Right operand didn't match; try the left with the commuted opcode.
214 } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
215 m.left().node(), &input_count,
217 inputs[0] = g.UseRegister(m.right().node());
218 opcode = reverse_opcode;
// Fallback: plain register-register form.
221 opcode |= AddressingModeField::encode(kMode_Operand2_R);
222 inputs[input_count++] = g.UseRegister(m.left().node());
223 inputs[input_count++] = g.UseRegister(m.right().node());
// Branch continuations add the true/false block labels as extra inputs.
226 if (cont->IsBranch()) {
227 inputs[input_count++] = g.Label(cont->true_block());
228 inputs[input_count++] = g.Label(cont->false_block());
231 outputs[output_count++] = g.DefineAsRegister(node);
// Materialized-flags continuations also define a boolean result register.
233 outputs[output_count++] = g.DefineAsRegister(cont->result());
236 DCHECK_NE(0u, input_count);
237 DCHECK_NE(0u, output_count);
238 DCHECK_GE(arraysize(inputs), input_count);
239 DCHECK_GE(arraysize(outputs), output_count);
240 DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
242 Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
243 outputs, input_count, inputs);
244 if (cont->IsBranch()) instr->MarkAsControl();
// Convenience overload without an explicit flags continuation.
248 void VisitBinop(InstructionSelector* selector, Node* node,
249 InstructionCode opcode, InstructionCode reverse_opcode) {
250 FlagsContinuation cont;
251 VisitBinop(selector, node, opcode, reverse_opcode, &cont);
// Selects a load instruction based on the load representation. When the index
// is a constant in the opcode's offset range, uses [base, #imm] addressing;
// otherwise [base, index-register]. Several switch cases are elided here.
258 void InstructionSelector::VisitLoad(Node* node) {
259 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
260 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
261 ArmOperandGenerator g(this);
262 Node* base = node->InputAt(0);
263 Node* index = node->InputAt(1);
268 opcode = kArmVldrF32;
271 opcode = kArmVldrF64;
273 case kRepBit: // Fall through.
// Sub-word loads: pick the zero-extending or sign-extending variant.
275 opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
278 opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
280 case kRepTagged: // Fall through.
289 if (g.CanBeImmediate(index, opcode)) {
290 Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
291 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
293 Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
294 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
// Selects a store instruction. Stores of tagged values needing a full write
// barrier use the kArmStoreWriteBarrier pseudo-op with fixed registers
// (r4/r5/r6) expected by the RecordWrite stub; plain stores mirror VisitLoad.
299 void InstructionSelector::VisitStore(Node* node) {
300 ArmOperandGenerator g(this);
301 Node* base = node->InputAt(0);
302 Node* index = node->InputAt(1);
303 Node* value = node->InputAt(2);
305 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
306 MachineType rep = RepresentationOf(store_rep.machine_type());
307 if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
308 DCHECK(rep == kRepTagged);
309 // TODO(dcarney): refactor RecordWrite function to take temp registers
310 // and pass them here instead of using fixed regs
311 // TODO(dcarney): handle immediate indices.
312 InstructionOperand temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
313 Emit(kArmStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r4),
314 g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps), temps);
317 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
// Plain store: choose the opcode by representation (cases elided here).
322 opcode = kArmVstrF32;
325 opcode = kArmVstrF64;
327 case kRepBit: // Fall through.
334 case kRepTagged: // Fall through.
// Immediate offset when encodable, register offset otherwise.
343 if (g.CanBeImmediate(index, opcode)) {
344 Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
345 g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
347 Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
348 g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
// Selects a bounds-checked load. The emitted instruction receives the offset
// twice: once (with the length) for the bounds check and once as the address
// component. The length may be an immediate if it encodes for CMP.
353 void InstructionSelector::VisitCheckedLoad(Node* node) {
354 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
355 MachineType typ = TypeOf(OpParameter<MachineType>(node));
356 ArmOperandGenerator g(this);
357 Node* const buffer = node->InputAt(0);
358 Node* const offset = node->InputAt(1);
359 Node* const length = node->InputAt(2);
363 opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
366 opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
369 opcode = kCheckedLoadWord32;
372 opcode = kCheckedLoadFloat32;
375 opcode = kCheckedLoadFloat64;
381 InstructionOperand offset_operand = g.UseRegister(offset);
382 InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
383 ? g.UseImmediate(length)
384 : g.UseRegister(length);
385 Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
386 g.DefineAsRegister(node), offset_operand, length_operand,
387 g.UseRegister(buffer), offset_operand);
// Selects a bounds-checked store; operand layout mirrors VisitCheckedLoad
// (offset passed twice: bounds check plus address), with no output.
391 void InstructionSelector::VisitCheckedStore(Node* node) {
392 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
393 ArmOperandGenerator g(this);
394 Node* const buffer = node->InputAt(0);
395 Node* const offset = node->InputAt(1);
396 Node* const length = node->InputAt(2);
397 Node* const value = node->InputAt(3);
401 opcode = kCheckedStoreWord8;
404 opcode = kCheckedStoreWord16;
407 opcode = kCheckedStoreWord32;
410 opcode = kCheckedStoreFloat32;
413 opcode = kCheckedStoreFloat64;
419 InstructionOperand offset_operand = g.UseRegister(offset);
420 InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
421 ? g.UseImmediate(length)
422 : g.UseRegister(length);
423 Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
424 offset_operand, length_operand, g.UseRegister(value),
425 g.UseRegister(buffer), offset_operand);
// Emits BIC (and-with-complement): node = left & ~right. A shift feeding
// |right| is folded into the operand-2 form when it matches.
431 void EmitBic(InstructionSelector* selector, Node* node, Node* left,
433 ArmOperandGenerator g(selector);
434 InstructionCode opcode = kArmBic;
435 InstructionOperand value_operand;
436 InstructionOperand shift_operand;
437 if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
438 selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
439 value_operand, shift_operand);
// No shift to fold: plain register operand-2.
442 selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
443 g.DefineAsRegister(node), g.UseRegister(left),
444 g.UseRegister(right));
// Emits UBFX (ARMv7 unsigned bitfield extract) of |width| bits starting at
// bit |lsb| of |left|. Requires 1 <= width <= 32 - lsb.
448 void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
449 uint32_t lsb, uint32_t width) {
450 DCHECK_LE(1u, width);
451 DCHECK_LE(width, 32u - lsb);
452 ArmOperandGenerator g(selector);
453 selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
454 g.TempImmediate(lsb), g.TempImmediate(width));
// Word32And selection, trying several ARM-specific strength reductions
// before falling back to the generic AND binop.
460 void InstructionSelector::VisitWord32And(Node* node) {
461 ArmOperandGenerator g(this);
462 Int32BinopMatcher m(node);
// x & (y ^ -1) => BIC x, y (either operand may be the negation).
463 if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
464 Int32BinopMatcher mleft(m.left().node());
465 if (mleft.right().Is(-1)) {
466 EmitBic(this, node, m.right().node(), mleft.left().node());
470 if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
471 Int32BinopMatcher mright(m.right().node());
472 if (mright.right().Is(-1)) {
473 EmitBic(this, node, m.left().node(), mright.left().node());
477 if (m.right().HasValue()) {
478 uint32_t const value = m.right().Value();
479 uint32_t width = base::bits::CountPopulation32(value);
480 uint32_t msb = base::bits::CountLeadingZeros32(value);
481 // Try to interpret this AND as UBFX.
482 if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
483 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
484 if (m.left().IsWord32Shr()) {
485 Int32BinopMatcher mleft(m.left().node());
486 if (mleft.right().IsInRange(0, 31)) {
487 // UBFX cannot extract bits past the register size, however since
488 // shifting the original value would have introduced some zeros we can
489 // still use UBFX with a smaller mask and the remaining bits will be
491 uint32_t const lsb = mleft.right().Value();
492 return EmitUbfx(this, node, mleft.left().node(), lsb,
493 std::min(width, 32 - lsb));
496 return EmitUbfx(this, node, m.left().node(), 0, width);
498 // Try to interpret this AND as BIC.
499 if (g.CanBeImmediate(~value)) {
500 Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
501 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
502 g.TempImmediate(~value));
505 // Try to interpret this AND as UXTH.
506 if (value == 0xffff) {
507 Emit(kArmUxth, g.DefineAsRegister(m.node()),
508 g.UseRegister(m.left().node()), g.TempImmediate(0));
511 // Try to interpret this AND as BFC.
512 if (IsSupported(ARMv7)) {
// BFC clears a contiguous run of bits: recompute msb/lsb on ~value.
514 msb = base::bits::CountLeadingZeros32(~value);
515 uint32_t lsb = base::bits::CountTrailingZeros32(~value);
516 if (msb + width + lsb == 32) {
517 Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
518 g.TempImmediate(lsb), g.TempImmediate(width));
// No pattern matched: generic AND (commutative, so same opcode both ways).
523 VisitBinop(this, node, kArmAnd, kArmAnd);
// Word32Or: generic ORR binop (commutative, so same opcode both ways).
527 void InstructionSelector::VisitWord32Or(Node* node) {
528 VisitBinop(this, node, kArmOrr, kArmOrr);
// Word32Xor: x ^ -1 selects MVN (bitwise not), folding a shift on x into the
// operand-2 form when possible; otherwise falls back to the generic EOR.
532 void InstructionSelector::VisitWord32Xor(Node* node) {
533 ArmOperandGenerator g(this);
534 Int32BinopMatcher m(node);
535 if (m.right().Is(-1)) {
536 InstructionCode opcode = kArmMvn;
537 InstructionOperand value_operand;
538 InstructionOperand shift_operand;
539 if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
541 Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
// No foldable shift: MVN of a plain register.
544 Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
545 g.DefineAsRegister(node), g.UseRegister(m.left().node()));
548 VisitBinop(this, node, kArmEor, kArmEor);
// Shared routine for shift operations with a flags continuation: emits a MOV
// with the shift folded into operand-2. The matcher must succeed (CHECK).
554 template <typename TryMatchShift>
555 void VisitShift(InstructionSelector* selector, Node* node,
556 TryMatchShift try_match_shift, FlagsContinuation* cont) {
557 ArmOperandGenerator g(selector);
558 InstructionCode opcode = kArmMov;
559 InstructionOperand inputs[4];
560 size_t input_count = 2;
561 InstructionOperand outputs[2];
562 size_t output_count = 0;
564 CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
// Branch continuations add true/false block labels as extra inputs.
566 if (cont->IsBranch()) {
567 inputs[input_count++] = g.Label(cont->true_block());
568 inputs[input_count++] = g.Label(cont->false_block());
571 outputs[output_count++] = g.DefineAsRegister(node);
// Materialized-flags continuations also define a boolean result register.
573 outputs[output_count++] = g.DefineAsRegister(cont->result());
576 DCHECK_NE(0u, input_count);
577 DCHECK_NE(0u, output_count);
578 DCHECK_GE(arraysize(inputs), input_count);
579 DCHECK_GE(arraysize(outputs), output_count);
580 DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
582 Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
583 outputs, input_count, inputs);
584 if (cont->IsBranch()) instr->MarkAsControl();
// Convenience overload without an explicit flags continuation.
588 template <typename TryMatchShift>
589 void VisitShift(InstructionSelector* selector, Node* node,
590 TryMatchShift try_match_shift) {
591 FlagsContinuation cont;
592 VisitShift(selector, node, try_match_shift, &cont);
// Word32Shl: select as MOV with an LSL operand-2.
598 void InstructionSelector::VisitWord32Shl(Node* node) {
599 VisitShift(this, node, TryMatchLSL);
// Word32Shr: on ARMv7, (x & mask) >> lsb with a contiguous mask becomes a
// single UBFX; otherwise select as MOV with an LSR operand-2.
603 void InstructionSelector::VisitWord32Shr(Node* node) {
604 ArmOperandGenerator g(this);
605 Int32BinopMatcher m(node);
606 if (IsSupported(ARMv7) && m.left().IsWord32And() &&
607 m.right().IsInRange(0, 31)) {
608 uint32_t lsb = m.right().Value();
609 Int32BinopMatcher mleft(m.left().node());
610 if (mleft.right().HasValue()) {
// Discard mask bits below lsb: they are shifted out anyway.
611 uint32_t value = (mleft.right().Value() >> lsb) << lsb;
612 uint32_t width = base::bits::CountPopulation32(value);
613 uint32_t msb = base::bits::CountLeadingZeros32(value);
// Contiguous run of set bits starting at lsb => bitfield extract.
614 if (msb + width + lsb == 32) {
615 DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
616 return EmitUbfx(this, node, mleft.left().node(), lsb, width);
620 VisitShift(this, node, TryMatchLSR);
// Word32Sar: (x << 16) >> 16 becomes SXTH and (x << 24) >> 24 becomes SXTB
// (sign extension); otherwise select as MOV with an ASR operand-2.
624 void InstructionSelector::VisitWord32Sar(Node* node) {
625 ArmOperandGenerator g(this);
626 Int32BinopMatcher m(node);
627 if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
628 Int32BinopMatcher mleft(m.left().node());
629 if (mleft.right().Is(16) && m.right().Is(16)) {
630 Emit(kArmSxth, g.DefineAsRegister(node),
631 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
633 } else if (mleft.right().Is(24) && m.right().Is(24)) {
634 Emit(kArmSxtb, g.DefineAsRegister(node),
635 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
639 VisitShift(this, node, TryMatchASR);
// Word32Ror: select as MOV with a ROR operand-2.
643 void InstructionSelector::VisitWord32Ror(Node* node) {
644 VisitShift(this, node, TryMatchROR);
// Int32Add selection with operand fusion: mul+add => MLA, mulhigh+add =>
// SMMLA, (x & 0xff/0xffff)+y => UXTAB/UXTAH, and shl+sar sign-extensions
// combined with add => SXTAB/SXTAH. Both operand orders are tried; falls
// back to the generic ADD binop.
648 void InstructionSelector::VisitInt32Add(Node* node) {
649 ArmOperandGenerator g(this);
650 Int32BinopMatcher m(node);
651 if (CanCover(node, m.left().node())) {
652 switch (m.left().opcode()) {
// (a * b) + c => MLA a, b, c.
653 case IrOpcode::kInt32Mul: {
654 Int32BinopMatcher mleft(m.left().node());
655 Emit(kArmMla, g.DefineAsRegister(node),
656 g.UseRegister(mleft.left().node()),
657 g.UseRegister(mleft.right().node()),
658 g.UseRegister(m.right().node()));
// MulHigh(a, b) + c => SMMLA a, b, c.
661 case IrOpcode::kInt32MulHigh: {
662 Int32BinopMatcher mleft(m.left().node());
663 Emit(kArmSmmla, g.DefineAsRegister(node),
664 g.UseRegister(mleft.left().node()),
665 g.UseRegister(mleft.right().node()),
666 g.UseRegister(m.right().node()));
// (a & 0xff) + c => UXTAB; (a & 0xffff) + c => UXTAH.
669 case IrOpcode::kWord32And: {
670 Int32BinopMatcher mleft(m.left().node());
671 if (mleft.right().Is(0xff)) {
672 Emit(kArmUxtab, g.DefineAsRegister(node),
673 g.UseRegister(m.right().node()),
674 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
676 } else if (mleft.right().Is(0xffff)) {
677 Emit(kArmUxtah, g.DefineAsRegister(node),
678 g.UseRegister(m.right().node()),
679 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
// ((a << 24) >> 24) + c => SXTAB; ((a << 16) >> 16) + c => SXTAH.
683 case IrOpcode::kWord32Sar: {
684 Int32BinopMatcher mleft(m.left().node());
685 if (CanCover(mleft.node(), mleft.left().node()) &&
686 mleft.left().IsWord32Shl()) {
687 Int32BinopMatcher mleftleft(mleft.left().node());
688 if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
689 Emit(kArmSxtab, g.DefineAsRegister(node),
690 g.UseRegister(m.right().node()),
691 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
693 } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
694 Emit(kArmSxtah, g.DefineAsRegister(node),
695 g.UseRegister(m.right().node()),
696 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
// Mirror of the patterns above with the roles of left/right swapped.
705 if (CanCover(node, m.right().node())) {
706 switch (m.right().opcode()) {
707 case IrOpcode::kInt32Mul: {
708 Int32BinopMatcher mright(m.right().node());
709 Emit(kArmMla, g.DefineAsRegister(node),
710 g.UseRegister(mright.left().node()),
711 g.UseRegister(mright.right().node()),
712 g.UseRegister(m.left().node()));
715 case IrOpcode::kInt32MulHigh: {
716 Int32BinopMatcher mright(m.right().node());
717 Emit(kArmSmmla, g.DefineAsRegister(node),
718 g.UseRegister(mright.left().node()),
719 g.UseRegister(mright.right().node()),
720 g.UseRegister(m.left().node()));
723 case IrOpcode::kWord32And: {
724 Int32BinopMatcher mright(m.right().node());
725 if (mright.right().Is(0xff)) {
726 Emit(kArmUxtab, g.DefineAsRegister(node),
727 g.UseRegister(m.left().node()),
728 g.UseRegister(mright.left().node()), g.TempImmediate(0));
730 } else if (mright.right().Is(0xffff)) {
731 Emit(kArmUxtah, g.DefineAsRegister(node),
732 g.UseRegister(m.left().node()),
733 g.UseRegister(mright.left().node()), g.TempImmediate(0));
737 case IrOpcode::kWord32Sar: {
738 Int32BinopMatcher mright(m.right().node());
739 if (CanCover(mright.node(), mright.left().node()) &&
740 mright.left().IsWord32Shl()) {
741 Int32BinopMatcher mrightleft(mright.left().node());
742 if (mright.right().Is(24) && mrightleft.right().Is(24)) {
743 Emit(kArmSxtab, g.DefineAsRegister(node),
744 g.UseRegister(m.left().node()),
745 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
747 } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
748 Emit(kArmSxtah, g.DefineAsRegister(node),
749 g.UseRegister(m.left().node()),
750 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
// No fusion applied: generic ADD (commutative).
759 VisitBinop(this, node, kArmAdd, kArmAdd);
// Int32Sub: a - (b * c) => MLS b, c, a when the MLS instruction is available;
// otherwise generic SUB, with RSB as the commuted form.
763 void InstructionSelector::VisitInt32Sub(Node* node) {
764 ArmOperandGenerator g(this);
765 Int32BinopMatcher m(node);
766 if (IsSupported(MLS) && m.right().IsInt32Mul() &&
767 CanCover(node, m.right().node())) {
768 Int32BinopMatcher mright(m.right().node());
769 Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
770 g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
773 VisitBinop(this, node, kArmSub, kArmRsb);
// Int32Mul with strength reduction: x * (2^k + 1) => ADD x, x LSL #k and
// x * (2^k - 1) => RSB x, x LSL #k; otherwise a plain MUL.
777 void InstructionSelector::VisitInt32Mul(Node* node) {
778 ArmOperandGenerator g(this);
779 Int32BinopMatcher m(node);
780 if (m.right().HasValue() && m.right().Value() > 0) {
781 int32_t value = m.right().Value();
782 if (base::bits::IsPowerOfTwo32(value - 1)) {
783 Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
784 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
785 g.UseRegister(m.left().node()),
786 g.TempImmediate(WhichPowerOf2(value - 1)));
// value < kMaxInt guards the overflow of value + 1 below.
789 if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
790 Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
791 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
792 g.UseRegister(m.left().node()),
793 g.TempImmediate(WhichPowerOf2(value + 1)));
797 Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
798 g.UseRegister(m.right().node()));
// Int32MulHigh: SMMUL returns the high 32 bits of the signed 64-bit product.
802 void InstructionSelector::VisitInt32MulHigh(Node* node) {
803 ArmOperandGenerator g(this);
804 Emit(kArmSmmul, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
805 g.UseRegister(node->InputAt(1)));
// Uint32MulHigh: UMULL produces a 64-bit product in two registers; the low
// half goes to a temp and the high half is the node's result.
809 void InstructionSelector::VisitUint32MulHigh(Node* node) {
810 ArmOperandGenerator g(this);
811 InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
812 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
813 g.UseRegister(node->InputAt(1))};
814 Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
// Emits an integer division. With SUDIV, a single hardware divide; otherwise
// falls back to converting both operands to float64, dividing with VDIV, and
// converting the quotient back to an integer.
818 static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
819 ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
820 InstructionOperand result_operand,
821 InstructionOperand left_operand,
822 InstructionOperand right_operand) {
823 ArmOperandGenerator g(selector);
824 if (selector->IsSupported(SUDIV)) {
825 selector->Emit(div_opcode, result_operand, left_operand, right_operand);
// Software fallback through double-precision floating point.
828 InstructionOperand left_double_operand = g.TempDoubleRegister();
829 InstructionOperand right_double_operand = g.TempDoubleRegister();
830 InstructionOperand result_double_operand = g.TempDoubleRegister();
831 selector->Emit(f64i32_opcode, left_double_operand, left_operand);
832 selector->Emit(f64i32_opcode, right_double_operand, right_operand);
833 selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
834 right_double_operand);
835 selector->Emit(i32f64_opcode, result_operand, result_double_operand);
// Shared routine for signed/unsigned 32-bit division nodes; the int<->float
// conversion opcodes parameterize the non-SUDIV fallback in EmitDiv.
839 static void VisitDiv(InstructionSelector* selector, Node* node,
840 ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
841 ArchOpcode i32f64_opcode) {
842 ArmOperandGenerator g(selector);
843 Int32BinopMatcher m(node);
844 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
845 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
846 g.UseRegister(m.right().node()));
// Signed division: SDIV, with signed int<->float64 conversions for fallback.
850 void InstructionSelector::VisitInt32Div(Node* node) {
851 VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
// Unsigned division: UDIV, with unsigned int<->float64 conversions for fallback.
855 void InstructionSelector::VisitUint32Div(Node* node) {
856 VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
// Shared routine for 32-bit modulus: computes the quotient via EmitDiv, then
// the remainder as left - quotient * right — a single MLS when available,
// otherwise a MUL followed by a SUB.
860 static void VisitMod(InstructionSelector* selector, Node* node,
861 ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
862 ArchOpcode i32f64_opcode) {
863 ArmOperandGenerator g(selector);
864 Int32BinopMatcher m(node);
865 InstructionOperand div_operand = g.TempRegister();
866 InstructionOperand result_operand = g.DefineAsRegister(node);
867 InstructionOperand left_operand = g.UseRegister(m.left().node());
868 InstructionOperand right_operand = g.UseRegister(m.right().node());
869 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
870 left_operand, right_operand);
871 if (selector->IsSupported(MLS)) {
872 selector->Emit(kArmMls, result_operand, div_operand, right_operand,
// No MLS: multiply the quotient back and subtract.
876 InstructionOperand mul_operand = g.TempRegister();
877 selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
878 selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
// Signed modulus via SDIV-based VisitMod.
882 void InstructionSelector::VisitInt32Mod(Node* node) {
883 VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
// Unsigned modulus via UDIV-based VisitMod.
887 void InstructionSelector::VisitUint32Mod(Node* node) {
888 VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
// float32 -> float64 widening conversion (VCVT.F64.F32).
892 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
893 ArmOperandGenerator g(this);
894 Emit(kArmVcvtF64F32, g.DefineAsRegister(node),
895 g.UseRegister(node->InputAt(0)));
// signed int32 -> float64 conversion.
899 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
900 ArmOperandGenerator g(this);
901 Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
902 g.UseRegister(node->InputAt(0)));
// unsigned int32 -> float64 conversion.
906 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
907 ArmOperandGenerator g(this);
908 Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
909 g.UseRegister(node->InputAt(0)));
// float64 -> signed int32 conversion.
913 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
914 ArmOperandGenerator g(this);
915 Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
916 g.UseRegister(node->InputAt(0)));
// float64 -> unsigned int32 conversion.
920 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
921 ArmOperandGenerator g(this);
922 Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
923 g.UseRegister(node->InputAt(0)));
// float64 -> float32 narrowing conversion (VCVT.F32.F64).
927 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
928 ArmOperandGenerator g(this);
929 Emit(kArmVcvtF32F64, g.DefineAsRegister(node),
930 g.UseRegister(node->InputAt(0)));
// Float64Add: fuses (a * b) + c into VMLA (multiply-accumulate), trying both
// operand orders; otherwise a plain VADD. VMLA accumulates into its first
// operand, hence DefineSameAsFirst with the addend first.
934 void InstructionSelector::VisitFloat64Add(Node* node) {
935 ArmOperandGenerator g(this);
936 Float64BinopMatcher m(node);
937 if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
938 Float64BinopMatcher mleft(m.left().node());
939 Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
940 g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
941 g.UseRegister(mleft.right().node()));
944 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
945 Float64BinopMatcher mright(m.right().node());
946 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
947 g.UseRegister(mright.left().node()),
948 g.UseRegister(mright.right().node()));
951 VisitRRRFloat64(this, kArmVaddF64, node);
// Float64Sub: -0.0 - x selects VNEG; a - (b * c) fuses into VMLS
// (multiply-subtract, accumulating into the first operand); else plain VSUB.
955 void InstructionSelector::VisitFloat64Sub(Node* node) {
956 ArmOperandGenerator g(this);
957 Float64BinopMatcher m(node);
958 if (m.left().IsMinusZero()) {
959 Emit(kArmVnegF64, g.DefineAsRegister(node),
960 g.UseRegister(m.right().node()));
963 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
964 Float64BinopMatcher mright(m.right().node());
965 Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
966 g.UseRegister(mright.left().node()),
967 g.UseRegister(mright.right().node()));
970 VisitRRRFloat64(this, kArmVsubF64, node);
// Float64Mul: plain VMUL.
974 void InstructionSelector::VisitFloat64Mul(Node* node) {
975 VisitRRRFloat64(this, kArmVmulF64, node);
// Float64Div: plain VDIV.
979 void InstructionSelector::VisitFloat64Div(Node* node) {
980 VisitRRRFloat64(this, kArmVdivF64, node);
// Float64Mod lowers to a runtime call (MarkAsCall) with the ARM FP calling
// convention's fixed registers: arguments in d0/d1, result in d0.
984 void InstructionSelector::VisitFloat64Mod(Node* node) {
985 ArmOperandGenerator g(this);
986 Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
987 g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
// Float64Sqrt: VSQRT.
991 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
992 ArmOperandGenerator g(this);
993 Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
// Float64Floor: ARMv8-only rounding instruction (selector guarantees support).
997 void InstructionSelector::VisitFloat64Floor(Node* node) {
998 DCHECK(CpuFeatures::IsSupported(ARMv8));
999 VisitRRFloat64(this, kArmVfloorF64, node);
// Float64Ceil: ARMv8-only rounding instruction.
1003 void InstructionSelector::VisitFloat64Ceil(Node* node) {
1004 DCHECK(CpuFeatures::IsSupported(ARMv8));
1005 VisitRRFloat64(this, kArmVceilF64, node);
// Float64RoundTruncate (round toward zero): ARMv8-only.
1009 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
1010 DCHECK(CpuFeatures::IsSupported(ARMv8));
1011 VisitRRFloat64(this, kArmVroundTruncateF64, node);
// Float64RoundTiesAway (round to nearest, ties away from zero): ARMv8-only.
1015 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1016 DCHECK(CpuFeatures::IsSupported(ARMv8));
1017 VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
// Selects a call: builds the call buffer, pushes stack arguments in reverse
// order, picks the opcode from the call kind, and emits the call marked as
// such for the register allocator.
1021 void InstructionSelector::VisitCall(Node* node) {
1022 ArmOperandGenerator g(this);
1023 const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
1025 FrameStateDescriptor* frame_state_descriptor = NULL;
1026 if (descriptor->NeedsFrameState()) {
1027 frame_state_descriptor =
1028 GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
1031 CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
1033 // Compute InstructionOperands for inputs and outputs.
1034 // TODO(turbofan): on ARM64 it's probably better to use the code object in a
1035 // register if there are multiple uses of it. Improve constant pool and the
1036 // heuristics in the register allocator for where to emit constants.
1037 InitializeCallBuffer(node, &buffer, true, false);
1039 // TODO(dcarney): might be possible to use claim/poke instead
1040 // Push any stack arguments.
1041 for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
1043 Emit(kArmPush, g.NoOutput(), g.UseRegister(*i));
1046 // Select the appropriate opcode based on the call type.
1047 InstructionCode opcode;
1048 switch (descriptor->kind()) {
1049 case CallDescriptor::kCallCodeObject: {
1050 opcode = kArchCallCodeObject;
1053 case CallDescriptor::kCallJSFunction:
1054 opcode = kArchCallJSFunction;
1060 opcode |= MiscField::encode(descriptor->flags());
1062 // Emit the call instruction.
1063 InstructionOperand* first_output =
1064 buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
1065 Instruction* call_instr =
1066 Emit(opcode, buffer.outputs.size(), first_output,
1067 buffer.instruction_args.size(), &buffer.instruction_args.front());
1068 call_instr->MarkAsCall();
1074 // Shared routine for multiple float compare operations.
// Emits VCMP; a right-hand constant 0.0 uses the compare-with-zero immediate
// form. Branch continuations append block labels; set continuations define a
// boolean result register.
1075 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1076 FlagsContinuation* cont) {
1077 ArmOperandGenerator g(selector);
1078 Float64BinopMatcher m(node);
1079 InstructionOperand rhs = m.right().Is(0.0) ? g.UseImmediate(m.right().node())
1080 : g.UseRegister(m.right().node());
1081 if (cont->IsBranch()) {
1082 selector->Emit(cont->Encode(kArmVcmpF64), g.NoOutput(),
1083 g.UseRegister(m.left().node()), rhs,
1084 g.Label(cont->true_block()),
1085 g.Label(cont->false_block()))->MarkAsControl();
1087 DCHECK(cont->IsSet());
1088 selector->Emit(cont->Encode(kArmVcmpF64),
1089 g.DefineAsRegister(cont->result()),
1090 g.UseRegister(m.left().node()), rhs);
1095 // Shared routine for multiple word compare operations.
1096 void VisitWordCompare(InstructionSelector* selector, Node* node,
1097 InstructionCode opcode, FlagsContinuation* cont) {
1098 ArmOperandGenerator g(selector);
1099 Int32BinopMatcher m(node);
1100 InstructionOperand inputs[5];
1101 size_t input_count = 0;
1102 InstructionOperand outputs[1];
1103 size_t output_count = 0;
1105 if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
1106 &input_count, &inputs[1])) {
1107 inputs[0] = g.UseRegister(m.left().node());
1109 } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
1110 &input_count, &inputs[1])) {
1111 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1112 inputs[0] = g.UseRegister(m.right().node());
1115 opcode |= AddressingModeField::encode(kMode_Operand2_R);
1116 inputs[input_count++] = g.UseRegister(m.left().node());
1117 inputs[input_count++] = g.UseRegister(m.right().node());
1120 if (cont->IsBranch()) {
1121 inputs[input_count++] = g.Label(cont->true_block());
1122 inputs[input_count++] = g.Label(cont->false_block());
1124 DCHECK(cont->IsSet());
1125 outputs[output_count++] = g.DefineAsRegister(cont->result());
1128 DCHECK_NE(0u, input_count);
1129 DCHECK_GE(arraysize(inputs), input_count);
1130 DCHECK_GE(arraysize(outputs), output_count);
1132 Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
1133 outputs, input_count, inputs);
1134 if (cont->IsBranch()) instr->MarkAsControl();
1138 void VisitWordCompare(InstructionSelector* selector, Node* node,
1139 FlagsContinuation* cont) {
1140 VisitWordCompare(selector, node, kArmCmp, cont);
// Shared routine for word comparisons against zero.
//
// While the selector can cover {value} as part of {user}, tries to fuse the
// operation that produces {value} directly into the flags continuation
// {cont} instead of materializing a boolean and testing it. Falls through to
// an explicit compare against zero when no fusion applies.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          value = m.left().node();
        // Non-zero right-hand side: fuse as an ordinary equality compare.
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      // Integer comparisons set the condition flags themselves; the
      // continuation simply adopts the matching condition.
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      // Float64 comparisons go through VCMP. NOTE(review): the unsigned
      // conditions here presumably match the ARM FP flag encodings for
      // < and <= — confirm against the assembler.
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kArmAdd, kArmAdd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                // Subtraction is not commutative; RSB is the reverse form.
                return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
      // Arithmetic/logical nodes can set the flags via their flag-setting
      // counterparts (CMN/CMP/TST/TEQ) without a result register.
      case IrOpcode::kInt32Add:
        return VisitWordCompare(selector, value, kArmCmn, cont);
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, kArmCmp, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kArmTst, cont);
      case IrOpcode::kWord32Or:
        return VisitBinop(selector, value, kArmOrr, kArmOrr, cont);
      case IrOpcode::kWord32Xor:
        return VisitWordCompare(selector, value, kArmTeq, cont);
      // Shift nodes are handled by VisitShift with the matching matcher.
      case IrOpcode::kWord32Sar:
        return VisitShift(selector, value, TryMatchASR, cont);
      case IrOpcode::kWord32Shl:
        return VisitShift(selector, value, TryMatchLSL, cont);
      case IrOpcode::kWord32Shr:
        return VisitShift(selector, value, TryMatchLSR, cont);
      case IrOpcode::kWord32Ror:
        return VisitShift(selector, value, TryMatchROR, cont);
  // Continuation could not be combined with a compare, emit compare against 0.
  ArmOperandGenerator g(selector);
  InstructionCode const opcode =
      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
  // TST of {value} against itself leaves the flags as a compare with zero.
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
                   g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
1250 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1251 BasicBlock* fbranch) {
1252 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1253 VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
// Lowers a switch to either an ArchTableSwitch (jump table) or an
// ArchLookupSwitch (sequence of conditional jumps), picking the variant the
// space/time heuristic below deems cheaper.
void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
                                      BasicBlock** case_branches,
                                      int32_t* case_values, size_t case_count,
                                      int32_t min_value, int32_t max_value) {
  ArmOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
  InstructionOperand default_operand = g.Label(default_branch);
  // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
  // is 2^31-1, so don't assume that it's non-zero below.
  size_t value_range =
      1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
  // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
  // instruction. NOTE(review): the 3x time weighting below looks empirically
  // tuned — confirm before changing.
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  if (case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      min_value > std::numeric_limits<int32_t>::min()) {
    // Table switch: bias the value by {min_value} so the table is indexed
    // from zero.
    InstructionOperand index_operand = value_operand;
      index_operand = g.TempRegister();
      Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
           index_operand, value_operand, g.TempImmediate(min_value));
    // inputs[0] is the index, inputs[1] the default label; the remaining
    // {value_range} slots hold one label per representable value.
    size_t input_count = 2 + value_range;
    auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
    inputs[0] = index_operand;
    std::fill(&inputs[1], &inputs[input_count], default_operand);
    for (size_t index = 0; index < case_count; ++index) {
      size_t value = case_values[index] - min_value;
      BasicBlock* branch = case_branches[index];
      DCHECK_LE(0u, value);  // {value} is unsigned; this documents intent.
      DCHECK_LT(value + 2, input_count);
      inputs[value + 2] = g.Label(branch);
    Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
  // Generate a sequence of conditional jumps.
  // Layout: inputs[0] = switch value, inputs[1] = default label, followed by
  // one (immediate, label) pair per case.
  size_t input_count = 2 + case_count * 2;
  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
  inputs[0] = value_operand;
  inputs[1] = default_operand;
  for (size_t index = 0; index < case_count; ++index) {
    int32_t value = case_values[index];
    BasicBlock* branch = case_branches[index];
    inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
    inputs[index * 2 + 2 + 1] = g.Label(branch);
  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
1318 void InstructionSelector::VisitWord32Equal(Node* const node) {
1319 FlagsContinuation cont(kEqual, node);
1320 Int32BinopMatcher m(node);
1321 if (m.right().Is(0)) {
1322 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1324 VisitWordCompare(this, node, &cont);
1328 void InstructionSelector::VisitInt32LessThan(Node* node) {
1329 FlagsContinuation cont(kSignedLessThan, node);
1330 VisitWordCompare(this, node, &cont);
1334 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1335 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1336 VisitWordCompare(this, node, &cont);
1340 void InstructionSelector::VisitUint32LessThan(Node* node) {
1341 FlagsContinuation cont(kUnsignedLessThan, node);
1342 VisitWordCompare(this, node, &cont);
1346 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1347 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1348 VisitWordCompare(this, node, &cont);
1352 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1353 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1354 FlagsContinuation cont(kOverflow, ovf);
1355 return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
1357 FlagsContinuation cont;
1358 VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
1362 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1363 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1364 FlagsContinuation cont(kOverflow, ovf);
1365 return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
1367 FlagsContinuation cont;
1368 VisitBinop(this, node, kArmSub, kArmRsb, &cont);
1372 void InstructionSelector::VisitFloat64Equal(Node* node) {
1373 FlagsContinuation cont(kEqual, node);
1374 VisitFloat64Compare(this, node, &cont);
1378 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1379 FlagsContinuation cont(kUnsignedLessThan, node);
1380 VisitFloat64Compare(this, node, &cont);
1384 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1385 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1386 VisitFloat64Compare(this, node, &cont);
1391 MachineOperatorBuilder::Flags
1392 InstructionSelector::SupportedMachineOperatorFlags() {
1393 MachineOperatorBuilder::Flags flags =
1394 MachineOperatorBuilder::kInt32DivIsSafe |
1395 MachineOperatorBuilder::kUint32DivIsSafe;
1397 if (CpuFeatures::IsSupported(ARMv8)) {
1398 flags |= MachineOperatorBuilder::kFloat64Floor |
1399 MachineOperatorBuilder::kFloat64Ceil |
1400 MachineOperatorBuilder::kFloat64RoundTruncate |
1401 MachineOperatorBuilder::kFloat64RoundTiesAway;
1406 } // namespace compiler
1407 } // namespace internal