1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/base/bits.h"
6 #include "src/compiler/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
14 // Adds Arm-specific methods for generating InstructionOperands.
// NOTE(review): this excerpt elides several lines (access specifiers, some
// case labels, and closing braces); the listing below is not contiguous.
15 class ArmOperandGenerator : public OperandGenerator {
17 explicit ArmOperandGenerator(InstructionSelector* selector)
18 : OperandGenerator(selector) {}
// Returns true iff |value| is encodable as an ARM addressing-mode-1
// (Operand2) immediate, per the Assembler's encoding check.
20 bool CanBeImmediate(int32_t value) const {
21 return Assembler::ImmediateFitsAddrMode1Instruction(value);
// Unsigned variant: reinterprets the bits as int32 and defers to the
// signed overload.
24 bool CanBeImmediate(uint32_t value) const {
25 return CanBeImmediate(bit_cast<int32_t>(value));
// Returns true iff |node| is a constant that can be used directly as the
// immediate operand of |opcode|.  The admissible range depends on the
// instruction group (case labels are elided in this excerpt).
28 bool CanBeImmediate(Node* node, InstructionCode opcode) {
30 if (!m.HasValue()) return false;
31 int32_t value = m.Value();
32 switch (ArchOpcodeField::decode(opcode)) {
// Logical-style opcodes: the value or its bitwise complement must encode.
37 return CanBeImmediate(value) || CanBeImmediate(~value);
// Add/sub-style opcodes: the value or its negation must encode.
43 return CanBeImmediate(value) || CanBeImmediate(-value);
50 return CanBeImmediate(value);
// VFP load/store offsets: word-aligned, in [-1020, 1020].
56 return value >= -1020 && value <= 1020 && (value % 4) == 0;
63 case kArmStoreWriteBarrier:
// Word/byte load/store offsets: 12-bit range [-4095, 4095].
64 return value >= -4095 && value <= 4095;
// Remaining load/store forms: 8-bit offset range [-255, 255].
69 return value >= -255 && value <= 255;
// Shared routine: emits a one-input float64 instruction with both the
// result and the input in (unconstrained) registers.
81 void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
83 ArmOperandGenerator g(selector);
84 selector->Emit(opcode, g.DefineAsRegister(node),
85 g.UseRegister(node->InputAt(0)));
// Shared routine: emits a two-input float64 instruction, result and both
// inputs in registers.
89 void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
91 ArmOperandGenerator g(selector);
92 selector->Emit(opcode, g.DefineAsRegister(node),
93 g.UseRegister(node->InputAt(0)),
94 g.UseRegister(node->InputAt(1)));
// Tries to match |node| as a shift of kind kOpcode usable as a flexible
// second operand.  On success, *value_return receives the shifted value
// (as a register use), *shift_return receives the shift amount — an
// immediate when it lies in [kImmMin, kImmMax], otherwise a register —
// and the corresponding addressing mode is OR-ed into *opcode_return.
// NOTE(review): the 'else' line and the closing return statements are
// elided in this excerpt.
98 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
99 AddressingMode kImmMode, AddressingMode kRegMode>
100 bool TryMatchShift(InstructionSelector* selector,
101 InstructionCode* opcode_return, Node* node,
102 InstructionOperand* value_return,
103 InstructionOperand* shift_return) {
104 ArmOperandGenerator g(selector);
105 if (node->opcode() == kOpcode) {
106 Int32BinopMatcher m(node);
107 *value_return = g.UseRegister(m.left().node());
108 if (m.right().IsInRange(kImmMin, kImmMax)) {
// Immediate shift amount: encode the *_I addressing mode.
109 *opcode_return |= AddressingModeField::encode(kImmMode);
110 *shift_return = g.UseImmediate(m.right().node());
// (elided 'else') Register shift amount: encode the *_R addressing mode.
112 *opcode_return |= AddressingModeField::encode(kRegMode);
113 *shift_return = g.UseRegister(m.right().node());
// Thin wrappers over the TryMatchShift template, one per ARM shift kind.
// Note the immediate ranges: ROR takes [1, 31]; ASR and LSR take [1, 32]
// (a 32-bit arithmetic/logical shift-by-32 is encodable); LSL takes
// [0, 31].
121 bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
122 Node* node, InstructionOperand* value_return,
123 InstructionOperand* shift_return) {
124 return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
125 kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
126 value_return, shift_return);
130 bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
131 Node* node, InstructionOperand* value_return,
132 InstructionOperand* shift_return) {
133 return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
134 kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
135 value_return, shift_return);
139 bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
140 Node* node, InstructionOperand* value_return,
141 InstructionOperand* shift_return) {
142 return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
143 kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
144 value_return, shift_return);
148 bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
149 Node* node, InstructionOperand* value_return,
150 InstructionOperand* shift_return) {
151 return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
152 kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
153 value_return, shift_return);
// Tries each shift kind in turn (ASR, LSL, LSR, ROR) and returns whether
// any of them matched |node|.  NOTE(review): the leading 'return (' line
// is elided in this excerpt.
157 bool TryMatchShift(InstructionSelector* selector,
158 InstructionCode* opcode_return, Node* node,
159 InstructionOperand* value_return,
160 InstructionOperand* shift_return) {
162 TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
163 TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
164 TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
165 TryMatchROR(selector, opcode_return, node, value_return, shift_return));
// Tries to place |node| as either an immediate operand (1 input slot) or
// a shift operand pair (2 input slots) into |inputs|.  Writes how many
// slots were consumed to *input_count_return and updates the addressing
// mode in *opcode_return.  NOTE(review): the intervening 'return true;'
// lines and the final 'return false;' are elided in this excerpt.
169 bool TryMatchImmediateOrShift(InstructionSelector* selector,
170 InstructionCode* opcode_return, Node* node,
171 size_t* input_count_return,
172 InstructionOperand* inputs) {
173 ArmOperandGenerator g(selector);
174 if (g.CanBeImmediate(node, *opcode_return)) {
175 *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
176 inputs[0] = g.UseImmediate(node);
177 *input_count_return = 1;
180 if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
181 *input_count_return = 2;
// Shared routine for binary operations: tries immediate/shift forms on the
// right operand first, then on the left operand using |reverse_opcode|
// (e.g. SUB vs RSB), and falls back to register-register.  Branch targets
// and a boolean materialization output are appended from |cont|.
// NOTE(review): several lines (else/closing braces, the cont->IsSet()
// branch header, and the final Emit argument) are elided in this excerpt.
188 void VisitBinop(InstructionSelector* selector, Node* node,
189 InstructionCode opcode, InstructionCode reverse_opcode,
190 FlagsContinuation* cont) {
191 ArmOperandGenerator g(selector);
192 Int32BinopMatcher m(node);
193 InstructionOperand inputs[5];
194 size_t input_count = 0;
195 InstructionOperand outputs[2];
196 size_t output_count = 0;
198 if (m.left().node() == m.right().node()) {
199 // If both inputs refer to the same operand, enforce allocating a register
200 // for both of them to ensure that we don't end up generating code like
203 // mov r0, r1, asr #16
204 // adds r0, r0, r1, asr #16
206 InstructionOperand const input = g.UseRegister(m.left().node());
207 opcode |= AddressingModeField::encode(kMode_Operand2_R);
208 inputs[input_count++] = input;
209 inputs[input_count++] = input;
// Right operand fits as an immediate or shift: left goes in a register.
210 } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
211 &input_count, &inputs[1])) {
212 inputs[0] = g.UseRegister(m.left().node());
// Left operand fits: swap the operands and use the reversed opcode.
214 } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
215 m.left().node(), &input_count,
217 inputs[0] = g.UseRegister(m.right().node());
218 opcode = reverse_opcode;
// (elided 'else') Fallback: plain register-register form.
221 opcode |= AddressingModeField::encode(kMode_Operand2_R);
222 inputs[input_count++] = g.UseRegister(m.left().node());
223 inputs[input_count++] = g.UseRegister(m.right().node());
226 if (cont->IsBranch()) {
227 inputs[input_count++] = g.Label(cont->true_block());
228 inputs[input_count++] = g.Label(cont->false_block());
231 outputs[output_count++] = g.DefineAsRegister(node);
// When the continuation materializes the flags, add its result register.
233 outputs[output_count++] = g.DefineAsRegister(cont->result());
236 DCHECK_NE(0u, input_count);
237 DCHECK_NE(0u, output_count);
238 DCHECK_GE(arraysize(inputs), input_count);
239 DCHECK_GE(arraysize(outputs), output_count);
240 DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
242 selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
// Convenience overload: binop with no flags continuation.
247 void VisitBinop(InstructionSelector* selector, Node* node,
248 InstructionCode opcode, InstructionCode reverse_opcode) {
249 FlagsContinuation cont;
250 VisitBinop(selector, node, opcode, reverse_opcode, &cont);
// Selects a load instruction by machine representation, then chooses the
// [base + immediate] addressing mode when the index constant fits the
// opcode's offset range, otherwise [base + register].  NOTE(review): most
// case labels of the representation switch are elided in this excerpt.
257 void InstructionSelector::VisitLoad(Node* node) {
258 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
259 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
260 ArmOperandGenerator g(this);
261 Node* base = node->InputAt(0);
262 Node* index = node->InputAt(1);
267 opcode = kArmVldrF32;
270 opcode = kArmVldrF64;
272 case kRepBit: // Fall through.
// Byte loads: zero-extend for unsigned, sign-extend otherwise.
274 opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
277 opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
279 case kRepTagged: // Fall through.
288 if (g.CanBeImmediate(index, opcode)) {
289 Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
290 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
// (elided 'else') Index does not fit as an immediate: use a register.
292 Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
293 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
// Selects a store instruction.  Stores needing a full write barrier are
// emitted as kArmStoreWriteBarrier with fixed registers (r4-r6); plain
// stores pick an opcode by representation and an RI/RR addressing mode
// like VisitLoad.  NOTE(review): several case labels and closing braces
// are elided in this excerpt.
298 void InstructionSelector::VisitStore(Node* node) {
299 ArmOperandGenerator g(this);
300 Node* base = node->InputAt(0);
301 Node* index = node->InputAt(1);
302 Node* value = node->InputAt(2);
304 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
305 MachineType rep = RepresentationOf(store_rep.machine_type());
306 if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
// Write barriers only apply to tagged values.
307 DCHECK(rep == kRepTagged);
308 // TODO(dcarney): refactor RecordWrite function to take temp registers
309 // and pass them here instead of using fixed regs
310 // TODO(dcarney): handle immediate indices.
311 InstructionOperand temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
312 Emit(kArmStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r4),
313 g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps), temps);
316 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
321 opcode = kArmVstrF32;
324 opcode = kArmVstrF64;
326 case kRepBit: // Fall through.
333 case kRepTagged: // Fall through.
342 if (g.CanBeImmediate(index, opcode)) {
343 Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
344 g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
// (elided 'else') Index does not fit as an immediate: use a register.
346 Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
347 g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
// Bounds-checked load: emits a checked-load opcode whose inputs are
// (offset, length, buffer, offset).  The offset operand appears twice —
// once for the bounds check against length and once as the address index.
// NOTE(review): the switch header and several case labels are elided in
// this excerpt.
352 void InstructionSelector::VisitCheckedLoad(Node* node) {
353 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
354 MachineType typ = TypeOf(OpParameter<MachineType>(node));
355 ArmOperandGenerator g(this);
356 Node* const buffer = node->InputAt(0);
357 Node* const offset = node->InputAt(1);
358 Node* const length = node->InputAt(2);
362 opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
365 opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
368 opcode = kCheckedLoadWord32;
371 opcode = kCheckedLoadFloat32;
374 opcode = kCheckedLoadFloat64;
380 InstructionOperand offset_operand = g.UseRegister(offset);
// The length may be an immediate when it fits a CMP immediate.
381 InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
382 ? g.UseImmediate(length)
383 : g.UseRegister(length);
384 Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
385 g.DefineAsRegister(node), offset_operand, length_operand,
386 g.UseRegister(buffer), offset_operand);
// Bounds-checked store: mirrors VisitCheckedLoad but with a value input
// and no output.  The offset operand again appears twice (bounds check
// and address index).  NOTE(review): the switch header and case labels
// are elided in this excerpt.
390 void InstructionSelector::VisitCheckedStore(Node* node) {
391 MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
392 ArmOperandGenerator g(this);
393 Node* const buffer = node->InputAt(0);
394 Node* const offset = node->InputAt(1);
395 Node* const length = node->InputAt(2);
396 Node* const value = node->InputAt(3);
400 opcode = kCheckedStoreWord8;
403 opcode = kCheckedStoreWord16;
406 opcode = kCheckedStoreWord32;
409 opcode = kCheckedStoreFloat32;
412 opcode = kCheckedStoreFloat64;
418 InstructionOperand offset_operand = g.UseRegister(offset);
// The length may be an immediate when it fits a CMP immediate.
419 InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
420 ? g.UseImmediate(length)
421 : g.UseRegister(length);
422 Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
423 offset_operand, length_operand, g.UseRegister(value),
424 g.UseRegister(buffer), offset_operand);
// Emits an ARM BIC (bit-clear: left & ~right), folding a shift of |right|
// into the instruction's second operand when possible.  NOTE(review): the
// second parameter line, a 'return;' and closing braces are elided in
// this excerpt.
430 void EmitBic(InstructionSelector* selector, Node* node, Node* left,
432 ArmOperandGenerator g(selector);
433 InstructionCode opcode = kArmBic;
434 InstructionOperand value_operand;
435 InstructionOperand shift_operand;
436 if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
437 selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
438 value_operand, shift_operand);
// Fallback: plain register form of BIC.
441 selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
442 g.DefineAsRegister(node), g.UseRegister(left),
443 g.UseRegister(right));
// Emits an ARM UBFX (unsigned bit-field extract) of |width| bits starting
// at bit |lsb| of |left|.  Width must be at least 1 and the field must
// fit within the 32-bit register (width <= 32 - lsb).
447 void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
448 uint32_t lsb, uint32_t width) {
449 DCHECK_LE(1u, width);
450 DCHECK_LE(width, 32u - lsb);
451 ArmOperandGenerator g(selector);
452 selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
453 g.TempImmediate(lsb), g.TempImmediate(width));
// Lowers Word32And, trying several strength reductions before the generic
// AND: (x ^ -1) & y => BIC; a contiguous low mask => UBFX (ARMv7);
// an encodable inverted mask => BIC with immediate; 0xffff => UXTH;
// a "hole" mask => BFC (ARMv7).  NOTE(review): 'return;' lines and some
// closing braces are elided in this excerpt.
459 void InstructionSelector::VisitWord32And(Node* node) {
460 ArmOperandGenerator g(this);
461 Int32BinopMatcher m(node);
// (x ^ -1) & y  =>  BIC y, x  (when the XOR is only used here).
462 if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
463 Int32BinopMatcher mleft(m.left().node());
464 if (mleft.right().Is(-1)) {
465 EmitBic(this, node, m.right().node(), mleft.left().node());
// Symmetric case: x & (y ^ -1)  =>  BIC x, y.
469 if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
470 Int32BinopMatcher mright(m.right().node());
471 if (mright.right().Is(-1)) {
472 EmitBic(this, node, m.left().node(), mright.left().node());
476 if (m.right().HasValue()) {
477 uint32_t const value = m.right().Value();
478 uint32_t width = base::bits::CountPopulation32(value);
479 uint32_t msb = base::bits::CountLeadingZeros32(value);
480 // Try to interpret this AND as UBFX.
// Mask is a contiguous run of low bits (popcount + leading zeros == 32).
481 if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
482 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
483 if (m.left().IsWord32Shr()) {
484 Int32BinopMatcher mleft(m.left().node());
485 if (mleft.right().IsInRange(0, 31)) {
486 // UBFX cannot extract bits past the register size, however since
487 // shifting the original value would have introduced some zeros we can
488 // still use UBFX with a smaller mask and the remaining bits will be
490 uint32_t const lsb = mleft.right().Value();
491 return EmitUbfx(this, node, mleft.left().node(), lsb,
492 std::min(width, 32 - lsb));
495 return EmitUbfx(this, node, m.left().node(), 0, width);
497 // Try to interpret this AND as BIC.
498 if (g.CanBeImmediate(~value)) {
499 Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
500 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
501 g.TempImmediate(~value));
504 // Try to interpret this AND as UXTH.
505 if (value == 0xffff) {
506 Emit(kArmUxth, g.DefineAsRegister(m.node()),
507 g.UseRegister(m.left().node()), g.TempImmediate(0));
510 // Try to interpret this AND as BFC.
511 if (IsSupported(ARMv7)) {
// Recompute width/msb/lsb for the *cleared* bits (~value): BFC zeroes
// a contiguous field, so the cleared bits must be contiguous.
513 msb = base::bits::CountLeadingZeros32(~value);
514 uint32_t lsb = base::bits::CountTrailingZeros32(~value);
515 if (msb + width + lsb == 32) {
516 Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
517 g.TempImmediate(lsb), g.TempImmediate(width));
// Fallback: generic AND (commutative, so the same opcode reversed).
522 VisitBinop(this, node, kArmAnd, kArmAnd);
// Word32Or lowers directly to ORR (commutative, same reversed opcode).
526 void InstructionSelector::VisitWord32Or(Node* node) {
527 VisitBinop(this, node, kArmOrr, kArmOrr);
// Lowers Word32Xor.  x ^ -1 becomes MVN (bitwise NOT), optionally folding
// a shift of x into the operand; everything else becomes EOR.
// NOTE(review): 'return;' lines and closing braces are elided in this
// excerpt.
531 void InstructionSelector::VisitWord32Xor(Node* node) {
532 ArmOperandGenerator g(this);
533 Int32BinopMatcher m(node);
534 if (m.right().Is(-1)) {
535 InstructionCode opcode = kArmMvn;
536 InstructionOperand value_operand;
537 InstructionOperand shift_operand;
538 if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
540 Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
// (elided 'else') Plain register form of MVN.
543 Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
544 g.DefineAsRegister(node), g.UseRegister(m.left().node()));
// Fallback: generic EOR.
547 VisitBinop(this, node, kArmEor, kArmEor);
// Shared routine for shift operations: emits a MOV whose second operand
// carries the shift, via the supplied matcher (which must succeed).
// Branch targets / a materialized boolean are appended from |cont|, as in
// VisitBinop.  NOTE(review): the cont->IsSet() branch header and the
// final Emit argument line are elided in this excerpt.
553 template <typename TryMatchShift>
554 void VisitShift(InstructionSelector* selector, Node* node,
555 TryMatchShift try_match_shift, FlagsContinuation* cont) {
556 ArmOperandGenerator g(selector);
557 InstructionCode opcode = kArmMov;
558 InstructionOperand inputs[4];
559 size_t input_count = 2;
560 InstructionOperand outputs[2];
561 size_t output_count = 0;
// The matcher fills inputs[0..1] and encodes the addressing mode.
563 CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
565 if (cont->IsBranch()) {
566 inputs[input_count++] = g.Label(cont->true_block());
567 inputs[input_count++] = g.Label(cont->false_block());
570 outputs[output_count++] = g.DefineAsRegister(node);
// When the continuation materializes the flags, add its result register.
572 outputs[output_count++] = g.DefineAsRegister(cont->result());
575 DCHECK_NE(0u, input_count);
576 DCHECK_NE(0u, output_count);
577 DCHECK_GE(arraysize(inputs), input_count);
578 DCHECK_GE(arraysize(outputs), output_count);
579 DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
581 selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
// Convenience overload: shift with no flags continuation.
586 template <typename TryMatchShift>
587 void VisitShift(InstructionSelector* selector, Node* node,
588 TryMatchShift try_match_shift) {
589 FlagsContinuation cont;
590 VisitShift(selector, node, try_match_shift, &cont);
// Word32Shl lowers to MOV with an LSL operand.
596 void InstructionSelector::VisitWord32Shl(Node* node) {
597 VisitShift(this, node, TryMatchLSL);
// Lowers Word32Shr.  (x & mask) >> lsb with a contiguous mask becomes a
// single UBFX on ARMv7; otherwise a plain LSR.  NOTE(review): a 'return'
// and closing braces are elided in this excerpt.
601 void InstructionSelector::VisitWord32Shr(Node* node) {
602 ArmOperandGenerator g(this);
603 Int32BinopMatcher m(node);
604 if (IsSupported(ARMv7) && m.left().IsWord32And() &&
605 m.right().IsInRange(0, 31)) {
606 uint32_t lsb = m.right().Value();
607 Int32BinopMatcher mleft(m.left().node());
608 if (mleft.right().HasValue()) {
// Clear the low |lsb| bits of the mask — they are shifted out anyway.
609 uint32_t value = (mleft.right().Value() >> lsb) << lsb;
610 uint32_t width = base::bits::CountPopulation32(value);
611 uint32_t msb = base::bits::CountLeadingZeros32(value);
// Remaining mask bits must be contiguous for UBFX.
612 if (msb + width + lsb == 32) {
613 DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
614 return EmitUbfx(this, node, mleft.left().node(), lsb, width);
// Fallback: plain logical shift right.
618 VisitShift(this, node, TryMatchLSR);
// Lowers Word32Sar.  (x << 16) >> 16 becomes SXTH and (x << 24) >> 24
// becomes SXTB (sign extensions); otherwise a plain ASR.  NOTE(review):
// 'return;' lines and closing braces are elided in this excerpt.
622 void InstructionSelector::VisitWord32Sar(Node* node) {
623 ArmOperandGenerator g(this);
624 Int32BinopMatcher m(node);
625 if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
626 Int32BinopMatcher mleft(m.left().node());
627 if (mleft.right().Is(16) && m.right().Is(16)) {
628 Emit(kArmSxth, g.DefineAsRegister(node),
629 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
631 } else if (mleft.right().Is(24) && m.right().Is(24)) {
632 Emit(kArmSxtb, g.DefineAsRegister(node),
633 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
// Fallback: plain arithmetic shift right.
637 VisitShift(this, node, TryMatchASR);
// Word32Ror lowers to MOV with a ROR operand.
641 void InstructionSelector::VisitWord32Ror(Node* node) {
642 VisitShift(this, node, TryMatchROR);
// Word32Clz lowers to the CLZ instruction.
646 void InstructionSelector::VisitWord32Clz(Node* node) {
647 ArmOperandGenerator g(this);
648 Emit(kArmClz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
// Lowers Int32Add, fusing a coverable operand on either side:
//   mul + add           => MLA        (multiply-accumulate)
//   mulhigh + add       => SMMLA
//   (x & 0xff) + y      => UXTAB      (zero-extend byte and add)
//   (x & 0xffff) + y    => UXTAH      (zero-extend halfword and add)
//   ((x<<24)>>24) + y   => SXTAB      (sign-extend byte and add)
//   ((x<<16)>>16) + y   => SXTAH      (sign-extend halfword and add)
// The left-operand and right-operand cases are symmetric.  NOTE(review):
// 'return;' lines, 'break;'/'default:' lines and closing braces are
// elided in this excerpt.
652 void InstructionSelector::VisitInt32Add(Node* node) {
653 ArmOperandGenerator g(this);
654 Int32BinopMatcher m(node);
655 if (CanCover(node, m.left().node())) {
656 switch (m.left().opcode()) {
657 case IrOpcode::kInt32Mul: {
658 Int32BinopMatcher mleft(m.left().node());
659 Emit(kArmMla, g.DefineAsRegister(node),
660 g.UseRegister(mleft.left().node()),
661 g.UseRegister(mleft.right().node()),
662 g.UseRegister(m.right().node()));
665 case IrOpcode::kInt32MulHigh: {
666 Int32BinopMatcher mleft(m.left().node());
667 Emit(kArmSmmla, g.DefineAsRegister(node),
668 g.UseRegister(mleft.left().node()),
669 g.UseRegister(mleft.right().node()),
670 g.UseRegister(m.right().node()));
673 case IrOpcode::kWord32And: {
674 Int32BinopMatcher mleft(m.left().node());
675 if (mleft.right().Is(0xff)) {
676 Emit(kArmUxtab, g.DefineAsRegister(node),
677 g.UseRegister(m.right().node()),
678 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
680 } else if (mleft.right().Is(0xffff)) {
681 Emit(kArmUxtah, g.DefineAsRegister(node),
682 g.UseRegister(m.right().node()),
683 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
687 case IrOpcode::kWord32Sar: {
688 Int32BinopMatcher mleft(m.left().node());
// Match (x << K) >> K on the left: a sign-extension idiom.
689 if (CanCover(mleft.node(), mleft.left().node()) &&
690 mleft.left().IsWord32Shl()) {
691 Int32BinopMatcher mleftleft(mleft.left().node());
692 if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
693 Emit(kArmSxtab, g.DefineAsRegister(node),
694 g.UseRegister(m.right().node()),
695 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
697 } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
698 Emit(kArmSxtah, g.DefineAsRegister(node),
699 g.UseRegister(m.right().node()),
700 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
// Same fusions with the roles of the operands swapped.
709 if (CanCover(node, m.right().node())) {
710 switch (m.right().opcode()) {
711 case IrOpcode::kInt32Mul: {
712 Int32BinopMatcher mright(m.right().node());
713 Emit(kArmMla, g.DefineAsRegister(node),
714 g.UseRegister(mright.left().node()),
715 g.UseRegister(mright.right().node()),
716 g.UseRegister(m.left().node()));
719 case IrOpcode::kInt32MulHigh: {
720 Int32BinopMatcher mright(m.right().node());
721 Emit(kArmSmmla, g.DefineAsRegister(node),
722 g.UseRegister(mright.left().node()),
723 g.UseRegister(mright.right().node()),
724 g.UseRegister(m.left().node()));
727 case IrOpcode::kWord32And: {
728 Int32BinopMatcher mright(m.right().node());
729 if (mright.right().Is(0xff)) {
730 Emit(kArmUxtab, g.DefineAsRegister(node),
731 g.UseRegister(m.left().node()),
732 g.UseRegister(mright.left().node()), g.TempImmediate(0));
734 } else if (mright.right().Is(0xffff)) {
735 Emit(kArmUxtah, g.DefineAsRegister(node),
736 g.UseRegister(m.left().node()),
737 g.UseRegister(mright.left().node()), g.TempImmediate(0));
741 case IrOpcode::kWord32Sar: {
742 Int32BinopMatcher mright(m.right().node());
743 if (CanCover(mright.node(), mright.left().node()) &&
744 mright.left().IsWord32Shl()) {
745 Int32BinopMatcher mrightleft(mright.left().node());
746 if (mright.right().Is(24) && mrightleft.right().Is(24)) {
747 Emit(kArmSxtab, g.DefineAsRegister(node),
748 g.UseRegister(m.left().node()),
749 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
751 } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
752 Emit(kArmSxtah, g.DefineAsRegister(node),
753 g.UseRegister(m.left().node()),
754 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
// Fallback: generic ADD (commutative).
763 VisitBinop(this, node, kArmAdd, kArmAdd);
// Lowers Int32Sub.  a - b * c becomes MLS (multiply-and-subtract) when
// the MLS instruction is available and the multiply is only used here;
// otherwise SUB, with RSB as the reversed form so an encodable left
// operand can still become an immediate.  NOTE(review): a 'return;' and
// closing brace are elided in this excerpt.
767 void InstructionSelector::VisitInt32Sub(Node* node) {
768 ArmOperandGenerator g(this);
769 Int32BinopMatcher m(node);
770 if (IsSupported(MLS) && m.right().IsInt32Mul() &&
771 CanCover(node, m.right().node())) {
772 Int32BinopMatcher mright(m.right().node());
773 Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
774 g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
777 VisitBinop(this, node, kArmSub, kArmRsb);
// Lowers Int32Mul.  Multiplication by (2^k + 1) becomes ADD with an LSL
// operand (x + (x << k)); by (2^k - 1) becomes RSB with an LSL operand
// ((x << k) - x); otherwise a plain MUL.  NOTE(review): 'return;' lines
// and closing braces are elided in this excerpt.
781 void InstructionSelector::VisitInt32Mul(Node* node) {
782 ArmOperandGenerator g(this);
783 Int32BinopMatcher m(node);
784 if (m.right().HasValue() && m.right().Value() > 0) {
785 int32_t value = m.right().Value();
// value == 2^k + 1:  x * value == x + (x << k).
786 if (base::bits::IsPowerOfTwo32(value - 1)) {
787 Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
788 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
789 g.UseRegister(m.left().node()),
790 g.TempImmediate(WhichPowerOf2(value - 1)));
// value == 2^k - 1:  x * value == (x << k) - x.  The kMaxInt guard
// avoids overflow when computing value + 1.
793 if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
794 Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
795 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
796 g.UseRegister(m.left().node()),
797 g.TempImmediate(WhichPowerOf2(value + 1)));
// Fallback: plain multiply.
801 Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
802 g.UseRegister(m.right().node()));
// Int32MulHigh lowers to SMMUL (signed most-significant-word multiply).
806 void InstructionSelector::VisitInt32MulHigh(Node* node) {
807 ArmOperandGenerator g(this);
808 Emit(kArmSmmul, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
809 g.UseRegister(node->InputAt(1)));
// Uint32MulHigh lowers to UMULL; the low word goes to a temp register and
// only the high word (the second output) defines the node.
813 void InstructionSelector::VisitUint32MulHigh(Node* node) {
814 ArmOperandGenerator g(this);
815 InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
816 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
817 g.UseRegister(node->InputAt(1))};
818 Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
// Emits a 32-bit division.  With hardware SUDIV, a single div instruction;
// otherwise falls back to converting both operands to float64, dividing
// with VDIV, and converting the result back.  NOTE(review): a 'return;',
// an 'else' and closing braces are elided in this excerpt.
822 static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
823 ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
824 InstructionOperand result_operand,
825 InstructionOperand left_operand,
826 InstructionOperand right_operand) {
827 ArmOperandGenerator g(selector);
828 if (selector->IsSupported(SUDIV)) {
829 selector->Emit(div_opcode, result_operand, left_operand, right_operand);
// Software fallback: int -> f64, f64 divide, f64 -> int.
832 InstructionOperand left_double_operand = g.TempDoubleRegister();
833 InstructionOperand right_double_operand = g.TempDoubleRegister();
834 InstructionOperand result_double_operand = g.TempDoubleRegister();
835 selector->Emit(f64i32_opcode, left_double_operand, left_operand);
836 selector->Emit(f64i32_opcode, right_double_operand, right_operand);
837 selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
838 right_double_operand);
839 selector->Emit(i32f64_opcode, result_operand, result_double_operand);
// Shared routine for (U)Int32Div: binds the node's operands and defers
// the opcode choice to EmitDiv.
843 static void VisitDiv(InstructionSelector* selector, Node* node,
844 ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
845 ArchOpcode i32f64_opcode) {
846 ArmOperandGenerator g(selector);
847 Int32BinopMatcher m(node);
848 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
849 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
850 g.UseRegister(m.right().node()));
// Signed division: SDIV, with signed f64 conversions as fallback.
854 void InstructionSelector::VisitInt32Div(Node* node) {
855 VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
// Unsigned division: UDIV, with unsigned f64 conversions as fallback.
859 void InstructionSelector::VisitUint32Div(Node* node) {
860 VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
// Shared routine for (U)Int32Mod: computes the quotient with EmitDiv,
// then the remainder as left - quotient * right — with a single MLS when
// available, otherwise MUL followed by SUB.  NOTE(review): the final MLS
// argument line, an 'else' and closing braces are elided in this excerpt.
864 static void VisitMod(InstructionSelector* selector, Node* node,
865 ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
866 ArchOpcode i32f64_opcode) {
867 ArmOperandGenerator g(selector);
868 Int32BinopMatcher m(node);
869 InstructionOperand div_operand = g.TempRegister();
870 InstructionOperand result_operand = g.DefineAsRegister(node);
871 InstructionOperand left_operand = g.UseRegister(m.left().node());
872 InstructionOperand right_operand = g.UseRegister(m.right().node());
873 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
874 left_operand, right_operand);
875 if (selector->IsSupported(MLS)) {
// result = left - div * right, in one instruction.
876 selector->Emit(kArmMls, result_operand, div_operand, right_operand,
// (elided 'else') Two-instruction fallback: MUL then SUB.
880 InstructionOperand mul_operand = g.TempRegister();
881 selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
882 selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
// Signed modulus via SDIV (or the signed f64 fallback path).
886 void InstructionSelector::VisitInt32Mod(Node* node) {
887 VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
// Unsigned modulus via UDIV (or the unsigned f64 fallback path).
891 void InstructionSelector::VisitUint32Mod(Node* node) {
892 VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
// Numeric conversion visitors: each lowers to the corresponding VFP
// convert instruction with register operands.
896 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
897 ArmOperandGenerator g(this);
898 Emit(kArmVcvtF64F32, g.DefineAsRegister(node),
899 g.UseRegister(node->InputAt(0)));
903 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
904 ArmOperandGenerator g(this);
905 Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
906 g.UseRegister(node->InputAt(0)));
910 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
911 ArmOperandGenerator g(this);
912 Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
913 g.UseRegister(node->InputAt(0)));
917 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
918 ArmOperandGenerator g(this);
919 Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
920 g.UseRegister(node->InputAt(0)));
924 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
925 ArmOperandGenerator g(this);
926 Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
927 g.UseRegister(node->InputAt(0)));
931 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
932 ArmOperandGenerator g(this);
933 Emit(kArmVcvtF32F64, g.DefineAsRegister(node),
934 g.UseRegister(node->InputAt(0)));
// Lowers Float64Add, fusing a coverable multiply on either side into
// VMLA (fused multiply-accumulate, result same register as the addend);
// otherwise a plain VADD.  NOTE(review): 'return;' lines and closing
// braces are elided in this excerpt.
938 void InstructionSelector::VisitFloat64Add(Node* node) {
939 ArmOperandGenerator g(this);
940 Float64BinopMatcher m(node);
941 if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
942 Float64BinopMatcher mleft(m.left().node());
943 Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
944 g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
945 g.UseRegister(mleft.right().node()));
948 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
949 Float64BinopMatcher mright(m.right().node());
950 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
951 g.UseRegister(mright.left().node()),
952 g.UseRegister(mright.right().node()));
955 VisitRRRFloat64(this, kArmVaddF64, node);
// Lowers Float64Sub.  -0.0 - x is negation (VNEG); within that,
// -0.0 - RoundDown(-0.0 - y) is recognized as round-towards-plus-infinity
// (VRINTP).  x - y*z fuses into VMLS.  Otherwise plain VSUB.
// NOTE(review): 'return;' lines and closing braces are elided in this
// excerpt.
959 void InstructionSelector::VisitFloat64Sub(Node* node) {
960 ArmOperandGenerator g(this);
961 Float64BinopMatcher m(node);
962 if (m.left().IsMinusZero()) {
963 if (m.right().IsFloat64RoundDown() &&
964 CanCover(m.node(), m.right().node())) {
965 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
966 CanCover(m.right().node(), m.right().InputAt(0))) {
967 Float64BinopMatcher mright0(m.right().InputAt(0));
968 if (mright0.left().IsMinusZero()) {
// -(floor(-y)) == ceil(y): emit round-to-plus-infinity directly.
969 Emit(kArmVrintpF64, g.DefineAsRegister(node),
970 g.UseRegister(mright0.right().node()));
// -0.0 - x: plain negation.
975 Emit(kArmVnegF64, g.DefineAsRegister(node),
976 g.UseRegister(m.right().node()));
979 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
980 Float64BinopMatcher mright(m.right().node());
981 Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
982 g.UseRegister(mright.left().node()),
983 g.UseRegister(mright.right().node()));
986 VisitRRRFloat64(this, kArmVsubF64, node);
// Float64Mul and Float64Div lower directly to VMUL / VDIV.
990 void InstructionSelector::VisitFloat64Mul(Node* node) {
991 VisitRRRFloat64(this, kArmVmulF64, node);
995 void InstructionSelector::VisitFloat64Div(Node* node) {
996 VisitRRRFloat64(this, kArmVdivF64, node);
// Float64Mod is a runtime call: operands fixed in d0/d1, result in d0.
1000 void InstructionSelector::VisitFloat64Mod(Node* node) {
1001 ArmOperandGenerator g(this);
1002 Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
1003 g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
1007 void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
1010 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
// Float64Sqrt lowers to VSQRT.
1013 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
1014 ArmOperandGenerator g(this);
1015 Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
// Rounding visitors: VRINTM (toward minus infinity), VRINTZ (toward
// zero), VRINTA (to nearest, ties away from zero).
1019 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
1020 VisitRRFloat64(this, kArmVrintmF64, node);
1024 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
1025 VisitRRFloat64(this, kArmVrintzF64, node);
1029 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1030 VisitRRFloat64(this, kArmVrintaF64, node);
// Lowers a call: builds the CallBuffer (with a frame state when needed),
// pushes stack arguments in reverse order, appends the exception-handler
// label when one exists, selects kArchCallCodeObject or
// kArchCallJSFunction, and emits the call marked as such.  NOTE(review):
// some closing braces and 'break;'/'default:' lines are elided in this
// excerpt.
1034 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
1035 ArmOperandGenerator g(this);
1036 const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
1038 FrameStateDescriptor* frame_state_descriptor = NULL;
1039 if (descriptor->NeedsFrameState()) {
1040 frame_state_descriptor =
1041 GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
1044 CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
1046 // Compute InstructionOperands for inputs and outputs.
1047 // TODO(turbofan): on ARM64 it's probably better to use the code object in a
1048 // register if there are multiple uses of it. Improve constant pool and the
1049 // heuristics in the register allocator for where to emit constants.
1050 InitializeCallBuffer(node, &buffer, true, false);
1052 // TODO(dcarney): might be possible to use claim/poke instead
1053 // Push any stack arguments.
// Iterate in reverse so the first argument ends up pushed last.
1054 for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
1056 Emit(kArmPush, g.NoOutput(), g.UseRegister(*i));
1059 // Pass label of exception handler block.
1060 CallDescriptor::Flags flags = descriptor->flags();
1061 if (handler != nullptr) {
1062 flags |= CallDescriptor::kHasExceptionHandler;
1063 buffer.instruction_args.push_back(g.Label(handler));
1066 // Select the appropriate opcode based on the call type.
1067 InstructionCode opcode;
1068 switch (descriptor->kind()) {
1069 case CallDescriptor::kCallCodeObject: {
1070 opcode = kArchCallCodeObject;
1073 case CallDescriptor::kCallJSFunction:
1074 opcode = kArchCallJSFunction;
1080 opcode |= MiscField::encode(flags);
1082 // Emit the call instruction.
1083 InstructionOperand* first_output =
1084 buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
1085 Instruction* call_instr =
1086 Emit(opcode, buffer.outputs.size(), first_output,
1087 buffer.instruction_args.size(), &buffer.instruction_args.front());
1088 call_instr->MarkAsCall();
1094 // Shared routine for multiple float compare operations.
// Emits VCMP, using an immediate right-hand side when comparing against
// 0.0.  The flags continuation determines whether the comparison feeds a
// branch (labels appended) or materializes a boolean result.
1095 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1096 FlagsContinuation* cont) {
1097 ArmOperandGenerator g(selector);
1098 Float64BinopMatcher m(node);
1099 InstructionOperand rhs = m.right().Is(0.0) ? g.UseImmediate(m.right().node())
1100 : g.UseRegister(m.right().node());
1101 if (cont->IsBranch()) {
1102 selector->Emit(cont->Encode(kArmVcmpF64), g.NoOutput(),
1103 g.UseRegister(m.left().node()), rhs,
1104 g.Label(cont->true_block()), g.Label(cont->false_block()));
// (elided 'else') Non-branch continuation: materialize the result.
1106 DCHECK(cont->IsSet());
1107 selector->Emit(cont->Encode(kArmVcmpF64),
1108 g.DefineAsRegister(cont->result()),
1109 g.UseRegister(m.left().node()), rhs);
1114 // Shared routine for multiple word compare operations.
1115 void VisitWordCompare(InstructionSelector* selector, Node* node,
1116 InstructionCode opcode, FlagsContinuation* cont) {
1117 ArmOperandGenerator g(selector);
1118 Int32BinopMatcher m(node);
1119 InstructionOperand inputs[5];
1120 size_t input_count = 0;
1121 InstructionOperand outputs[1];
1122 size_t output_count = 0;
1124 if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
1125 &input_count, &inputs[1])) {
1126 inputs[0] = g.UseRegister(m.left().node());
1128 } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
1129 &input_count, &inputs[1])) {
1130 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1131 inputs[0] = g.UseRegister(m.right().node());
1134 opcode |= AddressingModeField::encode(kMode_Operand2_R);
1135 inputs[input_count++] = g.UseRegister(m.left().node());
1136 inputs[input_count++] = g.UseRegister(m.right().node());
1139 if (cont->IsBranch()) {
1140 inputs[input_count++] = g.Label(cont->true_block());
1141 inputs[input_count++] = g.Label(cont->false_block());
1143 DCHECK(cont->IsSet());
1144 outputs[output_count++] = g.DefineAsRegister(cont->result());
1147 DCHECK_NE(0u, input_count);
1148 DCHECK_GE(arraysize(inputs), input_count);
1149 DCHECK_GE(arraysize(outputs), output_count);
1151 selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
1156 void VisitWordCompare(InstructionSelector* selector, Node* node,
1157 FlagsContinuation* cont) {
1158 VisitWordCompare(selector, node, kArmCmp, cont);
1162 // Shared routine for word comparisons against zero.
1163 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
1164 Node* value, FlagsContinuation* cont) {
1165 while (selector->CanCover(user, value)) {
1166 switch (value->opcode()) {
1167 case IrOpcode::kWord32Equal: {
1168 // Combine with comparisons against 0 by simply inverting the
1170 Int32BinopMatcher m(value);
1171 if (m.right().Is(0)) {
1173 value = m.left().node();
1177 cont->OverwriteAndNegateIfEqual(kEqual);
1178 return VisitWordCompare(selector, value, cont);
1180 case IrOpcode::kInt32LessThan:
1181 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1182 return VisitWordCompare(selector, value, cont);
1183 case IrOpcode::kInt32LessThanOrEqual:
1184 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1185 return VisitWordCompare(selector, value, cont);
1186 case IrOpcode::kUint32LessThan:
1187 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1188 return VisitWordCompare(selector, value, cont);
1189 case IrOpcode::kUint32LessThanOrEqual:
1190 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1191 return VisitWordCompare(selector, value, cont);
1192 case IrOpcode::kFloat64Equal:
1193 cont->OverwriteAndNegateIfEqual(kEqual);
1194 return VisitFloat64Compare(selector, value, cont);
1195 case IrOpcode::kFloat64LessThan:
1196 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1197 return VisitFloat64Compare(selector, value, cont);
1198 case IrOpcode::kFloat64LessThanOrEqual:
1199 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1200 return VisitFloat64Compare(selector, value, cont);
1201 case IrOpcode::kProjection:
1202 // Check if this is the overflow output projection of an
1203 // <Operation>WithOverflow node.
1204 if (ProjectionIndexOf(value->op()) == 1u) {
1205 // We cannot combine the <Operation>WithOverflow with this branch
1206 // unless the 0th projection (the use of the actual value of the
1207 // <Operation> is either NULL, which means there's no use of the
1208 // actual value, or was already defined, which means it is scheduled
1209 // *AFTER* this branch).
1210 Node* const node = value->InputAt(0);
1211 Node* const result = NodeProperties::FindProjection(node, 0);
1212 if (!result || selector->IsDefined(result)) {
1213 switch (node->opcode()) {
1214 case IrOpcode::kInt32AddWithOverflow:
1215 cont->OverwriteAndNegateIfEqual(kOverflow);
1216 return VisitBinop(selector, node, kArmAdd, kArmAdd, cont);
1217 case IrOpcode::kInt32SubWithOverflow:
1218 cont->OverwriteAndNegateIfEqual(kOverflow);
1219 return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
1226 case IrOpcode::kInt32Add:
1227 return VisitWordCompare(selector, value, kArmCmn, cont);
1228 case IrOpcode::kInt32Sub:
1229 return VisitWordCompare(selector, value, kArmCmp, cont);
1230 case IrOpcode::kWord32And:
1231 return VisitWordCompare(selector, value, kArmTst, cont);
1232 case IrOpcode::kWord32Or:
1233 return VisitBinop(selector, value, kArmOrr, kArmOrr, cont);
1234 case IrOpcode::kWord32Xor:
1235 return VisitWordCompare(selector, value, kArmTeq, cont);
1236 case IrOpcode::kWord32Sar:
1237 return VisitShift(selector, value, TryMatchASR, cont);
1238 case IrOpcode::kWord32Shl:
1239 return VisitShift(selector, value, TryMatchLSL, cont);
1240 case IrOpcode::kWord32Shr:
1241 return VisitShift(selector, value, TryMatchLSR, cont);
1242 case IrOpcode::kWord32Ror:
1243 return VisitShift(selector, value, TryMatchROR, cont);
1250 // Continuation could not be combined with a compare, emit compare against 0.
1251 ArmOperandGenerator g(selector);
1252 InstructionCode const opcode =
1253 cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
1254 InstructionOperand const value_operand = g.UseRegister(value);
1255 if (cont->IsBranch()) {
1256 selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
1257 g.Label(cont->true_block()), g.Label(cont->false_block()));
1259 selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
1267 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1268 BasicBlock* fbranch) {
1269 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1270 VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
1274 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1275 ArmOperandGenerator g(this);
1276 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1278 // Emit either ArchTableSwitch or ArchLookupSwitch.
1279 size_t table_space_cost = 4 + sw.value_range;
1280 size_t table_time_cost = 3;
1281 size_t lookup_space_cost = 3 + 2 * sw.case_count;
1282 size_t lookup_time_cost = sw.case_count;
1283 if (sw.case_count > 0 &&
1284 table_space_cost + 3 * table_time_cost <=
1285 lookup_space_cost + 3 * lookup_time_cost &&
1286 sw.min_value > std::numeric_limits<int32_t>::min()) {
1287 InstructionOperand index_operand = value_operand;
1289 index_operand = g.TempRegister();
1290 Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
1291 index_operand, value_operand, g.TempImmediate(sw.min_value));
1293 // Generate a table lookup.
1294 return EmitTableSwitch(sw, index_operand);
1297 // Generate a sequence of conditional jumps.
1298 return EmitLookupSwitch(sw, value_operand);
1302 void InstructionSelector::VisitWord32Equal(Node* const node) {
1303 FlagsContinuation cont(kEqual, node);
1304 Int32BinopMatcher m(node);
1305 if (m.right().Is(0)) {
1306 return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1308 VisitWordCompare(this, node, &cont);
1312 void InstructionSelector::VisitInt32LessThan(Node* node) {
1313 FlagsContinuation cont(kSignedLessThan, node);
1314 VisitWordCompare(this, node, &cont);
1318 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1319 FlagsContinuation cont(kSignedLessThanOrEqual, node);
1320 VisitWordCompare(this, node, &cont);
1324 void InstructionSelector::VisitUint32LessThan(Node* node) {
1325 FlagsContinuation cont(kUnsignedLessThan, node);
1326 VisitWordCompare(this, node, &cont);
1330 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1331 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1332 VisitWordCompare(this, node, &cont);
1336 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1337 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1338 FlagsContinuation cont(kOverflow, ovf);
1339 return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
1341 FlagsContinuation cont;
1342 VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
1346 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1347 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1348 FlagsContinuation cont(kOverflow, ovf);
1349 return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
1351 FlagsContinuation cont;
1352 VisitBinop(this, node, kArmSub, kArmRsb, &cont);
1356 void InstructionSelector::VisitFloat64Equal(Node* node) {
1357 FlagsContinuation cont(kEqual, node);
1358 VisitFloat64Compare(this, node, &cont);
1362 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1363 FlagsContinuation cont(kUnsignedLessThan, node);
1364 VisitFloat64Compare(this, node, &cont);
1368 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1369 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
1370 VisitFloat64Compare(this, node, &cont);
1374 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1375 ArmOperandGenerator g(this);
1376 Emit(kArmVmovLowU32F64, g.DefineAsRegister(node),
1377 g.UseRegister(node->InputAt(0)));
1381 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1382 ArmOperandGenerator g(this);
1383 Emit(kArmVmovHighU32F64, g.DefineAsRegister(node),
1384 g.UseRegister(node->InputAt(0)));
1388 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1389 ArmOperandGenerator g(this);
1390 Node* left = node->InputAt(0);
1391 Node* right = node->InputAt(1);
1392 if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
1393 CanCover(node, left)) {
1394 left = left->InputAt(1);
1395 Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(right),
1396 g.UseRegister(left));
1399 Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
1400 g.UseRegister(right));
1404 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1405 ArmOperandGenerator g(this);
1406 Node* left = node->InputAt(0);
1407 Node* right = node->InputAt(1);
1408 if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
1409 CanCover(node, left)) {
1410 left = left->InputAt(1);
1411 Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(left),
1412 g.UseRegister(right));
1415 Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
1416 g.UseRegister(right));
1421 MachineOperatorBuilder::Flags
1422 InstructionSelector::SupportedMachineOperatorFlags() {
1423 MachineOperatorBuilder::Flags flags =
1424 MachineOperatorBuilder::kInt32DivIsSafe |
1425 MachineOperatorBuilder::kUint32DivIsSafe;
1427 if (CpuFeatures::IsSupported(ARMv8)) {
1428 flags |= MachineOperatorBuilder::kFloat64RoundDown |
1429 MachineOperatorBuilder::kFloat64RoundTruncate |
1430 MachineOperatorBuilder::kFloat64RoundTiesAway;
1435 } // namespace compiler
1436 } // namespace internal