// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm8,  // signed 8 bit or 12 bit unsigned scaled by access size
  kLoadStoreImm16,
  kLoadStoreImm32,
  kLoadStoreImm64,
  kNoImmediate
};


// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit Arm64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node);
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node);
    else
      return false;
    return CanBeImmediate(value, mode);
  }

  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
    unsigned ignored;
    switch (mode) {
      case kLogical32Imm:
        // TODO(dcarney): some unencodable values can be handled by
        //                switching instructions.
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                       &ignored, &ignored, &ignored);
      case kLogical64Imm:
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                       &ignored, &ignored, &ignored);
      case kArithmeticImm:
        // 12-bit unsigned immediate, optionally shifted left by 12: e.g. 4096
        // (= 1 << 12) is encodable, 4097 is not.
        return Assembler::IsImmAddSub(value);
      case kShift32Imm:
        return 0 <= value && value < 32;
      case kShift64Imm:
        return 0 <= value && value < 64;
      case kLoadStoreImm8:
        return IsLoadStoreImmediate(value, LSByte);
      case kLoadStoreImm16:
        return IsLoadStoreImmediate(value, LSHalfword);
      case kLoadStoreImm32:
        return IsLoadStoreImmediate(value, LSWord);
      case kLoadStoreImm64:
        return IsLoadStoreImmediate(value, LSDoubleWord);
      case kNoImmediate:
        return false;
    }
    return false;
  }

 private:
  bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
    return Assembler::IsImmLSScaled(value, size) ||
           Assembler::IsImmLSUnscaled(value);
  }
};


static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node, ImmediateMode operand_mode) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}
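

// Tries to match a shift by an immediate against one of the shifted-operand
// addressing modes, so that e.g. Or(x, Shl(y, 3)) can be selected as a single
// "orr w0, w1, w2, lsl #3".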
template <typename Matcher>
static bool TryMatchShift(InstructionSelector* selector, Node* node,
                          InstructionCode* opcode, IrOpcode::Value shift_opcode,
                          ImmediateMode imm_mode,
                          AddressingMode addressing_mode) {
  if (node->opcode() != shift_opcode) return false;
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (g.CanBeImmediate(m.right().node(), imm_mode)) {
    *opcode |= AddressingModeField::encode(addressing_mode);
    return true;
  }
  return false;
}


static bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                             InstructionCode* opcode, bool try_ror) {
  return TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord32Shl, kShift32Imm,
                                          kMode_Operand2_R_LSL_I) ||
         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord32Shr, kShift32Imm,
                                          kMode_Operand2_R_LSR_I) ||
         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord32Sar, kShift32Imm,
                                          kMode_Operand2_R_ASR_I) ||
         (try_ror && TryMatchShift<Int32BinopMatcher>(
                         selector, node, opcode, IrOpcode::kWord32Ror,
                         kShift32Imm, kMode_Operand2_R_ROR_I)) ||
         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord64Shl, kShift64Imm,
                                          kMode_Operand2_R_LSL_I) ||
         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord64Shr, kShift64Imm,
                                          kMode_Operand2_R_LSR_I) ||
         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord64Sar, kShift64Imm,
                                          kMode_Operand2_R_ASR_I) ||
         (try_ror && TryMatchShift<Int64BinopMatcher>(
                         selector, node, opcode, IrOpcode::kWord64Ror,
                         kShift64Imm, kMode_Operand2_R_ROR_I));
}
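

// Tries to match an And against a 0xff or 0xffff mask so that it can be
// folded into an add/sub as a UXTB/UXTH extended-register operand, e.g.
// Add(x, And(y, 0xff)) as "add w0, w1, w2, uxtb".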
static bool TryMatchAnyExtend(InstructionSelector* selector, Node* node,
                              InstructionCode* opcode) {
  NodeMatcher nm(node);
  if (nm.IsWord32And()) {
    Int32BinopMatcher m(node);
    if (m.right().HasValue()) {
      if (m.right().Value() == 0xff) {
        *opcode |= AddressingModeField::encode(kMode_Operand2_R_UXTB);
        return true;
      } else if (m.right().Value() == 0xffff) {
        *opcode |= AddressingModeField::encode(kMode_Operand2_R_UXTH);
        return true;
      }
    }
  }
  return false;
}


// Shared routine for multiple binary operations.
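// Where possible, the right operand is folded into the instruction as an
// immediate, a shifted register (e.g. Add(x, Shl(y, 3)) selects
// "add x0, x1, x2, lsl #3"), or an extended register.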
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, ImmediateMode operand_mode,
                       FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  // A matched shift contributes three inputs and a branch continuation two
  // labels, so up to five input operands are needed.
  InstructionOperand inputs[5];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  bool is_add_sub = false;

  if (m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() || m.IsInt64Sub()) {
    is_add_sub = true;
  }

  if (g.CanBeImmediate(m.right().node(), operand_mode)) {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseImmediate(m.right().node());
  } else if (TryMatchAnyShift(selector, m.right().node(), &opcode,
                              !is_add_sub)) {
    Matcher m_shift(m.right().node());
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else if (m.HasProperty(Operator::kCommutative) &&
             TryMatchAnyShift(selector, m.left().node(), &opcode,
                              !is_add_sub)) {
    Matcher m_shift(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else if (is_add_sub &&
             TryMatchAnyExtend(selector, m.right().node(), &opcode)) {
    Matcher mright(m.right().node());
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(mright.left().node());
  } else if (is_add_sub && m.HasProperty(Operator::kCommutative) &&
             TryMatchAnyExtend(selector, m.left().node(), &opcode)) {
    Matcher mleft(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
    inputs[input_count++] = g.UseRegister(mleft.left().node());
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
                 inputs);
}


// Shared routine for multiple binary operations.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
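

// For an add/sub with a negative right-hand immediate, emit the inverse
// operation with the negated immediate instead, e.g. Add(x, -4) becomes
// "sub w0, w1, #4".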
template <typename Matcher>
static void VisitAddSub(InstructionSelector* selector, Node* node,
                        ArchOpcode opcode, ArchOpcode negate_opcode) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (m.right().HasValue() && (m.right().Value() < 0) &&
      g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
    selector->Emit(negate_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.left().node()),
                   g.TempImmediate(-m.right().Value()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
  }
}


void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64LdrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64LdrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64LdrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Ldr;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}


void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
    Emit(kArm64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, x10),
         g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64StrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64StrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kArm64Strb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = kArm64Strh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64StrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Str;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
  }
}


void InstructionSelector::VisitCheckedLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  Arm64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode;
  switch (rep) {
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case kRepWord32:
      opcode = kCheckedLoadWord32;
      break;
    case kRepFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case kRepFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  Arm64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode;
  switch (rep) {
    case kRepWord8:
      opcode = kCheckedStoreWord8;
      break;
    case kRepWord16:
      opcode = kCheckedStoreWord16;
      break;
    case kRepWord32:
      opcode = kCheckedStoreWord32;
      break;
    case kRepFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case kRepFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
}
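

// Shared routine for the Word32/Word64 And, Or and Xor selectors. When one
// input is an inverted value, the inverting instruction variant is used
// instead, e.g. And(x, Xor(y, -1)) selects "bic w0, w1, w2".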
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  Arm64OperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kArm64And32:
      inv_opcode = kArm64Bic32;
      break;
    case kArm64And:
      inv_opcode = kArm64Bic;
      break;
    case kArm64Or32:
      inv_opcode = kArm64Orn32;
      break;
    case kArm64Or:
      inv_opcode = kArm64Orn;
      break;
    case kArm64Eor32:
      inv_opcode = kArm64Eon32;
      break;
    case kArm64Eor:
      inv_opcode = kArm64Eon;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  if (m->IsWord32Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not32, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else if (m->IsWord64Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, imm_mode);
  }
}


void InstructionSelector::VisitWord32And(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
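      // For example, And(Shr(x, 16), 0xff) is selected as Ubfx(x, 16, 8).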
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 31)) {
        // Ubfx cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use ubfx with a smaller mask and the remaining bits will be
        // zeros.
        uint32_t lsb = mleft.right().Value();
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64And32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64And(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint64_t mask_width = base::bits::CountPopulation64(mask);
    uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
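      // For example, And(Shr(x, 32), 0xffff) is selected as Ubfx(x, 32, 16).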
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        // Ubfx cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use ubfx with a smaller mask and the remaining bits will be
        // zeros.
        uint64_t lsb = mleft.right().Value();
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kArm64Lsl, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    uint32_t lsb = m.right().Value();
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
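      // For example, Shr(And(x, 0xff0000), 16) is selected as Ubfx(x, 16, 8).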
      if ((mask_msb + mask_width + lsb) == 32) {
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    uint64_t lsb = m.right().Value();
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
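      // For example, Shr(And(x, 0xff00000000), 32) is selected as
      // Ubfx(x, 32, 8).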
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      uint64_t mask_width = base::bits::CountPopulation64(mask);
      uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kArm64Lsr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Sxth/Sxtb for (x << K) >> K where K is 16 or 24.
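  // The shift pair sign-extends the low half-word or byte, which is exactly
  // what sxth/sxtb do, e.g. (x << 16) >> 16 is selected as "sxth w0, w1".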
  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kArm64Sxth32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kArm64Sxtb32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    }
  }
  VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Asr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitInt32Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
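  // This folds the multiply and the add into a single
  // "madd w0, w1, w2, w3", which computes w1 * w2 + w3.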
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArm64Madd32, g.DefineAsRegister(node),
         g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArm64Madd32, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    Emit(kArm64Madd, g.DefineAsRegister(node),
         g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    Emit(kArm64Madd, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArm64Msub32, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }

  // Select Neg for Sub(0, x).
  if (m.left().Is(0)) {
    Emit(kArm64Neg32, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
  } else {
    VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    Emit(kArm64Msub, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }

  // Select Neg for Sub(0, x).
  if (m.left().Is(0)) {
    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
  }
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
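    // Mneg computes -(x * y), so the explicit negation is folded away.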
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul32, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul, node);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  // TODO(arm64): Can we do better here?
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  // TODO(arm64): Can we do better here?
  Arm64OperandGenerator g(this);
  InstructionOperand const umull_operand = g.TempRegister();
  Emit(kArm64Umull, umull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Lsr, g.DefineAsRegister(node), umull_operand, g.TempImmediate(32));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // 32-bit operations will write their result in a W register (implicitly
      // clearing the top 32 bits of the corresponding X register), so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    Int64BinopMatcher m(value);
    if ((m.IsWord64Sar() && m.right().HasValue() &&
         (m.right().Value() == 32)) ||
        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
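      // Only the low 32 bits of the result are observed after truncation, so
      // Lsr can stand in for Sar here (for Sar this holds only when the shift
      // amount is exactly 32, hence the stricter check above).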
      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseImmediate(m.right().node()));
      return;
    }
  }
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
      CanCover(m.node(), m.right().node())) {
    if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
        CanCover(m.right().node(), m.right().InputAt(0))) {
      Float64BinopMatcher mright0(m.right().InputAt(0));
      if (mright0.left().IsMinusZero()) {
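        // The matched pattern is -0.0 - RoundDown(-0.0 - x), which is
        // ceil(x), so a single Float64RoundUp suffices.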
        Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
             g.UseRegister(mright0.right().node()));
        return;
      }
    }
  }
  VisitRRRFloat64(this, kArm64Float64Sub, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Div, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitFloat64Max(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kArm64Float64Max, g.DefineAsRegister(node), g.UseRegister(left),
       g.UseRegister(right));
}


void InstructionSelector::VisitFloat64Min(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kArm64Float64Min, g.DefineAsRegister(node), g.UseRegister(left),
       g.UseRegister(right));
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRRFloat64(this, kArm64Float64Sqrt, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRRFloat64(this, kArm64Float64RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRRFloat64(this, kArm64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRRFloat64(this, kArm64Float64RoundTiesAway, node);
}


void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  Arm64OperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(node, &buffer, true, false);

  // Push the arguments to the stack.
  bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
  int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
  // TODO(dcarney): claim and poke probably take small immediates,
  //                loop here or whatever.
  // Bump the stack pointer(s).
  if (aligned_push_count > 0) {
    // TODO(dcarney): it would be better to bump the csp here only
    //                and emit paired stores with increment for non c frames.
    Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
  }
  // Move arguments to the stack.
  {
    int slot = static_cast<int>(buffer.pushed_nodes.size()) - 1;
    // Emit the uneven pushes.
    if (pushed_count_uneven) {
      Node* input = buffer.pushed_nodes[slot];
      Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
           g.TempImmediate(slot));
      slot--;
    }
    // Now all pushes can be done in pairs.
    for (; slot >= 0; slot -= 2) {
      Emit(kArm64PokePair, g.NoOutput(),
           g.UseRegister(buffer.pushed_nodes[slot]),
           g.UseRegister(buffer.pushed_nodes[slot - 1]),
           g.TempImmediate(slot));
    }
  }

  // Pass label of exception handler block.
  CallDescriptor::Flags flags = descriptor->flags();
  if (handler != nullptr) {
    flags |= CallDescriptor::kHasExceptionHandler;
    buffer.instruction_args.push_back(g.Label(handler));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(flags);

  // Emit the call instruction.
  InstructionOperand* first_output =
      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), first_output,
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  call_instr->MarkAsCall();
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative, ImmediateMode immediate_mode) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
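  // If only the left input is an immediate, swap the operands and commute the
  // condition, e.g. Int32LessThan(2, x) becomes "cmp w0, #2" with a
  // greater-than continuation.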
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


static void VisitWord32Compare(InstructionSelector* selector, Node* node,
                               FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kArm64Cmp32, cont, false, kArithmeticImm);
}


static void VisitWordTest(InstructionSelector* selector, Node* node,
                          InstructionCode opcode, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
               cont);
}


static void VisitWord32Test(InstructionSelector* selector, Node* node,
                            FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst32, cont);
}


static void VisitWord64Test(InstructionSelector* selector, Node* node,
                            FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst, cont);
}


// Shared routine for multiple float compare operations.
static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                                FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
    Int32BinopMatcher m(value);
    if (m.right().Is(0)) {
      user = value;
      value = m.left().node();
      cont.Negate();
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == NULL || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                                     kArithmeticImm, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                                     kArithmeticImm, &cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                kArithmeticImm);
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kWord32And: {
        Int32BinopMatcher m(value);
        if (m.right().HasValue() &&
            (base::bits::CountPopulation32(m.right().Value()) == 1)) {
          // If the mask has only one bit set, we can use tbz/tbnz.
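          // For example, Branch(Word32And(x, 8)) becomes
          // "tbnz w0, #3, <true_block>".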
          DCHECK((cont.condition() == kEqual) ||
                 (cont.condition() == kNotEqual));
          Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
               g.UseRegister(m.left().node()),
               g.TempImmediate(
                   base::bits::CountTrailingZeros32(m.right().Value())),
               g.Label(cont.true_block()), g.Label(cont.false_block()));
          return;
        }
        return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                kLogical32Imm);
      }
      case IrOpcode::kWord64And: {
        Int64BinopMatcher m(value);
        if (m.right().HasValue() &&
            (base::bits::CountPopulation64(m.right().Value()) == 1)) {
          // If the mask has only one bit set, we can use tbz/tbnz.
          DCHECK((cont.condition() == kEqual) ||
                 (cont.condition() == kNotEqual));
          Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
               g.UseRegister(m.left().node()),
               g.TempImmediate(
                   base::bits::CountTrailingZeros64(m.right().Value())),
               g.Label(cont.true_block()), g.Label(cont.false_block()));
          return;
        }
        return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                kLogical64Imm);
      }
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, compare against 0 and branch.
  Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
       g.UseRegister(value), g.Label(cont.true_block()),
       g.Label(cont.false_block()));
}


void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Arm64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
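  // The estimates below weigh time three times as heavily as space; the table
  // switch is chosen when its weighted cost is no worse than that of the
  // compare/jump sequence.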
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      index_operand = g.TempRegister();
      Emit(kArm64Sub32, index_operand, value_operand,
           g.TempImmediate(sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Add:
          return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                  kArithmeticImm);
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                  kArithmeticImm);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                  kLogical32Imm);
        default:
          break;
      }
      return VisitWord32Test(this, value, &cont);
    }
  }
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
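    // InsertLowWord32(InsertHighWord32(x, hi), lo) overwrites both halves of
    // x, so the result can be assembled directly from hi and lo: Bfi places
    // hi in the top 32 bits of lo, and the combined value is then moved into
    // the FP register.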
    Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
         g.UseRegister(right_of_left), g.TempImmediate(32),
         g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
    return;
  }
  Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
         g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
    return;
  }
  Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}


// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kFloat64Max |
         MachineOperatorBuilder::kFloat64Min |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8