// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm8,  // signed 8 bit or 12 bit unsigned scaled by access size
  kLoadStoreImm16,
  kLoadStoreImm32,
  kLoadStoreImm64,
  kNoImmediate
};
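
// Note on encodings: an arithmetic immediate is a 12-bit unsigned value,
// optionally shifted left by 12 (e.g. 0xABC or 0xABC000 are encodable,
// 0x1001 is not), while logical immediates must be repeating bitmask
// patterns (e.g. 0x0F0F0F0F is encodable, 0x12345678 is not).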


// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit Arm64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node);
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node);
    else
      return false;
    return CanBeImmediate(value, mode);
  }

  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
    unsigned ignored;
    switch (mode) {
      case kLogical32Imm:
        // TODO(dcarney): some unencodable values can be handled by
        // switching instructions.
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                       &ignored, &ignored, &ignored);
      case kLogical64Imm:
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                       &ignored, &ignored, &ignored);
      case kArithmeticImm:
        return Assembler::IsImmAddSub(value);
      case kShift32Imm:
        return 0 <= value && value < 32;
      case kShift64Imm:
        return 0 <= value && value < 64;
      case kLoadStoreImm8:
        return IsLoadStoreImmediate(value, LSByte);
      case kLoadStoreImm16:
        return IsLoadStoreImmediate(value, LSHalfword);
      case kLoadStoreImm32:
        return IsLoadStoreImmediate(value, LSWord);
      case kLoadStoreImm64:
        return IsLoadStoreImmediate(value, LSDoubleWord);
      case kNoImmediate:
        return false;
    }
    return false;
  }

 private:
  bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
    return Assembler::IsImmLSScaled(value, size) ||
           Assembler::IsImmLSUnscaled(value);
  }
};


static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node, ImmediateMode operand_mode) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}
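

// Many AArch64 data-processing instructions accept a shifted register as
// their second operand (e.g. "add w0, w1, w2, lsl #3"), so a shift by an
// immediate that feeds a binop can often be folded into the binop itself.
// The matchers below rewrite the instruction's addressing mode accordingly.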
template <typename Matcher>
static bool TryMatchShift(InstructionSelector* selector, Node* node,
                          InstructionCode* opcode, IrOpcode::Value shift_opcode,
                          ImmediateMode imm_mode,
                          AddressingMode addressing_mode) {
  if (node->opcode() != shift_opcode) return false;
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (g.CanBeImmediate(m.right().node(), imm_mode)) {
    *opcode |= AddressingModeField::encode(addressing_mode);
    return true;
  }
  return false;
}


static bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                             InstructionCode* opcode, bool try_ror) {
  return TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord32Shl, kShift32Imm,
                                          kMode_Operand2_R_LSL_I) ||
         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord32Shr, kShift32Imm,
                                          kMode_Operand2_R_LSR_I) ||
         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord32Sar, kShift32Imm,
                                          kMode_Operand2_R_ASR_I) ||
         (try_ror && TryMatchShift<Int32BinopMatcher>(
                         selector, node, opcode, IrOpcode::kWord32Ror,
                         kShift32Imm, kMode_Operand2_R_ROR_I)) ||
         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord64Shl, kShift64Imm,
                                          kMode_Operand2_R_LSL_I) ||
         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord64Shr, kShift64Imm,
                                          kMode_Operand2_R_LSR_I) ||
         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
                                          IrOpcode::kWord64Sar, kShift64Imm,
                                          kMode_Operand2_R_ASR_I) ||
         (try_ror && TryMatchShift<Int64BinopMatcher>(
                         selector, node, opcode, IrOpcode::kWord64Ror,
                         kShift64Imm, kMode_Operand2_R_ROR_I));
}
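

// ROR is only matched when {try_ror} is set because the AArch64 add/sub
// (shifted register) encodings accept LSL, LSR and ASR but not ROR; only
// the logical instructions take a rotated operand.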
// Shared routine for multiple binary operations.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, ImmediateMode operand_mode,
                       FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  bool try_ror_operand = true;

  if (m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() || m.IsInt64Sub()) {
    try_ror_operand = false;
  }

  if (g.CanBeImmediate(m.right().node(), operand_mode)) {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseImmediate(m.right().node());
  } else if (TryMatchAnyShift(selector, m.right().node(), &opcode,
                              try_ror_operand)) {
    Matcher m_shift(m.right().node());
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else if (m.HasProperty(Operator::kCommutative) &&
             TryMatchAnyShift(selector, m.left().node(), &opcode,
                              try_ror_operand)) {
    Matcher m_shift(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


// Shared routine for multiple binary operations.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
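

// If the right-hand constant is negative but its negation is encodable, the
// operation is flipped: e.g. Add(x, -1) is emitted as "sub w0, w1, #1".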
template <typename Matcher>
static void VisitAddSub(InstructionSelector* selector, Node* node,
                        ArchOpcode opcode, ArchOpcode negate_opcode) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (m.right().HasValue() && (m.right().Value() < 0) &&
      g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
    selector->Emit(negate_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.left().node()),
                   g.TempImmediate(-m.right().Value()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
  }
}


void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64LdrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64LdrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64LdrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Ldr;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}


void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
    Emit(kArm64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, x10),
         g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64StrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64StrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kArm64Strb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = kArm64Strh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64StrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Str;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
  }
}


void InstructionSelector::VisitCheckedLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  Arm64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode;
  switch (rep) {
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case kRepWord32:
      opcode = kCheckedLoadWord32;
      break;
    case kRepFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case kRepFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  Arm64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode;
  switch (rep) {
    case kRepWord8:
      opcode = kCheckedStoreWord8;
      break;
    case kRepWord16:
      opcode = kCheckedStoreWord16;
      break;
    case kRepWord32:
      opcode = kCheckedStoreWord32;
      break;
    case kRepFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case kRepFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
}
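

// Example: And(x, Xor(y, -1)) is And(x, ~y), which maps onto a single
// "bic x0, x1, x2"; Or and Xor combine with an inverted input as orn/eon
// in the same way.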
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  Arm64OperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kArm64And32:
      inv_opcode = kArm64Bic32;
      break;
    case kArm64And:
      inv_opcode = kArm64Bic;
      break;
    case kArm64Or32:
      inv_opcode = kArm64Orn32;
      break;
    case kArm64Or:
      inv_opcode = kArm64Orn;
      break;
    case kArm64Eor32:
      inv_opcode = kArm64Eon32;
      break;
    case kArm64Eor:
      inv_opcode = kArm64Eon;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  if (m->IsWord32Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not32, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else if (m->IsWord64Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, imm_mode);
  }
}
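

// Example: And(Shr(x, 8), 0xFFFF) extracts a contiguous 16-bit field, so it
// is selected as "ubfx w0, w1, #8, #16" instead of a shift plus a mask.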
void InstructionSelector::VisitWord32And(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 31)) {
        // Ubfx cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use ubfx with a smaller mask and the remaining bits will be
        // zeros.
        uint32_t lsb = mleft.right().Value();
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64And32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64And(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint64_t mask_width = base::bits::CountPopulation64(mask);
    uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        // Ubfx cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use ubfx with a smaller mask and the remaining bits will be
        // zeros.
        uint64_t lsb = mleft.right().Value();
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kArm64Lsl, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}
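

// Example: Shr(And(x, 0xFF00), 8) keeps only bits 8..15 of x, so it is
// selected as "ubfx w0, w1, #8, #8".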
void InstructionSelector::VisitWord32Shr(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    uint32_t lsb = m.right().Value();
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      if ((mask_msb + mask_width + lsb) == 32) {
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    uint64_t lsb = m.right().Value();
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      uint64_t mask_width = base::bits::CountPopulation64(mask);
      uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kArm64Lsr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Sxth/Sxtb for (x << K) >> K where K is 16 or 24.
  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kArm64Sxth32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kArm64Sxtb32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    }
  }
  VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Asr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}
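

// Example: Add(Mul(x, y), z) is selected as a single multiply-add,
// "madd w0, w1, w2, w3" (computing w1 * w2 + w3), saving the separate mul.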
void InstructionSelector::VisitInt32Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArm64Madd32, g.DefineAsRegister(node),
         g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArm64Madd32, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    Emit(kArm64Madd, g.DefineAsRegister(node),
         g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    Emit(kArm64Madd, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArm64Msub32, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }

  if (m.left().Is(0)) {
    Emit(kArm64Neg32, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
  } else {
    VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    Emit(kArm64Msub, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }

  if (m.left().Is(0)) {
    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
  }
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul32, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul, node);
}
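

// MulHigh is computed as a widening 32x32->64 multiply followed by a shift
// right by 32 (arithmetic for the signed case, logical for the unsigned
// case), leaving the high half of the product in the result register.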
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  // TODO(arm64): Can we do better here?
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  // TODO(arm64): Can we do better here?
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // 32-bit operations will write their result in a W register (implicitly
      // clearing the top 32 bits of the corresponding X register), so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
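

// If the 64-bit input is already a right shift by 32 or more, the bits of
// interest end up in the low word, so the shift itself produces the
// truncated value and no separate move is needed.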
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    Int64BinopMatcher m(value);
    if ((m.IsWord64Sar() && m.right().HasValue() &&
         (m.right().Value() == 32)) ||
        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseImmediate(m.right().node()));
      return;
    }
  }
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Sub, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Div, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRRFloat64(this, kArm64Float64Sqrt, node);
}


void InstructionSelector::VisitFloat64Floor(Node* node) {
  VisitRRFloat64(this, kArm64Float64Floor, node);
}


void InstructionSelector::VisitFloat64Ceil(Node* node) {
  VisitRRFloat64(this, kArm64Float64Ceil, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRRFloat64(this, kArm64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRRFloat64(this, kArm64Float64RoundTiesAway, node);
}


void InstructionSelector::VisitCall(Node* node) {
  Arm64OperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(node, &buffer, true, false);

  // Push the arguments to the stack.
  bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
  int aligned_push_count = buffer.pushed_nodes.size();
  // TODO(dcarney): claim and poke probably take small immediates,
  //                loop here or whatever.
  // Bump the stack pointer(s).
  if (aligned_push_count > 0) {
    // TODO(dcarney): it would be better to bump the csp here only
    //                and emit paired stores with increment for non c frames.
    Emit(kArm64Claim | MiscField::encode(aligned_push_count), g.NoOutput());
  }
  // Move arguments to the stack.
  {
    int slot = buffer.pushed_nodes.size() - 1;
    // Emit the uneven pushes.
    if (pushed_count_uneven) {
      Node* input = buffer.pushed_nodes[slot];
      Emit(kArm64Poke | MiscField::encode(slot), g.NoOutput(),
           g.UseRegister(input));
      slot--;
    }
    // Now all pushes can be done in pairs.
    for (; slot >= 0; slot -= 2) {
      Emit(kArm64PokePair | MiscField::encode(slot), g.NoOutput(),
           g.UseRegister(buffer.pushed_nodes[slot]),
           g.UseRegister(buffer.pushed_nodes[slot - 1]));
    }
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  InstructionOperand* first_output =
      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), first_output,
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  call_instr->MarkAsCall();
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative, ImmediateMode immediate_mode) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


static void VisitWord32Compare(InstructionSelector* selector, Node* node,
                               FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kArm64Cmp32, cont, false, kArithmeticImm);
}


static void VisitWordTest(InstructionSelector* selector, Node* node,
                          InstructionCode opcode, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
               cont);
}


static void VisitWord32Test(InstructionSelector* selector, Node* node,
                            FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst32, cont);
}


static void VisitWord64Test(InstructionSelector* selector, Node* node,
                            FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst, cont);
}
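

// AArch64 fcmp can compare directly against an immediate +0.0, so a
// comparison whose right input is the constant 0.0 avoids materializing
// the zero in a register.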
// Shared routine for multiple float compare operations.
static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                                FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == NULL || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                                     kArithmeticImm, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                                     kArithmeticImm, &cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                kArithmeticImm);
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kWord32And: {
        Int32BinopMatcher m(value);
        if (m.right().HasValue() &&
            (base::bits::CountPopulation32(m.right().Value()) == 1)) {
          // If the mask has only one bit set, we can use tbz/tbnz.
          DCHECK((cont.condition() == kEqual) ||
                 (cont.condition() == kNotEqual));
          Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
               g.UseRegister(m.left().node()),
               g.TempImmediate(
                   base::bits::CountTrailingZeros32(m.right().Value())),
               g.Label(cont.true_block()),
               g.Label(cont.false_block()))->MarkAsControl();
          return;
        }
        return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                kLogical32Imm);
      }
      case IrOpcode::kWord64And: {
        Int64BinopMatcher m(value);
        if (m.right().HasValue() &&
            (base::bits::CountPopulation64(m.right().Value()) == 1)) {
          // If the mask has only one bit set, we can use tbz/tbnz.
          DCHECK((cont.condition() == kEqual) ||
                 (cont.condition() == kNotEqual));
          Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
               g.UseRegister(m.left().node()),
               g.TempImmediate(
                   base::bits::CountTrailingZeros64(m.right().Value())),
               g.Label(cont.true_block()),
               g.Label(cont.false_block()))->MarkAsControl();
          return;
        }
        return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                kLogical64Imm);
      }
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, compare against 0 and branch.
  Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
       g.UseRegister(value), g.Label(cont.true_block()),
       g.Label(cont.false_block()))->MarkAsControl();
}
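

// Example of the cost heuristic below: for 4 cases spanning a value range
// of 10, the table switch costs (4 + 10) space + 3 * 3 time = 23 and the
// lookup switch costs (3 + 2 * 4) space + 3 * 4 time = 23, so the table
// version is (just barely) preferred.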
void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
                                      BasicBlock** case_branches,
                                      int32_t* case_values, size_t case_count,
                                      int32_t min_value, int32_t max_value) {
  Arm64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
  InstructionOperand default_operand = g.Label(default_branch);

  // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
  // is 2^31-1, so don't assume that it's non-zero below.
  size_t value_range =
      1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);

  // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
  // instruction.
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  if (case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (min_value) {
      index_operand = g.TempRegister();
      Emit(kArm64Sub32, index_operand, value_operand,
           g.TempImmediate(min_value));
    }
    size_t input_count = 2 + value_range;
    auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
    inputs[0] = index_operand;
    std::fill(&inputs[1], &inputs[input_count], default_operand);
    for (size_t index = 0; index < case_count; ++index) {
      size_t value = case_values[index] - min_value;
      BasicBlock* branch = case_branches[index];
      DCHECK_LE(0u, value);
      DCHECK_LT(value + 2, input_count);
      inputs[value + 2] = g.Label(branch);
    }
    Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
        ->MarkAsControl();
    return;
  }

  // Generate a sequence of conditional jumps.
  size_t input_count = 2 + case_count * 2;
  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
  inputs[0] = value_operand;
  inputs[1] = default_operand;
  for (size_t index = 0; index < case_count; ++index) {
    int32_t value = case_values[index];
    BasicBlock* branch = case_branches[index];
    inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
    inputs[index * 2 + 2 + 1] = g.Label(branch);
  }
  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
      ->MarkAsControl();
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Add:
          return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                  kArithmeticImm);
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                  kArithmeticImm);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                  kLogical32Imm);
        default:
          break;
      }
      return VisitWord32Test(this, value, &cont);
    }
  }
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat64Floor |
         MachineOperatorBuilder::kFloat64Ceil |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8