//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64TargetLowering class.
//
//===----------------------------------------------------------------------===//
#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64ExpandImm.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
using namespace llvm::PatternMatch;
#define DEBUG_TYPE "aarch64-lower"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
// FIXME: The necessary dtprel relocations don't seem to be supported
// well in the GNU bfd and gold linkers at the moment. Therefore, by
// default, for now, fall back to GeneralDynamic code generation.
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
    EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                             cl::desc("Enable AArch64 logical imm instruction "
                                      "optimization"),
                             cl::init(true));

// Temporary option added for the purpose of testing functionality added
// to DAGCombiner.cpp in D92230. It is expected that this can be removed
// in the future when both implementations are based on MGATHER rather
// than the GLD1 nodes added for the SVE gather load intrinsics.
static cl::opt<bool>
    EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
                                   cl::desc("Combine extends of AArch64 masked "
                                            "gather intrinsics"),
                                   cl::init(true));
/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;
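// Illustrative note: the helpers below map an element type or element count to
// the SVE vector type that fills a whole 128-bit granule, e.g. f32 maps to
// nxv4f32 and i64 maps to nxv2i64.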
static inline EVT getPackedSVEVectorVT(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for vector");
  case MVT::bf16:
    return MVT::nxv8bf16;
  }
}

// NOTE: Currently there's only a need to return integer vector types. If this
// changes then just add an extra "type" parameter.
static inline EVT getPackedSVEVectorVT(ElementCount EC) {
  switch (EC.getKnownMinValue()) {
  default:
    llvm_unreachable("unexpected element count for vector");
  }
}

static inline EVT getPromotedVTForPredicate(EVT VT) {
  assert(VT.isScalableVector() && (VT.getVectorElementType() == MVT::i1) &&
         "Expected scalable predicate vector type!");
  switch (VT.getVectorMinNumElements()) {
  default:
    llvm_unreachable("unexpected element count for vector");
  }
}
/// Returns true if VT's elements occupy the lowest bit positions of its
/// associated register class without any intervening space.
///
/// For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the
/// same register class, but only nxv8f16 can be treated as a packed vector.
static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) {
  assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal vector type!");
  return VT.isFixedLengthVector() ||
         VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock;
}
// Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading
// predicate and end with a passthru value matching the result type.
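// For illustration, such a node typically has the form
//   AArch64ISD::FNEG_MERGE_PASSTHRU(Pg, Src, Passthru)
// where lanes with an active Pg bit carry the computed result and inactive
// lanes carry the corresponding Passthru lane.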
static bool isMergePassthruOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64ISD::BITREVERSE_MERGE_PASSTHRU:
  case AArch64ISD::BSWAP_MERGE_PASSTHRU:
  case AArch64ISD::REVH_MERGE_PASSTHRU:
  case AArch64ISD::REVW_MERGE_PASSTHRU:
  case AArch64ISD::REVD_MERGE_PASSTHRU:
  case AArch64ISD::CTLZ_MERGE_PASSTHRU:
  case AArch64ISD::CTPOP_MERGE_PASSTHRU:
  case AArch64ISD::DUP_MERGE_PASSTHRU:
  case AArch64ISD::ABS_MERGE_PASSTHRU:
  case AArch64ISD::NEG_MERGE_PASSTHRU:
  case AArch64ISD::FNEG_MERGE_PASSTHRU:
  case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU:
  case AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU:
  case AArch64ISD::FCEIL_MERGE_PASSTHRU:
  case AArch64ISD::FFLOOR_MERGE_PASSTHRU:
  case AArch64ISD::FNEARBYINT_MERGE_PASSTHRU:
  case AArch64ISD::FRINT_MERGE_PASSTHRU:
  case AArch64ISD::FROUND_MERGE_PASSTHRU:
  case AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU:
  case AArch64ISD::FTRUNC_MERGE_PASSTHRU:
  case AArch64ISD::FP_ROUND_MERGE_PASSTHRU:
  case AArch64ISD::FP_EXTEND_MERGE_PASSTHRU:
  case AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU:
  case AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU:
  case AArch64ISD::FCVTZU_MERGE_PASSTHRU:
  case AArch64ISD::FCVTZS_MERGE_PASSTHRU:
  case AArch64ISD::FSQRT_MERGE_PASSTHRU:
  case AArch64ISD::FRECPX_MERGE_PASSTHRU:
  case AArch64ISD::FABS_MERGE_PASSTHRU:
    return true;
  }
}
// Returns true if inactive lanes are known to be zeroed by construction.
static bool isZeroingInactiveLanes(SDValue Op) {
  switch (Op.getOpcode()) {
  case ISD::SPLAT_VECTOR:
    // We guarantee i1 splat_vectors to zero the other lanes by
    // implementing it with ptrue and possibly a punpklo for nxv1i1.
    if (ISD::isConstantSplatVectorAllOnes(Op.getNode()))
      return true;
    return false;
  case AArch64ISD::PTRUE:
  case AArch64ISD::SETCC_MERGE_ZERO:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    switch (Op.getConstantOperandVal(0)) {
    default:
      return false;
    case Intrinsic::aarch64_sve_ptrue:
    case Intrinsic::aarch64_sve_pnext:
    case Intrinsic::aarch64_sve_cmpeq:
    case Intrinsic::aarch64_sve_cmpne:
    case Intrinsic::aarch64_sve_cmpge:
    case Intrinsic::aarch64_sve_cmpgt:
    case Intrinsic::aarch64_sve_cmphs:
    case Intrinsic::aarch64_sve_cmphi:
    case Intrinsic::aarch64_sve_cmpeq_wide:
    case Intrinsic::aarch64_sve_cmpne_wide:
    case Intrinsic::aarch64_sve_cmpge_wide:
    case Intrinsic::aarch64_sve_cmpgt_wide:
    case Intrinsic::aarch64_sve_cmplt_wide:
    case Intrinsic::aarch64_sve_cmple_wide:
    case Intrinsic::aarch64_sve_cmphs_wide:
    case Intrinsic::aarch64_sve_cmphi_wide:
    case Intrinsic::aarch64_sve_cmplo_wide:
    case Intrinsic::aarch64_sve_cmpls_wide:
    case Intrinsic::aarch64_sve_fcmpeq:
    case Intrinsic::aarch64_sve_fcmpne:
    case Intrinsic::aarch64_sve_fcmpge:
    case Intrinsic::aarch64_sve_fcmpgt:
    case Intrinsic::aarch64_sve_fcmpuo:
      return true;
    }
  }
  return false;
}
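// Knowing that inactive lanes are already zero lets later combines avoid
// re-zeroing such values (for example when a predicate result feeds a user
// that expects a zeroing predicate). This is a descriptive note, not an
// exhaustive list of the places that query it.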
AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
                                             const AArch64Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
  // we have to make something up. Arbitrarily, choose ZeroOrOne.
  setBooleanContents(ZeroOrOneBooleanContent);
  // When comparing vectors the result sets the different elements in the
  // vector to all-one or all-zero.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
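  // In practice this means a scalar i1 result is materialized as 0 or 1 in a
  // GPR (e.g. via CSET), while a vector compare such as CMEQ produces all-ones
  // or all-zeros lanes. (Descriptive note only.)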
  // Set up the register classes.
  addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);

  if (Subtarget->hasLS64()) {
    addRegisterClass(MVT::i64x8, &AArch64::GPR64x8ClassRegClass);
    setOperationAction(ISD::LOAD, MVT::i64x8, Custom);
    setOperationAction(ISD::STORE, MVT::i64x8, Custom);
  }

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::bf16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
    // Someone set us up the NEON.
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);
    addDRTypeForNEON(MVT::v1f64);
    addDRTypeForNEON(MVT::v4f16);
    if (Subtarget->hasBF16())
      addDRTypeForNEON(MVT::v4bf16);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);
    addQRTypeForNEON(MVT::v8f16);
    if (Subtarget->hasBF16())
      addQRTypeForNEON(MVT::v8bf16);
  }
  if (Subtarget->hasSVE() || Subtarget->hasSME()) {
    // Add legal sve predicate types
    addRegisterClass(MVT::nxv1i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv4i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv8i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv16i1, &AArch64::PPRRegClass);

    // Add legal sve data types
    addRegisterClass(MVT::nxv16i8, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv8i16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4i32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2i64, &AArch64::ZPRRegClass);

    addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass);

    if (Subtarget->hasBF16()) {
      addRegisterClass(MVT::nxv2bf16, &AArch64::ZPRRegClass);
      addRegisterClass(MVT::nxv4bf16, &AArch64::ZPRRegClass);
      addRegisterClass(MVT::nxv8bf16, &AArch64::ZPRRegClass);
    }
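    // When the subtarget guarantees a minimum SVE vector length (for example
    // via -msve-vector-bits), fixed-length vector types that fit can also be
    // kept in SVE Z registers; the loops below register those types. This is a
    // descriptive note; the exact cut-off is decided by
    // useSVEForFixedLengthVectorVT.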
    if (Subtarget->useSVEForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useSVEForFixedLengthVectorVT(VT))
          addRegisterClass(VT, &AArch64::ZPRRegClass);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useSVEForFixedLengthVectorVT(VT))
          addRegisterClass(VT, &AArch64::ZPRRegClass);
    }
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget->getRegisterInfo());
  // Provide all sorts of operation actions
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f16, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f16, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f16, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);

  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  // Custom lowering hooks are needed for XOR
  // to fold it into CSINC/CSINV.
  setOperationAction(ISD::XOR, MVT::i32, Custom);
  setOperationAction(ISD::XOR, MVT::i64, Custom);
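  // Illustrative intent: an XOR that inverts a conditionally selected value can
  // be folded into CSINV/CSINC (conditional select with inversion/increment)
  // instead of a CSEL followed by a separate EOR.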
  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, LibCall);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, LibCall);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, LibCall);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, LibCall);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
  // FIXME: f128 FMINIMUM and FMAXIMUM (including STRICT versions) currently
  // aren't handled.
  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);

  setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
  // Variable arguments.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Variable-sized objects.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // Constant pool entries
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);

  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  // AArch64 lacks both left-rotate and popcount instructions.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }

  // AArch64 doesn't have i32 MULH{S|U}.
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);

  // AArch64 doesn't have {U|S}MUL_LOHI.
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Custom);

  setOperationAction(ISD::PARITY, MVT::i64, Custom);
  setOperationAction(ISD::PARITY, MVT::i128, Custom);

  setOperationAction(ISD::ABS, MVT::i32, Custom);
  setOperationAction(ISD::ABS, MVT::i64, Custom);

  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  // Custom lower Add/Sub/Mul with overflow.
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::SADDO, MVT::i64, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i64, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i64, Custom);
  setOperationAction(ISD::SMULO, MVT::i32, Custom);
  setOperationAction(ISD::SMULO, MVT::i64, Custom);
  setOperationAction(ISD::UMULO, MVT::i32, Custom);
  setOperationAction(ISD::UMULO, MVT::i64, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SADDO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::SADDO_CARRY, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO_CARRY, MVT::i64, Custom);
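  // Descriptive note: these map naturally onto the NZCV flags; for example an
  // i32 UADDO is usually selected as an ADDS whose carry flag is read back
  // with CSET, and the *CARRY nodes chain ADCS/SBCS. (Sketch of the typical
  // selection, not a guarantee for every case.)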
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  if (Subtarget->hasFullFP16())
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
  else
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);
  for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI,
                  ISD::FCOS, ISD::FSIN, ISD::FSINCOS,
                  ISD::FEXP, ISD::FEXP2, ISD::FLOG,
                  ISD::FLOG2, ISD::FLOG10, ISD::STRICT_FREM,
                  ISD::STRICT_FPOW, ISD::STRICT_FPOWI, ISD::STRICT_FCOS,
                  ISD::STRICT_FSIN, ISD::STRICT_FEXP, ISD::STRICT_FEXP2,
                  ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10}) {
    setOperationAction(Op, MVT::f16, Promote);
    setOperationAction(Op, MVT::v4f16, Expand);
    setOperationAction(Op, MVT::v8f16, Expand);
  }

  if (!Subtarget->hasFullFP16()) {
    for (auto Op :
         {ISD::SELECT, ISD::SELECT_CC, ISD::SETCC,
          ISD::BR_CC, ISD::FADD, ISD::FSUB,
          ISD::FMUL, ISD::FDIV, ISD::FMA,
          ISD::FNEG, ISD::FABS, ISD::FCEIL,
          ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT,
          ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN,
          ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM,
          ISD::FMINIMUM, ISD::FMAXIMUM, ISD::STRICT_FADD,
          ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
          ISD::STRICT_FMA, ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
          ISD::STRICT_FSQRT, ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT,
          ISD::STRICT_FROUND, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
          ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
          ISD::STRICT_FMAXIMUM})
      setOperationAction(Op, MVT::f16, Promote);

    // Round-to-integer operations need custom lowering for fp16, as Promote
    // doesn't work because the result type is integer.
    for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT,
                    ISD::STRICT_LLRINT})
      setOperationAction(Op, MVT::f16, Custom);
    // promote v4f16 to v4f32 when that is known to be safe.
    setOperationAction(ISD::FADD, MVT::v4f16, Promote);
    setOperationAction(ISD::FSUB, MVT::v4f16, Promote);
    setOperationAction(ISD::FMUL, MVT::v4f16, Promote);
    setOperationAction(ISD::FDIV, MVT::v4f16, Promote);
    AddPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32);

    setOperationAction(ISD::FABS, MVT::v4f16, Expand);
    setOperationAction(ISD::FNEG, MVT::v4f16, Expand);
    setOperationAction(ISD::FROUND, MVT::v4f16, Expand);
    setOperationAction(ISD::FROUNDEVEN, MVT::v4f16, Expand);
    setOperationAction(ISD::FMA, MVT::v4f16, Expand);
    setOperationAction(ISD::SETCC, MVT::v4f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT, MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v4f16, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f16, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f16, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f16, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f16, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f16, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand);
    setOperationAction(ISD::FSQRT, MVT::v4f16, Expand);

    setOperationAction(ISD::FABS, MVT::v8f16, Expand);
    setOperationAction(ISD::FADD, MVT::v8f16, Expand);
    setOperationAction(ISD::FCEIL, MVT::v8f16, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Expand);
    setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v8f16, Expand);
    setOperationAction(ISD::FMA, MVT::v8f16, Expand);
    setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand);
    setOperationAction(ISD::FNEG, MVT::v8f16, Expand);
    setOperationAction(ISD::FROUND, MVT::v8f16, Expand);
    setOperationAction(ISD::FROUNDEVEN, MVT::v8f16, Expand);
    setOperationAction(ISD::FRINT, MVT::v8f16, Expand);
    setOperationAction(ISD::FSQRT, MVT::v8f16, Expand);
    setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v8f16, Expand);
    setOperationAction(ISD::SETCC, MVT::v8f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT, MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v8f16, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v8f16, Expand);
  }
  // AArch64 has implementations of a lot of rounding-like FP operations.
  for (auto Op :
       {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL,
        ISD::FRINT, ISD::FTRUNC, ISD::FROUND,
        ISD::FROUNDEVEN, ISD::FMINNUM, ISD::FMAXNUM,
        ISD::FMINIMUM, ISD::FMAXIMUM, ISD::LROUND,
        ISD::LLROUND, ISD::LRINT, ISD::LLRINT,
        ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL, ISD::STRICT_FNEARBYINT,
        ISD::STRICT_FRINT, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
        ISD::STRICT_FROUND, ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM,
        ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_LROUND,
        ISD::STRICT_LLROUND, ISD::STRICT_LRINT, ISD::STRICT_LLRINT}) {
    for (MVT Ty : {MVT::f32, MVT::f64})
      setOperationAction(Op, Ty, Legal);
    if (Subtarget->hasFullFP16())
      setOperationAction(Op, MVT::f16, Legal);
  }

  // Basic strict FP operations are legal
  for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                  ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) {
    for (MVT Ty : {MVT::f32, MVT::f64})
      setOperationAction(Op, Ty, Legal);
    if (Subtarget->hasFullFP16())
      setOperationAction(Op, MVT::f16, Legal);
  }

  // Strict conversion to a larger type is legal
  for (auto VT : {MVT::f32, MVT::f64})
    setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);

  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
  // Generate outline atomics library calls only if LSE was not specified for
  // subtarget
  if (Subtarget->outlineAtomics() && !Subtarget->hasLSE()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, LibCall);
#define LCALLNAMES(A, B, N)                                                    \
  setLibcallName(A##N##_RELAX, #B #N "_relax");                                \
  setLibcallName(A##N##_ACQ, #B #N "_acq");                                    \
  setLibcallName(A##N##_REL, #B #N "_rel");                                    \
  setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel");
#define LCALLNAME4(A, B)                                                       \
  LCALLNAMES(A, B, 1)                                                          \
  LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8)
#define LCALLNAME5(A, B)                                                       \
  LCALLNAMES(A, B, 1)                                                          \
  LCALLNAMES(A, B, 2)                                                          \
  LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16)
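    // For illustration, LCALLNAME5(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas)
    // expands to setLibcallName calls registering "__aarch64_cas1_relax",
    // "__aarch64_cas1_acq", "__aarch64_cas1_rel" and "__aarch64_cas1_acq_rel",
    // and likewise for the 2-, 4-, 8- and 16-byte widths.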
    LCALLNAME5(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_SWP, __aarch64_swp)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDSET, __aarch64_ldset)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDCLR, __aarch64_ldclr)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDEOR, __aarch64_ldeor)
#undef LCALLNAMES
#undef LCALLNAME4
#undef LCALLNAME5
  }
  // 128-bit loads and stores can be done without expanding
  setOperationAction(ISD::LOAD, MVT::i128, Custom);
  setOperationAction(ISD::STORE, MVT::i128, Custom);

  // Aligned 128-bit loads and stores are single-copy atomic according to the
  // v8.4a spec.
  if (Subtarget->hasLSE2()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
  }

  // 256 bit non-temporal stores can be lowered to STNP. Do this as part of the
  // custom lowering, as there are no un-paired non-temporal stores and
  // legalization will break up 256 bit inputs.
  setOperationAction(ISD::STORE, MVT::v32i8, Custom);
  setOperationAction(ISD::STORE, MVT::v16i16, Custom);
  setOperationAction(ISD::STORE, MVT::v16f16, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8f32, Custom);
  setOperationAction(ISD::STORE, MVT::v4f64, Custom);
  setOperationAction(ISD::STORE, MVT::v4i64, Custom);
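  // For example, a single 256-bit non-temporal store of v8i32 can be emitted
  // as one "stnp q0, q1, [xN]" pair rather than being split up by generic
  // legalization. (Illustrative; the details live in the STORE lowering code.)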
  // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
  // This requires the Performance Monitors extension.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);

  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    // Issue __sincos_stret if available.
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  } else {
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  }

  if (Subtarget->getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }
  // Make floating-point constants legal for the large code model, so they don't
  // become loads from the constant pool.
  if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
    setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  }

  // AArch64 does not have floating-point extending loads, i1 sign-extending
  // load, floating-point truncating stores, or v2i32->v2i16 truncating store.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
  }
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);

  setOperationAction(ISD::BITCAST, MVT::i16, Custom);
  setOperationAction(ISD::BITCAST, MVT::f16, Custom);
  setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
  // Indexed loads and stores are supported.
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    setIndexedLoadAction(im, MVT::i8, Legal);
    setIndexedLoadAction(im, MVT::i16, Legal);
    setIndexedLoadAction(im, MVT::i32, Legal);
    setIndexedLoadAction(im, MVT::i64, Legal);
    setIndexedLoadAction(im, MVT::f64, Legal);
    setIndexedLoadAction(im, MVT::f32, Legal);
    setIndexedLoadAction(im, MVT::f16, Legal);
    setIndexedLoadAction(im, MVT::bf16, Legal);
    setIndexedStoreAction(im, MVT::i8, Legal);
    setIndexedStoreAction(im, MVT::i16, Legal);
    setIndexedStoreAction(im, MVT::i32, Legal);
    setIndexedStoreAction(im, MVT::i64, Legal);
    setIndexedStoreAction(im, MVT::f64, Legal);
    setIndexedStoreAction(im, MVT::f32, Legal);
    setIndexedStoreAction(im, MVT::f16, Legal);
    setIndexedStoreAction(im, MVT::bf16, Legal);
  }
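  // Descriptive note: "indexed" covers the pre- and post-indexed addressing
  // forms, e.g. "ldr x0, [x1, #8]!" (pre-increment) and "ldr x0, [x1], #8"
  // (post-increment), which fold the pointer update into the memory access.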
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);

  // We combine OR nodes for bitfield operations.
  setTargetDAGCombine(ISD::OR);
  // Try to create BICs for vector ANDs.
  setTargetDAGCombine(ISD::AND);

  // Vector add and sub nodes may conceal a high-half opportunity.
  // Also, try to fold ADD into CSINC/CSINV..
  setTargetDAGCombine({ISD::ADD, ISD::ABS, ISD::SUB, ISD::XOR, ISD::SINT_TO_FP,
                       ISD::UINT_TO_FP});

  setTargetDAGCombine({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
                       ISD::FP_TO_UINT_SAT, ISD::FDIV});

  // Try and combine setcc with csel
  setTargetDAGCombine(ISD::SETCC);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  setTargetDAGCombine({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND,
                       ISD::VECTOR_SPLICE, ISD::SIGN_EXTEND_INREG,
                       ISD::CONCAT_VECTORS, ISD::EXTRACT_SUBVECTOR,
                       ISD::INSERT_SUBVECTOR, ISD::STORE, ISD::BUILD_VECTOR});
  if (Subtarget->supportsAddressTopByteIgnored())
    setTargetDAGCombine(ISD::LOAD);

  setTargetDAGCombine(ISD::MSTORE);

  setTargetDAGCombine(ISD::MUL);

  setTargetDAGCombine({ISD::SELECT, ISD::VSELECT});

  setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
                       ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
                       ISD::VECREDUCE_ADD, ISD::STEP_VECTOR});

  setTargetDAGCombine({ISD::MGATHER, ISD::MSCATTER});

  setTargetDAGCombine(ISD::FP_EXTEND);

  setTargetDAGCombine(ISD::GlobalAddress);
  // In case of strict alignment, avoid an excessive number of byte wide stores.
  MaxStoresPerMemsetOptSize = 8;
  MaxStoresPerMemset =
      Subtarget->requiresStrictAlign() ? MaxStoresPerMemsetOptSize : 32;

  MaxGluedStoresPerMemcpy = 4;
  MaxStoresPerMemcpyOptSize = 4;
  MaxStoresPerMemcpy =
      Subtarget->requiresStrictAlign() ? MaxStoresPerMemcpyOptSize : 16;

  MaxStoresPerMemmoveOptSize = 4;
  MaxStoresPerMemmove = 4;

  MaxLoadsPerMemcmpOptSize = 4;
  MaxLoadsPerMemcmp =
      Subtarget->requiresStrictAlign() ? MaxLoadsPerMemcmpOptSize : 8;
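  // Descriptive note: the *OptSize limits apply when optimising for size; the
  // larger limits let, for example, a modest memcpy be inlined as a handful of
  // 16-byte LDP/STP pairs instead of a library call when alignment permits.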
  setStackPointerRegisterToSaveRestore(AArch64::SP);

  setSchedulingPreference(Sched::Hybrid);

  EnableExtLdPromotion = true;

  // Set required alignment.
  setMinFunctionAlignment(Align(4));
  // Set preferred alignments.
  setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
  setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
  setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));

  // Only change the limit for entries in a jump table if specified by
  // the sub target, but not at the command line.
  unsigned MaxJT = STI.getMaximumJumpTableSize();
  if (MaxJT && getMaximumJumpTableSize() == UINT_MAX)
    setMaximumJumpTableSize(MaxJT);

  setHasExtractBitsInsn(true);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->hasNEON()) {
    // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
    // silliness like this:
    for (auto Op :
         {ISD::SELECT, ISD::SELECT_CC, ISD::SETCC,
          ISD::BR_CC, ISD::FADD, ISD::FSUB,
          ISD::FMUL, ISD::FDIV, ISD::FMA,
          ISD::FNEG, ISD::FABS, ISD::FCEIL,
          ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT,
          ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN,
          ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM,
          ISD::FMINIMUM, ISD::FMAXIMUM, ISD::STRICT_FADD,
          ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
          ISD::STRICT_FMA, ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
          ISD::STRICT_FSQRT, ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT,
          ISD::STRICT_FROUND, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
          ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
          ISD::STRICT_FMAXIMUM})
      setOperationAction(Op, MVT::v1f64, Expand);

    for (auto Op :
         {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP,
          ISD::FP_ROUND, ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT, ISD::MUL,
          ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT,
          ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_ROUND})
      setOperationAction(Op, MVT::v1i64, Expand);
    // AArch64 doesn't have a direct vector ->f32 conversion instructions for
    // elements smaller than i32, so promote the input to i32 first.
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32);

    // Similarly, there is no direct i32 -> f64 vector conversion instruction.
    // Or, direct i32 -> f16 vector conversion. Set it to custom, so the
    // conversion happens in two steps: v4i32 -> v4f32 -> v4f16
    for (auto Op : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
                    ISD::STRICT_UINT_TO_FP})
      for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v4i32})
        setOperationAction(Op, VT, Custom);

    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    } else {
      // when AArch64 doesn't have fullfp16 support, promote the input
      // to i32 first.
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v16i8, MVT::v16i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v16i8, MVT::v16i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32);
    }
    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);
    setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v2i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom);
    for (auto VT : {MVT::v1i64, MVT::v2i64}) {
      setOperationAction(ISD::UMAX, VT, Custom);
      setOperationAction(ISD::SMAX, VT, Custom);
      setOperationAction(ISD::UMIN, VT, Custom);
      setOperationAction(ISD::SMIN, VT, Custom);
    }

    // AArch64 doesn't have MUL.2d:
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
    // Custom handling for some quad-vector types to detect MULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
                    MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }

    for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
                   MVT::v4i32}) {
      setOperationAction(ISD::AVGFLOORS, VT, Legal);
      setOperationAction(ISD::AVGFLOORU, VT, Legal);
      setOperationAction(ISD::AVGCEILS, VT, Legal);
      setOperationAction(ISD::AVGCEILU, VT, Legal);
      setOperationAction(ISD::ABDS, VT, Legal);
      setOperationAction(ISD::ABDU, VT, Legal);
    }
    // Vector reductions
    for (MVT VT : { MVT::v4f16, MVT::v2f32,
                    MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
      if (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()) {
        setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      }
      setOperationAction(ISD::VECREDUCE_FADD, VT, Legal);
    }

    for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
                    MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
    }
    setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
    setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
    // Likewise, narrowing and extending vector loads/stores aren't handled
    // directly.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
        setOperationAction(ISD::MULHS, VT, Legal);
        setOperationAction(ISD::MULHU, VT, Legal);
      } else {
        setOperationAction(ISD::MULHS, VT, Expand);
        setOperationAction(ISD::MULHU, VT, Expand);
      }
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);

      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    // AArch64 has implementations of a lot of rounding-like FP operations.
    for (auto Op :
         {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
          ISD::FROUND, ISD::FROUNDEVEN, ISD::STRICT_FFLOOR,
          ISD::STRICT_FNEARBYINT, ISD::STRICT_FCEIL, ISD::STRICT_FRINT,
          ISD::STRICT_FTRUNC, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN}) {
      for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, Ty, Legal);
      if (Subtarget->hasFullFP16())
        for (MVT Ty : {MVT::v4f16, MVT::v8f16})
          setOperationAction(Op, Ty, Legal);
    }

    setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
    // ADDP custom lowering
    for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::ADD, VT, Custom);
    // FADDP custom lowering
    for (MVT VT : { MVT::v16f16, MVT::v8f32, MVT::v4f64 })
      setOperationAction(ISD::FADD, VT, Custom);
  }

  if (Subtarget->hasSME()) {
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  }

  // FIXME: Move lowering for more nodes here if those are common between
  // SVE and SME.
  if (Subtarget->hasSVE() || Subtarget->hasSME()) {
    for (auto VT :
         {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
  }
  if (Subtarget->hasSVE()) {
    for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) {
      setOperationAction(ISD::BITREVERSE, VT, Custom);
      setOperationAction(ISD::BSWAP, VT, Custom);
      setOperationAction(ISD::CTLZ, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);
      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MUL, VT, Custom);
      setOperationAction(ISD::MULHS, VT, Custom);
      setOperationAction(ISD::MULHU, VT, Custom);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::SMIN, VT, Custom);
      setOperationAction(ISD::UMIN, VT, Custom);
      setOperationAction(ISD::SMAX, VT, Custom);
      setOperationAction(ISD::UMAX, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::ABS, VT, Custom);
      setOperationAction(ISD::ABDS, VT, Custom);
      setOperationAction(ISD::ABDU, VT, Custom);
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);

      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
    }
    // Illegal unpacked integer vector types.
    for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) {
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
    }

    // Legalize unpacked bitcasts to REINTERPRET_CAST.
    for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16,
                    MVT::nxv4bf16, MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32})
      setOperationAction(ISD::BITCAST, VT, Custom);

    for (auto VT :
         { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8,
           MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 })
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal);
    for (auto VT :
         {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);

      // There are no legal MVT::nxv16f## based types.
      if (VT != MVT::nxv16i1) {
        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      }
    }
    // NEON doesn't support masked loads/stores/gathers/scatters, but SVE does
    for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64,
                    MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
                    MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);
    }

    // Firstly, exclude all scalable vector extending loads/truncating stores,
    // include both integer and floating scalable vector.
    for (MVT VT : MVT::scalable_vector_valuetypes()) {
      for (MVT InnerVT : MVT::scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    // Then, selectively enable those which we directly support.
    setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i8, Legal);
    setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i16, Legal);
    setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i32, Legal);
    setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i8, Legal);
    setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i16, Legal);
    setTruncStoreAction(MVT::nxv8i16, MVT::nxv8i8, Legal);
    for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
      setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i8, Legal);
      setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i16, Legal);
      setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i32, Legal);
      setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i8, Legal);
      setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i16, Legal);
      setLoadExtAction(Op, MVT::nxv8i16, MVT::nxv8i8, Legal);
    }

    // SVE supports truncating stores of 64 and 128-bit vectors
    setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
    setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);
    setTruncStoreAction(MVT::v2i64, MVT::v2i32, Custom);
    setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
    setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
    for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
                    MVT::nxv4f32, MVT::nxv2f64}) {
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);
      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::FADD, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
      setOperationAction(ISD::FDIV, VT, Custom);
      setOperationAction(ISD::FMA, VT, Custom);
      setOperationAction(ISD::FMAXIMUM, VT, Custom);
      setOperationAction(ISD::FMAXNUM, VT, Custom);
      setOperationAction(ISD::FMINIMUM, VT, Custom);
      setOperationAction(ISD::FMINNUM, VT, Custom);
      setOperationAction(ISD::FMUL, VT, Custom);
      setOperationAction(ISD::FNEG, VT, Custom);
      setOperationAction(ISD::FSUB, VT, Custom);
      setOperationAction(ISD::FCEIL, VT, Custom);
      setOperationAction(ISD::FFLOOR, VT, Custom);
      setOperationAction(ISD::FNEARBYINT, VT, Custom);
      setOperationAction(ISD::FRINT, VT, Custom);
      setOperationAction(ISD::FROUND, VT, Custom);
      setOperationAction(ISD::FROUNDEVEN, VT, Custom);
      setOperationAction(ISD::FTRUNC, VT, Custom);
      setOperationAction(ISD::FSQRT, VT, Custom);
      setOperationAction(ISD::FABS, VT, Custom);
      setOperationAction(ISD::FP_EXTEND, VT, Custom);
      setOperationAction(ISD::FP_ROUND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);

      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);

      setCondCodeAction(ISD::SETO, VT, Expand);
      setCondCodeAction(ISD::SETOLT, VT, Expand);
      setCondCodeAction(ISD::SETLT, VT, Expand);
      setCondCodeAction(ISD::SETOLE, VT, Expand);
      setCondCodeAction(ISD::SETLE, VT, Expand);
      setCondCodeAction(ISD::SETULT, VT, Expand);
      setCondCodeAction(ISD::SETULE, VT, Expand);
      setCondCodeAction(ISD::SETUGE, VT, Expand);
      setCondCodeAction(ISD::SETUGT, VT, Expand);
      setCondCodeAction(ISD::SETUEQ, VT, Expand);
      setCondCodeAction(ISD::SETONE, VT, Expand);
    }
    for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);
      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
    }

    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
1363 // NEON doesn't support integer divides, but SVE does
1364 for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
1365 MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
1366 setOperationAction(ISD::SDIV, VT, Custom);
1367 setOperationAction(ISD::UDIV, VT, Custom);
1370 // NEON doesn't support 64-bit vector integer muls, but SVE does.
1371 setOperationAction(ISD::MUL, MVT::v1i64, Custom);
1372 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
1374 // NOTE: Currently this has to happen after computeRegisterProperties rather
1375 // than the preferred option of combining it with the addRegisterClass call.
1376 if (Subtarget->useSVEForFixedLengthVectors()) {
1377 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
1378 if (useSVEForFixedLengthVectorVT(VT))
1379 addTypeForFixedLengthSVE(VT);
1380 for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
1381 if (useSVEForFixedLengthVectorVT(VT))
1382 addTypeForFixedLengthSVE(VT);
// 64-bit results can mean an input that is wider than NEON.
1385 for (auto VT : {MVT::v8i8, MVT::v4i16})
1386 setOperationAction(ISD::TRUNCATE, VT, Custom);
1387 setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
// 128-bit results imply an input that is wider than NEON.
1390 for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
1391 setOperationAction(ISD::TRUNCATE, VT, Custom);
1392 for (auto VT : {MVT::v8f16, MVT::v4f32})
1393 setOperationAction(ISD::FP_ROUND, VT, Custom);
1395 // These operations are not supported on NEON but SVE can do them.
1396 setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
1397 setOperationAction(ISD::CTLZ, MVT::v1i64, Custom);
1398 setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1399 setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
1400 setOperationAction(ISD::MULHS, MVT::v1i64, Custom);
1401 setOperationAction(ISD::MULHS, MVT::v2i64, Custom);
1402 setOperationAction(ISD::MULHU, MVT::v1i64, Custom);
1403 setOperationAction(ISD::MULHU, MVT::v2i64, Custom);
1404 setOperationAction(ISD::SMAX, MVT::v1i64, Custom);
1405 setOperationAction(ISD::SMAX, MVT::v2i64, Custom);
1406 setOperationAction(ISD::SMIN, MVT::v1i64, Custom);
1407 setOperationAction(ISD::SMIN, MVT::v2i64, Custom);
1408 setOperationAction(ISD::UMAX, MVT::v1i64, Custom);
1409 setOperationAction(ISD::UMAX, MVT::v2i64, Custom);
1410 setOperationAction(ISD::UMIN, MVT::v1i64, Custom);
1411 setOperationAction(ISD::UMIN, MVT::v2i64, Custom);
1412 setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom);
1413 setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom);
1414 setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom);
1415 setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom);
1417 // Int operations with no NEON support.
1418 for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
1419 MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
1420 setOperationAction(ISD::BITREVERSE, VT, Custom);
1421 setOperationAction(ISD::CTTZ, VT, Custom);
1422 setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1423 setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1424 setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1427 // FP operations with no NEON support.
1428 for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32,
1429 MVT::v1f64, MVT::v2f64})
1430 setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1432 // Use SVE for vectors with more than 2 elements.
1433 for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32})
1434 setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1437 setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i64);
1438 setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i32);
1439 setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i16);
1440 setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8);
1442 setOperationAction(ISD::VSCALE, MVT::i32, Custom);
1445 if (Subtarget->hasMOPS() && Subtarget->hasMTE()) {
1446 // Only required for llvm.aarch64.mops.memset.tag
1447 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
1450 PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
1452 IsStrictFPEnabled = true;
1455 void AArch64TargetLowering::addTypeForNEON(MVT VT) {
1456 assert(VT.isVector() && "VT should be a vector type");
1458 if (VT.isFloatingPoint()) {
1459 MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT();
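// e.g. a v2f32 load or store is performed as a v2i32 one; the bit pattern in
// memory and in the register is unchanged.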
1460 setOperationPromotedToType(ISD::LOAD, VT, PromoteTo);
1461 setOperationPromotedToType(ISD::STORE, VT, PromoteTo);
1464 // Mark vector float intrinsics as expand.
1465 if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
1466 setOperationAction(ISD::FSIN, VT, Expand);
1467 setOperationAction(ISD::FCOS, VT, Expand);
1468 setOperationAction(ISD::FPOW, VT, Expand);
1469 setOperationAction(ISD::FLOG, VT, Expand);
1470 setOperationAction(ISD::FLOG2, VT, Expand);
1471 setOperationAction(ISD::FLOG10, VT, Expand);
1472 setOperationAction(ISD::FEXP, VT, Expand);
1473 setOperationAction(ISD::FEXP2, VT, Expand);
1476 // But we do support custom-lowering for FCOPYSIGN.
1477 if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
1478 ((VT == MVT::v4f16 || VT == MVT::v8f16) && Subtarget->hasFullFP16()))
1479 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1481 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1482 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1483 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1484 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1485 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1486 setOperationAction(ISD::SRA, VT, Custom);
1487 setOperationAction(ISD::SRL, VT, Custom);
1488 setOperationAction(ISD::SHL, VT, Custom);
1489 setOperationAction(ISD::OR, VT, Custom);
1490 setOperationAction(ISD::SETCC, VT, Custom);
1491 setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
1493 setOperationAction(ISD::SELECT, VT, Expand);
1494 setOperationAction(ISD::SELECT_CC, VT, Expand);
1495 setOperationAction(ISD::VSELECT, VT, Expand);
1496 for (MVT InnerVT : MVT::all_valuetypes())
1497 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
// CNT supports only B element sizes; use UADDLP to widen to larger elements.
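// e.g. a v4i16 CTPOP is emitted as a byte-wise CNT followed by a UADDLP that
// sums adjacent byte counts into halfword elements.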
1500 if (VT != MVT::v8i8 && VT != MVT::v16i8)
1501 setOperationAction(ISD::CTPOP, VT, Custom);
1503 setOperationAction(ISD::UDIV, VT, Expand);
1504 setOperationAction(ISD::SDIV, VT, Expand);
1505 setOperationAction(ISD::UREM, VT, Expand);
1506 setOperationAction(ISD::SREM, VT, Expand);
1507 setOperationAction(ISD::FREM, VT, Expand);
1509 for (unsigned Opcode :
1510 {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
1511 ISD::FP_TO_UINT_SAT, ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1512 setOperationAction(Opcode, VT, Custom);
1514 if (!VT.isFloatingPoint())
1515 setOperationAction(ISD::ABS, VT, Legal);
1517 // [SU][MIN|MAX] are available for all NEON types apart from i64.
1518 if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
1519 for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
1520 setOperationAction(Opcode, VT, Legal);
// F[MIN|MAX][NUM|NAN] and simple strict operations are available for all FP types.
1524 if (VT.isFloatingPoint() &&
1525 VT.getVectorElementType() != MVT::bf16 &&
1526 (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()))
1527 for (unsigned Opcode :
1528 {ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMAXNUM,
1529 ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINNUM,
1530 ISD::STRICT_FMAXNUM, ISD::STRICT_FADD, ISD::STRICT_FSUB,
1531 ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA,
1533 setOperationAction(Opcode, VT, Legal);
1535 // Strict fp extend and trunc are legal
1536 if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 16)
1537 setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
1538 if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 64)
1539 setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
1541 // FIXME: We could potentially make use of the vector comparison instructions
// for STRICT_FSETCC and STRICT_FSETCCS, but there are a number of issues:
1544 // * FCMPEQ/NE are quiet comparisons, the rest are signalling comparisons,
1545 // so we would need to expand when the condition code doesn't match the
1546 // kind of comparison.
1547 // * Some kinds of comparison require more than one FCMXY instruction so
1548 // would need to be expanded instead.
1549 // * The lowering of the non-strict versions involves target-specific ISD
1550 // nodes so we would likely need to add strict versions of all of them and
1551 // handle them appropriately.
1552 setOperationAction(ISD::STRICT_FSETCC, VT, Expand);
1553 setOperationAction(ISD::STRICT_FSETCCS, VT, Expand);
1555 if (Subtarget->isLittleEndian()) {
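// i.e. the pre/post-indexed (writeback) addressing forms, such as a
// post-incremented "ldr q0, [x0], #16", become selectable for this type.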
1556 for (unsigned im = (unsigned)ISD::PRE_INC;
1557 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
1558 setIndexedLoadAction(im, VT, Legal);
1559 setIndexedStoreAction(im, VT, Legal);
1564 bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
1566 // Only SVE has a 1:1 mapping from intrinsic -> instruction (whilelo).
1567 if (!Subtarget->hasSVE())
1570 // We can only support legal predicate result types. We can use the SVE
1571 // whilelo instruction for generating fixed-width predicates too.
1572 if (ResVT != MVT::nxv2i1 && ResVT != MVT::nxv4i1 && ResVT != MVT::nxv8i1 &&
1573 ResVT != MVT::nxv16i1 && ResVT != MVT::v2i1 && ResVT != MVT::v4i1 &&
1574 ResVT != MVT::v8i1 && ResVT != MVT::v16i1)
1577 // The whilelo instruction only works with i32 or i64 scalar inputs.
1578 if (OpVT != MVT::i32 && OpVT != MVT::i64)
1584 void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
1585 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
1587 // By default everything must be expanded.
1588 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
1589 setOperationAction(Op, VT, Expand);
1591 // We use EXTRACT_SUBVECTOR to "cast" a scalable vector to a fixed length one.
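// e.g. a fixed-length v4i32 is operated on in the low lanes of a scalable
// nxv4i32 container and the result is extracted back out at index 0.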
1592 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1594 if (VT.isFloatingPoint()) {
1595 setCondCodeAction(ISD::SETO, VT, Expand);
1596 setCondCodeAction(ISD::SETOLT, VT, Expand);
1597 setCondCodeAction(ISD::SETLT, VT, Expand);
1598 setCondCodeAction(ISD::SETOLE, VT, Expand);
1599 setCondCodeAction(ISD::SETLE, VT, Expand);
1600 setCondCodeAction(ISD::SETULT, VT, Expand);
1601 setCondCodeAction(ISD::SETULE, VT, Expand);
1602 setCondCodeAction(ISD::SETUGE, VT, Expand);
1603 setCondCodeAction(ISD::SETUGT, VT, Expand);
1604 setCondCodeAction(ISD::SETUEQ, VT, Expand);
1605 setCondCodeAction(ISD::SETONE, VT, Expand);
1608 // Mark integer truncating stores/extending loads as having custom lowering
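// (e.g. for VT == v8i32 this registers v8i8 and v8i16 as custom inner types
// for truncating stores and extending loads).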
1609 if (VT.isInteger()) {
1610 MVT InnerVT = VT.changeVectorElementType(MVT::i8);
1611 while (InnerVT != VT) {
1612 setTruncStoreAction(VT, InnerVT, Custom);
1613 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom);
1614 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom);
1615 InnerVT = InnerVT.changeVectorElementType(
1616 MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits()));
// Mark floating-point truncating stores/extending loads as having custom lowering.
1622 if (VT.isFloatingPoint()) {
1623 MVT InnerVT = VT.changeVectorElementType(MVT::f16);
1624 while (InnerVT != VT) {
1625 setTruncStoreAction(VT, InnerVT, Custom);
1626 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Custom);
1627 InnerVT = InnerVT.changeVectorElementType(
1628 MVT::getFloatingPointVT(2 * InnerVT.getScalarSizeInBits()));
1632 // Lower fixed length vector operations to scalable equivalents.
1633 setOperationAction(ISD::ABS, VT, Custom);
1634 setOperationAction(ISD::ADD, VT, Custom);
1635 setOperationAction(ISD::AND, VT, Custom);
1636 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1637 setOperationAction(ISD::BITCAST, VT, Custom);
1638 setOperationAction(ISD::BITREVERSE, VT, Custom);
1639 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1640 setOperationAction(ISD::BSWAP, VT, Custom);
1641 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1642 setOperationAction(ISD::CTLZ, VT, Custom);
1643 setOperationAction(ISD::CTPOP, VT, Custom);
1644 setOperationAction(ISD::CTTZ, VT, Custom);
1645 setOperationAction(ISD::FABS, VT, Custom);
1646 setOperationAction(ISD::FADD, VT, Custom);
1647 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1648 setOperationAction(ISD::FCEIL, VT, Custom);
1649 setOperationAction(ISD::FDIV, VT, Custom);
1650 setOperationAction(ISD::FFLOOR, VT, Custom);
1651 setOperationAction(ISD::FMA, VT, Custom);
1652 setOperationAction(ISD::FMAXIMUM, VT, Custom);
1653 setOperationAction(ISD::FMAXNUM, VT, Custom);
1654 setOperationAction(ISD::FMINIMUM, VT, Custom);
1655 setOperationAction(ISD::FMINNUM, VT, Custom);
1656 setOperationAction(ISD::FMUL, VT, Custom);
1657 setOperationAction(ISD::FNEARBYINT, VT, Custom);
1658 setOperationAction(ISD::FNEG, VT, Custom);
1659 setOperationAction(ISD::FP_EXTEND, VT, Custom);
1660 setOperationAction(ISD::FP_ROUND, VT, Custom);
1661 setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1662 setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1663 setOperationAction(ISD::FRINT, VT, Custom);
1664 setOperationAction(ISD::FROUND, VT, Custom);
1665 setOperationAction(ISD::FROUNDEVEN, VT, Custom);
1666 setOperationAction(ISD::FSQRT, VT, Custom);
1667 setOperationAction(ISD::FSUB, VT, Custom);
1668 setOperationAction(ISD::FTRUNC, VT, Custom);
1669 setOperationAction(ISD::LOAD, VT, Custom);
1670 setOperationAction(ISD::MGATHER, VT, Custom);
1671 setOperationAction(ISD::MLOAD, VT, Custom);
1672 setOperationAction(ISD::MSCATTER, VT, Custom);
1673 setOperationAction(ISD::MSTORE, VT, Custom);
1674 setOperationAction(ISD::MUL, VT, Custom);
1675 setOperationAction(ISD::MULHS, VT, Custom);
1676 setOperationAction(ISD::MULHU, VT, Custom);
1677 setOperationAction(ISD::OR, VT, Custom);
1678 setOperationAction(ISD::SDIV, VT, Custom);
1679 setOperationAction(ISD::SELECT, VT, Custom);
1680 setOperationAction(ISD::SETCC, VT, Custom);
1681 setOperationAction(ISD::SHL, VT, Custom);
1682 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1683 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1684 setOperationAction(ISD::SINT_TO_FP, VT, Custom);
1685 setOperationAction(ISD::SMAX, VT, Custom);
1686 setOperationAction(ISD::SMIN, VT, Custom);
1687 setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
1688 setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1689 setOperationAction(ISD::SRA, VT, Custom);
1690 setOperationAction(ISD::SRL, VT, Custom);
1691 setOperationAction(ISD::STORE, VT, Custom);
1692 setOperationAction(ISD::SUB, VT, Custom);
1693 setOperationAction(ISD::TRUNCATE, VT, Custom);
1694 setOperationAction(ISD::UDIV, VT, Custom);
1695 setOperationAction(ISD::UINT_TO_FP, VT, Custom);
1696 setOperationAction(ISD::UMAX, VT, Custom);
1697 setOperationAction(ISD::UMIN, VT, Custom);
1698 setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
1699 setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1700 setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1701 setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1702 setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1703 setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1704 setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1705 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1706 setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
1707 setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
1708 setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
1709 setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
1710 setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1711 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1712 setOperationAction(ISD::VSELECT, VT, Custom);
1713 setOperationAction(ISD::XOR, VT, Custom);
1714 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1717 void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
1718 addRegisterClass(VT, &AArch64::FPR64RegClass);
1722 void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
1723 addRegisterClass(VT, &AArch64::FPR128RegClass);
1727 EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
1728 LLVMContext &C, EVT VT) const {
1731 if (VT.isScalableVector())
1732 return EVT::getVectorVT(C, MVT::i1, VT.getVectorElementCount());
1733 return VT.changeVectorElementTypeToInteger();
1736 static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
1737 const APInt &Demanded,
1738 TargetLowering::TargetLoweringOpt &TLO,
1740 uint64_t OldImm = Imm, NewImm, Enc;
1741 uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;
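// e.g. for Size == 32, Mask is 0xFFFFFFFF; OrigMask keeps a copy for the
// all-ones check at the end.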
// Return if the immediate is already all zeros, all ones, a bimm32 or a bimm64.
1745 if (Imm == 0 || Imm == Mask ||
1746 AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
1749 unsigned EltSize = Size;
1750 uint64_t DemandedBits = Demanded.getZExtValue();
1752 // Clear bits that are not demanded.
1753 Imm &= DemandedBits;
1756 // The goal here is to set the non-demanded bits in a way that minimizes
// the number of switches between 0 and 1. In order to achieve this goal,
1758 // we set the non-demanded bits to the value of the preceding demanded bits.
1759 // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
1760 // non-demanded bit), we copy bit0 (1) to the least significant 'x',
1761 // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
1762 // The final result is 0b11000011.
1763 uint64_t NonDemandedBits = ~DemandedBits;
1764 uint64_t InvertedImm = ~Imm & DemandedBits;
1765 uint64_t RotatedImm =
((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) & NonDemandedBits;
1768 uint64_t Sum = RotatedImm + NonDemandedBits;
1769 bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
1770 uint64_t Ones = (Sum + Carry) & NonDemandedBits;
1771 NewImm = (Imm | Ones) & Mask;
1773 // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask immediate
1774 // or all-ones or all-zeros, in which case we can stop searching. Otherwise,
1775 // we halve the element size and continue the search.
1776 if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
1779 // We cannot shrink the element size any further if it is 2-bits.
1785 uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;
// Return if there is a mismatch in any of the demanded bits of Imm and Hi.
1788 if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
1791 // Merge the upper and lower halves of Imm and DemandedBits.
Imm |= Hi;
DemandedBits |= DemandedBitsHi;
1798 // Replicate the element across the register width.
1799 while (EltSize < Size) {
NewImm |= NewImm << EltSize;
EltSize *= 2;
1805 assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
1806 "demanded bits should never be altered");
1807 assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");
1809 // Create the new constant immediate node.
EVT VT = Op.getValueType();
SDLoc DL(Op);
SDValue New;
1814 // If the new constant immediate is all-zeros or all-ones, let the target
1815 // independent DAG combine optimize this node.
1816 if (NewImm == 0 || NewImm == OrigMask) {
1817 New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
1818 TLO.DAG.getConstant(NewImm, DL, VT));
1819 // Otherwise, create a machine node so that target independent DAG combine
1820 // doesn't undo this optimization.
} else {
  Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
  SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
  New = SDValue(
      TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
}
1828 return TLO.CombineTo(Op, New);
1831 bool AArch64TargetLowering::targetShrinkDemandedConstant(
1832 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
1833 TargetLoweringOpt &TLO) const {
1834 // Delay this optimization to as late as possible.
1838 if (!EnableOptimizeLogicalImm)
1841 EVT VT = Op.getValueType();
1845 unsigned Size = VT.getSizeInBits();
1846 assert((Size == 32 || Size == 64) &&
1847 "i32 or i64 is expected after legalization.");
1849 // Exit early if we demand all bits.
1850 if (DemandedBits.countPopulation() == Size)
1854 switch (Op.getOpcode()) {
1858 NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
1861 NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
1864 NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
1867 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
1870 uint64_t Imm = C->getZExtValue();
1871 return optimizeLogicalImm(Op, Size, Imm, DemandedBits, TLO, NewOpc);
1874 /// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one and return them in Known.
1876 void AArch64TargetLowering::computeKnownBitsForTargetNode(
1877 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
1878 const SelectionDAG &DAG, unsigned Depth) const {
1879 switch (Op.getOpcode()) {
1882 case AArch64ISD::DUP: {
1883 SDValue SrcOp = Op.getOperand(0);
1884 Known = DAG.computeKnownBits(SrcOp, Depth + 1);
1885 if (SrcOp.getValueSizeInBits() != Op.getScalarValueSizeInBits()) {
1886 assert(SrcOp.getValueSizeInBits() > Op.getScalarValueSizeInBits() &&
1887 "Expected DUP implicit truncation");
1888 Known = Known.trunc(Op.getScalarValueSizeInBits());
1892 case AArch64ISD::CSEL: {
KnownBits Known2;
Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1895 Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1896 Known = KnownBits::commonBits(Known, Known2);
1899 case AArch64ISD::BICi: {
1900 // Compute the bit cleared value.
uint64_t Mask = ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
1903 Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1904 Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
1907 case AArch64ISD::VLSHR: {
KnownBits Known2;
Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1910 Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1911 Known = KnownBits::lshr(Known, Known2);
1914 case AArch64ISD::VASHR: {
KnownBits Known2;
Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1917 Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1918 Known = KnownBits::ashr(Known, Known2);
1921 case AArch64ISD::LOADgot:
1922 case AArch64ISD::ADDlow: {
1923 if (!Subtarget->isTargetILP32())
1925 // In ILP32 mode all valid pointers are in the low 4GB of the address-space.
1926 Known.Zero = APInt::getHighBitsSet(64, 32);
1929 case AArch64ISD::ASSERT_ZEXT_BOOL: {
1930 Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1931 Known.Zero |= APInt(Known.getBitWidth(), 0xFE);
1934 case ISD::INTRINSIC_W_CHAIN: {
1935 ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
1936 Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
1939 case Intrinsic::aarch64_ldaxr:
1940 case Intrinsic::aarch64_ldxr: {
1941 unsigned BitWidth = Known.getBitWidth();
1942 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
1943 unsigned MemBits = VT.getScalarSizeInBits();
1944 Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
1950 case ISD::INTRINSIC_WO_CHAIN:
1951 case ISD::INTRINSIC_VOID: {
1952 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1956 case Intrinsic::aarch64_neon_umaxv:
1957 case Intrinsic::aarch64_neon_uminv: {
1958 // Figure out the datatype of the vector operand. The UMINV instruction
1959 // will zero extend the result, so we can mark as known zero all the
// bits larger than the element datatype. 32-bit or larger doesn't need
1961 // this as those are legal types and will be handled by isel directly.
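// For example, a umaxv/uminv over v16i8 produces a value that fits in 8 bits,
// so all result bits above bit 7 are known to be zero.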
1962 MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
1963 unsigned BitWidth = Known.getBitWidth();
1964 if (VT == MVT::v8i8 || VT == MVT::v16i8) {
1965 assert(BitWidth >= 8 && "Unexpected width!");
1966 APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
1968 } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
1969 assert(BitWidth >= 16 && "Unexpected width!");
1970 APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
1980 MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
1985 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
1986 EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
1988 if (Subtarget->requiresStrictAlign())
1992 // Some CPUs are fine with unaligned stores except for 128-bit ones.
1993 *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 ||
1994 // See comments in performSTORECombine() for more details about
1995 // these conditions.
1997 // Code that uses clang vector extensions can mark that it
1998 // wants unaligned accesses to be treated as fast by
1999 // underspecifying alignment to be 1 or 2.
2002 // Disregard v2i64. Memcpy lowering produces those and splitting
2003 // them regresses performance on micro-benchmarks and olden/bh.
2009 // Same as above but handling LLTs instead.
2010 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
2011 LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
2013 if (Subtarget->requiresStrictAlign())
2017 // Some CPUs are fine with unaligned stores except for 128-bit ones.
2018 *Fast = !Subtarget->isMisaligned128StoreSlow() ||
2019 Ty.getSizeInBytes() != 16 ||
2020 // See comments in performSTORECombine() for more details about
2021 // these conditions.
2023 // Code that uses clang vector extensions can mark that it
2024 // wants unaligned accesses to be treated as fast by
2025 // underspecifying alignment to be 1 or 2.
2028 // Disregard v2i64. Memcpy lowering produces those and splitting
2029 // them regresses performance on micro-benchmarks and olden/bh.
2030 Ty == LLT::fixed_vector(2, 64);
2036 AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
2037 const TargetLibraryInfo *libInfo) const {
2038 return AArch64::createFastISel(funcInfo, libInfo);
2041 const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
2042 #define MAKE_CASE(V) \
2045 switch ((AArch64ISD::NodeType)Opcode) {
2046 case AArch64ISD::FIRST_NUMBER:
2048 MAKE_CASE(AArch64ISD::CALL)
2049 MAKE_CASE(AArch64ISD::ADRP)
2050 MAKE_CASE(AArch64ISD::ADR)
2051 MAKE_CASE(AArch64ISD::ADDlow)
2052 MAKE_CASE(AArch64ISD::LOADgot)
2053 MAKE_CASE(AArch64ISD::RET_FLAG)
2054 MAKE_CASE(AArch64ISD::BRCOND)
2055 MAKE_CASE(AArch64ISD::CSEL)
2056 MAKE_CASE(AArch64ISD::CSINV)
2057 MAKE_CASE(AArch64ISD::CSNEG)
2058 MAKE_CASE(AArch64ISD::CSINC)
2059 MAKE_CASE(AArch64ISD::THREAD_POINTER)
2060 MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ)
2061 MAKE_CASE(AArch64ISD::ABDS_PRED)
2062 MAKE_CASE(AArch64ISD::ABDU_PRED)
2063 MAKE_CASE(AArch64ISD::MUL_PRED)
2064 MAKE_CASE(AArch64ISD::MULHS_PRED)
2065 MAKE_CASE(AArch64ISD::MULHU_PRED)
2066 MAKE_CASE(AArch64ISD::SDIV_PRED)
2067 MAKE_CASE(AArch64ISD::SHL_PRED)
2068 MAKE_CASE(AArch64ISD::SMAX_PRED)
2069 MAKE_CASE(AArch64ISD::SMIN_PRED)
2070 MAKE_CASE(AArch64ISD::SRA_PRED)
2071 MAKE_CASE(AArch64ISD::SRL_PRED)
2072 MAKE_CASE(AArch64ISD::UDIV_PRED)
2073 MAKE_CASE(AArch64ISD::UMAX_PRED)
2074 MAKE_CASE(AArch64ISD::UMIN_PRED)
2075 MAKE_CASE(AArch64ISD::SRAD_MERGE_OP1)
2076 MAKE_CASE(AArch64ISD::FNEG_MERGE_PASSTHRU)
2077 MAKE_CASE(AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU)
2078 MAKE_CASE(AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU)
2079 MAKE_CASE(AArch64ISD::FCEIL_MERGE_PASSTHRU)
2080 MAKE_CASE(AArch64ISD::FFLOOR_MERGE_PASSTHRU)
2081 MAKE_CASE(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU)
2082 MAKE_CASE(AArch64ISD::FRINT_MERGE_PASSTHRU)
2083 MAKE_CASE(AArch64ISD::FROUND_MERGE_PASSTHRU)
2084 MAKE_CASE(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU)
2085 MAKE_CASE(AArch64ISD::FTRUNC_MERGE_PASSTHRU)
2086 MAKE_CASE(AArch64ISD::FP_ROUND_MERGE_PASSTHRU)
2087 MAKE_CASE(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU)
2088 MAKE_CASE(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU)
2089 MAKE_CASE(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU)
2090 MAKE_CASE(AArch64ISD::FCVTZU_MERGE_PASSTHRU)
2091 MAKE_CASE(AArch64ISD::FCVTZS_MERGE_PASSTHRU)
2092 MAKE_CASE(AArch64ISD::FSQRT_MERGE_PASSTHRU)
2093 MAKE_CASE(AArch64ISD::FRECPX_MERGE_PASSTHRU)
2094 MAKE_CASE(AArch64ISD::FABS_MERGE_PASSTHRU)
2095 MAKE_CASE(AArch64ISD::ABS_MERGE_PASSTHRU)
2096 MAKE_CASE(AArch64ISD::NEG_MERGE_PASSTHRU)
2097 MAKE_CASE(AArch64ISD::SETCC_MERGE_ZERO)
2098 MAKE_CASE(AArch64ISD::ADC)
2099 MAKE_CASE(AArch64ISD::SBC)
2100 MAKE_CASE(AArch64ISD::ADDS)
2101 MAKE_CASE(AArch64ISD::SUBS)
2102 MAKE_CASE(AArch64ISD::ADCS)
2103 MAKE_CASE(AArch64ISD::SBCS)
2104 MAKE_CASE(AArch64ISD::ANDS)
2105 MAKE_CASE(AArch64ISD::CCMP)
2106 MAKE_CASE(AArch64ISD::CCMN)
2107 MAKE_CASE(AArch64ISD::FCCMP)
2108 MAKE_CASE(AArch64ISD::FCMP)
2109 MAKE_CASE(AArch64ISD::STRICT_FCMP)
2110 MAKE_CASE(AArch64ISD::STRICT_FCMPE)
2111 MAKE_CASE(AArch64ISD::DUP)
2112 MAKE_CASE(AArch64ISD::DUPLANE8)
2113 MAKE_CASE(AArch64ISD::DUPLANE16)
2114 MAKE_CASE(AArch64ISD::DUPLANE32)
2115 MAKE_CASE(AArch64ISD::DUPLANE64)
2116 MAKE_CASE(AArch64ISD::DUPLANE128)
2117 MAKE_CASE(AArch64ISD::MOVI)
2118 MAKE_CASE(AArch64ISD::MOVIshift)
2119 MAKE_CASE(AArch64ISD::MOVIedit)
2120 MAKE_CASE(AArch64ISD::MOVImsl)
2121 MAKE_CASE(AArch64ISD::FMOV)
2122 MAKE_CASE(AArch64ISD::MVNIshift)
2123 MAKE_CASE(AArch64ISD::MVNImsl)
2124 MAKE_CASE(AArch64ISD::BICi)
2125 MAKE_CASE(AArch64ISD::ORRi)
2126 MAKE_CASE(AArch64ISD::BSP)
2127 MAKE_CASE(AArch64ISD::EXTR)
2128 MAKE_CASE(AArch64ISD::ZIP1)
2129 MAKE_CASE(AArch64ISD::ZIP2)
2130 MAKE_CASE(AArch64ISD::UZP1)
2131 MAKE_CASE(AArch64ISD::UZP2)
2132 MAKE_CASE(AArch64ISD::TRN1)
2133 MAKE_CASE(AArch64ISD::TRN2)
2134 MAKE_CASE(AArch64ISD::REV16)
2135 MAKE_CASE(AArch64ISD::REV32)
2136 MAKE_CASE(AArch64ISD::REV64)
2137 MAKE_CASE(AArch64ISD::EXT)
2138 MAKE_CASE(AArch64ISD::SPLICE)
2139 MAKE_CASE(AArch64ISD::VSHL)
2140 MAKE_CASE(AArch64ISD::VLSHR)
2141 MAKE_CASE(AArch64ISD::VASHR)
2142 MAKE_CASE(AArch64ISD::VSLI)
2143 MAKE_CASE(AArch64ISD::VSRI)
2144 MAKE_CASE(AArch64ISD::CMEQ)
2145 MAKE_CASE(AArch64ISD::CMGE)
2146 MAKE_CASE(AArch64ISD::CMGT)
2147 MAKE_CASE(AArch64ISD::CMHI)
2148 MAKE_CASE(AArch64ISD::CMHS)
2149 MAKE_CASE(AArch64ISD::FCMEQ)
2150 MAKE_CASE(AArch64ISD::FCMGE)
2151 MAKE_CASE(AArch64ISD::FCMGT)
2152 MAKE_CASE(AArch64ISD::CMEQz)
2153 MAKE_CASE(AArch64ISD::CMGEz)
2154 MAKE_CASE(AArch64ISD::CMGTz)
2155 MAKE_CASE(AArch64ISD::CMLEz)
2156 MAKE_CASE(AArch64ISD::CMLTz)
2157 MAKE_CASE(AArch64ISD::FCMEQz)
2158 MAKE_CASE(AArch64ISD::FCMGEz)
2159 MAKE_CASE(AArch64ISD::FCMGTz)
2160 MAKE_CASE(AArch64ISD::FCMLEz)
2161 MAKE_CASE(AArch64ISD::FCMLTz)
2162 MAKE_CASE(AArch64ISD::SADDV)
2163 MAKE_CASE(AArch64ISD::UADDV)
2164 MAKE_CASE(AArch64ISD::SDOT)
2165 MAKE_CASE(AArch64ISD::UDOT)
2166 MAKE_CASE(AArch64ISD::SMINV)
2167 MAKE_CASE(AArch64ISD::UMINV)
2168 MAKE_CASE(AArch64ISD::SMAXV)
2169 MAKE_CASE(AArch64ISD::UMAXV)
2170 MAKE_CASE(AArch64ISD::SADDV_PRED)
2171 MAKE_CASE(AArch64ISD::UADDV_PRED)
2172 MAKE_CASE(AArch64ISD::SMAXV_PRED)
2173 MAKE_CASE(AArch64ISD::UMAXV_PRED)
2174 MAKE_CASE(AArch64ISD::SMINV_PRED)
2175 MAKE_CASE(AArch64ISD::UMINV_PRED)
2176 MAKE_CASE(AArch64ISD::ORV_PRED)
2177 MAKE_CASE(AArch64ISD::EORV_PRED)
2178 MAKE_CASE(AArch64ISD::ANDV_PRED)
2179 MAKE_CASE(AArch64ISD::CLASTA_N)
2180 MAKE_CASE(AArch64ISD::CLASTB_N)
2181 MAKE_CASE(AArch64ISD::LASTA)
2182 MAKE_CASE(AArch64ISD::LASTB)
2183 MAKE_CASE(AArch64ISD::REINTERPRET_CAST)
2184 MAKE_CASE(AArch64ISD::LS64_BUILD)
2185 MAKE_CASE(AArch64ISD::LS64_EXTRACT)
2186 MAKE_CASE(AArch64ISD::TBL)
2187 MAKE_CASE(AArch64ISD::FADD_PRED)
2188 MAKE_CASE(AArch64ISD::FADDA_PRED)
2189 MAKE_CASE(AArch64ISD::FADDV_PRED)
2190 MAKE_CASE(AArch64ISD::FDIV_PRED)
2191 MAKE_CASE(AArch64ISD::FMA_PRED)
2192 MAKE_CASE(AArch64ISD::FMAX_PRED)
2193 MAKE_CASE(AArch64ISD::FMAXV_PRED)
2194 MAKE_CASE(AArch64ISD::FMAXNM_PRED)
2195 MAKE_CASE(AArch64ISD::FMAXNMV_PRED)
2196 MAKE_CASE(AArch64ISD::FMIN_PRED)
2197 MAKE_CASE(AArch64ISD::FMINV_PRED)
2198 MAKE_CASE(AArch64ISD::FMINNM_PRED)
2199 MAKE_CASE(AArch64ISD::FMINNMV_PRED)
2200 MAKE_CASE(AArch64ISD::FMUL_PRED)
2201 MAKE_CASE(AArch64ISD::FSUB_PRED)
2202 MAKE_CASE(AArch64ISD::RDSVL)
2203 MAKE_CASE(AArch64ISD::BIC)
2204 MAKE_CASE(AArch64ISD::BIT)
2205 MAKE_CASE(AArch64ISD::CBZ)
2206 MAKE_CASE(AArch64ISD::CBNZ)
2207 MAKE_CASE(AArch64ISD::TBZ)
2208 MAKE_CASE(AArch64ISD::TBNZ)
2209 MAKE_CASE(AArch64ISD::TC_RETURN)
2210 MAKE_CASE(AArch64ISD::PREFETCH)
2211 MAKE_CASE(AArch64ISD::SITOF)
2212 MAKE_CASE(AArch64ISD::UITOF)
2213 MAKE_CASE(AArch64ISD::NVCAST)
2214 MAKE_CASE(AArch64ISD::MRS)
2215 MAKE_CASE(AArch64ISD::SQSHL_I)
2216 MAKE_CASE(AArch64ISD::UQSHL_I)
2217 MAKE_CASE(AArch64ISD::SRSHR_I)
2218 MAKE_CASE(AArch64ISD::URSHR_I)
2219 MAKE_CASE(AArch64ISD::SQSHLU_I)
2220 MAKE_CASE(AArch64ISD::WrapperLarge)
2221 MAKE_CASE(AArch64ISD::LD2post)
2222 MAKE_CASE(AArch64ISD::LD3post)
2223 MAKE_CASE(AArch64ISD::LD4post)
2224 MAKE_CASE(AArch64ISD::ST2post)
2225 MAKE_CASE(AArch64ISD::ST3post)
2226 MAKE_CASE(AArch64ISD::ST4post)
2227 MAKE_CASE(AArch64ISD::LD1x2post)
2228 MAKE_CASE(AArch64ISD::LD1x3post)
2229 MAKE_CASE(AArch64ISD::LD1x4post)
2230 MAKE_CASE(AArch64ISD::ST1x2post)
2231 MAKE_CASE(AArch64ISD::ST1x3post)
2232 MAKE_CASE(AArch64ISD::ST1x4post)
2233 MAKE_CASE(AArch64ISD::LD1DUPpost)
2234 MAKE_CASE(AArch64ISD::LD2DUPpost)
2235 MAKE_CASE(AArch64ISD::LD3DUPpost)
2236 MAKE_CASE(AArch64ISD::LD4DUPpost)
2237 MAKE_CASE(AArch64ISD::LD1LANEpost)
2238 MAKE_CASE(AArch64ISD::LD2LANEpost)
2239 MAKE_CASE(AArch64ISD::LD3LANEpost)
2240 MAKE_CASE(AArch64ISD::LD4LANEpost)
2241 MAKE_CASE(AArch64ISD::ST2LANEpost)
2242 MAKE_CASE(AArch64ISD::ST3LANEpost)
2243 MAKE_CASE(AArch64ISD::ST4LANEpost)
2244 MAKE_CASE(AArch64ISD::SMULL)
2245 MAKE_CASE(AArch64ISD::UMULL)
2246 MAKE_CASE(AArch64ISD::FRECPE)
2247 MAKE_CASE(AArch64ISD::FRECPS)
2248 MAKE_CASE(AArch64ISD::FRSQRTE)
2249 MAKE_CASE(AArch64ISD::FRSQRTS)
2250 MAKE_CASE(AArch64ISD::STG)
2251 MAKE_CASE(AArch64ISD::STZG)
2252 MAKE_CASE(AArch64ISD::ST2G)
2253 MAKE_CASE(AArch64ISD::STZ2G)
2254 MAKE_CASE(AArch64ISD::SUNPKHI)
2255 MAKE_CASE(AArch64ISD::SUNPKLO)
2256 MAKE_CASE(AArch64ISD::UUNPKHI)
2257 MAKE_CASE(AArch64ISD::UUNPKLO)
2258 MAKE_CASE(AArch64ISD::INSR)
2259 MAKE_CASE(AArch64ISD::PTEST)
2260 MAKE_CASE(AArch64ISD::PTRUE)
2261 MAKE_CASE(AArch64ISD::LD1_MERGE_ZERO)
2262 MAKE_CASE(AArch64ISD::LD1S_MERGE_ZERO)
2263 MAKE_CASE(AArch64ISD::LDNF1_MERGE_ZERO)
2264 MAKE_CASE(AArch64ISD::LDNF1S_MERGE_ZERO)
2265 MAKE_CASE(AArch64ISD::LDFF1_MERGE_ZERO)
2266 MAKE_CASE(AArch64ISD::LDFF1S_MERGE_ZERO)
2267 MAKE_CASE(AArch64ISD::LD1RQ_MERGE_ZERO)
2268 MAKE_CASE(AArch64ISD::LD1RO_MERGE_ZERO)
2269 MAKE_CASE(AArch64ISD::SVE_LD2_MERGE_ZERO)
2270 MAKE_CASE(AArch64ISD::SVE_LD3_MERGE_ZERO)
2271 MAKE_CASE(AArch64ISD::SVE_LD4_MERGE_ZERO)
2272 MAKE_CASE(AArch64ISD::GLD1_MERGE_ZERO)
2273 MAKE_CASE(AArch64ISD::GLD1_SCALED_MERGE_ZERO)
2274 MAKE_CASE(AArch64ISD::GLD1_SXTW_MERGE_ZERO)
2275 MAKE_CASE(AArch64ISD::GLD1_UXTW_MERGE_ZERO)
2276 MAKE_CASE(AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO)
2277 MAKE_CASE(AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO)
2278 MAKE_CASE(AArch64ISD::GLD1_IMM_MERGE_ZERO)
2279 MAKE_CASE(AArch64ISD::GLD1S_MERGE_ZERO)
2280 MAKE_CASE(AArch64ISD::GLD1S_SCALED_MERGE_ZERO)
2281 MAKE_CASE(AArch64ISD::GLD1S_SXTW_MERGE_ZERO)
2282 MAKE_CASE(AArch64ISD::GLD1S_UXTW_MERGE_ZERO)
2283 MAKE_CASE(AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO)
2284 MAKE_CASE(AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO)
2285 MAKE_CASE(AArch64ISD::GLD1S_IMM_MERGE_ZERO)
2286 MAKE_CASE(AArch64ISD::GLDFF1_MERGE_ZERO)
2287 MAKE_CASE(AArch64ISD::GLDFF1_SCALED_MERGE_ZERO)
2288 MAKE_CASE(AArch64ISD::GLDFF1_SXTW_MERGE_ZERO)
2289 MAKE_CASE(AArch64ISD::GLDFF1_UXTW_MERGE_ZERO)
2290 MAKE_CASE(AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO)
2291 MAKE_CASE(AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO)
2292 MAKE_CASE(AArch64ISD::GLDFF1_IMM_MERGE_ZERO)
2293 MAKE_CASE(AArch64ISD::GLDFF1S_MERGE_ZERO)
2294 MAKE_CASE(AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO)
2295 MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO)
2296 MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO)
2297 MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO)
2298 MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO)
2299 MAKE_CASE(AArch64ISD::GLDFF1S_IMM_MERGE_ZERO)
2300 MAKE_CASE(AArch64ISD::GLDNT1_MERGE_ZERO)
2301 MAKE_CASE(AArch64ISD::GLDNT1_INDEX_MERGE_ZERO)
2302 MAKE_CASE(AArch64ISD::GLDNT1S_MERGE_ZERO)
2303 MAKE_CASE(AArch64ISD::ST1_PRED)
2304 MAKE_CASE(AArch64ISD::SST1_PRED)
2305 MAKE_CASE(AArch64ISD::SST1_SCALED_PRED)
2306 MAKE_CASE(AArch64ISD::SST1_SXTW_PRED)
2307 MAKE_CASE(AArch64ISD::SST1_UXTW_PRED)
2308 MAKE_CASE(AArch64ISD::SST1_SXTW_SCALED_PRED)
2309 MAKE_CASE(AArch64ISD::SST1_UXTW_SCALED_PRED)
2310 MAKE_CASE(AArch64ISD::SST1_IMM_PRED)
2311 MAKE_CASE(AArch64ISD::SSTNT1_PRED)
2312 MAKE_CASE(AArch64ISD::SSTNT1_INDEX_PRED)
2313 MAKE_CASE(AArch64ISD::LDP)
2314 MAKE_CASE(AArch64ISD::STP)
2315 MAKE_CASE(AArch64ISD::STNP)
2316 MAKE_CASE(AArch64ISD::BITREVERSE_MERGE_PASSTHRU)
2317 MAKE_CASE(AArch64ISD::BSWAP_MERGE_PASSTHRU)
2318 MAKE_CASE(AArch64ISD::REVH_MERGE_PASSTHRU)
2319 MAKE_CASE(AArch64ISD::REVW_MERGE_PASSTHRU)
2320 MAKE_CASE(AArch64ISD::REVD_MERGE_PASSTHRU)
2321 MAKE_CASE(AArch64ISD::CTLZ_MERGE_PASSTHRU)
2322 MAKE_CASE(AArch64ISD::CTPOP_MERGE_PASSTHRU)
2323 MAKE_CASE(AArch64ISD::DUP_MERGE_PASSTHRU)
2324 MAKE_CASE(AArch64ISD::INDEX_VECTOR)
2325 MAKE_CASE(AArch64ISD::ADDP)
2326 MAKE_CASE(AArch64ISD::SADDLP)
2327 MAKE_CASE(AArch64ISD::UADDLP)
2328 MAKE_CASE(AArch64ISD::CALL_RVMARKER)
2329 MAKE_CASE(AArch64ISD::ASSERT_ZEXT_BOOL)
2330 MAKE_CASE(AArch64ISD::MOPS_MEMSET)
2331 MAKE_CASE(AArch64ISD::MOPS_MEMSET_TAGGING)
2332 MAKE_CASE(AArch64ISD::MOPS_MEMCOPY)
2333 MAKE_CASE(AArch64ISD::MOPS_MEMMOVE)
2334 MAKE_CASE(AArch64ISD::CALL_BTI)
2341 AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
2342 MachineBasicBlock *MBB) const {
// We materialise the F128CSEL pseudo-instruction as some control flow and a
// phi node in a freshly created successor block (EndBB):
//     [... previous instrs leading to comparison ...]
//     Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]
2355 MachineFunction *MF = MBB->getParent();
2356 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2357 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
2358 DebugLoc DL = MI.getDebugLoc();
2359 MachineFunction::iterator It = ++MBB->getIterator();
2361 Register DestReg = MI.getOperand(0).getReg();
2362 Register IfTrueReg = MI.getOperand(1).getReg();
2363 Register IfFalseReg = MI.getOperand(2).getReg();
2364 unsigned CondCode = MI.getOperand(3).getImm();
2365 bool NZCVKilled = MI.getOperand(4).isKill();
2367 MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
2368 MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
2369 MF->insert(It, TrueBB);
2370 MF->insert(It, EndBB);
2372 // Transfer rest of current basic-block to EndBB
2373 EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
2375 EndBB->transferSuccessorsAndUpdatePHIs(MBB);
2377 BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB);
2378 BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB);
2379 MBB->addSuccessor(TrueBB);
2380 MBB->addSuccessor(EndBB);
2382 // TrueBB falls through to the end.
2383 TrueBB->addSuccessor(EndBB);
2386 TrueBB->addLiveIn(AArch64::NZCV);
2387 EndBB->addLiveIn(AArch64::NZCV);
2390 BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg)
2396 MI.eraseFromParent();
2400 MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
2401 MachineInstr &MI, MachineBasicBlock *BB) const {
2402 assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2403 BB->getParent()->getFunction().getPersonalityFn())) &&
2404 "SEH does not use catchret!");
2409 AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg,
2411 MachineBasicBlock *BB) const {
2412 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2413 MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2415 MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2416 MIB.add(MI.getOperand(1)); // slice index register
2417 MIB.add(MI.getOperand(2)); // slice index offset
2418 MIB.add(MI.getOperand(3)); // pg
2419 MIB.add(MI.getOperand(4)); // base
2420 MIB.add(MI.getOperand(5)); // offset
2422 MI.eraseFromParent(); // The pseudo is gone now.
2427 AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const {
2428 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2429 MachineInstrBuilder MIB =
2430 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::LDR_ZA));
2432 MIB.addReg(AArch64::ZA, RegState::Define);
2433 MIB.add(MI.getOperand(0)); // Vector select register
2434 MIB.add(MI.getOperand(1)); // Vector select offset
2435 MIB.add(MI.getOperand(2)); // Base
2436 MIB.add(MI.getOperand(1)); // Offset, same as vector select offset
2438 MI.eraseFromParent(); // The pseudo is gone now.
2443 AArch64TargetLowering::EmitMopa(unsigned Opc, unsigned BaseReg,
2444 MachineInstr &MI, MachineBasicBlock *BB) const {
2445 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2446 MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2448 MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2449 MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2450 MIB.add(MI.getOperand(1)); // pn
2451 MIB.add(MI.getOperand(2)); // pm
2452 MIB.add(MI.getOperand(3)); // zn
2453 MIB.add(MI.getOperand(4)); // zm
2455 MI.eraseFromParent(); // The pseudo is gone now.
2460 AArch64TargetLowering::EmitInsertVectorToTile(unsigned Opc, unsigned BaseReg,
2462 MachineBasicBlock *BB) const {
2463 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2464 MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2466 MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2467 MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2468 MIB.add(MI.getOperand(1)); // Slice index register
2469 MIB.add(MI.getOperand(2)); // Slice index offset
2470 MIB.add(MI.getOperand(3)); // pg
2471 MIB.add(MI.getOperand(4)); // zn
2473 MI.eraseFromParent(); // The pseudo is gone now.
2478 AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const {
2479 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2480 MachineInstrBuilder MIB =
2481 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::ZERO_M));
2482 MIB.add(MI.getOperand(0)); // Mask
2484 unsigned Mask = MI.getOperand(0).getImm();
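// Each of the low 8 mask bits selects one 64-bit ZA tile; record every
// selected tile (ZAD0..ZAD7) as implicitly defined by the zero instruction.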
2485 for (unsigned I = 0; I < 8; I++) {
2486 if (Mask & (1 << I))
2487 MIB.addDef(AArch64::ZAD0 + I, RegState::ImplicitDefine);
2490 MI.eraseFromParent(); // The pseudo is gone now.
2495 AArch64TargetLowering::EmitAddVectorToTile(unsigned Opc, unsigned BaseReg,
2497 MachineBasicBlock *BB) const {
2498 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2499 MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2501 MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2502 MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2503 MIB.add(MI.getOperand(1)); // pn
2504 MIB.add(MI.getOperand(2)); // pm
2505 MIB.add(MI.getOperand(3)); // zn
2507 MI.eraseFromParent(); // The pseudo is gone now.
2511 MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
2512 MachineInstr &MI, MachineBasicBlock *BB) const {
2513 switch (MI.getOpcode()) {
2518 llvm_unreachable("Unexpected instruction for custom inserter!");
2520 case AArch64::F128CSEL:
2521 return EmitF128CSEL(MI, BB);
2523 case TargetOpcode::STATEPOINT:
// STATEPOINT is a pseudo instruction which has no implicit defs/uses
// itself, while the BL call instruction (to which the statepoint is
// eventually lowered) has an implicit def of LR. That def is early-clobber
// as it is written at the moment of the call, before any use is read.
// Add this implicit dead def here as a workaround.
2529 MI.addOperand(*MI.getMF(),
2530 MachineOperand::CreateReg(
2531 AArch64::LR, /*isDef*/ true,
2532 /*isImp*/ true, /*isKill*/ false, /*isDead*/ true,
2533 /*isUndef*/ false, /*isEarlyClobber*/ true));
2535 case TargetOpcode::STACKMAP:
2536 case TargetOpcode::PATCHPOINT:
2537 return emitPatchPoint(MI, BB);
2539 case AArch64::CATCHRET:
2540 return EmitLoweredCatchRet(MI, BB);
2541 case AArch64::LD1_MXIPXX_H_PSEUDO_B:
2542 return EmitTileLoad(AArch64::LD1_MXIPXX_H_B, AArch64::ZAB0, MI, BB);
2543 case AArch64::LD1_MXIPXX_H_PSEUDO_H:
2544 return EmitTileLoad(AArch64::LD1_MXIPXX_H_H, AArch64::ZAH0, MI, BB);
2545 case AArch64::LD1_MXIPXX_H_PSEUDO_S:
2546 return EmitTileLoad(AArch64::LD1_MXIPXX_H_S, AArch64::ZAS0, MI, BB);
2547 case AArch64::LD1_MXIPXX_H_PSEUDO_D:
2548 return EmitTileLoad(AArch64::LD1_MXIPXX_H_D, AArch64::ZAD0, MI, BB);
2549 case AArch64::LD1_MXIPXX_H_PSEUDO_Q:
2550 return EmitTileLoad(AArch64::LD1_MXIPXX_H_Q, AArch64::ZAQ0, MI, BB);
2551 case AArch64::LD1_MXIPXX_V_PSEUDO_B:
2552 return EmitTileLoad(AArch64::LD1_MXIPXX_V_B, AArch64::ZAB0, MI, BB);
2553 case AArch64::LD1_MXIPXX_V_PSEUDO_H:
2554 return EmitTileLoad(AArch64::LD1_MXIPXX_V_H, AArch64::ZAH0, MI, BB);
2555 case AArch64::LD1_MXIPXX_V_PSEUDO_S:
2556 return EmitTileLoad(AArch64::LD1_MXIPXX_V_S, AArch64::ZAS0, MI, BB);
2557 case AArch64::LD1_MXIPXX_V_PSEUDO_D:
2558 return EmitTileLoad(AArch64::LD1_MXIPXX_V_D, AArch64::ZAD0, MI, BB);
2559 case AArch64::LD1_MXIPXX_V_PSEUDO_Q:
2560 return EmitTileLoad(AArch64::LD1_MXIPXX_V_Q, AArch64::ZAQ0, MI, BB);
2561 case AArch64::LDR_ZA_PSEUDO:
2562 return EmitFill(MI, BB);
2563 case AArch64::BFMOPA_MPPZZ_PSEUDO:
2564 return EmitMopa(AArch64::BFMOPA_MPPZZ, AArch64::ZAS0, MI, BB);
2565 case AArch64::BFMOPS_MPPZZ_PSEUDO:
2566 return EmitMopa(AArch64::BFMOPS_MPPZZ, AArch64::ZAS0, MI, BB);
2567 case AArch64::FMOPAL_MPPZZ_PSEUDO:
2568 return EmitMopa(AArch64::FMOPAL_MPPZZ, AArch64::ZAS0, MI, BB);
2569 case AArch64::FMOPSL_MPPZZ_PSEUDO:
2570 return EmitMopa(AArch64::FMOPSL_MPPZZ, AArch64::ZAS0, MI, BB);
2571 case AArch64::FMOPA_MPPZZ_S_PSEUDO:
2572 return EmitMopa(AArch64::FMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2573 case AArch64::FMOPS_MPPZZ_S_PSEUDO:
2574 return EmitMopa(AArch64::FMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2575 case AArch64::FMOPA_MPPZZ_D_PSEUDO:
2576 return EmitMopa(AArch64::FMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2577 case AArch64::FMOPS_MPPZZ_D_PSEUDO:
2578 return EmitMopa(AArch64::FMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2579 case AArch64::SMOPA_MPPZZ_S_PSEUDO:
2580 return EmitMopa(AArch64::SMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2581 case AArch64::SMOPS_MPPZZ_S_PSEUDO:
2582 return EmitMopa(AArch64::SMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2583 case AArch64::UMOPA_MPPZZ_S_PSEUDO:
2584 return EmitMopa(AArch64::UMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2585 case AArch64::UMOPS_MPPZZ_S_PSEUDO:
2586 return EmitMopa(AArch64::UMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2587 case AArch64::SUMOPA_MPPZZ_S_PSEUDO:
2588 return EmitMopa(AArch64::SUMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2589 case AArch64::SUMOPS_MPPZZ_S_PSEUDO:
2590 return EmitMopa(AArch64::SUMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2591 case AArch64::USMOPA_MPPZZ_S_PSEUDO:
2592 return EmitMopa(AArch64::USMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2593 case AArch64::USMOPS_MPPZZ_S_PSEUDO:
2594 return EmitMopa(AArch64::USMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2595 case AArch64::SMOPA_MPPZZ_D_PSEUDO:
2596 return EmitMopa(AArch64::SMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2597 case AArch64::SMOPS_MPPZZ_D_PSEUDO:
2598 return EmitMopa(AArch64::SMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2599 case AArch64::UMOPA_MPPZZ_D_PSEUDO:
2600 return EmitMopa(AArch64::UMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2601 case AArch64::UMOPS_MPPZZ_D_PSEUDO:
2602 return EmitMopa(AArch64::UMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2603 case AArch64::SUMOPA_MPPZZ_D_PSEUDO:
2604 return EmitMopa(AArch64::SUMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2605 case AArch64::SUMOPS_MPPZZ_D_PSEUDO:
2606 return EmitMopa(AArch64::SUMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2607 case AArch64::USMOPA_MPPZZ_D_PSEUDO:
2608 return EmitMopa(AArch64::USMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2609 case AArch64::USMOPS_MPPZZ_D_PSEUDO:
2610 return EmitMopa(AArch64::USMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2611 case AArch64::INSERT_MXIPZ_H_PSEUDO_B:
2612 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_B, AArch64::ZAB0, MI,
2614 case AArch64::INSERT_MXIPZ_H_PSEUDO_H:
2615 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_H, AArch64::ZAH0, MI,
2617 case AArch64::INSERT_MXIPZ_H_PSEUDO_S:
2618 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_S, AArch64::ZAS0, MI,
2620 case AArch64::INSERT_MXIPZ_H_PSEUDO_D:
2621 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_D, AArch64::ZAD0, MI,
2623 case AArch64::INSERT_MXIPZ_H_PSEUDO_Q:
2624 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_Q, AArch64::ZAQ0, MI,
2626 case AArch64::INSERT_MXIPZ_V_PSEUDO_B:
2627 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_B, AArch64::ZAB0, MI,
2629 case AArch64::INSERT_MXIPZ_V_PSEUDO_H:
2630 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_H, AArch64::ZAH0, MI,
2632 case AArch64::INSERT_MXIPZ_V_PSEUDO_S:
2633 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_S, AArch64::ZAS0, MI,
2635 case AArch64::INSERT_MXIPZ_V_PSEUDO_D:
2636 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_D, AArch64::ZAD0, MI,
2638 case AArch64::INSERT_MXIPZ_V_PSEUDO_Q:
2639 return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_Q, AArch64::ZAQ0, MI,
2641 case AArch64::ZERO_M_PSEUDO:
2642 return EmitZero(MI, BB);
2643 case AArch64::ADDHA_MPPZ_PSEUDO_S:
2644 return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_S, AArch64::ZAS0, MI, BB);
2645 case AArch64::ADDVA_MPPZ_PSEUDO_S:
2646 return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_S, AArch64::ZAS0, MI, BB);
2647 case AArch64::ADDHA_MPPZ_PSEUDO_D:
2648 return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_D, AArch64::ZAD0, MI, BB);
2649 case AArch64::ADDVA_MPPZ_PSEUDO_D:
2650 return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_D, AArch64::ZAD0, MI, BB);
2654 //===----------------------------------------------------------------------===//
2655 // AArch64 Lowering private implementation.
2656 //===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Lowering Code
2660 //===----------------------------------------------------------------------===//
2662 // Forward declarations of SVE fixed length lowering helpers
2663 static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT);
2664 static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
2665 static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
2666 static SDValue convertFixedMaskToScalableVector(SDValue Mask,
2668 static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
2671 /// isZerosVector - Check whether SDNode N is a zero-filled vector.
2672 static bool isZerosVector(const SDNode *N) {
2673 // Look through a bit convert.
2674 while (N->getOpcode() == ISD::BITCAST)
2675 N = N->getOperand(0).getNode();
2677 if (ISD::isConstantSplatVectorAllZeros(N))
2680 if (N->getOpcode() != AArch64ISD::DUP)
2683 auto Opnd0 = N->getOperand(0);
2684 auto *CINT = dyn_cast<ConstantSDNode>(Opnd0);
2685 auto *CFP = dyn_cast<ConstantFPSDNode>(Opnd0);
2686 return (CINT && CINT->isZero()) || (CFP && CFP->isZero());
/// changeIntCCToAArch64CC - Convert a DAG integer condition code to an
/// AArch64 CC.
2691 static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
2694 llvm_unreachable("Unknown condition code!");
2696 return AArch64CC::NE;
2698 return AArch64CC::EQ;
2700 return AArch64CC::GT;
2702 return AArch64CC::GE;
2704 return AArch64CC::LT;
2706 return AArch64CC::LE;
2708 return AArch64CC::HI;
2710 return AArch64CC::HS;
2712 return AArch64CC::LO;
2714 return AArch64CC::LS;
2718 /// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
2719 static void changeFPCCToAArch64CC(ISD::CondCode CC,
2720 AArch64CC::CondCode &CondCode,
2721 AArch64CC::CondCode &CondCode2) {
2722 CondCode2 = AArch64CC::AL;
2725 llvm_unreachable("Unknown FP condition!");
2728 CondCode = AArch64CC::EQ;
2732 CondCode = AArch64CC::GT;
2736 CondCode = AArch64CC::GE;
2739 CondCode = AArch64CC::MI;
2742 CondCode = AArch64CC::LS;
2745 CondCode = AArch64CC::MI;
2746 CondCode2 = AArch64CC::GT;
2749 CondCode = AArch64CC::VC;
2752 CondCode = AArch64CC::VS;
2755 CondCode = AArch64CC::EQ;
2756 CondCode2 = AArch64CC::VS;
2759 CondCode = AArch64CC::HI;
2762 CondCode = AArch64CC::PL;
2766 CondCode = AArch64CC::LT;
2770 CondCode = AArch64CC::LE;
2774 CondCode = AArch64CC::NE;
2779 /// Convert a DAG fp condition code to an AArch64 CC.
2780 /// This differs from changeFPCCToAArch64CC in that it returns cond codes that
2781 /// should be AND'ed instead of OR'ed.
2782 static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
2783 AArch64CC::CondCode &CondCode,
2784 AArch64CC::CondCode &CondCode2) {
2785 CondCode2 = AArch64CC::AL;
2788 changeFPCCToAArch64CC(CC, CondCode, CondCode2);
2789 assert(CondCode2 == AArch64CC::AL);
2793 // == ((a olt b) || (a ogt b))
2794 // == ((a ord b) && (a une b))
2795 CondCode = AArch64CC::VC;
2796 CondCode2 = AArch64CC::NE;
2800 // == ((a uno b) || (a oeq b))
2801 // == ((a ule b) && (a uge b))
2802 CondCode = AArch64CC::PL;
2803 CondCode2 = AArch64CC::LE;
2808 /// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
2809 /// CC usable with the vector instructions. Fewer operations are available
2810 /// without a real NZCV register, so we have to use less efficient combinations
2811 /// to get the same effect.
2812 static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
2813 AArch64CC::CondCode &CondCode,
2814 AArch64CC::CondCode &CondCode2,
2819 // Mostly the scalar mappings work fine.
2820 changeFPCCToAArch64CC(CC, CondCode, CondCode2);
2826 CondCode = AArch64CC::MI;
2827 CondCode2 = AArch64CC::GE;
2834 // All of the compare-mask comparisons are ordered, but we can switch
2835 // between the two by a double inversion. E.g. ULE == !OGT.
2837 changeFPCCToAArch64CC(getSetCCInverse(CC, /* FP inverse */ MVT::f32),
2838 CondCode, CondCode2);
2843 static bool isLegalArithImmed(uint64_t C) {
2844 // Matches AArch64DAGToDAGISel::SelectArithImmed().
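// i.e. a 12-bit unsigned immediate, or a 12-bit immediate shifted left by 12.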
2845 bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
2846 LLVM_DEBUG(dbgs() << "Is imm " << C
2847 << " legal: " << (IsLegal ? "yes\n" : "no\n"));
// Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
// the grounds that "op1 - (-op2) == op1 + op2"? Not always: the C and V flags
// can be set differently by this operation. It comes down to whether
// "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are equal
// then everything is fine; if not, the optimization is wrong. Thus general
// comparisons are only valid if op2 != 0.
2858 // So, finally, the only LLVM-native comparisons that don't mention C and V
2859 // are SETEQ and SETNE. They're the only ones we can safely use CMN for in
2860 // the absence of information about op2.
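// For example, when op2 == 0, "subs op1, #0" always sets C (no borrow) while
// the equivalent "adds op1, #0" never produces a carry, so an unsigned
// condition such as HS would see different flags; EQ/NE only look at Z and
// are unaffected.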
2861 static bool isCMN(SDValue Op, ISD::CondCode CC) {
2862 return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
2863 (CC == ISD::SETEQ || CC == ISD::SETNE);
2866 static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
2867 SelectionDAG &DAG, SDValue Chain,
2869 EVT VT = LHS.getValueType();
2870 assert(VT != MVT::f128);
2872 const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2874 if (VT == MVT::f16 && !FullFP16) {
LHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
                  {Chain, LHS});
2877 RHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
2878 {LHS.getValue(1), RHS});
2879 Chain = RHS.getValue(1);
unsigned Opcode = IsSignaling ? AArch64ISD::STRICT_FCMPE : AArch64ISD::STRICT_FCMP;
2884 return DAG.getNode(Opcode, dl, {VT, MVT::Other}, {Chain, LHS, RHS});
2887 static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2888 const SDLoc &dl, SelectionDAG &DAG) {
2889 EVT VT = LHS.getValueType();
2890 const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2892 if (VT.isFloatingPoint()) {
2893 assert(VT != MVT::f128);
2894 if (VT == MVT::f16 && !FullFP16) {
2895 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
2896 RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
2899 return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
2902 // The CMP instruction is just an alias for SUBS, and representing it as
2903 // SUBS means that it's possible to get CSE with subtract operations.
2904 // A later phase can perform the optimization of setting the destination
2905 // register to WZR/XZR if it ends up being unused.
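// For example, "cmp w0, w1" is just an alias of "subs wzr, w0, w1".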
2906 unsigned Opcode = AArch64ISD::SUBS;
2908 if (isCMN(RHS, CC)) {
// Can we combine a (CMP op1, (sub 0, op2)) into a CMN instruction?
2910 Opcode = AArch64ISD::ADDS;
2911 RHS = RHS.getOperand(1);
2912 } else if (isCMN(LHS, CC)) {
// As we are looking for EQ/NE compares, the operands can be commuted; can
// we combine a (CMP (sub 0, op1), op2) into a CMN instruction?
2915 Opcode = AArch64ISD::ADDS;
2916 LHS = LHS.getOperand(1);
2917 } else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) {
2918 if (LHS.getOpcode() == ISD::AND) {
2919 // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
2920 // (a.k.a. ANDS) except that the flags are only guaranteed to work for one
2921 // of the signed comparisons.
2922 const SDValue ANDSNode = DAG.getNode(AArch64ISD::ANDS, dl,
2923 DAG.getVTList(VT, MVT_CC),
2926 // Replace all users of (and X, Y) with newly generated (ands X, Y)
2927 DAG.ReplaceAllUsesWith(LHS, ANDSNode);
2928 return ANDSNode.getValue(1);
2929 } else if (LHS.getOpcode() == AArch64ISD::ANDS) {
2930 // Use result of ANDS
2931 return LHS.getValue(1);
2935 return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS)
2936 .getValue(1);
2939 /// \defgroup AArch64CCMP CMP;CCMP matching
2941 /// These functions deal with the formation of CMP;CCMP;... sequences.
2942 /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
2943 /// a comparison. They set the NZCV flags to a predefined value if their
2944 /// predicate is false. This allows expressing arbitrary conjunctions, for
2945 /// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))"
2947 /// cmp A
2948 /// ccmp B, inv(CB), CA
2949 /// check for CB flags
2951 /// This naturally lets us implement chains of AND operations with SETCC
2952 /// operands. And we can even implement some other situations by transforming
2953 /// them:
2954 /// - We can implement (NEG SETCC) i.e. negating a single comparison by
2955 /// negating the flags used in the CCMP/FCCMP operation.
2956 /// - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
2957 /// by negating the flags we test for afterwards. i.e.
2958 /// NEG (CMP CCMP CCMP ...) can be implemented.
2959 /// - Note that we can only ever negate all previously processed results.
2960 /// What we can not implement by flipping the flags to test is a negation
2961 /// of two sub-trees (because the negation affects all sub-trees emitted so
2962 /// far, so the 2nd sub-tree we emit would also affect the first).
2963 /// With those tools we can implement some OR operations:
2964 /// - (OR (SETCC A) (SETCC B)) can be implemented via:
2965 /// NEG (AND (NEG (SETCC A)) (NEG (SETCC B)))
2966 /// - After transforming OR to NEG/AND combinations we may be able to use NEG
2967 /// elimination rules from earlier to implement the whole thing as a
2968 /// CCMP/FCCMP chain.
2970 /// As a complete example:
2971 /// or (or (setCA (cmp A)) (setCB (cmp B)))
2972 /// (and (setCC (cmp C)) (setCD (cmp D)))
2973 /// can be reassociated to:
2974 /// or (and (setCC (cmp C)) (setCD (cmp D)))
2975 /// (or (setCA (cmp A)) (setCB (cmp B)))
2976 /// can be transformed to:
2977 /// not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
2978 /// (and (not (setCA (cmp A))) (not (setCB (cmp B)))))
2979 /// which can be implemented as:
2980 /// cmp C
2981 /// ccmp D, inv(CD), CC
2982 /// ccmp A, CA, inv(CD)
2983 /// ccmp B, CB, inv(CA)
2984 /// check for CB flags
2986 /// A counterexample is "or (and A B) (and C D)" which translates to
2987 /// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))), we
2988 /// can only implement 1 of the inner (not) operations, but not both!
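///
/// A rough end-to-end sketch (assumed selection, for illustration only): for
/// "(a == 0) && (b == 17)" the chain could look like:
///   cmp w0, #0
///   ccmp w1, #17, #0, eq
///   cset w8, eq
/// where the #0 NZCV immediate makes the final EQ check fail whenever a != 0.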
2991 /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
2992 static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
2993 ISD::CondCode CC, SDValue CCOp,
2994 AArch64CC::CondCode Predicate,
2995 AArch64CC::CondCode OutCC,
2996 const SDLoc &DL, SelectionDAG &DAG) {
2997 unsigned Opcode = 0;
2998 const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
3000 if (LHS.getValueType().isFloatingPoint()) {
3001 assert(LHS.getValueType() != MVT::f128);
3002 if (LHS.getValueType() == MVT::f16 && !FullFP16) {
3003 LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS);
3004 RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS);
3006 Opcode = AArch64ISD::FCCMP;
3007 } else if (RHS.getOpcode() == ISD::SUB) {
3008 SDValue SubOp0 = RHS.getOperand(0);
3009 if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3010 // See emitComparison() on why we can only do this for SETEQ and SETNE.
3011 Opcode = AArch64ISD::CCMN;
3012 RHS = RHS.getOperand(1);
3016 Opcode = AArch64ISD::CCMP;
3018 SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC);
3019 AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
3020 unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
3021 SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
3022 return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
3025 /// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be
3026 /// expressed as a conjunction. See \ref AArch64CCMP.
3027 /// \param CanNegate Set to true if we can negate the whole sub-tree just by
3028 /// changing the conditions on the SETCC tests.
3029 /// (this means we can call emitConjunctionRec() with
3030 /// Negate==true on this sub-tree)
3031 /// \param MustBeFirst Set to true if this subtree needs to be negated and we
3032 /// cannot do the negation naturally. We are required to
3033 /// emit the subtree first in this case.
3034 /// \param WillNegate Is true if we are called when the result of this
3035 /// subexpression must be negated. This happens when the
3036 /// outer expression is an OR. We can use this fact to know
3037 /// that we have a double negation (or (or ...) ...) that
3038 /// can be implemented for free.
3039 static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
3040 bool &MustBeFirst, bool WillNegate,
3041 unsigned Depth = 0) {
3042 if (!Val.hasOneUse())
3044 unsigned Opcode = Val->getOpcode();
3045 if (Opcode == ISD::SETCC) {
3046 if (Val->getOperand(0).getValueType() == MVT::f128)
3049 MustBeFirst = false;
3052 // Protect against exponential runtime and stack overflow.
3055 if (Opcode == ISD::AND || Opcode == ISD::OR) {
3056 bool IsOR = Opcode == ISD::OR;
3057 SDValue O0 = Val->getOperand(0);
3058 SDValue O1 = Val->getOperand(1);
3061 if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1))
3065 if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1))
3068 if (MustBeFirstL && MustBeFirstR)
3072 // For an OR expression we need to be able to naturally negate at least
3073 // one side or we cannot do the transformation at all.
3074 if (!CanNegateL && !CanNegateR)
3076 // If the result of the OR will be negated and we can naturally negate
3077 // the leaves, then this sub-tree as a whole negates naturally.
3078 CanNegate = WillNegate && CanNegateL && CanNegateR;
3079 // If we cannot naturally negate the whole sub-tree, then this must be
3080 // emitted first.
3081 MustBeFirst = !CanNegate;
3083 assert(Opcode == ISD::AND && "Must be OR or AND");
3084 // We cannot naturally negate an AND operation.
3086 MustBeFirst = MustBeFirstL || MustBeFirstR;
3093 /// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain
3094 /// of CCMP/FCCMP ops. See @ref AArch64CCMP.
3095 /// Tries to transform the given i1 producing node @p Val to a series of compare
3096 /// and conditional compare operations. @returns an NZCV flags producing node
3097 /// and sets @p OutCC to the flags that should be tested or returns SDValue() if
3098 /// the transformation was not possible.
3099 /// \p Negate is true if we want this sub-tree to be negated just by changing
3100 /// SETCC conditions.
3101 static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
3102 AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
3103 AArch64CC::CondCode Predicate) {
3104 // We're at a tree leaf, produce a conditional comparison operation.
3105 unsigned Opcode = Val->getOpcode();
3106 if (Opcode == ISD::SETCC) {
3107 SDValue LHS = Val->getOperand(0);
3108 SDValue RHS = Val->getOperand(1);
3109 ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get();
3110 bool isInteger = LHS.getValueType().isInteger();
3112 CC = getSetCCInverse(CC, LHS.getValueType());
3114 // Determine OutCC and handle FP special case.
3116 OutCC = changeIntCCToAArch64CC(CC);
3118 assert(LHS.getValueType().isFloatingPoint());
3119 AArch64CC::CondCode ExtraCC;
3120 changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC);
3121 // Some floating point conditions can't be tested with a single condition
3122 // code. Construct an additional comparison in this case.
3123 if (ExtraCC != AArch64CC::AL) {
3125 if (!CCOp.getNode())
3126 ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG);
3128 ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate,
3131 Predicate = ExtraCC;
3135 // Produce a normal comparison if we are first in the chain
3137 return emitComparison(LHS, RHS, CC, DL, DAG);
3138 // Otherwise produce a ccmp.
3139 return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL,
3142 assert(Val->hasOneUse() && "Valid conjunction/disjunction tree");
3144 bool IsOR = Opcode == ISD::OR;
3146 SDValue LHS = Val->getOperand(0);
3149 bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR);
3150 assert(ValidL && "Valid conjunction/disjunction tree");
3153 SDValue RHS = Val->getOperand(1);
3156 bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR);
3157 assert(ValidR && "Valid conjunction/disjunction tree");
3160 // Swap sub-tree that must come first to the right side.
3162 assert(!MustBeFirstR && "Valid conjunction/disjunction tree");
3163 std::swap(LHS, RHS);
3164 std::swap(CanNegateL, CanNegateR);
3165 std::swap(MustBeFirstL, MustBeFirstR);
3171 bool NegateAfterAll;
3172 if (Opcode == ISD::OR) {
3173 // Swap the sub-tree that we can negate naturally to the left.
3175 assert(CanNegateR && "at least one side must be negatable");
3176 assert(!MustBeFirstR && "invalid conjunction/disjunction tree");
3178 std::swap(LHS, RHS);
3180 NegateAfterR = true;
3182 // Negate the left sub-tree if possible, otherwise negate the result.
3183 NegateR = CanNegateR;
3184 NegateAfterR = !CanNegateR;
3187 NegateAfterAll = !Negate;
3189 assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree");
3190 assert(!Negate && "Valid conjunction/disjunction tree");
3194 NegateAfterR = false;
3195 NegateAfterAll = false;
3199 AArch64CC::CondCode RHSCC;
3200 SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate);
3202 RHSCC = AArch64CC::getInvertedCondCode(RHSCC);
3203 SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC);
3205 OutCC = AArch64CC::getInvertedCondCode(OutCC);
3209 /// Emit expression as a conjunction (a series of CCMP/CFCMP ops).
3210 /// In some cases this is even possible with OR operations in the expression.
3211 /// See \ref AArch64CCMP.
3212 /// \see emitConjunctionRec().
3213 static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val,
3214 AArch64CC::CondCode &OutCC) {
3215 bool DummyCanNegate;
3216 bool DummyMustBeFirst;
3217 if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false))
3220 return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL);
3225 /// Returns how profitable it is to fold a comparison's operand's shift and/or
3226 /// extension operations.
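/// For instance (illustrative only), the compare itself can absorb a small
/// zero-extend plus shift, e.g. "cmp x0, w1, uxtb #2", so an operand of the
/// form (shl (and X, 0xFF), 2) is cheap to keep on the RHS of the compare.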
3227 static unsigned getCmpOperandFoldingProfit(SDValue Op) {
3228 auto isSupportedExtend = [&](SDValue V) {
3229 if (V.getOpcode() == ISD::SIGN_EXTEND_INREG)
3232 if (V.getOpcode() == ISD::AND)
3233 if (ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3234 uint64_t Mask = MaskCst->getZExtValue();
3235 return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
3241 if (!Op.hasOneUse())
3244 if (isSupportedExtend(Op))
3247 unsigned Opc = Op.getOpcode();
3248 if (Opc == ISD::SHL || Opc == ISD::SRL || Opc == ISD::SRA)
3249 if (ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3250 uint64_t Shift = ShiftCst->getZExtValue();
3251 if (isSupportedExtend(Op.getOperand(0)))
3252 return (Shift <= 4) ? 2 : 1;
3253 EVT VT = Op.getValueType();
3254 if ((VT == MVT::i32 && Shift <= 31) || (VT == MVT::i64 && Shift <= 63))
3261 static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3262 SDValue &AArch64cc, SelectionDAG &DAG,
3264 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3265 EVT VT = RHS.getValueType();
3266 uint64_t C = RHSC->getZExtValue();
3267 if (!isLegalArithImmed(C)) {
3268 // Constant does not fit, try adjusting it by one?
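// For instance (illustrative): "x < 0x1001" cannot encode 0x1001 directly,
// but it is equivalent to "x <= 0x1000", and 0x1000 is a legal shifted
// immediate.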
3274 if ((VT == MVT::i32 && C != 0x80000000 &&
3275 isLegalArithImmed((uint32_t)(C - 1))) ||
3276 (VT == MVT::i64 && C != 0x80000000ULL &&
3277 isLegalArithImmed(C - 1ULL))) {
3278 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3279 C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
3280 RHS = DAG.getConstant(C, dl, VT);
3285 if ((VT == MVT::i32 && C != 0 &&
3286 isLegalArithImmed((uint32_t)(C - 1))) ||
3287 (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) {
3288 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3289 C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
3290 RHS = DAG.getConstant(C, dl, VT);
3295 if ((VT == MVT::i32 && C != INT32_MAX &&
3296 isLegalArithImmed((uint32_t)(C + 1))) ||
3297 (VT == MVT::i64 && C != INT64_MAX &&
3298 isLegalArithImmed(C + 1ULL))) {
3299 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3300 C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
3301 RHS = DAG.getConstant(C, dl, VT);
3306 if ((VT == MVT::i32 && C != UINT32_MAX &&
3307 isLegalArithImmed((uint32_t)(C + 1))) ||
3308 (VT == MVT::i64 && C != UINT64_MAX &&
3309 isLegalArithImmed(C + 1ULL))) {
3310 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3311 C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
3312 RHS = DAG.getConstant(C, dl, VT);
3319 // Comparisons are canonicalized so that the RHS operand is simpler than the
3320 // LHS one, the extreme case being when RHS is an immediate. However, AArch64
3321 // can fold some shift+extend operations on the RHS operand, so swap the
3322 // operands if that can be done.
3323 //
3324 // For example:
3325 // lsl w13, w11, #1
3326 // cmp w13, w12
3327 // can be turned into:
3328 // cmp w12, w11, lsl #1
3329 if (!isa<ConstantSDNode>(RHS) ||
3330 !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) {
3331 SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
3333 if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
3334 std::swap(LHS, RHS);
3335 CC = ISD::getSetCCSwappedOperands(CC);
3340 AArch64CC::CondCode AArch64CC;
3341 if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
3342 const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
3344 // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
3345 // For the i8 operand, the largest immediate is 255, so this can be easily
3346 // encoded in the compare instruction. For the i16 operand, however, the
3347 // largest immediate cannot be encoded in the compare.
3348 // Therefore, use a sign extending load and cmn to avoid materializing the
3349 // -1 constant. For example,
3350 // movz w1, #65535
3351 // ldrh w0, [x0, #0]
3352 // cmp w0, w1
3353 // >
3354 // ldrsh w0, [x0, #0]
3355 // cmn w0, #1
3356 // Fundamentally, we're relying on the property that (zext LHS) == (zext RHS)
3357 // if and only if (sext LHS) == (sext RHS). The checks are in place to
3358 // ensure both the LHS and RHS are truly zero extended and to make sure the
3359 // transformation is profitable.
3360 if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
3361 cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
3362 cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
3363 LHS.getNode()->hasNUsesOfValue(1, 0)) {
3364 int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
3365 if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
3367 DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
3368 DAG.getValueType(MVT::i16));
3369 Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl,
3370 RHS.getValueType()),
3372 AArch64CC = changeIntCCToAArch64CC(CC);
3376 if (!Cmp && (RHSC->isZero() || RHSC->isOne())) {
3377 if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) {
3378 if ((CC == ISD::SETNE) ^ RHSC->isZero())
3379 AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
3385 Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
3386 AArch64CC = changeIntCCToAArch64CC(CC);
3388 AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC);
3392 static std::pair<SDValue, SDValue>
3393 getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
3394 assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) &&
3395 "Unsupported value type");
3396 SDValue Value, Overflow;
3398 SDValue LHS = Op.getOperand(0);
3399 SDValue RHS = Op.getOperand(1);
3401 switch (Op.getOpcode()) {
3403 llvm_unreachable("Unknown overflow instruction!");
3405 Opc = AArch64ISD::ADDS;
3409 Opc = AArch64ISD::ADDS;
3413 Opc = AArch64ISD::SUBS;
3417 Opc = AArch64ISD::SUBS;
3420 // Multiply needs a little bit of extra work.
3424 bool IsSigned = Op.getOpcode() == ISD::SMULO;
3425 if (Op.getValueType() == MVT::i32) {
3426 // Extend to 64-bits, then perform a 64-bit multiply.
3427 unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3428 LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS);
3429 RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS);
3430 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
3431 Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
3433 // Check that the result fits into a 32-bit integer.
3434 SDVTList VTs = DAG.getVTList(MVT::i64, MVT_CC);
3436 // cmp xreg, wreg, sxtw
3437 SDValue SExtMul = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Value);
3439 DAG.getNode(AArch64ISD::SUBS, DL, VTs, Mul, SExtMul).getValue(1);
3441 // tst xreg, #0xffffffff00000000
3442 SDValue UpperBits = DAG.getConstant(0xFFFFFFFF00000000, DL, MVT::i64);
3444 DAG.getNode(AArch64ISD::ANDS, DL, VTs, Mul, UpperBits).getValue(1);
3448 assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type");
3449 // For the 64 bit multiply
3450 Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
3452 SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS);
3453 SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value,
3454 DAG.getConstant(63, DL, MVT::i64));
3455 // It is important that LowerBits is last, otherwise the arithmetic
3456 // shift will not be folded into the compare (SUBS).
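// i.e. the intent (assumed selection, for illustration) is a single
// "cmp x_hi, x_val, asr #63" rather than a separate asr followed by a cmp.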
3457 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
3458 Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits)
3461 SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS);
3462 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
3464 DAG.getNode(AArch64ISD::SUBS, DL, VTs,
3465 DAG.getConstant(0, DL, MVT::i64),
3466 UpperBits).getValue(1);
3473 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
3475 // Emit the AArch64 operation with overflow check.
3476 Value = DAG.getNode(Opc, DL, VTs, LHS, RHS);
3477 Overflow = Value.getValue(1);
3479 return std::make_pair(Value, Overflow);
3482 SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const {
3483 if (useSVEForFixedLengthVectorVT(Op.getValueType()))
3484 return LowerToScalableOp(Op, DAG);
3486 SDValue Sel = Op.getOperand(0);
3487 SDValue Other = Op.getOperand(1);
3490 // If the operand is an overflow checking operation, invert the condition
3491 // code and kill the Not operation. I.e., transform:
3492 // (xor (overflow_op_bool, 1))
3494 // (csel 1, 0, invert(cc), overflow_op_bool)
3495 // ... which later gets transformed to just a cset instruction with an
3496 // inverted condition code, rather than a cset + eor sequence.
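// For instance (illustrative sequence): for an unsigned-add overflow flag
// this allows a single "cset w0, lo" instead of "cset w8, hs; eor w0, w8, #1".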
3497 if (isOneConstant(Other) && ISD::isOverflowIntrOpRes(Sel)) {
3498 // Only lower legal XALUO ops.
3499 if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0)))
3502 SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3503 SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3504 AArch64CC::CondCode CC;
3505 SDValue Value, Overflow;
3506 std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG);
3507 SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
3508 return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal,
3511 // If neither operand is a SELECT_CC, give up.
3512 if (Sel.getOpcode() != ISD::SELECT_CC)
3513 std::swap(Sel, Other);
3514 if (Sel.getOpcode() != ISD::SELECT_CC)
3517 // The folding we want to perform is:
3518 // (xor x, (select_cc a, b, cc, 0, -1) )
3520 // (csel x, (xor x, -1), cc ...)
3522 // The latter will get matched to a CSINV instruction.
3524 ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get();
3525 SDValue LHS = Sel.getOperand(0);
3526 SDValue RHS = Sel.getOperand(1);
3527 SDValue TVal = Sel.getOperand(2);
3528 SDValue FVal = Sel.getOperand(3);
3530 // FIXME: This could be generalized to non-integer comparisons.
3531 if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
3534 ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
3535 ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
3537 // The values aren't constants, this isn't the pattern we're looking for.
3538 if (!CFVal || !CTVal)
3541 // We can commute the SELECT_CC by inverting the condition. This
3542 // might be needed to make this fit into a CSINV pattern.
3543 if (CTVal->isAllOnes() && CFVal->isZero()) {
3544 std::swap(TVal, FVal);
3545 std::swap(CTVal, CFVal);
3546 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
3549 // If the constants line up, perform the transform!
3550 if (CTVal->isZero() && CFVal->isAllOnes()) {
3552 SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
3555 TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other,
3556 DAG.getConstant(-1ULL, dl, Other.getValueType()));
3558 return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal,
3565 // If Invert is false, sets 'C' bit of NZCV to 0 if value is 0, else sets 'C'
3566 // bit to 1. If Invert is true, sets 'C' bit of NZCV to 1 if value is 0, else
3567 // sets 'C' bit to 0.
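// A minimal sketch of what this emits: for Invert == false, SUBS(Value, 1)
// sets C (HS) exactly when Value != 0; for Invert == true, SUBS(0, Value)
// sets C exactly when Value == 0.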
3568 static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG, bool Invert) {
3570 EVT VT = Value.getValueType();
3571 SDValue Op0 = Invert ? DAG.getConstant(0, DL, VT) : Value;
3572 SDValue Op1 = Invert ? Value : DAG.getConstant(1, DL, VT);
3574 DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::Glue), Op0, Op1);
3575 return Cmp.getValue(1);
3578 // If Invert is false, value is 1 if 'C' bit of NZCV is 1, else 0.
3579 // If Invert is true, value is 0 if 'C' bit of NZCV is 1, else 1.
3580 static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG,
3582 assert(Flag.getResNo() == 1);
3584 SDValue Zero = DAG.getConstant(0, DL, VT);
3585 SDValue One = DAG.getConstant(1, DL, VT);
3586 unsigned Cond = Invert ? AArch64CC::LO : AArch64CC::HS;
3587 SDValue CC = DAG.getConstant(Cond, DL, MVT::i32);
3588 return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
3591 // Value is 1 if 'V' bit of NZCV is 1, else 0
3592 static SDValue overflowFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) {
3593 assert(Flag.getResNo() == 1);
3595 SDValue Zero = DAG.getConstant(0, DL, VT);
3596 SDValue One = DAG.getConstant(1, DL, VT);
3597 SDValue CC = DAG.getConstant(AArch64CC::VS, DL, MVT::i32);
3598 return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
3601 // This lowering is inefficient, but it will get cleaned up by
3602 // `foldOverflowCheck`
3603 static SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG, unsigned Opcode,
3605 EVT VT0 = Op.getValue(0).getValueType();
3606 EVT VT1 = Op.getValue(1).getValueType();
3608 if (VT0 != MVT::i32 && VT0 != MVT::i64)
3611 bool InvertCarry = Opcode == AArch64ISD::SBCS;
3612 SDValue OpLHS = Op.getOperand(0);
3613 SDValue OpRHS = Op.getOperand(1);
3614 SDValue OpCarryIn = valueToCarryFlag(Op.getOperand(2), DAG, InvertCarry);
3617 SDVTList VTs = DAG.getVTList(VT0, VT1);
3619 SDValue Sum = DAG.getNode(Opcode, DL, DAG.getVTList(VT0, MVT::Glue), OpLHS,
3623 IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG)
3624 : carryFlagToValue(Sum.getValue(1), VT1, DAG, InvertCarry);
3626 return DAG.getNode(ISD::MERGE_VALUES, DL, VTs, Sum, OutFlag);
3629 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
3630 // Let legalize expand this if it isn't a legal type yet.
3631 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
3635 AArch64CC::CondCode CC;
3636 // The actual operation that sets the overflow or carry flag.
3637 SDValue Value, Overflow;
3638 std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG);
3640 // We use 0 and 1 as false and true values.
3641 SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3642 SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3644 // We use an inverted condition, because the conditional select is inverted
3645 // too. This will allow it to be selected to a single instruction:
3646 // CSINC Wd, WZR, WZR, invert(cond).
3647 SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
3648 Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal,
3651 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
3652 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
3655 // Prefetch operands are:
3656 // 1: Address to prefetch
3658 // 3: int locality (0 = no locality ... 3 = extreme locality)
3659 // 4: bool isDataCache
3660 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
3662 unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3663 unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
3664 unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3666 bool IsStream = !Locality;
3667 // When the locality number is set
3668 if (Locality) {
3669 // The front-end should have filtered out the out-of-range values
3670 assert(Locality <= 3 && "Prefetch locality out-of-range");
3671 // The locality degree is the opposite of the cache speed.
3672 // Put the number the other way around.
3673 // The encoding starts at 0 for level 1
3674 Locality = 3 - Locality;
3677 // build the mask value encoding the expected behavior.
3678 unsigned PrfOp = (IsWrite << 4) | // Load/Store bit
3679 (!IsData << 3) | // IsDataCache bit
3680 (Locality << 1) | // Cache level bits
3681 (unsigned)IsStream; // Stream bit
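// Example encoding (for illustration): a read, data-cache prefetch with
// locality 3 gives IsWrite=0, !IsData=0, Locality=0, IsStream=0, i.e.
// PrfOp == 0b00000, which corresponds to PLDL1KEEP.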
3682 return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0),
3683 DAG.getConstant(PrfOp, DL, MVT::i32), Op.getOperand(1));
3686 SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
3687 SelectionDAG &DAG) const {
3688 EVT VT = Op.getValueType();
3689 if (VT.isScalableVector())
3690 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_EXTEND_MERGE_PASSTHRU);
3692 if (useSVEForFixedLengthVectorVT(VT))
3693 return LowerFixedLengthFPExtendToSVE(Op, DAG);
3695 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
3699 SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
3700 SelectionDAG &DAG) const {
3701 if (Op.getValueType().isScalableVector())
3702 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU);
3704 bool IsStrict = Op->isStrictFPOpcode();
3705 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
3706 EVT SrcVT = SrcVal.getValueType();
3708 if (useSVEForFixedLengthVectorVT(SrcVT))
3709 return LowerFixedLengthFPRoundToSVE(Op, DAG);
3711 if (SrcVT != MVT::f128) {
3712 // Expand cases where the input is a vector bigger than NEON.
3713 if (useSVEForFixedLengthVectorVT(SrcVT))
3716 // It's legal except when f128 is involved
3723 SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
3724 SelectionDAG &DAG) const {
3725 // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
3726 // Any additional optimization in this function should be recorded
3727 // in the cost tables.
3728 bool IsStrict = Op->isStrictFPOpcode();
3729 EVT InVT = Op.getOperand(IsStrict ? 1 : 0).getValueType();
3730 EVT VT = Op.getValueType();
3732 if (VT.isScalableVector()) {
3733 unsigned Opcode = Op.getOpcode() == ISD::FP_TO_UINT
3734 ? AArch64ISD::FCVTZU_MERGE_PASSTHRU
3735 : AArch64ISD::FCVTZS_MERGE_PASSTHRU;
3736 return LowerToPredicatedOp(Op, DAG, Opcode);
3739 if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT))
3740 return LowerFixedLengthFPToIntToSVE(Op, DAG);
3742 unsigned NumElts = InVT.getVectorNumElements();
3744 // f16 conversions are promoted to f32 when full fp16 is not supported.
3745 if (InVT.getVectorElementType() == MVT::f16 &&
3746 !Subtarget->hasFullFP16()) {
3747 MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts);
3750 SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NewVT, MVT::Other},
3751 {Op.getOperand(0), Op.getOperand(1)});
3752 return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
3753 {Ext.getValue(1), Ext.getValue(0)});
3756 Op.getOpcode(), dl, Op.getValueType(),
3757 DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0)));
3760 uint64_t VTSize = VT.getFixedSizeInBits();
3761 uint64_t InVTSize = InVT.getFixedSizeInBits();
3762 if (VTSize < InVTSize) {
3765 InVT = InVT.changeVectorElementTypeToInteger();
3766 SDValue Cv = DAG.getNode(Op.getOpcode(), dl, {InVT, MVT::Other},
3767 {Op.getOperand(0), Op.getOperand(1)});
3768 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
3769 return DAG.getMergeValues({Trunc, Cv.getValue(1)}, dl);
3772 DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(),
3774 return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
3777 if (VTSize > InVTSize) {
3780 MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()),
3781 VT.getVectorNumElements());
3783 SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {ExtVT, MVT::Other},
3784 {Op.getOperand(0), Op.getOperand(1)});
3785 return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
3786 {Ext.getValue(1), Ext.getValue(0)});
3788 SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0));
3789 return DAG.getNode(Op.getOpcode(), dl, VT, Ext);
3792 // Use a scalar operation for conversions between single-element vectors of
3796 SDValue Extract = DAG.getNode(
3797 ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
3798 Op.getOperand(IsStrict ? 1 : 0), DAG.getConstant(0, dl, MVT::i64));
3799 EVT ScalarVT = VT.getScalarType();
3801 return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
3802 {Op.getOperand(0), Extract});
3803 return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
3806 // Type changing conversions are illegal.
3810 SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
3811 SelectionDAG &DAG) const {
3812 bool IsStrict = Op->isStrictFPOpcode();
3813 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
3815 if (SrcVal.getValueType().isVector())
3816 return LowerVectorFP_TO_INT(Op, DAG);
3818 // f16 conversions are promoted to f32 when full fp16 is not supported.
3819 if (SrcVal.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
3823 DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
3824 {Op.getOperand(0), SrcVal});
3825 return DAG.getNode(Op.getOpcode(), dl, {Op.getValueType(), MVT::Other},
3826 {Ext.getValue(1), Ext.getValue(0)});
3829 Op.getOpcode(), dl, Op.getValueType(),
3830 DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, SrcVal));
3833 if (SrcVal.getValueType() != MVT::f128) {
3834 // It's legal except when f128 is involved
3842 AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
3843 SelectionDAG &DAG) const {
3844 // AArch64 FP-to-int conversions saturate to the destination element size, so
3845 // we can lower common saturating conversions to simple instructions.
3846 SDValue SrcVal = Op.getOperand(0);
3847 EVT SrcVT = SrcVal.getValueType();
3848 EVT DstVT = Op.getValueType();
3849 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3851 uint64_t SrcElementWidth = SrcVT.getScalarSizeInBits();
3852 uint64_t DstElementWidth = DstVT.getScalarSizeInBits();
3853 uint64_t SatWidth = SatVT.getScalarSizeInBits();
3854 assert(SatWidth <= DstElementWidth &&
3855 "Saturation width cannot exceed result width");
3857 // TODO: Consider lowering to SVE operations, as in LowerVectorFP_TO_INT.
3858 // Currently, the `llvm.fpto[su]i.sat.*` intrinsics don't accept scalable
3859 // types, so this is hard to reach.
3860 if (DstVT.isScalableVector())
3863 EVT SrcElementVT = SrcVT.getVectorElementType();
3865 // In the absence of FP16 support, promote f16 to f32 and saturate the result.
3866 if (SrcElementVT == MVT::f16 &&
3867 (!Subtarget->hasFullFP16() || DstElementWidth > 16)) {
3868 MVT F32VT = MVT::getVectorVT(MVT::f32, SrcVT.getVectorNumElements());
3869 SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), F32VT, SrcVal);
3871 SrcElementVT = MVT::f32;
3872 SrcElementWidth = 32;
3873 } else if (SrcElementVT != MVT::f64 && SrcElementVT != MVT::f32 &&
3874 SrcElementVT != MVT::f16)
3878 // Cases that we can emit directly.
3879 if (SrcElementWidth == DstElementWidth && SrcElementWidth == SatWidth)
3880 return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
3881 DAG.getValueType(DstVT.getScalarType()));
3883 // Otherwise we emit a cvt that saturates to a higher BW, and saturate the
3884 // result. This is only valid if the legal cvt is larger than the saturate
3885 // width. For double, as we don't have MIN/MAX, it can be simpler to scalarize
3886 // (at least until sqxtn is selected).
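// For example (illustrative): fptosi.sat.v4i16 of a v4f32 source becomes a
// v4i32 fcvtzs, a clamp to [-32768, 32767] via smin/smax, and a truncate
// back to v4i16.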
3887 if (SrcElementWidth < SatWidth || SrcElementVT == MVT::f64)
3890 EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
3891 SDValue NativeCvt = DAG.getNode(Op.getOpcode(), DL, IntVT, SrcVal,
3892 DAG.getValueType(IntVT.getScalarType()));
3894 if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
3895 SDValue MinC = DAG.getConstant(
3896 APInt::getSignedMaxValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
3897 SDValue Min = DAG.getNode(ISD::SMIN, DL, IntVT, NativeCvt, MinC);
3898 SDValue MaxC = DAG.getConstant(
3899 APInt::getSignedMinValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
3900 Sat = DAG.getNode(ISD::SMAX, DL, IntVT, Min, MaxC);
3902 SDValue MinC = DAG.getConstant(
3903 APInt::getAllOnesValue(SatWidth).zext(SrcElementWidth), DL, IntVT);
3904 Sat = DAG.getNode(ISD::UMIN, DL, IntVT, NativeCvt, MinC);
3907 return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
3910 SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
3911 SelectionDAG &DAG) const {
3912 // AArch64 FP-to-int conversions saturate to the destination register size, so
3913 // we can lower common saturating conversions to simple instructions.
3914 SDValue SrcVal = Op.getOperand(0);
3915 EVT SrcVT = SrcVal.getValueType();
3917 if (SrcVT.isVector())
3918 return LowerVectorFP_TO_INT_SAT(Op, DAG);
3920 EVT DstVT = Op.getValueType();
3921 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3922 uint64_t SatWidth = SatVT.getScalarSizeInBits();
3923 uint64_t DstWidth = DstVT.getScalarSizeInBits();
3924 assert(SatWidth <= DstWidth && "Saturation width cannot exceed result width");
3926 // In the absence of FP16 support, promote f16 to f32 and saturate the result.
3927 if (SrcVT == MVT::f16 && !Subtarget->hasFullFP16()) {
3928 SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, SrcVal);
3930 } else if (SrcVT != MVT::f64 && SrcVT != MVT::f32 && SrcVT != MVT::f16)
3934 // Cases that we can emit directly.
3935 if ((SrcVT == MVT::f64 || SrcVT == MVT::f32 ||
3936 (SrcVT == MVT::f16 && Subtarget->hasFullFP16())) &&
3937 DstVT == SatVT && (DstVT == MVT::i64 || DstVT == MVT::i32))
3938 return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
3939 DAG.getValueType(DstVT));
3941 // Otherwise we emit a cvt that saturates to a higher BW, and saturate the
3942 // result. This is only valid if the legal cvt is larger than the saturate
3944 if (DstWidth < SatWidth)
3948 DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal, DAG.getValueType(DstVT));
3950 if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
3951 SDValue MinC = DAG.getConstant(
3952 APInt::getSignedMaxValue(SatWidth).sext(DstWidth), DL, DstVT);
3953 SDValue Min = DAG.getNode(ISD::SMIN, DL, DstVT, NativeCvt, MinC);
3954 SDValue MaxC = DAG.getConstant(
3955 APInt::getSignedMinValue(SatWidth).sext(DstWidth), DL, DstVT);
3956 Sat = DAG.getNode(ISD::SMAX, DL, DstVT, Min, MaxC);
3958 SDValue MinC = DAG.getConstant(
3959 APInt::getAllOnesValue(SatWidth).zext(DstWidth), DL, DstVT);
3960 Sat = DAG.getNode(ISD::UMIN, DL, DstVT, NativeCvt, MinC);
3963 return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
3966 SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
3967 SelectionDAG &DAG) const {
3968 // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
3969 // Any additional optimization in this function should be recorded
3970 // in the cost tables.
3971 bool IsStrict = Op->isStrictFPOpcode();
3972 EVT VT = Op.getValueType();
3974 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
3975 EVT InVT = In.getValueType();
3976 unsigned Opc = Op.getOpcode();
3977 bool IsSigned = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
3979 if (VT.isScalableVector()) {
3980 if (InVT.getVectorElementType() == MVT::i1) {
3981 // We can't directly extend an SVE predicate; extend it first.
3982 unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3983 EVT CastVT = getPromotedVTForPredicate(InVT);
3984 In = DAG.getNode(CastOpc, dl, CastVT, In);
3985 return DAG.getNode(Opc, dl, VT, In);
3988 unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
3989 : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
3990 return LowerToPredicatedOp(Op, DAG, Opcode);
3993 if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT))
3994 return LowerFixedLengthIntToFPToSVE(Op, DAG);
3996 uint64_t VTSize = VT.getFixedSizeInBits();
3997 uint64_t InVTSize = InVT.getFixedSizeInBits();
3998 if (VTSize < InVTSize) {
4000 MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()),
4001 InVT.getVectorNumElements());
4003 In = DAG.getNode(Opc, dl, {CastVT, MVT::Other},
4004 {Op.getOperand(0), In});
4006 ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
4007 {In.getValue(1), In.getValue(0), DAG.getIntPtrConstant(0, dl)});
4009 In = DAG.getNode(Opc, dl, CastVT, In);
4010 return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl));
4013 if (VTSize > InVTSize) {
4014 unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4015 EVT CastVT = VT.changeVectorElementTypeToInteger();
4016 In = DAG.getNode(CastOpc, dl, CastVT, In);
4018 return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op.getOperand(0), In});
4019 return DAG.getNode(Opc, dl, VT, In);
4022 // Use a scalar operation for conversions between single-element vectors of
4023 // the same size.
4024 if (VT.getVectorNumElements() == 1) {
4025 SDValue Extract = DAG.getNode(
4026 ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
4027 In, DAG.getConstant(0, dl, MVT::i64));
4028 EVT ScalarVT = VT.getScalarType();
4030 return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
4031 {Op.getOperand(0), Extract});
4032 return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
4038 SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
4039 SelectionDAG &DAG) const {
4040 if (Op.getValueType().isVector())
4041 return LowerVectorINT_TO_FP(Op, DAG);
4043 bool IsStrict = Op->isStrictFPOpcode();
4044 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
4046 // f16 conversions are promoted to f32 when full fp16 is not supported.
4047 if (Op.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
4050 SDValue Val = DAG.getNode(Op.getOpcode(), dl, {MVT::f32, MVT::Other},
4051 {Op.getOperand(0), SrcVal});
4053 ISD::STRICT_FP_ROUND, dl, {MVT::f16, MVT::Other},
4054 {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)});
4057 ISD::FP_ROUND, dl, MVT::f16,
4058 DAG.getNode(Op.getOpcode(), dl, MVT::f32, SrcVal),
4059 DAG.getIntPtrConstant(0, dl));
4062 // i128 conversions are libcalls.
4063 if (SrcVal.getValueType() == MVT::i128)
4066 // Other conversions are legal, unless it's to the completely software-based
4067 // fp128.
4068 if (Op.getValueType() != MVT::f128)
4073 SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
4074 SelectionDAG &DAG) const {
4075 // For iOS, we want to call an alternative entry point: __sincos_stret,
4076 // which returns the values in two S / D registers.
4078 SDValue Arg = Op.getOperand(0);
4079 EVT ArgVT = Arg.getValueType();
4080 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
4087 Entry.IsSExt = false;
4088 Entry.IsZExt = false;
4089 Args.push_back(Entry);
4091 RTLIB::Libcall LC = ArgVT == MVT::f64 ? RTLIB::SINCOS_STRET_F64
4092 : RTLIB::SINCOS_STRET_F32;
4093 const char *LibcallName = getLibcallName(LC);
4095 DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));
4097 StructType *RetTy = StructType::get(ArgTy, ArgTy);
4098 TargetLowering::CallLoweringInfo CLI(DAG);
4100 .setChain(DAG.getEntryNode())
4101 .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));
4103 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
4104 return CallResult.first;
4107 static MVT getSVEContainerType(EVT ContentTy);
4109 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
4110 SelectionDAG &DAG) const {
4111 EVT OpVT = Op.getValueType();
4112 EVT ArgVT = Op.getOperand(0).getValueType();
4114 if (useSVEForFixedLengthVectorVT(OpVT))
4115 return LowerFixedLengthBitcastToSVE(Op, DAG);
4117 if (OpVT.isScalableVector()) {
4118 // Bitcasting between unpacked vector types of different element counts is
4119 // not a NOP because the live elements are laid out differently.
4121 // e.g. nxv2i32 = XX??XX??
4122 // nxv4f16 = X?X?X?X?
4123 if (OpVT.getVectorElementCount() != ArgVT.getVectorElementCount())
4126 if (isTypeLegal(OpVT) && !isTypeLegal(ArgVT)) {
4127 assert(OpVT.isFloatingPoint() && !ArgVT.isFloatingPoint() &&
4128 "Expected int->fp bitcast!");
4130 DAG.getNode(ISD::ANY_EXTEND, SDLoc(Op), getSVEContainerType(ArgVT),
4132 return getSVESafeBitCast(OpVT, ExtResult, DAG);
4134 return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG);
4137 if (OpVT != MVT::f16 && OpVT != MVT::bf16)
4140 // Bitcasts between f16 and bf16 are legal.
4141 if (ArgVT == MVT::f16 || ArgVT == MVT::bf16)
4144 assert(ArgVT == MVT::i16);
4147 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0));
4148 Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op);
4150 DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, OpVT, Op,
4151 DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
4155 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
4156 if (OrigVT.getSizeInBits() >= 64)
4159 assert(OrigVT.isSimple() && "Expecting a simple value type");
4161 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
4162 switch (OrigSimpleTy) {
4163 default: llvm_unreachable("Unexpected Vector Type");
4172 static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
4175 unsigned ExtOpcode) {
4176 // The vector originally had a size of OrigTy. It was then extended to ExtTy.
4177 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
4178 // 64-bits we need to insert a new extension so that it will be 64-bits.
4179 assert(ExtTy.is128BitVector() && "Unexpected extension size");
4180 if (OrigTy.getSizeInBits() >= 64)
4183 // Must extend size to at least 64 bits to be used as an operand for VMULL.
4184 EVT NewVT = getExtensionTo64Bits(OrigTy);
4186 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
4189 static bool isOperandOfHigherHalf(SDValue &Op) {
4190 SDNode *OpNode = Op.getNode();
4191 if (OpNode->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
4194 ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpNode->getOperand(1));
4195 if (!C || C->getZExtValue() != 1)
4198 EVT VT = OpNode->getOperand(0).getValueType();
4200 return VT.isFixedLengthVector() && VT.getVectorNumElements() == 2;
4203 static bool areOperandsOfHigherHalf(SDValue &Op1, SDValue &Op2) {
4204 return isOperandOfHigherHalf(Op1) && isOperandOfHigherHalf(Op2);
4207 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
4209 EVT VT = N->getValueType(0);
4211 if (N->getOpcode() != ISD::BUILD_VECTOR)
4214 for (const SDValue &Elt : N->op_values()) {
4215 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
4216 unsigned EltSize = VT.getScalarSizeInBits();
4217 unsigned HalfSize = EltSize / 2;
4219 if (!isIntN(HalfSize, C->getSExtValue()))
4222 if (!isUIntN(HalfSize, C->getZExtValue()))
4233 static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
4234 if (N->getOpcode() == ISD::SIGN_EXTEND ||
4235 N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND)
4236 return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
4237 N->getOperand(0)->getValueType(0),
4241 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
4242 EVT VT = N->getValueType(0);
4244 unsigned EltSize = VT.getScalarSizeInBits() / 2;
4245 unsigned NumElts = VT.getVectorNumElements();
4246 MVT TruncVT = MVT::getIntegerVT(EltSize);
4247 SmallVector<SDValue, 8> Ops;
4248 for (unsigned i = 0; i != NumElts; ++i) {
4249 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
4250 const APInt &CInt = C->getAPIntValue();
4251 // Element types smaller than 32 bits are not legal, so use i32 elements.
4252 // The values are implicitly truncated so sext vs. zext doesn't matter.
4253 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
4255 return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
4258 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
4259 return N->getOpcode() == ISD::SIGN_EXTEND ||
4260 N->getOpcode() == ISD::ANY_EXTEND ||
4261 isExtendedBUILD_VECTOR(N, DAG, true);
4264 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
4265 return N->getOpcode() == ISD::ZERO_EXTEND ||
4266 N->getOpcode() == ISD::ANY_EXTEND ||
4267 isExtendedBUILD_VECTOR(N, DAG, false);
4270 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
4271 unsigned Opcode = N->getOpcode();
4272 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4273 SDNode *N0 = N->getOperand(0).getNode();
4274 SDNode *N1 = N->getOperand(1).getNode();
4275 return N0->hasOneUse() && N1->hasOneUse() &&
4276 isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
4281 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
4282 unsigned Opcode = N->getOpcode();
4283 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4284 SDNode *N0 = N->getOperand(0).getNode();
4285 SDNode *N1 = N->getOperand(1).getNode();
4286 return N0->hasOneUse() && N1->hasOneUse() &&
4287 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
4292 SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4293 SelectionDAG &DAG) const {
4294 // The rounding mode is in bits 23:22 of the FPCR.
4295 // The AArch64 rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0.
4296 // The formula we use to implement this is ((FPCR + (1 << 22)) >> 22) & 3
4297 // so that the shift + and get folded into a bitfield extract.
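// For example (illustrative): FPCR rounding mode 0b11 (round toward zero)
// maps to ((3 + 1) & 3) == 0, the FLT_ROUNDS value for round-toward-zero.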
4300 SDValue Chain = Op.getOperand(0);
4301 SDValue FPCR_64 = DAG.getNode(
4302 ISD::INTRINSIC_W_CHAIN, dl, {MVT::i64, MVT::Other},
4303 {Chain, DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, MVT::i64)});
4304 Chain = FPCR_64.getValue(1);
4305 SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64);
4306 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32,
4307 DAG.getConstant(1U << 22, dl, MVT::i32));
4308 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
4309 DAG.getConstant(22, dl, MVT::i32));
4310 SDValue AND = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
4311 DAG.getConstant(3, dl, MVT::i32));
4312 return DAG.getMergeValues({AND, Chain}, dl);
4315 SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
4316 SelectionDAG &DAG) const {
4318 SDValue Chain = Op->getOperand(0);
4319 SDValue RMValue = Op->getOperand(1);
4321 // The rounding mode is in bits 23:22 of the FPCR.
4322 // The llvm.set.rounding argument value to the rounding mode in FPCR mapping
4323 // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
4324 // (((arg - 1) & 3) << 22).
4326 // The argument of llvm.set.rounding must be within the segment [0, 3], so
4327 // NearestTiesToAway (4) is not handled here. It is the responsibility of the
4328 // code that generated llvm.set.rounding to ensure this condition.
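// For example (illustrative): llvm.set.rounding(0) (round toward zero)
// yields ((0 - 1) & 3) == 3, i.e. FPCR RMode 0b11.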
4330 // Calculate new value of FPCR[23:22].
4331 RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
4332 DAG.getConstant(1, DL, MVT::i32));
4333 RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
4334 DAG.getConstant(0x3, DL, MVT::i32));
4336 DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
4337 DAG.getConstant(AArch64::RoundingBitsPos, DL, MVT::i32));
4338 RMValue = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, RMValue);
4340 // Get current value of FPCR.
4342 Chain, DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)};
4344 DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other}, Ops);
4345 Chain = FPCR.getValue(1);
4346 FPCR = FPCR.getValue(0);
4348 // Put the new rounding mode into FPCR[23:22].
4349 const int RMMask = ~(AArch64::Rounding::rmMask << AArch64::RoundingBitsPos);
4350 FPCR = DAG.getNode(ISD::AND, DL, MVT::i64, FPCR,
4351 DAG.getConstant(RMMask, DL, MVT::i64));
4352 FPCR = DAG.getNode(ISD::OR, DL, MVT::i64, FPCR, RMValue);
4354 Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
4356 return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
4359 SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
4360 EVT VT = Op.getValueType();
4362 // If SVE is available then i64 vector multiplications can also be made legal.
4363 bool OverrideNEON = VT == MVT::v2i64 || VT == MVT::v1i64;
4365 if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, OverrideNEON))
4366 return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED);
4368 // Multiplications are only custom-lowered for 128-bit vectors so that
4369 // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
4370 assert(VT.is128BitVector() && VT.isInteger() &&
4371 "unexpected type for custom-lowering ISD::MUL");
4372 SDNode *N0 = Op.getOperand(0).getNode();
4373 SDNode *N1 = Op.getOperand(1).getNode();
4374 unsigned NewOpc = 0;
4376 bool isN0SExt = isSignExtended(N0, DAG);
4377 bool isN1SExt = isSignExtended(N1, DAG);
4378 if (isN0SExt && isN1SExt)
4379 NewOpc = AArch64ISD::SMULL;
4381 bool isN0ZExt = isZeroExtended(N0, DAG);
4382 bool isN1ZExt = isZeroExtended(N1, DAG);
4383 if (isN0ZExt && isN1ZExt)
4384 NewOpc = AArch64ISD::UMULL;
4385 else if (isN1SExt || isN1ZExt) {
4386 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
4387 // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
4388 if (isN1SExt && isAddSubSExt(N0, DAG)) {
4389 NewOpc = AArch64ISD::SMULL;
4391 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
4392 NewOpc = AArch64ISD::UMULL;
4394 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
4396 NewOpc = AArch64ISD::UMULL;
4402 if (VT == MVT::v2i64)
4403 // Fall through to expand this. It is not legal.
4406 // Other vector multiplications are legal.
4411 // Legalize to a S/UMULL instruction
4414 SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
4416 Op0 = skipExtensionForVectorMULL(N0, DAG);
4417 assert(Op0.getValueType().is64BitVector() &&
4418 Op1.getValueType().is64BitVector() &&
4419 "unexpected types for extended operands to VMULL");
4420 return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
4422 // Optimizing (zext A + zext B) * C, to (S/UMULL A, C) + (S/UMULL B, C) during
4423 // isel lowering to take advantage of no-stall back to back s/umul + s/umla.
4424 // This is true for CPUs with accumulate forwarding such as Cortex-A53/A57
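// Sketch (illustrative): (add (zext A), (zext B)) * (zext C) becomes
// (add (umull A, C), (umull B, C)), keeping each multiply in its long form.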
4425 SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
4426 SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
4427 EVT Op1VT = Op1.getValueType();
4428 return DAG.getNode(N0->getOpcode(), DL, VT,
4429 DAG.getNode(NewOpc, DL, VT,
4430 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
4431 DAG.getNode(NewOpc, DL, VT,
4432 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
4435 static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
4437 if (VT == MVT::nxv1i1 && Pattern == AArch64SVEPredPattern::all)
4438 return DAG.getConstant(1, DL, MVT::nxv1i1);
4439 return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
4440 DAG.getTargetConstant(Pattern, DL, MVT::i32));
4443 // Returns a safe bitcast between two scalable vector predicates, where
4444 // any newly created lanes from a widening bitcast are defined as zero.
4445 static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) {
4447 EVT InVT = Op.getValueType();
4449 assert(InVT.getVectorElementType() == MVT::i1 &&
4450 VT.getVectorElementType() == MVT::i1 &&
4451 "Expected a predicate-to-predicate bitcast");
4452 assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
4453 InVT.isScalableVector() &&
4454 DAG.getTargetLoweringInfo().isTypeLegal(InVT) &&
4455 "Only expect to cast between legal scalable predicate types!");
4457 // Return the operand if the cast isn't changing type,
4458 // e.g. <n x 16 x i1> -> <n x 16 x i1>
4462 SDValue Reinterpret = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
4464 // We only have to zero the lanes if new lanes are being defined, e.g. when
4465 // casting from <vscale x 2 x i1> to <vscale x 16 x i1>. If this is not the
4466 // case (e.g. when casting from <vscale x 16 x i1> -> <vscale x 2 x i1>) then
4467 // we can return here.
4468 if (InVT.bitsGT(VT))
4471 // Check if the other lanes are already known to be zeroed by
4473 if (isZeroingInactiveLanes(Op))
4476 // Zero the newly introduced lanes.
4477 SDValue Mask = DAG.getConstant(1, DL, InVT);
4478 Mask = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Mask);
4479 return DAG.getNode(ISD::AND, DL, VT, Reinterpret, Mask);
4482 SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4483 SelectionDAG &DAG) const {
4484 unsigned IntNo = Op.getConstantOperandVal(1);
4488 return SDValue(); // Don't custom lower most intrinsics.
4489 case Intrinsic::aarch64_mops_memset_tag: {
4490 auto Node = cast<MemIntrinsicSDNode>(Op.getNode());
4491 SDValue Chain = Node->getChain();
4492 SDValue Dst = Op.getOperand(2);
4493 SDValue Val = Op.getOperand(3);
4494 Val = DAG.getAnyExtOrTrunc(Val, DL, MVT::i64);
4495 SDValue Size = Op.getOperand(4);
4496 auto Alignment = Node->getMemOperand()->getAlign();
4497 bool IsVol = Node->isVolatile();
4498 auto DstPtrInfo = Node->getPointerInfo();
4501 static_cast<const AArch64SelectionDAGInfo &>(DAG.getSelectionDAGInfo());
4503 SDI.EmitMOPS(AArch64ISD::MOPS_MEMSET_TAGGING, DAG, DL, Chain, Dst, Val,
4504 Size, Alignment, IsVol, DstPtrInfo, MachinePointerInfo{});
4506 // MOPS_MEMSET_TAGGING has 3 results (DstWb, SizeWb, Chain) whereas the
4507 // intrinsic has 2. So hide SizeWb using MERGE_VALUES. Otherwise
4508 // LowerOperationWrapper will complain that the number of results has
4510 return DAG.getMergeValues({MS.getValue(0), MS.getValue(2)}, DL);
4512 case Intrinsic::aarch64_sme_get_pstatesm: {
4513 SDValue Chain = Op.getOperand(0);
4514 SDValue MRS = DAG.getNode(
4515 AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
4516 Chain, DAG.getConstant(AArch64SysReg::SVCR, DL, MVT::i64));
4517 SDValue Mask = DAG.getConstant(/* PSTATE.SM */ 1, DL, MVT::i64);
4518 SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, MRS, Mask);
4519 return DAG.getMergeValues({And, Chain}, DL);
4524 SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4525 SelectionDAG &DAG) const {
4526 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4529 default: return SDValue(); // Don't custom lower most intrinsics.
4530 case Intrinsic::thread_pointer: {
4531 EVT PtrVT = getPointerTy(DAG.getDataLayout());
4532 return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
4534 case Intrinsic::aarch64_neon_abs: {
4535 EVT Ty = Op.getValueType();
4536 if (Ty == MVT::i64) {
4537 SDValue Result = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64,
4539 Result = DAG.getNode(ISD::ABS, dl, MVT::v1i64, Result);
4540 return DAG.getNode(ISD::BITCAST, dl, MVT::i64, Result);
4541 } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
4542 return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
4544 report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
4547 case Intrinsic::aarch64_neon_pmull64: {
4548 SDValue Op1 = Op.getOperand(1);
4549 SDValue Op2 = Op.getOperand(2);
4551 // If both operands are higher half of two source SIMD & FP registers,
4552 // ISel could make use of tablegen patterns to emit PMULL2. So do not
4553 // legalize i64 to v1i64.
4554 if (areOperandsOfHigherHalf(Op1, Op2))
4557 // As a general convention, use "v1" types to represent scalar integer
4558 // operations in vector registers. This helps ISel to make use of
4559 // tablegen patterns and generate a load into SIMD & FP registers directly.
4560 if (Op1.getValueType() == MVT::i64)
4561 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Op1);
4562 if (Op2.getValueType() == MVT::i64)
4563 Op2 = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Op2);
4566 ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
4567 DAG.getConstant(Intrinsic::aarch64_neon_pmull64, dl, MVT::i32), Op1,
4570 case Intrinsic::aarch64_neon_smax:
4571 return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
4572 Op.getOperand(1), Op.getOperand(2));
4573 case Intrinsic::aarch64_neon_umax:
4574 return DAG.getNode(ISD::UMAX, dl, Op.getValueType(),
4575 Op.getOperand(1), Op.getOperand(2));
4576 case Intrinsic::aarch64_neon_smin:
4577 return DAG.getNode(ISD::SMIN, dl, Op.getValueType(),
4578 Op.getOperand(1), Op.getOperand(2));
4579 case Intrinsic::aarch64_neon_umin:
4580 return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
4581 Op.getOperand(1), Op.getOperand(2));
4583 case Intrinsic::aarch64_sve_sunpkhi:
4584 return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
4586 case Intrinsic::aarch64_sve_sunpklo:
4587 return DAG.getNode(AArch64ISD::SUNPKLO, dl, Op.getValueType(),
4589 case Intrinsic::aarch64_sve_uunpkhi:
4590 return DAG.getNode(AArch64ISD::UUNPKHI, dl, Op.getValueType(),
4592 case Intrinsic::aarch64_sve_uunpklo:
4593 return DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(),
4595 case Intrinsic::aarch64_sve_clasta_n:
4596 return DAG.getNode(AArch64ISD::CLASTA_N, dl, Op.getValueType(),
4597 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4598 case Intrinsic::aarch64_sve_clastb_n:
4599 return DAG.getNode(AArch64ISD::CLASTB_N, dl, Op.getValueType(),
4600 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4601 case Intrinsic::aarch64_sve_lasta:
4602 return DAG.getNode(AArch64ISD::LASTA, dl, Op.getValueType(),
4603 Op.getOperand(1), Op.getOperand(2));
4604 case Intrinsic::aarch64_sve_lastb:
4605 return DAG.getNode(AArch64ISD::LASTB, dl, Op.getValueType(),
4606 Op.getOperand(1), Op.getOperand(2));
4607 case Intrinsic::aarch64_sve_rev:
4608 return DAG.getNode(ISD::VECTOR_REVERSE, dl, Op.getValueType(),
4610 case Intrinsic::aarch64_sve_tbl:
4611 return DAG.getNode(AArch64ISD::TBL, dl, Op.getValueType(),
4612 Op.getOperand(1), Op.getOperand(2));
4613 case Intrinsic::aarch64_sve_trn1:
4614 return DAG.getNode(AArch64ISD::TRN1, dl, Op.getValueType(),
4615 Op.getOperand(1), Op.getOperand(2));
4616 case Intrinsic::aarch64_sve_trn2:
4617 return DAG.getNode(AArch64ISD::TRN2, dl, Op.getValueType(),
4618 Op.getOperand(1), Op.getOperand(2));
4619 case Intrinsic::aarch64_sve_uzp1:
4620 return DAG.getNode(AArch64ISD::UZP1, dl, Op.getValueType(),
4621 Op.getOperand(1), Op.getOperand(2));
4622 case Intrinsic::aarch64_sve_uzp2:
4623 return DAG.getNode(AArch64ISD::UZP2, dl, Op.getValueType(),
4624 Op.getOperand(1), Op.getOperand(2));
4625 case Intrinsic::aarch64_sve_zip1:
4626 return DAG.getNode(AArch64ISD::ZIP1, dl, Op.getValueType(),
4627 Op.getOperand(1), Op.getOperand(2));
4628 case Intrinsic::aarch64_sve_zip2:
4629 return DAG.getNode(AArch64ISD::ZIP2, dl, Op.getValueType(),
4630 Op.getOperand(1), Op.getOperand(2));
4631 case Intrinsic::aarch64_sve_splice:
4632 return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(),
4633 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4634 case Intrinsic::aarch64_sve_ptrue:
4635 return getPTrue(DAG, dl, Op.getValueType(),
4636 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
4637 case Intrinsic::aarch64_sve_clz:
4638 return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(),
4639 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4640 case Intrinsic::aarch64_sme_cntsb:
4641 return DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4642 DAG.getConstant(1, dl, MVT::i32));
4643 case Intrinsic::aarch64_sme_cntsh: {
4644 SDValue One = DAG.getConstant(1, dl, MVT::i32);
4645 SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(), One);
4646 return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes, One);
4648 case Intrinsic::aarch64_sme_cntsw: {
4649 SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4650 DAG.getConstant(1, dl, MVT::i32));
4651 return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
4652 DAG.getConstant(2, dl, MVT::i32));
4654 case Intrinsic::aarch64_sme_cntsd: {
4655 SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4656 DAG.getConstant(1, dl, MVT::i32));
4657 return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
4658 DAG.getConstant(3, dl, MVT::i32));
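  // Note: RDSVL #1 returns the streaming vector length in bytes, so the
  // cntsb/cntsh/cntsw/cntsd intrinsics above are that value shifted right by
  // 0, 1, 2 and 3 respectively. For example, with a 512-bit streaming VL this
  // yields 64, 32, 16 and 8.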
4660 case Intrinsic::aarch64_sve_cnt: {
4661 SDValue Data = Op.getOperand(3);
4662 // CTPOP only supports integer operands.
4663 if (Data.getValueType().isFloatingPoint())
4664 Data = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Data);
4665 return DAG.getNode(AArch64ISD::CTPOP_MERGE_PASSTHRU, dl, Op.getValueType(),
4666 Op.getOperand(2), Data, Op.getOperand(1));
4668 case Intrinsic::aarch64_sve_dupq_lane:
4669 return LowerDUPQLane(Op, DAG);
4670 case Intrinsic::aarch64_sve_convert_from_svbool:
4671 return getSVEPredicateBitCast(Op.getValueType(), Op.getOperand(1), DAG);
4672 case Intrinsic::aarch64_sve_convert_to_svbool:
4673 return getSVEPredicateBitCast(MVT::nxv16i1, Op.getOperand(1), DAG);
4674 case Intrinsic::aarch64_sve_fneg:
4675 return DAG.getNode(AArch64ISD::FNEG_MERGE_PASSTHRU, dl, Op.getValueType(),
4676 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4677 case Intrinsic::aarch64_sve_frintp:
4678 return DAG.getNode(AArch64ISD::FCEIL_MERGE_PASSTHRU, dl, Op.getValueType(),
4679 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4680 case Intrinsic::aarch64_sve_frintm:
4681 return DAG.getNode(AArch64ISD::FFLOOR_MERGE_PASSTHRU, dl, Op.getValueType(),
4682 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4683 case Intrinsic::aarch64_sve_frinti:
4684 return DAG.getNode(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU, dl, Op.getValueType(),
4685 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4686 case Intrinsic::aarch64_sve_frintx:
4687 return DAG.getNode(AArch64ISD::FRINT_MERGE_PASSTHRU, dl, Op.getValueType(),
4688 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4689 case Intrinsic::aarch64_sve_frinta:
4690 return DAG.getNode(AArch64ISD::FROUND_MERGE_PASSTHRU, dl, Op.getValueType(),
4691 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4692 case Intrinsic::aarch64_sve_frintn:
4693 return DAG.getNode(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU, dl, Op.getValueType(),
4694 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4695 case Intrinsic::aarch64_sve_frintz:
4696 return DAG.getNode(AArch64ISD::FTRUNC_MERGE_PASSTHRU, dl, Op.getValueType(),
4697 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4698 case Intrinsic::aarch64_sve_ucvtf:
4699 return DAG.getNode(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU, dl,
4700 Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4702 case Intrinsic::aarch64_sve_scvtf:
4703 return DAG.getNode(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU, dl,
4704 Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4706 case Intrinsic::aarch64_sve_fcvtzu:
4707 return DAG.getNode(AArch64ISD::FCVTZU_MERGE_PASSTHRU, dl,
4708 Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4710 case Intrinsic::aarch64_sve_fcvtzs:
4711 return DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, dl,
4712 Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4714 case Intrinsic::aarch64_sve_fsqrt:
4715 return DAG.getNode(AArch64ISD::FSQRT_MERGE_PASSTHRU, dl, Op.getValueType(),
4716 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4717 case Intrinsic::aarch64_sve_frecpx:
4718 return DAG.getNode(AArch64ISD::FRECPX_MERGE_PASSTHRU, dl, Op.getValueType(),
4719 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4720 case Intrinsic::aarch64_sve_frecpe_x:
4721 return DAG.getNode(AArch64ISD::FRECPE, dl, Op.getValueType(),
4723 case Intrinsic::aarch64_sve_frecps_x:
4724 return DAG.getNode(AArch64ISD::FRECPS, dl, Op.getValueType(),
4725 Op.getOperand(1), Op.getOperand(2));
4726 case Intrinsic::aarch64_sve_frsqrte_x:
4727 return DAG.getNode(AArch64ISD::FRSQRTE, dl, Op.getValueType(),
4729 case Intrinsic::aarch64_sve_frsqrts_x:
4730 return DAG.getNode(AArch64ISD::FRSQRTS, dl, Op.getValueType(),
4731 Op.getOperand(1), Op.getOperand(2));
4732 case Intrinsic::aarch64_sve_fabs:
4733 return DAG.getNode(AArch64ISD::FABS_MERGE_PASSTHRU, dl, Op.getValueType(),
4734 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4735 case Intrinsic::aarch64_sve_abs:
4736 return DAG.getNode(AArch64ISD::ABS_MERGE_PASSTHRU, dl, Op.getValueType(),
4737 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4738 case Intrinsic::aarch64_sve_neg:
4739 return DAG.getNode(AArch64ISD::NEG_MERGE_PASSTHRU, dl, Op.getValueType(),
4740 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4741 case Intrinsic::aarch64_sve_insr: {
4742 SDValue Scalar = Op.getOperand(2);
4743 EVT ScalarTy = Scalar.getValueType();
4744 if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
4745 Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
4747 return DAG.getNode(AArch64ISD::INSR, dl, Op.getValueType(),
4748 Op.getOperand(1), Scalar);
4750 case Intrinsic::aarch64_sve_rbit:
4751 return DAG.getNode(AArch64ISD::BITREVERSE_MERGE_PASSTHRU, dl,
4752 Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4754 case Intrinsic::aarch64_sve_revb:
4755 return DAG.getNode(AArch64ISD::BSWAP_MERGE_PASSTHRU, dl, Op.getValueType(),
4756 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4757 case Intrinsic::aarch64_sve_revh:
4758 return DAG.getNode(AArch64ISD::REVH_MERGE_PASSTHRU, dl, Op.getValueType(),
4759 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4760 case Intrinsic::aarch64_sve_revw:
4761 return DAG.getNode(AArch64ISD::REVW_MERGE_PASSTHRU, dl, Op.getValueType(),
4762 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4763 case Intrinsic::aarch64_sve_revd:
4764 return DAG.getNode(AArch64ISD::REVD_MERGE_PASSTHRU, dl, Op.getValueType(),
4765 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4766 case Intrinsic::aarch64_sve_sxtb:
4768 AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4769 Op.getOperand(2), Op.getOperand(3),
4770 DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
4772 case Intrinsic::aarch64_sve_sxth:
4774 AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4775 Op.getOperand(2), Op.getOperand(3),
4776 DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
4778 case Intrinsic::aarch64_sve_sxtw:
4780 AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4781 Op.getOperand(2), Op.getOperand(3),
4782 DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
4784 case Intrinsic::aarch64_sve_uxtb:
4786 AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4787 Op.getOperand(2), Op.getOperand(3),
4788 DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
4790 case Intrinsic::aarch64_sve_uxth:
4792 AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4793 Op.getOperand(2), Op.getOperand(3),
4794 DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
4796 case Intrinsic::aarch64_sve_uxtw:
4798 AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4799 Op.getOperand(2), Op.getOperand(3),
4800 DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
4802 case Intrinsic::localaddress: {
4803 const auto &MF = DAG.getMachineFunction();
4804 const auto *RegInfo = Subtarget->getRegisterInfo();
4805 unsigned Reg = RegInfo->getLocalAddressRegister(MF);
4806 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
4807 Op.getSimpleValueType());
4810 case Intrinsic::eh_recoverfp: {
4811     // FIXME: This needs to be implemented to correctly handle highly aligned
4812     // stack objects. For now we simply return the incoming FP. Refer to D53541
4813     // for more details.
4814 SDValue FnOp = Op.getOperand(1);
4815 SDValue IncomingFPOp = Op.getOperand(2);
4816 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
4817 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
4820 "llvm.eh.recoverfp must take a function as the first argument");
4821 return IncomingFPOp;
4824 case Intrinsic::aarch64_neon_vsri:
4825 case Intrinsic::aarch64_neon_vsli: {
4826 EVT Ty = Op.getValueType();
4829 report_fatal_error("Unexpected type for aarch64_neon_vsli");
4831 assert(Op.getConstantOperandVal(3) <= Ty.getScalarSizeInBits());
4833 bool IsShiftRight = IntNo == Intrinsic::aarch64_neon_vsri;
4834 unsigned Opcode = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
4835 return DAG.getNode(Opcode, dl, Ty, Op.getOperand(1), Op.getOperand(2),
4839 case Intrinsic::aarch64_neon_srhadd:
4840 case Intrinsic::aarch64_neon_urhadd:
4841 case Intrinsic::aarch64_neon_shadd:
4842 case Intrinsic::aarch64_neon_uhadd: {
4843 bool IsSignedAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
4844 IntNo == Intrinsic::aarch64_neon_shadd);
4845 bool IsRoundingAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
4846 IntNo == Intrinsic::aarch64_neon_urhadd);
4847 unsigned Opcode = IsSignedAdd
4848 ? (IsRoundingAdd ? ISD::AVGCEILS : ISD::AVGFLOORS)
4849 : (IsRoundingAdd ? ISD::AVGCEILU : ISD::AVGFLOORU);
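    // The AVG nodes implement the halving-add semantics of these intrinsics
    // without losing the carry of the intermediate sum: AVGFLOOR* computes
    // (a + b) >> 1 and AVGCEIL* computes (a + b + 1) >> 1, in signed or
    // unsigned arithmetic as appropriate.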
4850 return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4853 case Intrinsic::aarch64_neon_sabd:
4854 case Intrinsic::aarch64_neon_uabd: {
4855 unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uabd ? ISD::ABDU
4857 return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4860 case Intrinsic::aarch64_neon_saddlp:
4861 case Intrinsic::aarch64_neon_uaddlp: {
4862 unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uaddlp
4863 ? AArch64ISD::UADDLP
4864 : AArch64ISD::SADDLP;
4865 return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1));
4867 case Intrinsic::aarch64_neon_sdot:
4868 case Intrinsic::aarch64_neon_udot:
4869 case Intrinsic::aarch64_sve_sdot:
4870 case Intrinsic::aarch64_sve_udot: {
4871 unsigned Opcode = (IntNo == Intrinsic::aarch64_neon_udot ||
4872 IntNo == Intrinsic::aarch64_sve_udot)
4875 return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4876 Op.getOperand(2), Op.getOperand(3));
4878 case Intrinsic::get_active_lane_mask: {
4880 DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, dl, MVT::i64);
4881 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), ID,
4882 Op.getOperand(1), Op.getOperand(2));
4887 bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const {
4888 if (VT.getVectorElementType() == MVT::i8 ||
4889 VT.getVectorElementType() == MVT::i16) {
4896 bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
4898 // SVE only supports implicit extension of 32-bit indices.
4899 if (!Subtarget->hasSVE() || IndexVT.getVectorElementType() != MVT::i32)
4902 // Indices cannot be smaller than the main data type.
4903 if (IndexVT.getScalarSizeInBits() < DataVT.getScalarSizeInBits())
4906 // Scalable vectors with "vscale * 2" or fewer elements sit within a 64-bit
4907 // element container type, which would violate the previous clause.
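  // For example (informally): nxv2i64 data keeps its index extend because nxv2
  // indices already live in 64-bit element containers, whereas nxv4i32 data
  // with an nxv4i32 index can drop the extend.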
4908 return DataVT.isFixedLengthVector() || DataVT.getVectorMinNumElements() > 2;
4911 bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
4912 return ExtVal.getValueType().isScalableVector() ||
4913 useSVEForFixedLengthVectorVT(
4914 ExtVal.getValueType(),
4915 /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors());
4918 unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {
4919 std::map<std::tuple<bool, bool, bool>, unsigned> AddrModes = {
4920 {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false),
4921 AArch64ISD::GLD1_MERGE_ZERO},
4922 {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true),
4923 AArch64ISD::GLD1_UXTW_MERGE_ZERO},
4924 {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false),
4925 AArch64ISD::GLD1_MERGE_ZERO},
4926 {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true),
4927 AArch64ISD::GLD1_SXTW_MERGE_ZERO},
4928 {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false),
4929 AArch64ISD::GLD1_SCALED_MERGE_ZERO},
4930 {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true),
4931 AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO},
4932 {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false),
4933 AArch64ISD::GLD1_SCALED_MERGE_ZERO},
4934 {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true),
4935 AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO},
4937 auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend);
4938 return AddrModes.find(Key)->second;
4941 unsigned getSignExtendedGatherOpcode(unsigned Opcode) {
4944 llvm_unreachable("unimplemented opcode");
4946 case AArch64ISD::GLD1_MERGE_ZERO:
4947 return AArch64ISD::GLD1S_MERGE_ZERO;
4948 case AArch64ISD::GLD1_IMM_MERGE_ZERO:
4949 return AArch64ISD::GLD1S_IMM_MERGE_ZERO;
4950 case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
4951 return AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
4952 case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
4953 return AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
4954 case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
4955 return AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
4956 case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
4957 return AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
4958 case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
4959 return AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
4963 SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
4964 SelectionDAG &DAG) const {
4965 MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op);
4968 SDValue Chain = MGT->getChain();
4969 SDValue PassThru = MGT->getPassThru();
4970 SDValue Mask = MGT->getMask();
4971 SDValue BasePtr = MGT->getBasePtr();
4972 SDValue Index = MGT->getIndex();
4973 SDValue Scale = MGT->getScale();
4974 EVT VT = Op.getValueType();
4975 EVT MemVT = MGT->getMemoryVT();
4976 ISD::LoadExtType ExtType = MGT->getExtensionType();
4977 ISD::MemIndexType IndexType = MGT->getIndexType();
4979   // SVE supports zero (and so undef) passthrough values only; everything else
4980   // must be handled manually by an explicit select on the load's output.
4981 if (!PassThru->isUndef() && !isZerosVector(PassThru.getNode())) {
4982 SDValue Ops[] = {Chain, DAG.getUNDEF(VT), Mask, BasePtr, Index, Scale};
4984 DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
4985 MGT->getMemOperand(), IndexType, ExtType);
4986 SDValue Select = DAG.getSelect(DL, VT, Mask, Load, PassThru);
4987 return DAG.getMergeValues({Select, Load.getValue(1)}, DL);
4990 bool IsScaled = MGT->isIndexScaled();
4991 bool IsSigned = MGT->isIndexSigned();
4993   // SVE supports an index scaled by sizeof(MemVT.elt) only; everything else
4994   // must be calculated beforehand.
4995 uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
4996 if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
4997 assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
4998 EVT IndexVT = Index.getValueType();
4999 Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
5000 DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
5001 Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
5003 SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
5004 return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
5005 MGT->getMemOperand(), IndexType, ExtType);
5008 // Lower fixed length gather to a scalable equivalent.
5009 if (VT.isFixedLengthVector()) {
5010 assert(Subtarget->useSVEForFixedLengthVectors() &&
5011 "Cannot lower when not using SVE for fixed vectors!");
5013     // NOTE: Handle floating-point as if it were integer, then bitcast the result.
5014 EVT DataVT = VT.changeVectorElementTypeToInteger();
5015 MemVT = MemVT.changeVectorElementTypeToInteger();
5017 // Find the smallest integer fixed length vector we can use for the gather.
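    // For example (a sketch of the selection below): v4i16 data with a v4i32
    // index promotes everything to v4i32, but if the data, index or mask
    // already uses 64-bit elements then v4i64 is used instead.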
5018 EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
5019 if (DataVT.getVectorElementType() == MVT::i64 ||
5020 Index.getValueType().getVectorElementType() == MVT::i64 ||
5021 Mask.getValueType().getVectorElementType() == MVT::i64)
5022 PromotedVT = VT.changeVectorElementType(MVT::i64);
5024 // Promote vector operands except for passthrough, which we know is either
5025 // undef or zero, and thus best constructed directly.
5026 unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5027 Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
5028 Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
5030 // A promoted result type forces the need for an extending load.
5031 if (PromotedVT != DataVT && ExtType == ISD::NON_EXTLOAD)
5032 ExtType = ISD::EXTLOAD;
5034 EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
5036 // Convert fixed length vector operands to scalable.
5037 MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
5038 Index = convertToScalableVector(DAG, ContainerVT, Index);
5039 Mask = convertFixedMaskToScalableVector(Mask, DAG);
5040 PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT)
5041 : DAG.getConstant(0, DL, ContainerVT);
5043 // Emit equivalent scalable vector gather.
5044 SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
5046 DAG.getMaskedGather(DAG.getVTList(ContainerVT, MVT::Other), MemVT, DL,
5047 Ops, MGT->getMemOperand(), IndexType, ExtType);
5049 // Extract fixed length data then convert to the required result type.
5050 SDValue Result = convertFromScalableVector(DAG, PromotedVT, Load);
5051 Result = DAG.getNode(ISD::TRUNCATE, DL, DataVT, Result);
5052 if (VT.isFloatingPoint())
5053 Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
5055 return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
5058 // Everything else is legal.
5062 SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
5063 SelectionDAG &DAG) const {
5064 MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(Op);
5067 SDValue Chain = MSC->getChain();
5068 SDValue StoreVal = MSC->getValue();
5069 SDValue Mask = MSC->getMask();
5070 SDValue BasePtr = MSC->getBasePtr();
5071 SDValue Index = MSC->getIndex();
5072 SDValue Scale = MSC->getScale();
5073 EVT VT = StoreVal.getValueType();
5074 EVT MemVT = MSC->getMemoryVT();
5075 ISD::MemIndexType IndexType = MSC->getIndexType();
5076 bool Truncating = MSC->isTruncatingStore();
5078 bool IsScaled = MSC->isIndexScaled();
5079 bool IsSigned = MSC->isIndexSigned();
5081   // SVE supports an index scaled by sizeof(MemVT.elt) only; everything else
5082   // must be calculated beforehand.
5083 uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
5084 if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
5085 assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
5086 EVT IndexVT = Index.getValueType();
5087 Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
5088 DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
5089 Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
5091 SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
5092 return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
5093 MSC->getMemOperand(), IndexType, Truncating);
5096 // Lower fixed length scatter to a scalable equivalent.
5097 if (VT.isFixedLengthVector()) {
5098 assert(Subtarget->useSVEForFixedLengthVectors() &&
5099 "Cannot lower when not using SVE for fixed vectors!");
5101     // Once bitcast, we treat floating-point scatters as if they were integer.
5102 if (VT.isFloatingPoint()) {
5103 VT = VT.changeVectorElementTypeToInteger();
5104 MemVT = MemVT.changeVectorElementTypeToInteger();
5105 StoreVal = DAG.getNode(ISD::BITCAST, DL, VT, StoreVal);
5108 // Find the smallest integer fixed length vector we can use for the scatter.
5109 EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
5110 if (VT.getVectorElementType() == MVT::i64 ||
5111 Index.getValueType().getVectorElementType() == MVT::i64 ||
5112 Mask.getValueType().getVectorElementType() == MVT::i64)
5113 PromotedVT = VT.changeVectorElementType(MVT::i64);
5115 // Promote vector operands.
5116 unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5117 Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
5118 Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
5119 StoreVal = DAG.getNode(ISD::ANY_EXTEND, DL, PromotedVT, StoreVal);
5121 // A promoted value type forces the need for a truncating store.
5122 if (PromotedVT != VT)
5125 EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
5127 // Convert fixed length vector operands to scalable.
5128 MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
5129 Index = convertToScalableVector(DAG, ContainerVT, Index);
5130 Mask = convertFixedMaskToScalableVector(Mask, DAG);
5131 StoreVal = convertToScalableVector(DAG, ContainerVT, StoreVal);
5133 // Emit equivalent scalable vector scatter.
5134 SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
5135 return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
5136 MSC->getMemOperand(), IndexType, Truncating);
5139 // Everything else is legal.
5143 SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
5145 MaskedLoadSDNode *LoadNode = cast<MaskedLoadSDNode>(Op);
5146 assert(LoadNode && "Expected custom lowering of a masked load node");
5147 EVT VT = Op->getValueType(0);
5149 if (useSVEForFixedLengthVectorVT(
5151 /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
5152 return LowerFixedLengthVectorMLoadToSVE(Op, DAG);
5154 SDValue PassThru = LoadNode->getPassThru();
5155 SDValue Mask = LoadNode->getMask();
5157 if (PassThru->isUndef() || isZerosVector(PassThru.getNode()))
5160 SDValue Load = DAG.getMaskedLoad(
5161 VT, DL, LoadNode->getChain(), LoadNode->getBasePtr(),
5162 LoadNode->getOffset(), Mask, DAG.getUNDEF(VT), LoadNode->getMemoryVT(),
5163 LoadNode->getMemOperand(), LoadNode->getAddressingMode(),
5164 LoadNode->getExtensionType());
5166 SDValue Result = DAG.getSelect(DL, VT, Mask, Load, PassThru);
5168 return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
5171 // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16.
5172 static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
5174 SelectionDAG &DAG) {
5175 assert(VT.isVector() && "VT should be a vector type");
5176 assert(MemVT == MVT::v4i8 && VT == MVT::v4i16);
5178 SDValue Value = ST->getValue();
5180   // It first extends the promoted v4i16 to v8i16, truncates to v8i8, and extracts
5181   // the word lane which represents the v4i8 subvector. This optimizes the store to:
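  //
  //   xtn  v0.8b, v0.8h
  //   str  s0, [x0]
  //
  // (an illustrative sequence; exact registers depend on final instruction
  // selection and register allocation)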
5187 SDValue Undef = DAG.getUNDEF(MVT::i16);
5188 SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL,
5189 {Undef, Undef, Undef, Undef});
5191 SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16,
5193 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt);
5195 Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc);
5196 SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
5197 Trunc, DAG.getConstant(0, DL, MVT::i64));
5199 return DAG.getStore(ST->getChain(), DL, ExtractTrunc,
5200 ST->getBasePtr(), ST->getMemOperand());
5203 // Custom lowering for any store, vector or scalar, truncating or not.
5204 // Currently we only custom lower truncating stores from v4i16 to v4i8 and
5205 // volatile stores of i128.
5206 SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
5207 SelectionDAG &DAG) const {
5209 StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
5210   assert(StoreNode && "Can only custom lower store nodes");
5212 SDValue Value = StoreNode->getValue();
5214 EVT VT = Value.getValueType();
5215 EVT MemVT = StoreNode->getMemoryVT();
5217 if (VT.isVector()) {
5218 if (useSVEForFixedLengthVectorVT(
5220 /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
5221 return LowerFixedLengthVectorStoreToSVE(Op, DAG);
5223 unsigned AS = StoreNode->getAddressSpace();
5224 Align Alignment = StoreNode->getAlign();
5225 if (Alignment < MemVT.getStoreSize() &&
5226 !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
5227 StoreNode->getMemOperand()->getFlags(),
5229 return scalarizeVectorStore(StoreNode, DAG);
5232 if (StoreNode->isTruncatingStore() && VT == MVT::v4i16 &&
5233 MemVT == MVT::v4i8) {
5234 return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG);
5236     // 256-bit non-temporal stores can be lowered to STNP. Do this as part of
5237     // the custom lowering, as there are no unpaired non-temporal stores and
5238     // legalization will break up 256-bit inputs.
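    // For example, a v8i32 non-temporal store is split into two v4i32 halves
    // and emitted as a single "stnp q0, q1, [x0]" (illustrative registers).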
5239 ElementCount EC = MemVT.getVectorElementCount();
5240 if (StoreNode->isNonTemporal() && MemVT.getSizeInBits() == 256u &&
5242 ((MemVT.getScalarSizeInBits() == 8u ||
5243 MemVT.getScalarSizeInBits() == 16u ||
5244 MemVT.getScalarSizeInBits() == 32u ||
5245 MemVT.getScalarSizeInBits() == 64u))) {
5247 DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
5248 MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
5249 StoreNode->getValue(), DAG.getConstant(0, Dl, MVT::i64));
5251 DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
5252 MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
5253 StoreNode->getValue(),
5254 DAG.getConstant(EC.getKnownMinValue() / 2, Dl, MVT::i64));
5255 SDValue Result = DAG.getMemIntrinsicNode(
5256 AArch64ISD::STNP, Dl, DAG.getVTList(MVT::Other),
5257 {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
5258 StoreNode->getMemoryVT(), StoreNode->getMemOperand());
5261 } else if (MemVT == MVT::i128 && StoreNode->isVolatile()) {
5262 return LowerStore128(Op, DAG);
5263 } else if (MemVT == MVT::i64x8) {
5264 SDValue Value = StoreNode->getValue();
5265 assert(Value->getValueType(0) == MVT::i64x8);
5266 SDValue Chain = StoreNode->getChain();
5267 SDValue Base = StoreNode->getBasePtr();
5268 EVT PtrVT = Base.getValueType();
5269 for (unsigned i = 0; i < 8; i++) {
5270 SDValue Part = DAG.getNode(AArch64ISD::LS64_EXTRACT, Dl, MVT::i64,
5271 Value, DAG.getConstant(i, Dl, MVT::i32));
5272 SDValue Ptr = DAG.getNode(ISD::ADD, Dl, PtrVT, Base,
5273 DAG.getConstant(i * 8, Dl, PtrVT));
5274 Chain = DAG.getStore(Chain, Dl, Part, Ptr, StoreNode->getPointerInfo(),
5275 StoreNode->getOriginalAlign());
5283 /// Lower atomic or volatile 128-bit stores to a single STP instruction.
5284 SDValue AArch64TargetLowering::LowerStore128(SDValue Op,
5285 SelectionDAG &DAG) const {
5286 MemSDNode *StoreNode = cast<MemSDNode>(Op);
5287 assert(StoreNode->getMemoryVT() == MVT::i128);
5288 assert(StoreNode->isVolatile() || StoreNode->isAtomic());
5289 assert(!StoreNode->isAtomic() ||
5290 StoreNode->getMergedOrdering() == AtomicOrdering::Unordered ||
5291 StoreNode->getMergedOrdering() == AtomicOrdering::Monotonic);
5293 SDValue Value = StoreNode->getOpcode() == ISD::STORE
5294 ? StoreNode->getOperand(1)
5295 : StoreNode->getOperand(2);
5297 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
5298 DAG.getConstant(0, DL, MVT::i64));
5299 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
5300 DAG.getConstant(1, DL, MVT::i64));
5301 SDValue Result = DAG.getMemIntrinsicNode(
5302 AArch64ISD::STP, DL, DAG.getVTList(MVT::Other),
5303 {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
5304 StoreNode->getMemoryVT(), StoreNode->getMemOperand());
5308 SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
5309 SelectionDAG &DAG) const {
5311 LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
5312 assert(LoadNode && "Expected custom lowering of a load node");
5314 if (LoadNode->getMemoryVT() == MVT::i64x8) {
5315 SmallVector<SDValue, 8> Ops;
5316 SDValue Base = LoadNode->getBasePtr();
5317 SDValue Chain = LoadNode->getChain();
5318 EVT PtrVT = Base.getValueType();
5319 for (unsigned i = 0; i < 8; i++) {
5320 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
5321 DAG.getConstant(i * 8, DL, PtrVT));
5322 SDValue Part = DAG.getLoad(MVT::i64, DL, Chain, Ptr,
5323 LoadNode->getPointerInfo(),
5324 LoadNode->getOriginalAlign());
5325 Ops.push_back(Part);
5326 Chain = SDValue(Part.getNode(), 1);
5328 SDValue Loaded = DAG.getNode(AArch64ISD::LS64_BUILD, DL, MVT::i64x8, Ops);
5329 return DAG.getMergeValues({Loaded, Chain}, DL);
5332 // Custom lowering for extending v4i8 vector loads.
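  // The 4 bytes are loaded as a single 32-bit scalar FP load and then widened
  // in vector registers, so the expected code is roughly:
  //   ldr   s0, [x0]
  //   ushll v0.8h, v0.8b, #0    // sshll for sign-extending loads
  //   ushll v0.4s, v0.4h, #0    // only when extending to v4i32
  // (an illustrative sketch; the DAG below expresses this with bitcasts and
  // vector extends)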
5333 EVT VT = Op->getValueType(0);
5334 assert((VT == MVT::v4i16 || VT == MVT::v4i32) && "Expected v4i16 or v4i32");
5336 if (LoadNode->getMemoryVT() != MVT::v4i8)
5340 if (LoadNode->getExtensionType() == ISD::SEXTLOAD)
5341 ExtType = ISD::SIGN_EXTEND;
5342 else if (LoadNode->getExtensionType() == ISD::ZEXTLOAD ||
5343 LoadNode->getExtensionType() == ISD::EXTLOAD)
5344 ExtType = ISD::ZERO_EXTEND;
5348 SDValue Load = DAG.getLoad(MVT::f32, DL, LoadNode->getChain(),
5349 LoadNode->getBasePtr(), MachinePointerInfo());
5350 SDValue Chain = Load.getValue(1);
5351 SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f32, Load);
5352 SDValue BC = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Vec);
5353 SDValue Ext = DAG.getNode(ExtType, DL, MVT::v8i16, BC);
5354 Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Ext,
5355 DAG.getConstant(0, DL, MVT::i64));
5356 if (VT == MVT::v4i32)
5357 Ext = DAG.getNode(ExtType, DL, MVT::v4i32, Ext);
5358 return DAG.getMergeValues({Ext, Chain}, DL);
5361 // Generate SUBS and CSEL for integer abs.
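// Conceptually: Neg = 0 - X, then SUBS compares X against 0 and CSEL selects
// X when the result is positive or zero (PL) and Neg otherwise, i.e.
// abs(X) = X >= 0 ? X : -X.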
5362 SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
5363 MVT VT = Op.getSimpleValueType();
5366 return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABS_MERGE_PASSTHRU);
5369 SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
5371 // Generate SUBS & CSEL.
5373 DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32),
5374 Op.getOperand(0), DAG.getConstant(0, DL, VT));
5375 return DAG.getNode(AArch64ISD::CSEL, DL, VT, Op.getOperand(0), Neg,
5376 DAG.getConstant(AArch64CC::PL, DL, MVT::i32),
5380 static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
5381 SDValue Chain = Op.getOperand(0);
5382 SDValue Cond = Op.getOperand(1);
5383 SDValue Dest = Op.getOperand(2);
5385 AArch64CC::CondCode CC;
5386 if (SDValue Cmp = emitConjunction(DAG, Cond, CC)) {
5388 SDValue CCVal = DAG.getConstant(CC, dl, MVT::i32);
5389 return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
5396 SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
5397 SelectionDAG &DAG) const {
5398 LLVM_DEBUG(dbgs() << "Custom lowering: ");
5399 LLVM_DEBUG(Op.dump());
5401 switch (Op.getOpcode()) {
5403 llvm_unreachable("unimplemented operand");
5406 return LowerBITCAST(Op, DAG);
5407 case ISD::GlobalAddress:
5408 return LowerGlobalAddress(Op, DAG);
5409 case ISD::GlobalTLSAddress:
5410 return LowerGlobalTLSAddress(Op, DAG);
5412 case ISD::STRICT_FSETCC:
5413 case ISD::STRICT_FSETCCS:
5414 return LowerSETCC(Op, DAG);
5416 return LowerBRCOND(Op, DAG);
5418 return LowerBR_CC(Op, DAG);
5420 return LowerSELECT(Op, DAG);
5421 case ISD::SELECT_CC:
5422 return LowerSELECT_CC(Op, DAG);
5423 case ISD::JumpTable:
5424 return LowerJumpTable(Op, DAG);
5426 return LowerBR_JT(Op, DAG);
5427 case ISD::ConstantPool:
5428 return LowerConstantPool(Op, DAG);
5429 case ISD::BlockAddress:
5430 return LowerBlockAddress(Op, DAG);
5432 return LowerVASTART(Op, DAG);
5434 return LowerVACOPY(Op, DAG);
5436 return LowerVAARG(Op, DAG);
5438 return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, false /*unsigned*/);
5440 return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, false /*unsigned*/);
5441 case ISD::SADDO_CARRY:
5442 return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, true /*signed*/);
5443 case ISD::SSUBO_CARRY:
5444 return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, true /*signed*/);
5451 return LowerXALUO(Op, DAG);
5453 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FADD_PRED);
5455 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSUB_PRED);
5457 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED);
5459 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED);
5461 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FDIV_PRED);
5463 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU);
5465 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FCEIL_MERGE_PASSTHRU);
5467 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FFLOOR_MERGE_PASSTHRU);
5468 case ISD::FNEARBYINT:
5469 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEARBYINT_MERGE_PASSTHRU);
5471 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FRINT_MERGE_PASSTHRU);
5473 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUND_MERGE_PASSTHRU);
5474 case ISD::FROUNDEVEN:
5475 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU);
5477 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FTRUNC_MERGE_PASSTHRU);
5479 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSQRT_MERGE_PASSTHRU);
5481 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FABS_MERGE_PASSTHRU);
5483 case ISD::STRICT_FP_ROUND:
5484 return LowerFP_ROUND(Op, DAG);
5485 case ISD::FP_EXTEND:
5486 return LowerFP_EXTEND(Op, DAG);
5487 case ISD::FRAMEADDR:
5488 return LowerFRAMEADDR(Op, DAG);
5489 case ISD::SPONENTRY:
5490 return LowerSPONENTRY(Op, DAG);
5491 case ISD::RETURNADDR:
5492 return LowerRETURNADDR(Op, DAG);
5493 case ISD::ADDROFRETURNADDR:
5494 return LowerADDROFRETURNADDR(Op, DAG);
5495 case ISD::CONCAT_VECTORS:
5496 return LowerCONCAT_VECTORS(Op, DAG);
5497 case ISD::INSERT_VECTOR_ELT:
5498 return LowerINSERT_VECTOR_ELT(Op, DAG);
5499 case ISD::EXTRACT_VECTOR_ELT:
5500 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
5501 case ISD::BUILD_VECTOR:
5502 return LowerBUILD_VECTOR(Op, DAG);
5503 case ISD::VECTOR_SHUFFLE:
5504 return LowerVECTOR_SHUFFLE(Op, DAG);
5505 case ISD::SPLAT_VECTOR:
5506 return LowerSPLAT_VECTOR(Op, DAG);
5507 case ISD::EXTRACT_SUBVECTOR:
5508 return LowerEXTRACT_SUBVECTOR(Op, DAG);
5509 case ISD::INSERT_SUBVECTOR:
5510 return LowerINSERT_SUBVECTOR(Op, DAG);
5513 return LowerDIV(Op, DAG);
5518 return LowerMinMax(Op, DAG);
5522 return LowerVectorSRA_SRL_SHL(Op, DAG);
5523 case ISD::SHL_PARTS:
5524 case ISD::SRL_PARTS:
5525 case ISD::SRA_PARTS:
5526 return LowerShiftParts(Op, DAG);
5529 return LowerCTPOP_PARITY(Op, DAG);
5530 case ISD::FCOPYSIGN:
5531 return LowerFCOPYSIGN(Op, DAG);
5533 return LowerVectorOR(Op, DAG);
5535 return LowerXOR(Op, DAG);
5537 return LowerPREFETCH(Op, DAG);
5538 case ISD::SINT_TO_FP:
5539 case ISD::UINT_TO_FP:
5540 case ISD::STRICT_SINT_TO_FP:
5541 case ISD::STRICT_UINT_TO_FP:
5542 return LowerINT_TO_FP(Op, DAG);
5543 case ISD::FP_TO_SINT:
5544 case ISD::FP_TO_UINT:
5545 case ISD::STRICT_FP_TO_SINT:
5546 case ISD::STRICT_FP_TO_UINT:
5547 return LowerFP_TO_INT(Op, DAG);
5548 case ISD::FP_TO_SINT_SAT:
5549 case ISD::FP_TO_UINT_SAT:
5550 return LowerFP_TO_INT_SAT(Op, DAG);
5552 return LowerFSINCOS(Op, DAG);
5553 case ISD::FLT_ROUNDS_:
5554 return LowerFLT_ROUNDS_(Op, DAG);
5555 case ISD::SET_ROUNDING:
5556 return LowerSET_ROUNDING(Op, DAG);
5558 return LowerMUL(Op, DAG);
5560 return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHS_PRED);
5562 return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHU_PRED);
5563 case ISD::INTRINSIC_W_CHAIN:
5564 return LowerINTRINSIC_W_CHAIN(Op, DAG);
5565 case ISD::INTRINSIC_WO_CHAIN:
5566 return LowerINTRINSIC_WO_CHAIN(Op, DAG);
5567 case ISD::ATOMIC_STORE:
5568 if (cast<MemSDNode>(Op)->getMemoryVT() == MVT::i128) {
5569 assert(Subtarget->hasLSE2());
5570 return LowerStore128(Op, DAG);
5574 return LowerSTORE(Op, DAG);
5576 return LowerFixedLengthVectorMStoreToSVE(Op, DAG);
5578 return LowerMGATHER(Op, DAG);
5580 return LowerMSCATTER(Op, DAG);
5581 case ISD::VECREDUCE_SEQ_FADD:
5582 return LowerVECREDUCE_SEQ_FADD(Op, DAG);
5583 case ISD::VECREDUCE_ADD:
5584 case ISD::VECREDUCE_AND:
5585 case ISD::VECREDUCE_OR:
5586 case ISD::VECREDUCE_XOR:
5587 case ISD::VECREDUCE_SMAX:
5588 case ISD::VECREDUCE_SMIN:
5589 case ISD::VECREDUCE_UMAX:
5590 case ISD::VECREDUCE_UMIN:
5591 case ISD::VECREDUCE_FADD:
5592 case ISD::VECREDUCE_FMAX:
5593 case ISD::VECREDUCE_FMIN:
5594 return LowerVECREDUCE(Op, DAG);
5595 case ISD::ATOMIC_LOAD_SUB:
5596 return LowerATOMIC_LOAD_SUB(Op, DAG);
5597 case ISD::ATOMIC_LOAD_AND:
5598 return LowerATOMIC_LOAD_AND(Op, DAG);
5599 case ISD::DYNAMIC_STACKALLOC:
5600 return LowerDYNAMIC_STACKALLOC(Op, DAG);
5602 return LowerVSCALE(Op, DAG);
5603 case ISD::ANY_EXTEND:
5604 case ISD::SIGN_EXTEND:
5605 case ISD::ZERO_EXTEND:
5606 return LowerFixedLengthVectorIntExtendToSVE(Op, DAG);
5607 case ISD::SIGN_EXTEND_INREG: {
5608 // Only custom lower when ExtraVT has a legal byte based element type.
5609 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
5610 EVT ExtraEltVT = ExtraVT.getVectorElementType();
5611 if ((ExtraEltVT != MVT::i8) && (ExtraEltVT != MVT::i16) &&
5612 (ExtraEltVT != MVT::i32) && (ExtraEltVT != MVT::i64))
5615 return LowerToPredicatedOp(Op, DAG,
5616 AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU);
5619 return LowerTRUNCATE(Op, DAG);
5621 return LowerMLOAD(Op, DAG);
5623 if (useSVEForFixedLengthVectorVT(Op.getValueType()))
5624 return LowerFixedLengthVectorLoadToSVE(Op, DAG);
5625 return LowerLOAD(Op, DAG);
5629 return LowerToScalableOp(Op, DAG);
5631 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAX_PRED);
5633 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAXNM_PRED);
5635 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMIN_PRED);
5637 return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMINNM_PRED);
5639 return LowerFixedLengthVectorSelectToSVE(Op, DAG);
5641 return LowerABS(Op, DAG);
5643 return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDS_PRED);
5645 return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED);
5646 case ISD::BITREVERSE:
5647 return LowerBitreverse(Op, DAG);
5649 return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU);
5651 return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTLZ_MERGE_PASSTHRU);
5653 return LowerCTTZ(Op, DAG);
5654 case ISD::VECTOR_SPLICE:
5655 return LowerVECTOR_SPLICE(Op, DAG);
5656 case ISD::STRICT_LROUND:
5657 case ISD::STRICT_LLROUND:
5658 case ISD::STRICT_LRINT:
5659 case ISD::STRICT_LLRINT: {
5660 assert(Op.getOperand(1).getValueType() == MVT::f16 &&
5661 "Expected custom lowering of rounding operations only for f16");
5663 SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
5664 {Op.getOperand(0), Op.getOperand(1)});
5665 return DAG.getNode(Op.getOpcode(), DL, {Op.getValueType(), MVT::Other},
5666 {Ext.getValue(1), Ext.getValue(0)});
5671 bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const {
5672 return !Subtarget->useSVEForFixedLengthVectors();
5675 bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
5676 EVT VT, bool OverrideNEON) const {
5677 if (!VT.isFixedLengthVector() || !VT.isSimple())
5680 // Don't use SVE for vectors we cannot scalarize if required.
5681 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
5682 // Fixed length predicates should be promoted to i8.
5683   // NOTE: This is consistent with how NEON (and thus 64/128-bit vectors) work.
5697 // All SVE implementations support NEON sized vectors.
5698 if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector()))
5699 return Subtarget->hasSVE();
5701 // Ensure NEON MVTs only belong to a single register class.
5702 if (VT.getFixedSizeInBits() <= 128)
5705 // Ensure wider than NEON code generation is enabled.
5706 if (!Subtarget->useSVEForFixedLengthVectors())
5709 // Don't use SVE for types that don't fit.
5710 if (VT.getFixedSizeInBits() > Subtarget->getMinSVEVectorSizeInBits())
5713 // TODO: Perhaps an artificial restriction, but worth having whilst getting
5714 // the base fixed length SVE support in place.
5715 if (!VT.isPow2VectorType())
5721 //===----------------------------------------------------------------------===//
5722 // Calling Convention Implementation
5723 //===----------------------------------------------------------------------===//
5725 static unsigned getIntrinsicID(const SDNode *N) {
5726 unsigned Opcode = N->getOpcode();
5729 return Intrinsic::not_intrinsic;
5730 case ISD::INTRINSIC_WO_CHAIN: {
5731 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5732 if (IID < Intrinsic::num_intrinsics)
5734 return Intrinsic::not_intrinsic;
5739 bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
5741 if (!N0.hasOneUse())
5744 unsigned IID = getIntrinsicID(N1.getNode());
5745 // Avoid reassociating expressions that can be lowered to smlal/umlal.
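  // For example, in "acc + (smull a, b)" the multiply should remain a direct
  // operand of an add so it can fold into a single smlal; the check below
  // reports reassociation as unprofitable in that situation (informal
  // illustration).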
5746 if (IID == Intrinsic::aarch64_neon_umull ||
5747 N1.getOpcode() == AArch64ISD::UMULL ||
5748 IID == Intrinsic::aarch64_neon_smull ||
5749 N1.getOpcode() == AArch64ISD::SMULL)
5750 return N0.getOpcode() != ISD::ADD;
5755 /// Selects the correct CCAssignFn for a given CallingConvention value.
5756 CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
5757 bool IsVarArg) const {
5760 report_fatal_error("Unsupported calling convention.");
5761 case CallingConv::WebKit_JS:
5762 return CC_AArch64_WebKit_JS;
5763 case CallingConv::GHC:
5764 return CC_AArch64_GHC;
5765 case CallingConv::C:
5766 case CallingConv::Fast:
5767 case CallingConv::PreserveMost:
5768 case CallingConv::CXX_FAST_TLS:
5769 case CallingConv::Swift:
5770 case CallingConv::SwiftTail:
5771 case CallingConv::Tail:
5772 if (Subtarget->isTargetWindows() && IsVarArg)
5773 return CC_AArch64_Win64_VarArg;
5774 if (!Subtarget->isTargetDarwin())
5775 return CC_AArch64_AAPCS;
5777 return CC_AArch64_DarwinPCS;
5778 return Subtarget->isTargetILP32() ? CC_AArch64_DarwinPCS_ILP32_VarArg
5779 : CC_AArch64_DarwinPCS_VarArg;
5780 case CallingConv::Win64:
5781 return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS;
5782 case CallingConv::CFGuard_Check:
5783 return CC_AArch64_Win64_CFGuard_Check;
5784 case CallingConv::AArch64_VectorCall:
5785 case CallingConv::AArch64_SVE_VectorCall:
5786 return CC_AArch64_AAPCS;
5791 AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
5792 return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
5793 : RetCC_AArch64_AAPCS;
5796 SDValue AArch64TargetLowering::LowerFormalArguments(
5797 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
5798 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
5799 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5800 MachineFunction &MF = DAG.getMachineFunction();
5801 const Function &F = MF.getFunction();
5802 MachineFrameInfo &MFI = MF.getFrameInfo();
5803 bool IsWin64 = Subtarget->isCallingConvWin64(F.getCallingConv());
5804 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
5806 SmallVector<ISD::OutputArg, 4> Outs;
5807 GetReturnInfo(CallConv, F.getReturnType(), F.getAttributes(), Outs,
5808 DAG.getTargetLoweringInfo(), MF.getDataLayout());
5809 if (any_of(Outs, [](ISD::OutputArg &Out){ return Out.VT.isScalableVector(); }))
5810 FuncInfo->setIsSVECC(true);
5812 // Assign locations to all of the incoming arguments.
5813 SmallVector<CCValAssign, 16> ArgLocs;
5814 DenseMap<unsigned, SDValue> CopiedRegs;
5815 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5817 // At this point, Ins[].VT may already be promoted to i32. To correctly
5818 // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
5819 // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
5820 // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here
5821   // we use a special version of AnalyzeFormalArguments to pass in ValVT and LocVT.
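  // For example, an i8 argument typically arrives with Ins[i].VT already
  // promoted to i32; analysing it with an i8 LocVT is what allows it to be
  // passed as a single byte on the stack rather than as an i32 (an informal
  // example).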
5823 unsigned NumArgs = Ins.size();
5824 Function::const_arg_iterator CurOrigArg = F.arg_begin();
5825 unsigned CurArgIdx = 0;
5826 for (unsigned i = 0; i != NumArgs; ++i) {
5827 MVT ValVT = Ins[i].VT;
5828 if (Ins[i].isOrigArg()) {
5829 std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
5830 CurArgIdx = Ins[i].getOrigArgIndex();
5832 // Get type of the original argument.
5833 EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(),
5834 /*AllowUnknown*/ true);
5835 MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
5836 // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
5837 if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
5839 else if (ActualMVT == MVT::i16)
5842 bool UseVarArgCC = false;
5844 UseVarArgCC = isVarArg;
5845 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
5847 AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
5848 assert(!Res && "Call operand has unhandled type");
5851 SmallVector<SDValue, 16> ArgValues;
5852 unsigned ExtraArgLocs = 0;
5853 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5854 CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
5856 if (Ins[i].Flags.isByVal()) {
5857 // Byval is used for HFAs in the PCS, but the system should work in a
5858 // non-compliant manner for larger structs.
5859 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5860 int Size = Ins[i].Flags.getByValSize();
5861 unsigned NumRegs = (Size + 7) / 8;
5863       // FIXME: This works on big-endian for composite byvals, which are the
5864       // common case. It should work for fundamental types too.
5866 MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false);
5867 SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT);
5868 InVals.push_back(FrameIdxN);
5873 if (Ins[i].Flags.isSwiftAsync())
5874 MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
5877 if (VA.isRegLoc()) {
5878 // Arguments stored in registers.
5879 EVT RegVT = VA.getLocVT();
5880 const TargetRegisterClass *RC;
5882 if (RegVT == MVT::i32)
5883 RC = &AArch64::GPR32RegClass;
5884 else if (RegVT == MVT::i64)
5885 RC = &AArch64::GPR64RegClass;
5886 else if (RegVT == MVT::f16 || RegVT == MVT::bf16)
5887 RC = &AArch64::FPR16RegClass;
5888 else if (RegVT == MVT::f32)
5889 RC = &AArch64::FPR32RegClass;
5890 else if (RegVT == MVT::f64 || RegVT.is64BitVector())
5891 RC = &AArch64::FPR64RegClass;
5892 else if (RegVT == MVT::f128 || RegVT.is128BitVector())
5893 RC = &AArch64::FPR128RegClass;
5894 else if (RegVT.isScalableVector() &&
5895 RegVT.getVectorElementType() == MVT::i1) {
5896 FuncInfo->setIsSVECC(true);
5897 RC = &AArch64::PPRRegClass;
5898 } else if (RegVT.isScalableVector()) {
5899 FuncInfo->setIsSVECC(true);
5900 RC = &AArch64::ZPRRegClass;
5902 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
5904 // Transform the arguments in physical registers into virtual ones.
5905 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
5906 ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
5908 // If this is an 8, 16 or 32-bit value, it is really passed promoted
5909 // to 64 bits. Insert an assert[sz]ext to capture this, then
5910 // truncate to the right size.
5911 switch (VA.getLocInfo()) {
5913 llvm_unreachable("Unknown loc info!");
5914 case CCValAssign::Full:
5916 case CCValAssign::Indirect:
5917 assert(VA.getValVT().isScalableVector() &&
5918 "Only scalable vectors can be passed indirectly");
5920 case CCValAssign::BCvt:
5921 ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
5923 case CCValAssign::AExt:
5924 case CCValAssign::SExt:
5925 case CCValAssign::ZExt:
5927 case CCValAssign::AExtUpper:
5928 ArgValue = DAG.getNode(ISD::SRL, DL, RegVT, ArgValue,
5929 DAG.getConstant(32, DL, RegVT));
5930 ArgValue = DAG.getZExtOrTrunc(ArgValue, DL, VA.getValVT());
5933 } else { // VA.isRegLoc()
5934 assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem");
5935 unsigned ArgOffset = VA.getLocMemOffset();
5936 unsigned ArgSize = (VA.getLocInfo() == CCValAssign::Indirect
5937 ? VA.getLocVT().getSizeInBits()
5938 : VA.getValVT().getSizeInBits()) / 8;
5940 uint32_t BEAlign = 0;
5941 if (!Subtarget->isLittleEndian() && ArgSize < 8 &&
5942 !Ins[i].Flags.isInConsecutiveRegs())
5943 BEAlign = 8 - ArgSize;
5945 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true);
5947 // Create load nodes to retrieve arguments from the stack.
5948 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
5950       // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
5951 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
5952 MVT MemVT = VA.getValVT();
5954 switch (VA.getLocInfo()) {
5957 case CCValAssign::Trunc:
5958 case CCValAssign::BCvt:
5959 MemVT = VA.getLocVT();
5961 case CCValAssign::Indirect:
5962 assert(VA.getValVT().isScalableVector() &&
5963 "Only scalable vectors can be passed indirectly");
5964 MemVT = VA.getLocVT();
5966 case CCValAssign::SExt:
5967 ExtType = ISD::SEXTLOAD;
5969 case CCValAssign::ZExt:
5970 ExtType = ISD::ZEXTLOAD;
5972 case CCValAssign::AExt:
5973 ExtType = ISD::EXTLOAD;
5978 DAG.getExtLoad(ExtType, DL, VA.getLocVT(), Chain, FIN,
5979 MachinePointerInfo::getFixedStack(MF, FI), MemVT);
5982 if (VA.getLocInfo() == CCValAssign::Indirect) {
5983 assert(VA.getValVT().isScalableVector() &&
5984 "Only scalable vectors can be passed indirectly");
5986 uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinSize();
5987 unsigned NumParts = 1;
5988 if (Ins[i].Flags.isInConsecutiveRegs()) {
5989 assert(!Ins[i].Flags.isInConsecutiveRegsLast());
5990 while (!Ins[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
5994 MVT PartLoad = VA.getValVT();
5995 SDValue Ptr = ArgValue;
5997 // Ensure we generate all loads for each tuple part, whilst updating the
5998 // pointer after each load correctly using vscale.
5999 while (NumParts > 0) {
6000 ArgValue = DAG.getLoad(PartLoad, DL, Chain, Ptr, MachinePointerInfo());
6001 InVals.push_back(ArgValue);
6004 SDValue BytesIncrement = DAG.getVScale(
6005 DL, Ptr.getValueType(),
6006 APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
6008 Flags.setNoUnsignedWrap(true);
6009 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6010 BytesIncrement, Flags);
6016 if (Subtarget->isTargetILP32() && Ins[i].Flags.isPointer())
6017 ArgValue = DAG.getNode(ISD::AssertZext, DL, ArgValue.getValueType(),
6018 ArgValue, DAG.getValueType(MVT::i32));
6020 // i1 arguments are zero-extended to i8 by the caller. Emit a
6021 // hint to reflect this.
6022 if (Ins[i].isOrigArg()) {
6023 Argument *OrigArg = F.getArg(Ins[i].getOrigArgIndex());
6024 if (OrigArg->getType()->isIntegerTy(1)) {
6025 if (!Ins[i].Flags.isZExt()) {
6026 ArgValue = DAG.getNode(AArch64ISD::ASSERT_ZEXT_BOOL, DL,
6027 ArgValue.getValueType(), ArgValue);
6032 InVals.push_back(ArgValue);
6035 assert((ArgLocs.size() + ExtraArgLocs) == Ins.size());
6039 if (!Subtarget->isTargetDarwin() || IsWin64) {
6040 // The AAPCS variadic function ABI is identical to the non-variadic
6041 // one. As a result there may be more arguments in registers and we should
6042 // save them for future reference.
6043 // Win64 variadic functions also pass arguments in registers, but all float
6044 // arguments are passed in integer registers.
6045 saveVarArgRegisters(CCInfo, DAG, DL, Chain);
6048 // This will point to the next argument passed via stack.
6049 unsigned StackOffset = CCInfo.getNextStackOffset();
6050   // We currently pass all varargs at 8-byte alignment, or 4 bytes for ILP32.
6051 StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8);
6052 FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
6054 if (MFI.hasMustTailInVarArgFunc()) {
6055 SmallVector<MVT, 2> RegParmTypes;
6056 RegParmTypes.push_back(MVT::i64);
6057 RegParmTypes.push_back(MVT::f128);
6058 // Compute the set of forwarded registers. The rest are scratch.
6059 SmallVectorImpl<ForwardedRegister> &Forwards =
6060 FuncInfo->getForwardedMustTailRegParms();
6061 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes,
6064 // Conservatively forward X8, since it might be used for aggregate return.
6065 if (!CCInfo.isAllocated(AArch64::X8)) {
6066 Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
6067 Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
6072 // On Windows, InReg pointers must be returned, so record the pointer in a
6073   // virtual register at the start of the function so it can be returned in the epilogue.
6076 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
6077 if (Ins[I].Flags.isInReg()) {
6078 assert(!FuncInfo->getSRetReturnReg());
6080 MVT PtrTy = getPointerTy(DAG.getDataLayout());
6082 MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
6083 FuncInfo->setSRetReturnReg(Reg);
6085 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[I]);
6086 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
6092 unsigned StackArgSize = CCInfo.getNextStackOffset();
6093 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
6094 if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
6095 // This is a non-standard ABI so by fiat I say we're allowed to make full
6096     // use of the stack area to be popped, which must be aligned to 16 bytes in any case.
6098 StackArgSize = alignTo(StackArgSize, 16);
6100 // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
6101 // a multiple of 16.
6102 FuncInfo->setArgumentStackToRestore(StackArgSize);
6104 // This realignment carries over to the available bytes below. Our own
6105     // callers will guarantee the space is free by giving an aligned value to CALLSEQ_START.
6108 // Even if we're not expected to free up the space, it's useful to know how
6109 // much is there while considering tail calls (because we can reuse it).
6110 FuncInfo->setBytesInStackArgArea(StackArgSize);
6112 if (Subtarget->hasCustomCallingConv())
6113 Subtarget->getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
6118 void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
6121 SDValue &Chain) const {
6122 MachineFunction &MF = DAG.getMachineFunction();
6123 MachineFrameInfo &MFI = MF.getFrameInfo();
6124 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6125 auto PtrVT = getPointerTy(DAG.getDataLayout());
6126 bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
6128 SmallVector<SDValue, 8> MemOps;
6130 static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
6131 AArch64::X3, AArch64::X4, AArch64::X5,
6132 AArch64::X6, AArch64::X7 };
6133 static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
6134 unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
6136 unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
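// For example (illustrative): in a function declared as f(int, int, ...),
// W0 and W1 hold the fixed arguments, so FirstVariadicGPR is 2 and
// GPRSaveSize is 48 bytes (X2-X7).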
6138 if (GPRSaveSize != 0) {
6140 GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
6141 if (GPRSaveSize & 15)
6142 // The extra size here, if triggered, will always be 8.
6143 MFI.CreateFixedObject(16 - (GPRSaveSize & 15), -(int)alignTo(GPRSaveSize, 16), false);
6145 GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);
6147 SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);
6149 for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
6150 Register VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
6151 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
6153 DAG.getStore(Val.getValue(1), DL, Val, FIN,
6154 IsWin64 ? MachinePointerInfo::getFixedStack(
6155 MF, GPRIdx, (i - FirstVariadicGPR) * 8)
6156 : MachinePointerInfo::getStack(MF, i * 8));
6157 MemOps.push_back(Store);
6159 DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT));
6162 FuncInfo->setVarArgsGPRIndex(GPRIdx);
6163 FuncInfo->setVarArgsGPRSize(GPRSaveSize);
6165 if (Subtarget->hasFPARMv8() && !IsWin64) {
6166 static const MCPhysReg FPRArgRegs[] = {
6167 AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
6168 AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
6169 static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
6170 unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
6172 unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
6174 if (FPRSaveSize != 0) {
6175 FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);
6177 SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT);
6179 for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
6180 Register VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
6181 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
6183 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
6184 MachinePointerInfo::getStack(MF, i * 16));
6185 MemOps.push_back(Store);
6186 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
6187 DAG.getConstant(16, DL, PtrVT));
6190 FuncInfo->setVarArgsFPRIndex(FPRIdx);
6191 FuncInfo->setVarArgsFPRSize(FPRSaveSize);
6194 if (!MemOps.empty()) {
6195 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
6199 /// LowerCallResult - Lower the result values of a call into the
6200 /// appropriate copies out of appropriate physical registers.
6201 SDValue AArch64TargetLowering::LowerCallResult(
6202 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
6203 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
6204 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
6205 SDValue ThisVal) const {
6206 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6207 // Assign locations to each value returned by this call.
6208 SmallVector<CCValAssign, 16> RVLocs;
6209 DenseMap<unsigned, SDValue> CopiedRegs;
6210 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
6212 CCInfo.AnalyzeCallResult(Ins, RetCC);
6214 // Copy all of the result registers out of their specified physreg.
6215 for (unsigned i = 0; i != RVLocs.size(); ++i) {
6216 CCValAssign VA = RVLocs[i];
6218 // Pass 'this' value directly from the argument to return value, to avoid
6219 // reg unit interference
6220 if (i == 0 && isThisReturn) {
6221 assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 &&
6222 "unexpected return calling convention register assignment");
6223 InVals.push_back(ThisVal);
6227 // Avoid copying a physreg twice since RegAllocFast is incompetent and only
6228 // allows one use of a physreg per block.
6229 SDValue Val = CopiedRegs.lookup(VA.getLocReg());
6232 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
6233 Chain = Val.getValue(1);
6234 InFlag = Val.getValue(2);
6235 CopiedRegs[VA.getLocReg()] = Val;
6238 switch (VA.getLocInfo()) {
6240 llvm_unreachable("Unknown loc info!");
6241 case CCValAssign::Full:
6243 case CCValAssign::BCvt:
6244 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6246 case CCValAssign::AExtUpper:
6247 Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val,
6248 DAG.getConstant(32, DL, VA.getLocVT()));
6250 case CCValAssign::AExt:
6252 case CCValAssign::ZExt:
6253 Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT());
6257 InVals.push_back(Val);
6263 /// Return true if the calling convention is one that we can guarantee TCO for.
6264 static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
6265 return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
6266 CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
6269 /// Return true if we might ever do TCO for calls with this calling convention.
6270 static bool mayTailCallThisCC(CallingConv::ID CC) {
6271 switch (CC) {
6272 case CallingConv::C:
6273 case CallingConv::AArch64_SVE_VectorCall:
6274 case CallingConv::PreserveMost:
6275 case CallingConv::Swift:
6276 case CallingConv::SwiftTail:
6277 case CallingConv::Tail:
6278 case CallingConv::Fast:
6285 static void analyzeCallOperands(const AArch64TargetLowering &TLI,
6286 const AArch64Subtarget *Subtarget,
6287 const TargetLowering::CallLoweringInfo &CLI,
6289 const SelectionDAG &DAG = CLI.DAG;
6290 CallingConv::ID CalleeCC = CLI.CallConv;
6291 bool IsVarArg = CLI.IsVarArg;
6292 const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6293 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
6295 unsigned NumArgs = Outs.size();
6296 for (unsigned i = 0; i != NumArgs; ++i) {
6297 MVT ArgVT = Outs[i].VT;
6298 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6300 bool UseVarArgCC = false;
6302 // On Windows, the fixed arguments in a vararg call are passed in GPRs
6303 // too, so use the vararg CC to force them to integer registers.
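// (Illustrative: for a Win64 call to printf("%f", x) the double is passed in
// X1 rather than D0, because printf is variadic.)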
6304 if (IsCalleeWin64) {
6305 UseVarArgCC = true;
6306 } else {
6307 UseVarArgCC = !Outs[i].IsFixed;
6310 // Get type of the original argument.
6312 TLI.getValueType(DAG.getDataLayout(), CLI.Args[Outs[i].OrigArgIndex].Ty,
6313 /*AllowUnknown*/ true);
6314 MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ArgVT;
6315 // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
6316 if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
6317 ArgVT = MVT::i8;
6318 else if (ActualMVT == MVT::i16)
6319 ArgVT = MVT::i16;
6322 CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CalleeCC, UseVarArgCC);
6323 bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
6324 assert(!Res && "Call operand has unhandled type");
6329 bool AArch64TargetLowering::isEligibleForTailCallOptimization(
6330 const CallLoweringInfo &CLI) const {
6331 CallingConv::ID CalleeCC = CLI.CallConv;
6332 if (!mayTailCallThisCC(CalleeCC))
6335 SDValue Callee = CLI.Callee;
6336 bool IsVarArg = CLI.IsVarArg;
6337 const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6338 const SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
6339 const SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
6340 const SelectionDAG &DAG = CLI.DAG;
6341 MachineFunction &MF = DAG.getMachineFunction();
6342 const Function &CallerF = MF.getFunction();
6343 CallingConv::ID CallerCC = CallerF.getCallingConv();
6345 // Functions using the C or Fast calling convention that have an SVE signature
6346 // preserve more registers and should assume the SVE_VectorCall CC.
6347 // The check for matching callee-saved regs will determine whether it is
6348 // eligible for TCO.
6349 if ((CallerCC == CallingConv::C || CallerCC == CallingConv::Fast) &&
6350 MF.getInfo<AArch64FunctionInfo>()->isSVECC())
6351 CallerCC = CallingConv::AArch64_SVE_VectorCall;
6353 bool CCMatch = CallerCC == CalleeCC;
6355 // When using the Windows calling convention on a non-windows OS, we want
6356 // to back up and restore X18 in such functions; we can't do a tail call
6357 // from those functions.
6358 if (CallerCC == CallingConv::Win64 && !Subtarget->isTargetWindows() &&
6359 CalleeCC != CallingConv::Win64)
6362 // Byval parameters hand the function a pointer directly into the stack area
6363 // we want to reuse during a tail call. Working around this *is* possible (see
6364 // X86) but less efficient and uglier in LowerCall.
6365 for (Function::const_arg_iterator i = CallerF.arg_begin(),
6366 e = CallerF.arg_end();
6368 if (i->hasByValAttr())
6371 // On Windows, "inreg" attributes signify non-aggregate indirect returns.
6372 // In this case, it is necessary to save/restore X0 in the callee. Tail
6373 // call opt interferes with this. So we disable tail call opt when the
6374 // caller has an argument with "inreg" attribute.
6376 // FIXME: Check whether the callee also has an "inreg" argument.
6377 if (i->hasInRegAttr())
6381 if (canGuaranteeTCO(CalleeCC, getTargetMachine().Options.GuaranteedTailCallOpt))
6384 // Externally-defined functions with weak linkage should not be
6385 // tail-called on AArch64 when the OS does not support dynamic
6386 // pre-emption of symbols, as the AAELF spec requires normal calls
6387 // to undefined weak functions to be replaced with a NOP or jump to the
6388 // next instruction. The behaviour of branch instructions in this
6389 // situation (as used for tail calls) is implementation-defined, so we
6390 // cannot rely on the linker replacing the tail call with a return.
6391 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6392 const GlobalValue *GV = G->getGlobal();
6393 const Triple &TT = getTargetMachine().getTargetTriple();
6394 if (GV->hasExternalWeakLinkage() &&
6395 (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
6399 // Now we search for cases where we can use a tail call without changing the
6400 // ABI. Sibcall is used in some places (particularly gcc) to refer to this concept.
6403 // I want anyone implementing a new calling convention to think long and hard
6404 // about this assert.
6405 assert((!IsVarArg || CalleeCC == CallingConv::C) &&
6406 "Unexpected variadic calling convention");
6408 LLVMContext &C = *DAG.getContext();
6409 // Check that the call results are passed in the same way.
6410 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
6411 CCAssignFnForCall(CalleeCC, IsVarArg),
6412 CCAssignFnForCall(CallerCC, IsVarArg)))
6414 // The callee has to preserve all registers the caller needs to preserve.
6415 const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
6416 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
6418 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
6419 if (Subtarget->hasCustomCallingConv()) {
6420 TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
6421 TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
6423 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
6427 // Nothing more to check if the callee is taking no arguments
6431 SmallVector<CCValAssign, 16> ArgLocs;
6432 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
6434 analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
6436 if (IsVarArg && !(CLI.CB && CLI.CB->isMustTailCall())) {
6437 // When the call is musttail, additional checks have already been done, so we can safely skip this check.
6438 // At least two cases here: if caller is fastcc then we can't have any
6439 // memory arguments (we'd be expected to clean up the stack afterwards). If
6440 // caller is C then we could potentially use its argument area.
6442 // FIXME: for now we take the most conservative of these in both cases:
6443 // disallow all variadic memory operands.
6444 for (const CCValAssign &ArgLoc : ArgLocs)
6445 if (!ArgLoc.isRegLoc())
6449 const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6451 // If any of the arguments is passed indirectly, it must be SVE, so the
6452 // 'getBytesInStackArgArea' is not sufficient to determine whether we need to
6453 // allocate space on the stack. That is why we check it explicitly here: if any
6454 // argument is passed indirectly, the call cannot be a tail call.
6455 if (llvm::any_of(ArgLocs, [](CCValAssign &A) {
6456 assert((A.getLocInfo() != CCValAssign::Indirect ||
6457 A.getValVT().isScalableVector()) &&
6458 "Expected value to be scalable");
6459 return A.getLocInfo() == CCValAssign::Indirect;
6463 // If the stack arguments for this call do not fit into our own save area then
6464 // the call cannot be made tail.
6465 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
6468 const MachineRegisterInfo &MRI = MF.getRegInfo();
6469 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
6475 SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
6477 MachineFrameInfo &MFI,
6478 int ClobberedFI) const {
6479 SmallVector<SDValue, 8> ArgChains;
6480 int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
6481 int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
6483 // Include the original chain at the beginning of the list. When this is
6484 // used by target LowerCall hooks, this helps legalize find the
6485 // CALLSEQ_BEGIN node.
6486 ArgChains.push_back(Chain);
6488 // Add a chain value for each stack-argument load whose frame object overlaps the one being clobbered.
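// Editorial example (illustrative): when a tail call stores an outgoing
// argument into the fixed stack slot that currently holds one of the caller's
// own incoming arguments, any load of that incoming argument must be chained
// before the store; the TokenFactor built here provides exactly that ordering.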
6489 for (SDNode *U : DAG.getEntryNode().getNode()->uses())
6490 if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
6491 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
6492 if (FI->getIndex() < 0) {
6493 int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
6494 int64_t InLastByte = InFirstByte;
6495 InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
6497 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
6498 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
6499 ArgChains.push_back(SDValue(L, 1));
6502 // Build a tokenfactor for all the chains.
6503 return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
6506 bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
6507 bool TailCallOpt) const {
6508 return (CallCC == CallingConv::Fast && TailCallOpt) ||
6509 CallCC == CallingConv::Tail || CallCC == CallingConv::SwiftTail;
6512 // Check if the value is zero-extended from i1 to i8
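// (Editorial note: only the low 8 bits of the outgoing value matter for an i1
// argument, so it is enough to ask computeKnownBits whether bits [7:1] are
// already known zero; the 0xFE mask below encodes exactly those bits.)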
6513 static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
6514 unsigned SizeInBits = Arg.getValueType().getSizeInBits();
6518 APInt RequiredZero(SizeInBits, 0xFE);
6519 KnownBits Bits = DAG.computeKnownBits(Arg, 4);
6520 bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
6524 /// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
6525 /// and add input and output parameter nodes.
6527 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
6528 SmallVectorImpl<SDValue> &InVals) const {
6529 SelectionDAG &DAG = CLI.DAG;
6531 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6532 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
6533 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
6534 SDValue Chain = CLI.Chain;
6535 SDValue Callee = CLI.Callee;
6536 bool &IsTailCall = CLI.IsTailCall;
6537 CallingConv::ID &CallConv = CLI.CallConv;
6538 bool IsVarArg = CLI.IsVarArg;
6540 MachineFunction &MF = DAG.getMachineFunction();
6541 MachineFunction::CallSiteInfo CSInfo;
6542 bool IsThisReturn = false;
6544 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6545 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
6546 bool IsSibCall = false;
6547 bool GuardWithBTI = false;
6549 if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
6550 !Subtarget->noBTIAtReturnTwice()) {
6551 GuardWithBTI = FuncInfo->branchTargetEnforcement();
6554 // Check callee args/returns for SVE registers and set calling convention
6556 if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
6557 bool CalleeOutSVE = any_of(Outs, [](ISD::OutputArg &Out){
6558 return Out.VT.isScalableVector();
6560 bool CalleeInSVE = any_of(Ins, [](ISD::InputArg &In){
6561 return In.VT.isScalableVector();
6564 if (CalleeInSVE || CalleeOutSVE)
6565 CallConv = CallingConv::AArch64_SVE_VectorCall;
6569 // Check if it's really possible to do a tail call.
6570 IsTailCall = isEligibleForTailCallOptimization(CLI);
6572 // A sibling call is one where we're under the usual C ABI and not planning
6573 // to change that but can still do a tail call:
6574 if (!TailCallOpt && IsTailCall && CallConv != CallingConv::Tail &&
6575 CallConv != CallingConv::SwiftTail)
6582 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
6583 report_fatal_error("failed to perform tail call elimination on a call "
6584 "site marked musttail");
6586 // Analyze operands of the call, assigning locations to each operand.
6587 SmallVector<CCValAssign, 16> ArgLocs;
6588 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6591 unsigned NumArgs = Outs.size();
6593 for (unsigned i = 0; i != NumArgs; ++i) {
6594 if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
6595 report_fatal_error("Passing SVE types to variadic functions is "
6596 "currently not supported");
6600 analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
6602 // Get a count of how many bytes are to be pushed on the stack.
6603 unsigned NumBytes = CCInfo.getNextStackOffset();
6606 // Since we're not changing the ABI to make this a tail call, the memory
6607 // operands are already available in the caller's incoming argument space.
6611 // FPDiff is the byte offset of the call's argument area from the callee's.
6612 // Stores to callee stack arguments will be placed in FixedStackSlots offset
6613 // by this amount for a tail call. In a sibling call it must be 0 because the
6614 // caller will deallocate the entire stack and the callee still expects its
6615 // arguments to begin at SP+0. Completely unused for non-tail calls.
6618 if (IsTailCall && !IsSibCall) {
6619 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
6621 // Since callee will pop argument stack as a tail call, we must keep the
6622 // popped size 16-byte aligned.
6623 NumBytes = alignTo(NumBytes, 16);
6625 // FPDiff will be negative if this tail call requires more space than we
6626 // would automatically have in our incoming argument space. Positive if we
6627 // can actually shrink the stack.
6628 FPDiff = NumReusableBytes - NumBytes;
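// Worked example (illustrative): with 32 bytes of reusable incoming argument
// space and a tail call needing 48 bytes of stack arguments, FPDiff is -16;
// the 16 extra bytes are recorded just below as TailCallReservedStack so that
// frame lowering can leave room for them.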
6630 // Update the required reserved area if this is the tail call requiring the
6631 // most argument stack space.
6632 if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
6633 FuncInfo->setTailCallReservedStack(-FPDiff);
6635 // The stack pointer must be 16-byte aligned at all times it's used for a
6636 // memory operation, which in practice means at *all* times and in
6637 // particular across call boundaries. Therefore our own arguments started at
6638 // a 16-byte aligned SP and the delta applied for the tail call should
6639 // satisfy the same constraint.
6640 assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
6643 // Adjust the stack pointer for the new arguments...
6644 // These operations are automatically eliminated by the prolog/epilog pass
6646 Chain = DAG.getCALLSEQ_START(Chain, IsTailCall ? 0 : NumBytes, 0, DL);
6648 SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
6649 getPointerTy(DAG.getDataLayout()));
6651 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6652 SmallSet<unsigned, 8> RegsUsed;
6653 SmallVector<SDValue, 8> MemOpChains;
6654 auto PtrVT = getPointerTy(DAG.getDataLayout());
6656 if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) {
6657 const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
6658 for (const auto &F : Forwards) {
6659 SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
6660 RegsToPass.emplace_back(F.PReg, Val);
6664 // Walk the register/memloc assignments, inserting copies/loads.
6665 unsigned ExtraArgLocs = 0;
6666 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
6667 CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
6668 SDValue Arg = OutVals[i];
6669 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6671 // Promote the value if needed.
6672 switch (VA.getLocInfo()) {
6674 llvm_unreachable("Unknown loc info!");
6675 case CCValAssign::Full:
6677 case CCValAssign::SExt:
6678 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
6680 case CCValAssign::ZExt:
6681 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
6683 case CCValAssign::AExt:
6684 if (Outs[i].ArgVT == MVT::i1) {
6685 // AAPCS requires i1 to be zero-extended to 8-bits by the caller.
6687 // Check if we actually have to do this, because the value may
6688 // already be zero-extended.
6690 // We cannot just emit a (zext i8 (trunc (assert-zext i8)))
6691 // and rely on DAGCombiner to fold this, because the following
6692 // (anyext i32) is combined with (zext i8) in DAG.getNode:
6694 // (ext (zext x)) -> (zext x)
6696 // This will give us (zext i32), which we cannot remove, so
6697 // try to check this beforehand.
6698 if (!checkZExtBool(Arg, DAG)) {
6699 Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
6700 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg);
6703 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
6705 case CCValAssign::AExtUpper:
6706 assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
6707 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
6708 Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
6709 DAG.getConstant(32, DL, VA.getLocVT()));
6711 case CCValAssign::BCvt:
6712 Arg = DAG.getBitcast(VA.getLocVT(), Arg);
6714 case CCValAssign::Trunc:
6715 Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
6717 case CCValAssign::FPExt:
6718 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
6720 case CCValAssign::Indirect:
6721 assert(VA.getValVT().isScalableVector() &&
6722 "Only scalable vectors can be passed indirectly");
6724 uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinSize();
6725 uint64_t PartSize = StoreSize;
6726 unsigned NumParts = 1;
6727 if (Outs[i].Flags.isInConsecutiveRegs()) {
6728 assert(!Outs[i].Flags.isInConsecutiveRegsLast());
6729 while (!Outs[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
6731 StoreSize *= NumParts;
6734 MachineFrameInfo &MFI = MF.getFrameInfo();
6735 Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
6736 Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
6737 int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
6738 MFI.setStackID(FI, TargetStackID::ScalableVector);
6740 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6741 SDValue Ptr = DAG.getFrameIndex(
6742 FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
6743 SDValue SpillSlot = Ptr;
6745 // Ensure we generate all stores for each tuple part, whilst updating the
6746 // pointer after each store correctly using vscale.
6748 Chain = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
6751 SDValue BytesIncrement = DAG.getVScale(
6752 DL, Ptr.getValueType(),
6753 APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
6755 Flags.setNoUnsignedWrap(true);
6757 MPI = MachinePointerInfo(MPI.getAddrSpace());
6758 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6759 BytesIncrement, Flags);
6769 if (VA.isRegLoc()) {
6770 if (i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
6771 Outs[0].VT == MVT::i64) {
6772 assert(VA.getLocVT() == MVT::i64 &&
6773 "unexpected calling convention register assignment");
6774 assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&
6775 "unexpected use of 'returned'");
6776 IsThisReturn = true;
6778 if (RegsUsed.count(VA.getLocReg())) {
6779 // If this register has already been used then we're trying to pack
6780 // parts of an [N x i32] into an X-register. The extension type will
6781 // take care of putting the two halves in the right place but we have to
6782 // combine them ourselves here.
6783 SDValue &Bits =
6784 llvm::find_if(RegsToPass,
6785 [=](const std::pair<unsigned, SDValue> &Elt) {
6786 return Elt.first == VA.getLocReg();
6787 })
6788 ->second;
6789 Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
6790 // Call site info is used for function's parameter entry value
6791 // tracking. For now we track only simple cases when parameter
6792 // is transferred through whole register.
6793 llvm::erase_if(CSInfo, [&VA](MachineFunction::ArgRegPair ArgReg) {
6794 return ArgReg.Reg == VA.getLocReg();
6797 RegsToPass.emplace_back(VA.getLocReg(), Arg);
6798 RegsUsed.insert(VA.getLocReg());
6799 const TargetOptions &Options = DAG.getTarget().Options;
6800 if (Options.EmitCallSiteInfo)
6801 CSInfo.emplace_back(VA.getLocReg(), i);
6804 assert(VA.isMemLoc());
6807 MachinePointerInfo DstInfo;
6809 // FIXME: This works on big-endian for composite byvals, which are the
6810 // common case. It should also work for fundamental types.
6811 uint32_t BEAlign = 0;
6813 if (VA.getLocInfo() == CCValAssign::Indirect ||
6814 VA.getLocInfo() == CCValAssign::Trunc)
6815 OpSize = VA.getLocVT().getFixedSizeInBits();
6817 OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
6818 : VA.getValVT().getSizeInBits();
6819 OpSize = (OpSize + 7) / 8;
6820 if (!Subtarget->isLittleEndian() && !Flags.isByVal() &&
6821 !Flags.isInConsecutiveRegs()) {
6823 BEAlign = 8 - OpSize;
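// (Illustrative: a 4-byte stack argument on a big-endian target is placed in
// the higher-address half of its 8-byte slot, so BEAlign is 4 and the store
// address is bumped by that amount.)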
6825 unsigned LocMemOffset = VA.getLocMemOffset();
6826 int32_t Offset = LocMemOffset + BEAlign;
6827 SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
6828 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
6831 Offset = Offset + FPDiff;
6832 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
6834 DstAddr = DAG.getFrameIndex(FI, PtrVT);
6835 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
6837 // Make sure any stack arguments overlapping with where we're storing
6838 // are loaded before this eventual operation. Otherwise they'll be overwritten by our own store.
6840 Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
6842 SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
6844 DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
6845 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
6848 if (Outs[i].Flags.isByVal()) {
6850 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64);
6851 SDValue Cpy = DAG.getMemcpy(
6852 Chain, DL, DstAddr, Arg, SizeNode,
6853 Outs[i].Flags.getNonZeroByValAlign(),
6854 /*isVol = */ false, /*AlwaysInline = */ false,
6855 /*isTailCall = */ false, DstInfo, MachinePointerInfo());
6857 MemOpChains.push_back(Cpy);
6859 // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already
6860 // promoted to a legal register type i32, we should truncate Arg back to i1/i8/i16 before storing.
6862 if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 ||
6863 VA.getValVT() == MVT::i16)
6864 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
6866 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
6867 MemOpChains.push_back(Store);
6872 if (!MemOpChains.empty())
6873 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
6875 // Build a sequence of copy-to-reg nodes chained together with token chain
6876 // and flag operands which copy the outgoing args into the appropriate regs.
6878 for (auto &RegToPass : RegsToPass) {
6879 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
6880 RegToPass.second, InFlag);
6881 InFlag = Chain.getValue(1);
6884 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
6885 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
6886 // node so that legalize doesn't hack it.
6887 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6888 auto GV = G->getGlobal();
6890 Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine());
6891 if (OpFlags & AArch64II::MO_GOT) {
6892 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
6893 Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
6895 const GlobalValue *GV = G->getGlobal();
6896 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
6898 } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
6899 if (getTargetMachine().getCodeModel() == CodeModel::Large &&
6900 Subtarget->isTargetMachO()) {
6901 const char *Sym = S->getSymbol();
6902 Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
6903 Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
6905 const char *Sym = S->getSymbol();
6906 Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
6910 // We don't usually want to end the call-sequence here because we would tidy
6911 // the frame up *after* the call, however in the ABI-changing tail-call case
6912 // we've carefully laid out the parameters so that when sp is reset they'll be
6913 // in the correct location.
6914 if (IsTailCall && !IsSibCall) {
6915 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
6916 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
6917 InFlag = Chain.getValue(1);
6920 std::vector<SDValue> Ops;
6921 Ops.push_back(Chain);
6922 Ops.push_back(Callee);
6925 // Each tail call may have to adjust the stack by a different amount, so
6926 // this information must travel along with the operation for eventual
6927 // consumption by emitEpilogue.
6928 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
6931 // Add argument registers to the end of the list so that they are known live
6933 for (auto &RegToPass : RegsToPass)
6934 Ops.push_back(DAG.getRegister(RegToPass.first,
6935 RegToPass.second.getValueType()));
6937 // Add a register mask operand representing the call-preserved registers.
6938 const uint32_t *Mask;
6939 const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
6941 // For 'this' returns, use the X0-preserving mask if applicable
6942 Mask = TRI->getThisReturnPreservedMask(MF, CallConv);
6944 IsThisReturn = false;
6945 Mask = TRI->getCallPreservedMask(MF, CallConv);
6948 Mask = TRI->getCallPreservedMask(MF, CallConv);
6950 if (Subtarget->hasCustomCallingConv())
6951 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
6953 if (TRI->isAnyArgRegReserved(MF))
6954 TRI->emitReservedArgRegCallError(MF);
6956 assert(Mask && "Missing call preserved mask for calling convention");
6957 Ops.push_back(DAG.getRegisterMask(Mask));
6959 if (InFlag.getNode())
6960 Ops.push_back(InFlag);
6962 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6964 // If we're doing a tail call, use a TC_RETURN here rather than an
6965 // actual call instruction.
6967 MF.getFrameInfo().setHasTailCall();
6968 SDValue Ret = DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
6969 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
6973 unsigned CallOpc = AArch64ISD::CALL;
6974 // Calls with operand bundle "clang.arc.attachedcall" are special. They should
6975 // be expanded to the call, directly followed by a special marker sequence and
6976 // a call to an ObjC library function. Use CALL_RVMARKER to do that.
6977 if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
6978 assert(!IsTailCall &&
6979 "tail calls cannot be marked with clang.arc.attachedcall");
6980 CallOpc = AArch64ISD::CALL_RVMARKER;
6982 // Add a target global address for the retainRV/claimRV runtime function
6983 // just before the call target.
6984 Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
6985 auto GA = DAG.getTargetGlobalAddress(ARCFn, DL, PtrVT);
6986 Ops.insert(Ops.begin() + 1, GA);
6987 } else if (GuardWithBTI)
6988 CallOpc = AArch64ISD::CALL_BTI;
6990 // Returns a chain and a flag for retval copy to use.
6991 Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
6992 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
6993 InFlag = Chain.getValue(1);
6994 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
6996 uint64_t CalleePopBytes =
6997 DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
6999 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
7000 DAG.getIntPtrConstant(CalleePopBytes, DL, true),
7003 InFlag = Chain.getValue(1);
7005 // Handle result values, copying them out of physregs into vregs that we return.
7007 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
7008 InVals, IsThisReturn,
7009 IsThisReturn ? OutVals[0] : SDValue());
7012 bool AArch64TargetLowering::CanLowerReturn(
7013 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
7014 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
7015 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
7016 SmallVector<CCValAssign, 16> RVLocs;
7017 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7018 return CCInfo.CheckReturn(Outs, RetCC);
7022 AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7024 const SmallVectorImpl<ISD::OutputArg> &Outs,
7025 const SmallVectorImpl<SDValue> &OutVals,
7026 const SDLoc &DL, SelectionDAG &DAG) const {
7027 auto &MF = DAG.getMachineFunction();
7028 auto *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
7030 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
7031 SmallVector<CCValAssign, 16> RVLocs;
7032 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
7033 CCInfo.AnalyzeReturn(Outs, RetCC);
7035 // Copy the result values into the output registers.
7037 SmallVector<std::pair<unsigned, SDValue>, 4> RetVals;
7038 SmallSet<unsigned, 4> RegsUsed;
7039 for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
7040 ++i, ++realRVLocIdx) {
7041 CCValAssign &VA = RVLocs[i];
7042 assert(VA.isRegLoc() && "Can only return in registers!");
7043 SDValue Arg = OutVals[realRVLocIdx];
7045 switch (VA.getLocInfo()) {
7047 llvm_unreachable("Unknown loc info!");
7048 case CCValAssign::Full:
7049 if (Outs[i].ArgVT == MVT::i1) {
7050 // AAPCS requires i1 to be zero-extended to i8 by the producer of the
7051 // value. This is strictly redundant on Darwin (which uses "zeroext
7052 // i1"), but will be optimised out before ISel.
7053 Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
7054 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
7057 case CCValAssign::BCvt:
7058 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
7060 case CCValAssign::AExt:
7061 case CCValAssign::ZExt:
7062 Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
7064 case CCValAssign::AExtUpper:
7065 assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
7066 Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
7067 Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
7068 DAG.getConstant(32, DL, VA.getLocVT()));
7072 if (RegsUsed.count(VA.getLocReg())) {
7074 llvm::find_if(RetVals, [=](const std::pair<unsigned, SDValue> &Elt) {
7075 return Elt.first == VA.getLocReg();
7077 Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
7079 RetVals.emplace_back(VA.getLocReg(), Arg);
7080 RegsUsed.insert(VA.getLocReg());
7084 SmallVector<SDValue, 4> RetOps(1, Chain);
7085 for (auto &RetVal : RetVals) {
7086 Chain = DAG.getCopyToReg(Chain, DL, RetVal.first, RetVal.second, Flag);
7087 Flag = Chain.getValue(1);
7089 DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
7092 // Windows AArch64 ABIs require that for returning structs by value we copy
7093 // the sret argument into X0 for the return.
7094 // We saved the argument into a virtual register in the entry block,
7095 // so now we copy the value out and into X0.
7096 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
7097 SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
7098 getPointerTy(MF.getDataLayout()));
7100 unsigned RetValReg = AArch64::X0;
7101 Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
7102 Flag = Chain.getValue(1);
7105 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
7108 const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
7109 const MCPhysReg *I = TRI->getCalleeSavedRegsViaCopy(&MF);
7112 if (AArch64::GPR64RegClass.contains(*I))
7113 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
7114 else if (AArch64::FPR64RegClass.contains(*I))
7115 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
7117 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
7121 RetOps[0] = Chain; // Update chain.
7123 // Add the flag if we have it.
7125 RetOps.push_back(Flag);
7127 return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
7130 //===----------------------------------------------------------------------===//
7131 // Other Lowering Code
7132 //===----------------------------------------------------------------------===//
7134 SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
7136 unsigned Flag) const {
7137 return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty,
7138 N->getOffset(), Flag);
7141 SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
7143 unsigned Flag) const {
7144 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
7147 SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
7149 unsigned Flag) const {
7150 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
7151 N->getOffset(), Flag);
7154 SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
7156 unsigned Flag) const {
7157 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
7161 template <class NodeTy>
7162 SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
7163 unsigned Flags) const {
7164 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
7166 EVT Ty = getPointerTy(DAG.getDataLayout());
7167 SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
7168 // FIXME: Once remat is capable of dealing with instructions with register
7169 // operands, expand this into two nodes instead of using a wrapper node.
7170 return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
7173 // (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
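// Roughly (illustrative sketch for the large code model): this is materialized
// as a movz/movk chain, e.g.
//   movz x0, #:abs_g3:sym
//   movk x0, #:abs_g2_nc:sym
//   movk x0, #:abs_g1_nc:sym
//   movk x0, #:abs_g0_nc:sym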
7174 template <class NodeTy>
7175 SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
7176 unsigned Flags) const {
7177 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
7179 EVT Ty = getPointerTy(DAG.getDataLayout());
7180 const unsigned char MO_NC = AArch64II::MO_NC;
7182 AArch64ISD::WrapperLarge, DL, Ty,
7183 getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags),
7184 getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags),
7185 getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags),
7186 getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags));
7189 // (addlow (adrp %hi(sym)) %lo(sym))
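// i.e. (illustrative) the usual small-code-model pair:
//   adrp x0, sym
//   add  x0, x0, :lo12:sym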
7190 template <class NodeTy>
7191 SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
7192 unsigned Flags) const {
7193 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
7195 EVT Ty = getPointerTy(DAG.getDataLayout());
7196 SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags);
7197 SDValue Lo = getTargetNode(N, Ty, DAG,
7198 AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags);
7199 SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi);
7200 return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
7204 template <class NodeTy>
7205 SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
7206 unsigned Flags) const {
7207 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
7209 EVT Ty = getPointerTy(DAG.getDataLayout());
7210 SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
7211 return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym);
7214 SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
7215 SelectionDAG &DAG) const {
7216 GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
7217 const GlobalValue *GV = GN->getGlobal();
7218 unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
7220 if (OpFlags != AArch64II::MO_NO_FLAG)
7221 assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
7222 "unexpected offset in global node");
7224 // This also catches the large code model case for Darwin, and tiny code
7225 // model with got relocations.
7226 if ((OpFlags & AArch64II::MO_GOT) != 0) {
7227 return getGOT(GN, DAG, OpFlags);
7231 if (getTargetMachine().getCodeModel() == CodeModel::Large) {
7232 Result = getAddrLarge(GN, DAG, OpFlags);
7233 } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
7234 Result = getAddrTiny(GN, DAG, OpFlags);
7236 Result = getAddr(GN, DAG, OpFlags);
7238 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7240 if (OpFlags & (AArch64II::MO_DLLIMPORT | AArch64II::MO_COFFSTUB))
7241 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
7242 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
7246 /// Convert a TLS address reference into the correct sequence of loads
7247 /// and calls to compute the variable's address (for Darwin, currently) and
7248 /// return an SDValue containing the final node.
7250 /// Darwin only has one TLS scheme which must be capable of dealing with the
7251 /// fully general situation, in the worst case. This means:
7252 /// + "extern __thread" declaration.
7253 /// + Defined in a possibly unknown dynamic library.
7255 /// The general system is that each __thread variable has a [3 x i64] descriptor
7256 /// which contains information used by the runtime to calculate the address. The
7257 /// only part of this the compiler needs to know about is the first xword, which
7258 /// contains a function pointer that must be called with the address of the
7259 /// entire descriptor in "x0".
7261 /// Since this descriptor may be in a different unit, in general even the
7262 /// descriptor must be accessed via an indirect load. The "ideal" code sequence is:
7264 /// adrp x0, _var@TLVPPAGE
7265 /// ldr x0, [x0, _var@TLVPPAGEOFF] ; x0 now contains address of descriptor
7266 /// ldr x1, [x0] ; x1 contains 1st entry of descriptor,
7267 /// ; the function pointer
7268 /// blr x1 ; Uses descriptor address in x0
7269 /// ; Address of _var is now in x0.
7271 /// If the address of _var's descriptor *is* known to the linker, then it can
7272 /// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
7273 /// a slight efficiency gain.
7275 AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
7276 SelectionDAG &DAG) const {
7277 assert(Subtarget->isTargetDarwin() &&
7278 "This function expects a Darwin target");
7281 MVT PtrVT = getPointerTy(DAG.getDataLayout());
7282 MVT PtrMemVT = getPointerMemTy(DAG.getDataLayout());
7283 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
7286 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7287 SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr);
7289 // The first entry in the descriptor is a function pointer that we must call
7290 // to obtain the address of the variable.
7291 SDValue Chain = DAG.getEntryNode();
7292 SDValue FuncTLVGet = DAG.getLoad(
7293 PtrMemVT, DL, Chain, DescAddr,
7294 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
7295 Align(PtrMemVT.getSizeInBits() / 8),
7296 MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7297 Chain = FuncTLVGet.getValue(1);
7299 // Extend loaded pointer if necessary (i.e. if ILP32) to DAG pointer.
7300 FuncTLVGet = DAG.getZExtOrTrunc(FuncTLVGet, DL, PtrVT);
7302 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7303 MFI.setAdjustsStack(true);
7305 // TLS calls preserve all registers except those that absolutely must be
7306 // trashed: X0 (it takes an argument), LR (it's a call) and NZCV.
7308 const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
7309 const uint32_t *Mask = TRI->getTLSCallPreservedMask();
7310 if (Subtarget->hasCustomCallingConv())
7311 TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
7313 // Finally, we can make the call. This is just a degenerate version of a
7314 // normal AArch64 call node: x0 takes the address of the descriptor, and
7315 // returns the address of the variable in this thread.
7316 Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue());
7318 DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
7319 Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64),
7320 DAG.getRegisterMask(Mask), Chain.getValue(1));
7321 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1));
7324 /// Convert a thread-local variable reference into a sequence of instructions to
7325 /// compute the variable's address for the local exec TLS model of ELF targets.
7326 /// The sequence depends on the maximum TLS area size.
7327 SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
7330 SelectionDAG &DAG) const {
7331 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7332 SDValue TPOff, Addr;
7334 switch (DAG.getTarget().Options.TLSSize) {
7336 llvm_unreachable("Unexpected TLS size");
7339 // mrs x0, TPIDR_EL0
7340 // add x0, x0, :tprel_lo12:a
7341 SDValue Var = DAG.getTargetGlobalAddress(
7342 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
7343 return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
7345 DAG.getTargetConstant(0, DL, MVT::i32)),
7350 // mrs x0, TPIDR_EL0
7351 // add x0, x0, :tprel_hi12:a
7352 // add x0, x0, :tprel_lo12_nc:a
7353 SDValue HiVar = DAG.getTargetGlobalAddress(
7354 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7355 SDValue LoVar = DAG.getTargetGlobalAddress(
7357 AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7358 Addr = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
7360 DAG.getTargetConstant(0, DL, MVT::i32)),
7362 return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, Addr,
7364 DAG.getTargetConstant(0, DL, MVT::i32)),
7369 // mrs x1, TPIDR_EL0
7370 // movz x0, #:tprel_g1:a
7371 // movk x0, #:tprel_g0_nc:a
7373 SDValue HiVar = DAG.getTargetGlobalAddress(
7374 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1);
7375 SDValue LoVar = DAG.getTargetGlobalAddress(
7377 AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
7378 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
7379 DAG.getTargetConstant(16, DL, MVT::i32)),
7381 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
7382 DAG.getTargetConstant(0, DL, MVT::i32)),
7384 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7388 // mrs x1, TPIDR_EL0
7389 // movz x0, #:tprel_g2:a
7390 // movk x0, #:tprel_g1_nc:a
7391 // movk x0, #:tprel_g0_nc:a
7393 SDValue HiVar = DAG.getTargetGlobalAddress(
7394 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G2);
7395 SDValue MiVar = DAG.getTargetGlobalAddress(
7397 AArch64II::MO_TLS | AArch64II::MO_G1 | AArch64II::MO_NC);
7398 SDValue LoVar = DAG.getTargetGlobalAddress(
7400 AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
7401 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
7402 DAG.getTargetConstant(32, DL, MVT::i32)),
7404 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, MiVar,
7405 DAG.getTargetConstant(16, DL, MVT::i32)),
7407 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
7408 DAG.getTargetConstant(0, DL, MVT::i32)),
7410 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7415 /// When accessing thread-local variables under either the general-dynamic or
7416 /// local-dynamic system, we make a "TLS-descriptor" call. The variable will
7417 /// have a descriptor, accessible via a PC-relative ADRP, whose first entry
7418 /// is a function pointer to carry out the resolution.
7420 /// The sequence is:
7421 /// adrp x0, :tlsdesc:var
7422 /// ldr x1, [x0, #:tlsdesc_lo12:var]
7423 /// add x0, x0, #:tlsdesc_lo12:var
7424 /// .tlsdesccall var
7425 /// blr x1
7426 /// (TPIDR_EL0 offset now in x0)
7428 /// The above sequence must be produced unscheduled, to enable the linker to
7429 /// optimize/relax this sequence.
7430 /// Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the
7431 /// above sequence, and expanded really late in the compilation flow, to ensure
7432 /// the sequence is produced as per above.
7433 SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
7435 SelectionDAG &DAG) const {
7436 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7438 SDValue Chain = DAG.getEntryNode();
7439 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7442 DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr});
7443 SDValue Glue = Chain.getValue(1);
7445 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
7449 AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
7450 SelectionDAG &DAG) const {
7451 assert(Subtarget->isTargetELF() && "This function expects an ELF target");
7453 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7455 TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
7457 if (!EnableAArch64ELFLocalDynamicTLSGeneration) {
7458 if (Model == TLSModel::LocalDynamic)
7459 Model = TLSModel::GeneralDynamic;
7462 if (getTargetMachine().getCodeModel() == CodeModel::Large &&
7463 Model != TLSModel::LocalExec)
7464 report_fatal_error("ELF TLS only supported in small memory model or "
7465 "in local exec TLS model");
7466 // Different choices can be made for the maximum size of the TLS area for a
7467 // module. For the small address model, the default TLS size is 16MiB and the
7468 // maximum TLS size is 4GiB.
7469 // FIXME: add tiny and large code model support for TLS access models other
7470 // than local exec. We currently generate the same code as small for tiny,
7471 // which may be larger than needed.
7474 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7476 const GlobalValue *GV = GA->getGlobal();
7478 SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
7480 if (Model == TLSModel::LocalExec) {
7481 return LowerELFTLSLocalExec(GV, ThreadBase, DL, DAG);
7482 } else if (Model == TLSModel::InitialExec) {
7483 TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7484 TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff);
7485 } else if (Model == TLSModel::LocalDynamic) {
7486 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
7487 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
7488 // the beginning of the module's TLS region, followed by a DTPREL offset calculation.
7491 // These accesses will need deduplicating if there's more than one.
7492 AArch64FunctionInfo *MFI =
7493 DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
7494 MFI->incNumLocalDynamicTLSAccesses();
7496 // The call needs a relocation too for linker relaxation. It doesn't make
7497 // sense to call it MO_PAGE or MO_PAGEOFF though, so we need another copy of the symbol.
7499 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
7502 // Now we can calculate the offset from TPIDR_EL0 to this module's
7503 // thread-local area.
7504 TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
7506 // Now use :dtprel_whatever: operations to calculate this variable's offset
7507 // in its thread-storage area.
7508 SDValue HiVar = DAG.getTargetGlobalAddress(
7509 GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7510 SDValue LoVar = DAG.getTargetGlobalAddress(
7511 GV, DL, MVT::i64, 0,
7512 AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7514 TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar,
7515 DAG.getTargetConstant(0, DL, MVT::i32)),
7517 TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar,
7518 DAG.getTargetConstant(0, DL, MVT::i32)),
7520 } else if (Model == TLSModel::GeneralDynamic) {
7521 // The call needs a relocation too for linker relaxation. It doesn't make
7522 // sense to call it MO_PAGE or MO_PAGEOFF though, so we need another copy of the symbol.
7525 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7527 // Finally we can make a call to calculate the offset from tpidr_el0.
7528 TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
7530 llvm_unreachable("Unsupported ELF TLS access model");
7532 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7536 AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
7537 SelectionDAG &DAG) const {
7538 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
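// Overall shape of the access (editorial sketch; the details are implemented
// below):
//   x18 (TEB) + 0x58  -> ThreadLocalStoragePointer (the per-thread TLS array)
//   TLS array[_tls_index * 8] -> this module's TLS data block
//   block + the variable's offset within the .tls section -> the variable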
7540 SDValue Chain = DAG.getEntryNode();
7541 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7544 SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64);
7546 // Load the ThreadLocalStoragePointer from the TEB
7547 // A pointer to the TLS array is located at offset 0x58 from the TEB.
7549 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL));
7550 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
7551 Chain = TLSArray.getValue(1);
7553 // Load the TLS index from the C runtime;
7554 // This does the same as getAddr(), but without having a GlobalAddressSDNode.
7555 // This also does the same as LOADgot, but using a generic i32 load,
7556 // while LOADgot only loads i64.
7557 SDValue TLSIndexHi =
7558 DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE);
7559 SDValue TLSIndexLo = DAG.getTargetExternalSymbol(
7560 "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7561 SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi);
7563 DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo);
7564 TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo());
7565 Chain = TLSIndex.getValue(1);
7567 // The pointer to the thread's TLS data area is found by using the TLS index
7568 // as a scaled-by-8 offset into the TLSArray (i.e. TLSArray[_tls_index]).
7569 TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex);
7570 SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
7571 DAG.getConstant(3, DL, PtrVT));
7572 SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
7573 DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
7574 MachinePointerInfo());
7575 Chain = TLS.getValue(1);
7577 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7578 const GlobalValue *GV = GA->getGlobal();
7579 SDValue TGAHi = DAG.getTargetGlobalAddress(
7580 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7581 SDValue TGALo = DAG.getTargetGlobalAddress(
7583 AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7585 // Add the offset from the start of the .tls section (section base).
7587 SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi,
7588 DAG.getTargetConstant(0, DL, MVT::i32)),
7590 Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo);
7594 SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
7595 SelectionDAG &DAG) const {
7596 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7597 if (DAG.getTarget().useEmulatedTLS())
7598 return LowerToTLSEmulatedModel(GA, DAG);
7600 if (Subtarget->isTargetDarwin())
7601 return LowerDarwinGlobalTLSAddress(Op, DAG);
7602 if (Subtarget->isTargetELF())
7603 return LowerELFGlobalTLSAddress(Op, DAG);
7604 if (Subtarget->isTargetWindows())
7605 return LowerWindowsGlobalTLSAddress(Op, DAG);
7607 llvm_unreachable("Unexpected platform trying to use TLS");
7610 // Looks through \param Val to determine the bit that can be used to
7611 // check the sign of the value. It returns the unextended value and
7612 // the sign bit position.
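// (Illustrative: for (sign_extend_inreg i32 %x, i8) this returns {%x, 7},
// which lets LowerBR_CC below test the sign with a single TBNZ on bit 7
// instead of a compare-and-branch.)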
7613 std::pair<SDValue, uint64_t> lookThroughSignExtension(SDValue Val) {
7614 if (Val.getOpcode() == ISD::SIGN_EXTEND_INREG)
7615 return {Val.getOperand(0),
7616 cast<VTSDNode>(Val.getOperand(1))->getVT().getFixedSizeInBits() -
7619 if (Val.getOpcode() == ISD::SIGN_EXTEND)
7620 return {Val.getOperand(0),
7621 Val.getOperand(0)->getValueType(0).getFixedSizeInBits() - 1};
7623 return {Val, Val.getValueSizeInBits() - 1};
7626 SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
7627 SDValue Chain = Op.getOperand(0);
7628 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
7629 SDValue LHS = Op.getOperand(2);
7630 SDValue RHS = Op.getOperand(3);
7631 SDValue Dest = Op.getOperand(4);
7634 MachineFunction &MF = DAG.getMachineFunction();
7635 // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
7636 // will not be produced, as they are conditional branch instructions that do not set flags.
7638 bool ProduceNonFlagSettingCondBr =
7639 !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
7641 // Handle f128 first, since lowering it will result in comparing the return
7642 // value of a libcall against zero, which is just what the rest of LowerBR_CC
7643 // is expecting to deal with.
7644 if (LHS.getValueType() == MVT::f128) {
7645 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
7647 // If softenSetCCOperands returned a scalar, we need to compare the result
7648 // against zero to select between true and false values.
7649 if (!RHS.getNode()) {
7650 RHS = DAG.getConstant(0, dl, LHS.getValueType());
7655 // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch instruction.
7657 if (ISD::isOverflowIntrOpRes(LHS) && isOneConstant(RHS) &&
7658 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
7659 // Only lower legal XALUO ops.
7660 if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
7663 // The actual operation with overflow check.
7664 AArch64CC::CondCode OFCC;
7665 SDValue Value, Overflow;
7666 std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG);
7668 if (CC == ISD::SETNE)
7669 OFCC = getInvertedCondCode(OFCC);
7670 SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32);
7672 return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
7676 if (LHS.getValueType().isInteger()) {
7677 assert((LHS.getValueType() == RHS.getValueType()) &&
7678 (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
7680 // If the RHS of the comparison is zero, we can potentially fold this
7681 // to a specialized branch.
7682 const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
7683 if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
7684 if (CC == ISD::SETEQ) {
7685 // See if we can use a TBZ to fold in an AND as well.
7686 // TBZ has a smaller branch displacement than CBZ. If the offset is
7687 // out of bounds, a late MI-layer pass rewrites branches.
7688 // 403.gcc is an example that hits this case.
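// For example, (brcond (seteq (and x, 8), 0), dest) can be emitted as a
// single (tbz x, #3, dest), testing just bit log2(8) = 3.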
7689 if (LHS.getOpcode() == ISD::AND &&
7690 isa<ConstantSDNode>(LHS.getOperand(1)) &&
7691 isPowerOf2_64(LHS.getConstantOperandVal(1))) {
7692 SDValue Test = LHS.getOperand(0);
7693 uint64_t Mask = LHS.getConstantOperandVal(1);
7694 return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
7695 DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
7699 return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
7700 } else if (CC == ISD::SETNE) {
7701 // See if we can use a TBZ to fold in an AND as well.
7702 // TBZ has a smaller branch displacement than CBZ. If the offset is
7703 // out of bounds, a late MI-layer pass rewrites branches.
7704 // 403.gcc is an example that hits this case.
7705 if (LHS.getOpcode() == ISD::AND &&
7706 isa<ConstantSDNode>(LHS.getOperand(1)) &&
7707 isPowerOf2_64(LHS.getConstantOperandVal(1))) {
7708 SDValue Test = LHS.getOperand(0);
7709 uint64_t Mask = LHS.getConstantOperandVal(1);
7710 return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
7711 DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
7715 return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
7716 } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) {
7717 // Don't combine AND since emitComparison converts the AND to an ANDS
7718 // (a.k.a. TST) and the test in the test bit and branch instruction
7719 // becomes redundant. This would also increase register pressure.
7720 uint64_t SignBitPos;
7721 std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
7722 return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
7723 DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
7726 if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
7727 LHS.getOpcode() != ISD::AND && ProduceNonFlagSettingCondBr) {
7728 // Don't combine AND since emitComparison converts the AND to an ANDS
7729 // (a.k.a. TST) and the test in the test bit and branch instruction
7730 // becomes redundant. This would also increase register pressure.
7731 uint64_t SignBitPos;
7732 std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
7733 return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
7734 DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
7738 SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
7739 return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
7743 assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::bf16 ||
7744 LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
7746 // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
7747 // clean. Some of them require two branches to implement.
7748 SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
7749 AArch64CC::CondCode CC1, CC2;
7750 changeFPCCToAArch64CC(CC, CC1, CC2);
7751 SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
7753 DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
7754 if (CC2 != AArch64CC::AL) {
7755 SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
7756 return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
7763 SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
7764 SelectionDAG &DAG) const {
7765 if (!Subtarget->hasNEON())
7768 EVT VT = Op.getValueType();
7769 EVT IntVT = VT.changeTypeToInteger();
7772 SDValue In1 = Op.getOperand(0);
7773 SDValue In2 = Op.getOperand(1);
7774 EVT SrcVT = In2.getValueType();
7776 if (SrcVT.bitsLT(VT))
7777 In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
7778 else if (SrcVT.bitsGT(VT))
7779 In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0, DL));
7781 if (VT.isScalableVector())
7783 getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());
7785 if (VT != In2.getValueType())
7788 auto BitCast = [this](EVT VT, SDValue Op, SelectionDAG &DAG) {
7789 if (VT.isScalableVector())
7790 return getSVESafeBitCast(VT, Op, DAG);
7792 return DAG.getBitcast(VT, Op);
7795 SDValue VecVal1, VecVal2;
7797 auto SetVecVal = [&](int Idx = -1) {
7798 if (!VT.isVector()) {
7800 DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In1);
7802 DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In2);
7804 VecVal1 = BitCast(VecVT, In1, DAG);
7805 VecVal2 = BitCast(VecVT, In2, DAG);
7808 if (VT.isVector()) {
7811 } else if (VT == MVT::f64) {
7813 SetVecVal(AArch64::dsub);
7814 } else if (VT == MVT::f32) {
7816 SetVecVal(AArch64::ssub);
7817 } else if (VT == MVT::f16) {
7819 SetVecVal(AArch64::hsub);
7821 llvm_unreachable("Invalid type for copysign!");
7824 unsigned BitWidth = In1.getScalarValueSizeInBits();
7825 SDValue SignMaskV = DAG.getConstant(~APInt::getSignMask(BitWidth), DL, VecVT);
7827 // We want to materialize a mask with every bit but the high bit set, but the
7828 // AdvSIMD immediate moves cannot materialize that in a single instruction for
7829 // 64-bit elements. Instead, materialize all bits set and then negate that.
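// The trick below relies on the all-ones pattern being a negative NaN when
// reinterpreted as f64: FNEG flips only the sign bit, so the result is
// 0x7FFFFFFFFFFFFFFF, i.e. every bit except the sign bit set.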
7830 if (VT == MVT::f64 || VT == MVT::v2f64) {
7831 SignMaskV = DAG.getConstant(APInt::getAllOnes(BitWidth), DL, VecVT);
7832 SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, SignMaskV);
7833 SignMaskV = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, SignMaskV);
7834 SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, SignMaskV);
7838 DAG.getNode(AArch64ISD::BSP, DL, VecVT, SignMaskV, VecVal1, VecVal2);
7840 return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, BSP);
7842 return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, BSP);
7844 return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, BSP);
7846 return BitCast(VT, BSP, DAG);
7849 SDValue AArch64TargetLowering::LowerCTPOP_PARITY(SDValue Op,
7850 SelectionDAG &DAG) const {
7851 if (DAG.getMachineFunction().getFunction().hasFnAttribute(
7852 Attribute::NoImplicitFloat))
7855 if (!Subtarget->hasNEON())
7858 bool IsParity = Op.getOpcode() == ISD::PARITY;
7860 // While there is no integer popcount instruction, it can
7861 // be more efficiently lowered to the following sequence that uses
7862 // AdvSIMD registers/instructions as long as the copies to/from
7863 // the AdvSIMD registers are cheap.
7864 // FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd
7865 // CNT V0.8B, V0.8B // 8xbyte pop-counts
7866 // ADDV B0, V0.8B // sum 8xbyte pop-counts
7867 // UMOV X0, V0.B[0] // copy byte result back to integer reg
7868 SDValue Val = Op.getOperand(0);
7870 EVT VT = Op.getValueType();
7872 if (VT == MVT::i32 || VT == MVT::i64) {
7874 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
7875 Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
7877 SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
7878 SDValue UaddLV = DAG.getNode(
7879 ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
7880 DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
7883 UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
7884 DAG.getConstant(1, DL, MVT::i32));
7887 UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
7889 } else if (VT == MVT::i128) {
7890 Val = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Val);
7892 SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v16i8, Val);
7893 SDValue UaddLV = DAG.getNode(
7894 ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
7895 DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
7898 UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
7899 DAG.getConstant(1, DL, MVT::i32));
7901 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, UaddLV);
7904 assert(!IsParity && "ISD::PARITY of vector types not supported");
7906 if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
7907 return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU);
7909 assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
7910 VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
7911 "Unexpected type for custom ctpop lowering");
7913 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
7914 Val = DAG.getBitcast(VT8Bit, Val);
7915 Val = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Val);
7917 // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
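// For example, for VT == v4i32 the result is widened as
// v16i8 --uaddlp--> v8i16 --uaddlp--> v4i32, doubling the element size and
// halving the element count on each step.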
7918 unsigned EltSize = 8;
7919 unsigned NumElts = VT.is64BitVector() ? 8 : 16;
7920 while (EltSize != VT.getScalarSizeInBits()) {
7923 MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
7925 ISD::INTRINSIC_WO_CHAIN, DL, WidenVT,
7926 DAG.getConstant(Intrinsic::aarch64_neon_uaddlp, DL, MVT::i32), Val);
7932 SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
7933 EVT VT = Op.getValueType();
7934 assert(VT.isScalableVector() ||
7935 useSVEForFixedLengthVectorVT(
7936 VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()));
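// Lower CTTZ(x) as CTLZ(BITREVERSE(x)): reversing the bits turns trailing
// zeros into leading zeros, and both operations map onto SVE's RBIT and CLZ.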
7939 SDValue RBIT = DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0));
7940 return DAG.getNode(ISD::CTLZ, DL, VT, RBIT);
7943 SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
7944 SelectionDAG &DAG) const {
7946 EVT VT = Op.getValueType();
7948 unsigned Opcode = Op.getOpcode();
7952 llvm_unreachable("Wrong instruction");
7967 if (VT.isScalableVector() ||
7968 useSVEForFixedLengthVectorVT(
7969 VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
7972 llvm_unreachable("Wrong instruction");
7974 return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
7976 return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
7978 return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
7980 return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
7984 SDValue Op0 = Op.getOperand(0);
7985 SDValue Op1 = Op.getOperand(1);
7986 SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
7987 return DAG.getSelect(DL, VT, Cond, Op0, Op1);
7990 SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
7991 SelectionDAG &DAG) const {
7992 EVT VT = Op.getValueType();
7994 if (VT.isScalableVector() ||
7995 useSVEForFixedLengthVectorVT(
7996 VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
7997 return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU);
8003 switch (VT.getSimpleVT().SimpleTy) {
8005 llvm_unreachable("Invalid type for bitreverse!");
8009 REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
8016 REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
8023 REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
8030 REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
8036 return DAG.getNode(AArch64ISD::NVCAST, DL, VT,
8037 DAG.getNode(ISD::BITREVERSE, DL, VST, REVB));
8040 SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
8042 if (Op.getValueType().isVector())
8043 return LowerVSETCC(Op, DAG);
8045 bool IsStrict = Op->isStrictFPOpcode();
8046 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
8047 unsigned OpNo = IsStrict ? 1 : 0;
8050 Chain = Op.getOperand(0);
8051 SDValue LHS = Op.getOperand(OpNo + 0);
8052 SDValue RHS = Op.getOperand(OpNo + 1);
8053 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(OpNo + 2))->get();
8056 // We chose ZeroOrOneBooleanContents, so use zero and one.
8057 EVT VT = Op.getValueType();
8058 SDValue TVal = DAG.getConstant(1, dl, VT);
8059 SDValue FVal = DAG.getConstant(0, dl, VT);
8061 // Handle f128 first, since one possible outcome is a normal integer
8062 // comparison which gets picked up by the next if statement.
8063 if (LHS.getValueType() == MVT::f128) {
8064 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS, Chain,
8067 // If softenSetCCOperands returned a scalar, use it.
8068 if (!RHS.getNode()) {
8069 assert(LHS.getValueType() == Op.getValueType() &&
8070 "Unexpected setcc expansion!");
8071 return IsStrict ? DAG.getMergeValues({LHS, Chain}, dl) : LHS;
8075 if (LHS.getValueType().isInteger()) {
8077 SDValue Cmp = getAArch64Cmp(
8078 LHS, RHS, ISD::getSetCCInverse(CC, LHS.getValueType()), CCVal, DAG, dl);
8080 // Note that we inverted the condition above, so we reverse the order of
8081 // the true and false operands here. This will allow the setcc to be
8082 // matched to a single CSINC instruction.
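// Concretely, (CSEL 0, 1, InvCC) is expected to match as
// CSINC Wd, WZR, WZR, InvCC, which produces InvCC ? 0 : 0 + 1, i.e. 1 exactly
// when the original condition holds.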
8083 SDValue Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
8084 return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
8087 // Now we know we're dealing with FP values.
8088 assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
8089 LHS.getValueType() == MVT::f64);
8091 // If that fails, we'll need to perform an FCMP + CSEL sequence. Go ahead
8092 // and do the comparison.
8095 Cmp = emitStrictFPComparison(LHS, RHS, dl, DAG, Chain, IsSignaling);
8097 Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
8099 AArch64CC::CondCode CC1, CC2;
8100 changeFPCCToAArch64CC(CC, CC1, CC2);
8102 if (CC2 == AArch64CC::AL) {
8103 changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, LHS.getValueType()), CC1,
8105 SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8107 // Note that we inverted the condition above, so we reverse the order of
8108 // the true and false operands here. This will allow the setcc to be
8109 // matched to a single CSINC instruction.
8110 Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp);
8112 // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
8113 // totally clean. Some of them require two CSELs to implement. As is in
8114 // this case, we emit the first CSEL and then emit a second using the output
8115 // of the first as the RHS. We're effectively OR'ing the two CC's together.
8117 // FIXME: It would be nice if we could match the two CSELs to two CSINCs.
8118 SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8120 DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
8122 SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
8123 Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
8125 return IsStrict ? DAG.getMergeValues({Res, Cmp.getValue(1)}, dl) : Res;
8128 SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
8129 SDValue RHS, SDValue TVal,
8130 SDValue FVal, const SDLoc &dl,
8131 SelectionDAG &DAG) const {
8132 // Handle f128 first, because it will result in a comparison of some RTLIB
8133 // call result against zero.
8134 if (LHS.getValueType() == MVT::f128) {
8135 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
8137 // If softenSetCCOperands returned a scalar, we need to compare the result
8138 // against zero to select between true and false values.
8139 if (!RHS.getNode()) {
8140 RHS = DAG.getConstant(0, dl, LHS.getValueType());
8145 // Also handle f16, for which we need to do a f32 comparison.
8146 if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
8147 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
8148 RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
8151 // Next, handle integers.
8152 if (LHS.getValueType().isInteger()) {
8153 assert((LHS.getValueType() == RHS.getValueType()) &&
8154 (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
8156 ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
8157 ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
8158 ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
8159 // Check for sign pattern (SELECT_CC setgt, iN lhs, -1, 1, -1) and transform
8160 // into (OR (ASR lhs, N-1), 1), which requires fewer instructions than a conditional select.
8162 if (CC == ISD::SETGT && RHSC && RHSC->isAllOnes() && CTVal && CFVal &&
8163 CTVal->isOne() && CFVal->isAllOnes() &&
8164 LHS.getValueType() == TVal.getValueType()) {
8165 EVT VT = LHS.getValueType();
8167 DAG.getNode(ISD::SRA, dl, VT, LHS,
8168 DAG.getConstant(VT.getSizeInBits() - 1, dl, VT));
8169 return DAG.getNode(ISD::OR, dl, VT, Shift, DAG.getConstant(1, dl, VT));
8172 unsigned Opcode = AArch64ISD::CSEL;
8174 // If both the TVal and the FVal are constants, see if we can swap them in
8175 // order to form a CSINV or CSINC out of them.
8176 if (CTVal && CFVal && CTVal->isAllOnes() && CFVal->isZero()) {
8177 std::swap(TVal, FVal);
8178 std::swap(CTVal, CFVal);
8179 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8180 } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isZero()) {
8181 std::swap(TVal, FVal);
8182 std::swap(CTVal, CFVal);
8183 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8184 } else if (TVal.getOpcode() == ISD::XOR) {
8185 // If TVal is a NOT we want to swap TVal and FVal so that we can match
8186 // with a CSINV rather than a CSEL.
8187 if (isAllOnesConstant(TVal.getOperand(1))) {
8188 std::swap(TVal, FVal);
8189 std::swap(CTVal, CFVal);
8190 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8192 } else if (TVal.getOpcode() == ISD::SUB) {
8193 // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
8194 // that we can match with a CSNEG rather than a CSEL.
8195 if (isNullConstant(TVal.getOperand(0))) {
8196 std::swap(TVal, FVal);
8197 std::swap(CTVal, CFVal);
8198 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8200 } else if (CTVal && CFVal) {
8201 const int64_t TrueVal = CTVal->getSExtValue();
8202 const int64_t FalseVal = CFVal->getSExtValue();
8205 // If both TVal and FVal are constants, see if FVal is the
8206 // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
8207 // instead of a CSEL in that case.
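// For reference, the conditional-select family computes (the condition picks
// the first operand):
//   CSEL  Rd, Rn, Rm, cond : Rd = cond ? Rn :  Rm
//   CSINC Rd, Rn, Rm, cond : Rd = cond ? Rn :  Rm + 1
//   CSINV Rd, Rn, Rm, cond : Rd = cond ? Rn : ~Rm
//   CSNEG Rd, Rn, Rm, cond : Rd = cond ? Rn : -Rm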
8208 if (TrueVal == ~FalseVal) {
8209 Opcode = AArch64ISD::CSINV;
8210 } else if (FalseVal > std::numeric_limits<int64_t>::min() &&
8211 TrueVal == -FalseVal) {
8212 Opcode = AArch64ISD::CSNEG;
8213 } else if (TVal.getValueType() == MVT::i32) {
8214 // If our operands are only 32-bit wide, make sure we use 32-bit
8215 // arithmetic for the check whether we can use CSINC. This ensures that
8216 // the addition in the check will wrap around properly in case there is
8217 // an overflow (which would not be the case if we do the check with
8218 // 64-bit arithmetic).
8219 const uint32_t TrueVal32 = CTVal->getZExtValue();
8220 const uint32_t FalseVal32 = CFVal->getZExtValue();
8222 if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
8223 Opcode = AArch64ISD::CSINC;
8225 if (TrueVal32 > FalseVal32) {
8229 // 64-bit check whether we can use CSINC.
8230 } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
8231 Opcode = AArch64ISD::CSINC;
8233 if (TrueVal > FalseVal) {
8238 // Swap TVal and FVal if necessary.
8240 std::swap(TVal, FVal);
8241 std::swap(CTVal, CFVal);
8242 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8245 if (Opcode != AArch64ISD::CSEL) {
8246 // Drop FVal since we can get its value by simply inverting/negating
8252 // Avoid materializing a constant when possible by reusing a known value in
8253 // a register. However, don't perform this optimization if the known value
8254 // is one, zero or negative one in the case of a CSEL. We can always
8255 // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
8256 // FVal, respectively.
8257 ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
8258 if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
8259 !RHSVal->isZero() && !RHSVal->isAllOnes()) {
8260 AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
8261 // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
8262 // "a != C ? x : a" to avoid materializing C.
8263 if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
8265 else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)
8267 } else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) {
8268 assert (CTVal && CFVal && "Expected constant operands for CSNEG.");
8269 // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
8270 // avoid materializing C.
8271 AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
8272 if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
8273 Opcode = AArch64ISD::CSINV;
8275 FVal = DAG.getConstant(0, dl, FVal.getValueType());
8280 SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
8281 EVT VT = TVal.getValueType();
8282 return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
8285 // Now we know we're dealing with FP values.
8286 assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
8287 LHS.getValueType() == MVT::f64);
8288 assert(LHS.getValueType() == RHS.getValueType());
8289 EVT VT = TVal.getValueType();
8290 SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
8292 // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
8293 // clean. Some of them require two CSELs to implement.
8294 AArch64CC::CondCode CC1, CC2;
8295 changeFPCCToAArch64CC(CC, CC1, CC2);
8297 if (DAG.getTarget().Options.UnsafeFPMath) {
8298 // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
8299 // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
8300 ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
8301 if (RHSVal && RHSVal->isZero()) {
8302 ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
8303 ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal);
8305 if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
8306 CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
8308 else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) &&
8309 CFVal && CFVal->isZero() &&
8310 FVal.getValueType() == LHS.getValueType())
8315 // Emit first, and possibly only, CSEL.
8316 SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8317 SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
8319 // If we need a second CSEL, emit it, using the output of the first as the
8320 // RHS. We're effectively OR'ing the two CC's together.
8321 if (CC2 != AArch64CC::AL) {
8322 SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
8323 return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
8326 // Otherwise, return the output of the first CSEL.
8330 SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
8331 SelectionDAG &DAG) const {
8332 EVT Ty = Op.getValueType();
8333 auto Idx = Op.getConstantOperandAPInt(2);
8334 int64_t IdxVal = Idx.getSExtValue();
8335 assert(Ty.isScalableVector() &&
8336 "Only expect scalable vectors for custom lowering of VECTOR_SPLICE");
8338 // We can use the splice instruction for certain index values where we are
8339 // able to efficiently generate the correct predicate. The index will be
8340 // inverted and used directly as the input to the ptrue instruction, i.e.
8341 // -1 -> vl1, -2 -> vl2, etc. The predicate will then be reversed to get the
8342 // splice predicate. However, we can only do this if we can guarantee that
8343 // there are enough elements in the vector, hence we check the index <= min
8344 // number of elements.
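// For example, with IdxVal == -2 on nxv4i32 this roughly emits
//   ptrue p0.s, vl2 ; rev p0.s, p0.s ; splice z0.s, p0, z0.s, z1.s
// selecting the last two elements of the first operand followed by the
// leading elements of the second.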
8345 Optional<unsigned> PredPattern;
8346 if (Ty.isScalableVector() && IdxVal < 0 &&
8347 (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
8351 // Create a predicate where all but the last -IdxVal elements are false.
8352 EVT PredVT = Ty.changeVectorElementType(MVT::i1);
8353 SDValue Pred = getPTrue(DAG, DL, PredVT, *PredPattern);
8354 Pred = DAG.getNode(ISD::VECTOR_REVERSE, DL, PredVT, Pred);
8356 // Now splice the two inputs together using the predicate.
8357 return DAG.getNode(AArch64ISD::SPLICE, DL, Ty, Pred, Op.getOperand(0),
8361 // This will select to an EXT instruction, which has a maximum immediate
8362 // value of 255, hence 2048 bits is the maximum value we can lower.
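// For example, with 32-bit elements the largest positive index accepted here
// is 2048/32 - 1 = 63, keeping the byte offset within the 0..255 immediate
// range of EXT.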
8364 IdxVal < int64_t(2048 / Ty.getVectorElementType().getSizeInBits()))
8370 SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
8371 SelectionDAG &DAG) const {
8372 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8373 SDValue LHS = Op.getOperand(0);
8374 SDValue RHS = Op.getOperand(1);
8375 SDValue TVal = Op.getOperand(2);
8376 SDValue FVal = Op.getOperand(3);
8378 return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
8381 SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
8382 SelectionDAG &DAG) const {
8383 SDValue CCVal = Op->getOperand(0);
8384 SDValue TVal = Op->getOperand(1);
8385 SDValue FVal = Op->getOperand(2);
8388 EVT Ty = Op.getValueType();
8389 if (Ty.isScalableVector()) {
8390 SDValue TruncCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, CCVal);
8391 MVT PredVT = MVT::getVectorVT(MVT::i1, Ty.getVectorElementCount());
8392 SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, TruncCC);
8393 return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
8396 if (useSVEForFixedLengthVectorVT(Ty)) {
8397 // FIXME: Ideally this would be the same as above using i1 types, however
8398 // for the moment we can't deal with fixed i1 vector types properly, so
8399 // instead extend the predicate to a result type sized integer vector.
8400 MVT SplatValVT = MVT::getIntegerVT(Ty.getScalarSizeInBits());
8401 MVT PredVT = MVT::getVectorVT(SplatValVT, Ty.getVectorElementCount());
8402 SDValue SplatVal = DAG.getSExtOrTrunc(CCVal, DL, SplatValVT);
8403 SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, SplatVal);
8404 return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
8407 // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
8409 if (ISD::isOverflowIntrOpRes(CCVal)) {
8410 // Only lower legal XALUO ops.
8411 if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0)))
8414 AArch64CC::CondCode OFCC;
8415 SDValue Value, Overflow;
8416 std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
8417 SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);
8419 return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
8423 // Lower it the same way as we would lower a SELECT_CC node.
8426 if (CCVal.getOpcode() == ISD::SETCC) {
8427 LHS = CCVal.getOperand(0);
8428 RHS = CCVal.getOperand(1);
8429 CC = cast<CondCodeSDNode>(CCVal.getOperand(2))->get();
8432 RHS = DAG.getConstant(0, DL, CCVal.getValueType());
8435 return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
8438 SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
8439 SelectionDAG &DAG) const {
8440 // Jump table entries are PC-relative offsets. No additional tweaking
8441 // is necessary here. Just get the address of the jump table.
8442 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
8444 if (getTargetMachine().getCodeModel() == CodeModel::Large &&
8445 !Subtarget->isTargetMachO()) {
8446 return getAddrLarge(JT, DAG);
8447 } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8448 return getAddrTiny(JT, DAG);
8450 return getAddr(JT, DAG);
8453 SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
8454 SelectionDAG &DAG) const {
8455 // Jump table entries are PC-relative offsets. No additional tweaking
8456 // is necessary here. Just get the address of the jump table.
8458 SDValue JT = Op.getOperand(1);
8459 SDValue Entry = Op.getOperand(2);
8460 int JTI = cast<JumpTableSDNode>(JT.getNode())->getIndex();
8462 auto *AFI = DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8463 AFI->setJumpTableEntryInfo(JTI, 4, nullptr);
8466 DAG.getMachineNode(AArch64::JumpTableDest32, DL, MVT::i64, MVT::i64, JT,
8467 Entry, DAG.getTargetJumpTable(JTI, MVT::i32));
8468 return DAG.getNode(ISD::BRIND, DL, MVT::Other, Op.getOperand(0),
8472 SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
8473 SelectionDAG &DAG) const {
8474 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
8476 if (getTargetMachine().getCodeModel() == CodeModel::Large) {
8477 // Use the GOT for the large code model on iOS.
8478 if (Subtarget->isTargetMachO()) {
8479 return getGOT(CP, DAG);
8481 return getAddrLarge(CP, DAG);
8482 } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8483 return getAddrTiny(CP, DAG);
8485 return getAddr(CP, DAG);
8489 SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
8490 SelectionDAG &DAG) const {
8491 BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op);
8492 if (getTargetMachine().getCodeModel() == CodeModel::Large &&
8493 !Subtarget->isTargetMachO()) {
8494 return getAddrLarge(BA, DAG);
8495 } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8496 return getAddrTiny(BA, DAG);
8498 return getAddr(BA, DAG);
8501 SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
8502 SelectionDAG &DAG) const {
8503 AArch64FunctionInfo *FuncInfo =
8504 DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8507 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(),
8508 getPointerTy(DAG.getDataLayout()));
8509 FR = DAG.getZExtOrTrunc(FR, DL, getPointerMemTy(DAG.getDataLayout()));
8510 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8511 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8512 MachinePointerInfo(SV));
8515 SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
8516 SelectionDAG &DAG) const {
8517 AArch64FunctionInfo *FuncInfo =
8518 DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8521 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0
8522 ? FuncInfo->getVarArgsGPRIndex()
8523 : FuncInfo->getVarArgsStackIndex(),
8524 getPointerTy(DAG.getDataLayout()));
8525 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8526 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8527 MachinePointerInfo(SV));
8530 SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
8531 SelectionDAG &DAG) const {
8532 // The layout of the va_list struct is specified in the AArch64 Procedure Call
8533 // Standard, section B.3.
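// For reference, the structure being initialized below is (offsets for LP64,
// ILP32 in parentheses):
//   struct va_list {
//     void *__stack;   // next stack argument                     - offset 0
//     void *__gr_top;  // end of the GP register save area        - offset 8  (4)
//     void *__vr_top;  // end of the FP/SIMD register save area   - offset 16 (8)
//     int   __gr_offs; // offset from __gr_top to next GP arg     - offset 24 (12)
//     int   __vr_offs; // offset from __vr_top to next FP/SIMD arg - offset 28 (16)
//   };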
8534 MachineFunction &MF = DAG.getMachineFunction();
8535 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
8536 unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
8537 auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
8538 auto PtrVT = getPointerTy(DAG.getDataLayout());
8541 SDValue Chain = Op.getOperand(0);
8542 SDValue VAList = Op.getOperand(1);
8543 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8544 SmallVector<SDValue, 4> MemOps;
8546 // void *__stack at offset 0
8547 unsigned Offset = 0;
8548 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT);
8549 Stack = DAG.getZExtOrTrunc(Stack, DL, PtrMemVT);
8550 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
8551 MachinePointerInfo(SV), Align(PtrSize)));
8553 // void *__gr_top at offset 8 (4 on ILP32)
8555 int GPRSize = FuncInfo->getVarArgsGPRSize();
8557 SDValue GRTop, GRTopAddr;
8559 GRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8560 DAG.getConstant(Offset, DL, PtrVT));
8562 GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), PtrVT);
8563 GRTop = DAG.getNode(ISD::ADD, DL, PtrVT, GRTop,
8564 DAG.getConstant(GPRSize, DL, PtrVT));
8565 GRTop = DAG.getZExtOrTrunc(GRTop, DL, PtrMemVT);
8567 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
8568 MachinePointerInfo(SV, Offset),
8572 // void *__vr_top at offset 16 (8 on ILP32)
8574 int FPRSize = FuncInfo->getVarArgsFPRSize();
8576 SDValue VRTop, VRTopAddr;
8577 VRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8578 DAG.getConstant(Offset, DL, PtrVT));
8580 VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), PtrVT);
8581 VRTop = DAG.getNode(ISD::ADD, DL, PtrVT, VRTop,
8582 DAG.getConstant(FPRSize, DL, PtrVT));
8583 VRTop = DAG.getZExtOrTrunc(VRTop, DL, PtrMemVT);
8585 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
8586 MachinePointerInfo(SV, Offset),
8590 // int __gr_offs at offset 24 (12 on ILP32)
8592 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8593 DAG.getConstant(Offset, DL, PtrVT));
8595 DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32),
8596 GROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
8598 // int __vr_offs at offset 28 (16 on ILP32)
8600 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8601 DAG.getConstant(Offset, DL, PtrVT));
8603 DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32),
8604 VROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
8606 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
8609 SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
8610 SelectionDAG &DAG) const {
8611 MachineFunction &MF = DAG.getMachineFunction();
8613 if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
8614 return LowerWin64_VASTART(Op, DAG);
8615 else if (Subtarget->isTargetDarwin())
8616 return LowerDarwin_VASTART(Op, DAG);
8618 return LowerAAPCS_VASTART(Op, DAG);
8621 SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
8622 SelectionDAG &DAG) const {
8623 // AAPCS has three pointers and two ints (= 32 bytes), Darwin has a single pointer.
8626 unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
8627 unsigned VaListSize =
8628 (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
8630 : Subtarget->isTargetILP32() ? 20 : 32;
8631 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
8632 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
8634 return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1), Op.getOperand(2),
8635 DAG.getConstant(VaListSize, DL, MVT::i32),
8636 Align(PtrSize), false, false, false,
8637 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
8640 SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
8641 assert(Subtarget->isTargetDarwin() &&
8642 "automatic va_arg instruction only works on Darwin");
8644 const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8645 EVT VT = Op.getValueType();
8647 SDValue Chain = Op.getOperand(0);
8648 SDValue Addr = Op.getOperand(1);
8649 MaybeAlign Align(Op.getConstantOperandVal(3));
8650 unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8;
8651 auto PtrVT = getPointerTy(DAG.getDataLayout());
8652 auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
8654 DAG.getLoad(PtrMemVT, DL, Chain, Addr, MachinePointerInfo(V));
8655 Chain = VAList.getValue(1);
8656 VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
8658 if (VT.isScalableVector())
8659 report_fatal_error("Passing SVE types to variadic functions is "
8660 "currently not supported");
8662 if (Align && *Align > MinSlotSize) {
8663 VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8664 DAG.getConstant(Align->value() - 1, DL, PtrVT));
8665 VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
8666 DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT));
8669 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8670 unsigned ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
8672 // Scalar integer and FP values smaller than 64 bits are implicitly extended
8673 // up to 64 bits. At the very least, we have to increase the striding of the
8674 // vaargs list to match this, and for FP values we need to introduce
8675 // FP_ROUND nodes as well.
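// For example, an f32 va_arg occupies a full 8-byte slot here; it is loaded
// below as an f64 and narrowed back to f32 with FP_ROUND (NeedFPTrunc).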
8676 if (VT.isInteger() && !VT.isVector())
8677 ArgSize = std::max(ArgSize, MinSlotSize);
8678 bool NeedFPTrunc = false;
8679 if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) {
8684 // Increment the pointer, VAList, to the next vaarg
8685 SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8686 DAG.getConstant(ArgSize, DL, PtrVT));
8687 VANext = DAG.getZExtOrTrunc(VANext, DL, PtrMemVT);
8689 // Store the incremented VAList to the legalized pointer
8691 DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V));
8693 // Load the actual argument out of the pointer VAList
8695 // Load the value as an f64.
8697 DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo());
8698 // Round the value down to an f32.
8699 SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
8700 DAG.getIntPtrConstant(1, DL));
8701 SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
8702 // Merge the rounded value with the chain output of the load.
8703 return DAG.getMergeValues(Ops, DL);
8706 return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo());
8709 SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
8710 SelectionDAG &DAG) const {
8711 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8712 MFI.setFrameAddressIsTaken(true);
8714 EVT VT = Op.getValueType();
8716 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8718 DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64);
8720 FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
8721 MachinePointerInfo());
8723 if (Subtarget->isTargetILP32())
8724 FrameAddr = DAG.getNode(ISD::AssertZext, DL, MVT::i64, FrameAddr,
8725 DAG.getValueType(VT));
8730 SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
8731 SelectionDAG &DAG) const {
8732 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8734 EVT VT = getPointerTy(DAG.getDataLayout());
8736 int FI = MFI.CreateFixedObject(4, 0, false);
8737 return DAG.getFrameIndex(FI, VT);
8740 #define GET_REGISTER_MATCHER
8741 #include "AArch64GenAsmMatcher.inc"
8743 // FIXME? Maybe this could be a TableGen attribute on some registers and
8744 // this table could be generated automatically from RegInfo.
8745 Register AArch64TargetLowering::
8746 getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const {
8747 Register Reg = MatchRegisterName(RegName);
8748 if (AArch64::X1 <= Reg && Reg <= AArch64::X28) {
8749 const MCRegisterInfo *MRI = Subtarget->getRegisterInfo();
8750 unsigned DwarfRegNum = MRI->getDwarfRegNum(Reg, false);
8751 if (!Subtarget->isXRegisterReserved(DwarfRegNum))
8756 report_fatal_error(Twine("Invalid register name \""
8757 + StringRef(RegName) + "\"."));
8760 SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
8761 SelectionDAG &DAG) const {
8762 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
8764 EVT VT = Op.getValueType();
8768 DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
8769 SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
8771 return DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset);
8774 SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
8775 SelectionDAG &DAG) const {
8776 MachineFunction &MF = DAG.getMachineFunction();
8777 MachineFrameInfo &MFI = MF.getFrameInfo();
8778 MFI.setReturnAddressIsTaken(true);
8780 EVT VT = Op.getValueType();
8782 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8783 SDValue ReturnAddress;
8785 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
8786 SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
8787 ReturnAddress = DAG.getLoad(
8788 VT, DL, DAG.getEntryNode(),
8789 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), MachinePointerInfo());
8791 // Return LR, which contains the return address. Mark it an implicit live-in.
8793 Register Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
8794 ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
8797 // The XPACLRI instruction assembles to a hint-space instruction before
8798 // Armv8.3-A, so it can safely be used on any pre-Armv8.3-A architecture.
8799 // On Armv8.3-A and onwards XPACI is available, so use that instead.
8802 if (Subtarget->hasPAuth()) {
8803 St = DAG.getMachineNode(AArch64::XPACI, DL, VT, ReturnAddress);
8805 // XPACLRI operates on LR therefore we must move the operand accordingly.
8807 DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::LR, ReturnAddress);
8808 St = DAG.getMachineNode(AArch64::XPACLRI, DL, VT, Chain);
8810 return SDValue(St, 0);
8813 /// LowerShiftParts - Lower SHL_PARTS/SRA_PARTS/SRL_PARTS, which returns two
8814 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
8815 SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
8816 SelectionDAG &DAG) const {
8818 expandShiftParts(Op.getNode(), Lo, Hi, DAG);
8819 return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
8822 bool AArch64TargetLowering::isOffsetFoldingLegal(
8823 const GlobalAddressSDNode *GA) const {
8824 // Offsets are folded in the DAG combine rather than here so that we can
8825 // intelligently choose an offset based on the uses.
8829 bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
8830 bool OptForSize) const {
8831 bool IsLegal = false;
8832 // We can materialize #0.0 as fmov $Rd, XZR for 64-bit, 32-bit cases, and
8833 // the 16-bit case when the target has full fp16 support.
8834 // FIXME: We should be able to handle f128 as well with a clever lowering.
8835 const APInt ImmInt = Imm.bitcastToAPInt();
8837 IsLegal = AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero();
8838 else if (VT == MVT::f32)
8839 IsLegal = AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero();
8840 else if (VT == MVT::f16 && Subtarget->hasFullFP16())
8841 IsLegal = AArch64_AM::getFP16Imm(ImmInt) != -1 || Imm.isPosZero();
8842 // TODO: fmov h0, w0 is also legal, however we don't have an isel pattern to
8843 // generate that fmov.
8845 // If we cannot materialize the constant in the fmov immediate field, check if the
8846 // value can be encoded as the immediate operand of a logical instruction.
8847 // The immediate value will be created with either MOVZ, MOVN, or ORR.
8848 if (!IsLegal && (VT == MVT::f64 || VT == MVT::f32)) {
8849 // The cost is actually exactly the same for mov+fmov vs. adrp+ldr;
8850 // however the mov+fmov sequence is always better because of the reduced
8851 // cache pressure. The timings are still the same if you consider
8852 // movw+movk+fmov vs. adrp+ldr (it's one instruction longer, but the
8853 // movw+movk is fused). So we limit to at most 2 instructions.
8854 SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
8855 AArch64_IMM::expandMOVImm(ImmInt.getZExtValue(), VT.getSizeInBits(),
8857 unsigned Limit = (OptForSize ? 1 : (Subtarget->hasFuseLiterals() ? 5 : 2));
8858 IsLegal = Insn.size() <= Limit;
8861 LLVM_DEBUG(dbgs() << (IsLegal ? "Legal " : "Illegal ") << VT.getEVTString()
8862 << " imm value: "; Imm.dump(););
8866 //===----------------------------------------------------------------------===//
8867 // AArch64 Optimization Hooks
8868 //===----------------------------------------------------------------------===//
8870 static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
8871 SDValue Operand, SelectionDAG &DAG,
8873 EVT VT = Operand.getValueType();
8874 if ((ST->hasNEON() &&
8875 (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 ||
8876 VT == MVT::f32 || VT == MVT::v1f32 || VT == MVT::v2f32 ||
8877 VT == MVT::v4f32)) ||
8879 (VT == MVT::nxv8f16 || VT == MVT::nxv4f32 || VT == MVT::nxv2f64))) {
8880 if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)
8881 // For the reciprocal estimates, convergence is quadratic, so the number
8882 // of digits is doubled after each iteration. In ARMv8, the accuracy of
8883 // the initial estimate is 2^-8. Thus the number of extra steps to refine
8884 // the result for float (23 mantissa bits) is 2 and for double (52
8885 // mantissa bits) is 3.
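// Worked out: float needs ~24 accurate bits, so 8 -> 16 -> 32 takes 2 steps;
// double needs ~53 bits, so 8 -> 16 -> 32 -> 64 takes 3 steps.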
8886 ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2;
8888 return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);
8895 AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
8896 const DenormalMode &Mode) const {
8898 EVT VT = Op.getValueType();
8899 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
8900 SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
8901 return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
8905 AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
8906 SelectionDAG &DAG) const {
8910 SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
8911 SelectionDAG &DAG, int Enabled,
8914 bool Reciprocal) const {
8915 if (Enabled == ReciprocalEstimate::Enabled ||
8916 (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt()))
8917 if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand,
8920 EVT VT = Operand.getValueType();
8923 Flags.setAllowReassociation(true);
8925 // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2)
8926 // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N)
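// In the loop below, Step = E * E, FRSQRTS(X, Step) computes (3 - X*E^2) / 2,
// and the new estimate is E * FRSQRTS(X, Step), i.e. the formula above.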
8927 for (int i = ExtraSteps; i > 0; --i) {
8928 SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate,
8930 Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags);
8931 Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
8934 Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags);
8943 SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
8944 SelectionDAG &DAG, int Enabled,
8945 int &ExtraSteps) const {
8946 if (Enabled == ReciprocalEstimate::Enabled)
8947 if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand,
8950 EVT VT = Operand.getValueType();
8953 Flags.setAllowReassociation(true);
8955 // Newton reciprocal iteration: E * (2 - X * E)
8956 // AArch64 reciprocal iteration instruction: (2 - M * N)
8957 for (int i = ExtraSteps; i > 0; --i) {
8958 SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand,
8960 Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
8970 //===----------------------------------------------------------------------===//
8971 // AArch64 Inline Assembly Support
8972 //===----------------------------------------------------------------------===//
8974 // Table of Constraints
8975 // TODO: This is the current set of constraints supported by ARM for the
8976 // compiler; not all of them may make sense.
8978 // r - A general register
8979 // w - An FP/SIMD register of some size in the range v0-v31
8980 // x - An FP/SIMD register of some size in the range v0-v15
8981 // I - Constant that can be used with an ADD instruction
8982 // J - Constant that can be used with a SUB instruction
8983 // K - Constant that can be used with a 32-bit logical instruction
8984 // L - Constant that can be used with a 64-bit logical instruction
8985 // M - Constant that can be used as a 32-bit MOV immediate
8986 // N - Constant that can be used as a 64-bit MOV immediate
8987 // Q - A memory reference with base register and no offset
8988 // S - A symbolic address
8989 // Y - Floating point constant zero
8990 // Z - Integer constant zero
8992 // Note that general register operands will be output using their 64-bit x
8993 // register name, whatever the size of the variable, unless the asm operand
8994 // is prefixed by the %w modifier. Floating-point and SIMD register operands
8995 // will be output with the v prefix unless prefixed by the %b, %h, %s, %d or %q modifiers.
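// A hypothetical use from C (illustrative only; 'res' and 'a' are placeholder
// variables):
//   asm("add %w0, %w1, %2" : "=r"(res) : "r"(a), "I"(4095));
// Here 'r' selects general registers (printed as w-registers via %w) and 'I'
// accepts an ADD-range immediate (0..4095, optionally shifted by 12).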
8997 const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
8998 // At this point, we have to lower this constraint to something else, so we
8999 // lower it to an "r" or "w". However, by doing this we will force the result
9000 // to be in register, while the X constraint is much more permissive.
9002 // Although we are correct (we are free to emit anything, without
9003 // constraints), we might break use cases that would expect us to be more
9004 // efficient and emit something else.
9005 if (!Subtarget->hasFPARMv8())
9008 if (ConstraintVT.isFloatingPoint())
9011 if (ConstraintVT.isVector() &&
9012 (ConstraintVT.getSizeInBits() == 64 ||
9013 ConstraintVT.getSizeInBits() == 128))
9019 enum PredicateConstraint {
9025 static PredicateConstraint parsePredicateConstraint(StringRef Constraint) {
9026 PredicateConstraint P = PredicateConstraint::Invalid;
9027 if (Constraint == "Upa")
9028 P = PredicateConstraint::Upa;
9029 if (Constraint == "Upl")
9030 P = PredicateConstraint::Upl;
9034 /// getConstraintType - Given a constraint letter, return the type of
9035 /// constraint it is for this target.
9036 AArch64TargetLowering::ConstraintType
9037 AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
9038 if (Constraint.size() == 1) {
9039 switch (Constraint[0]) {
9045 return C_RegisterClass;
9046 // An address with a single base register. Due to the way we
9047 // currently handle addresses it is the same as 'r'.
9060 case 'S': // A symbolic address
9063 } else if (parsePredicateConstraint(Constraint) !=
9064 PredicateConstraint::Invalid)
9065 return C_RegisterClass;
9066 return TargetLowering::getConstraintType(Constraint);
9069 /// Examine constraint type and operand type and determine a weight value.
9070 /// This object must already have been set up with the operand type
9071 /// and the current alternative constraint selected.
9072 TargetLowering::ConstraintWeight
9073 AArch64TargetLowering::getSingleConstraintMatchWeight(
9074 AsmOperandInfo &info, const char *constraint) const {
9075 ConstraintWeight weight = CW_Invalid;
9076 Value *CallOperandVal = info.CallOperandVal;
9077 // If we don't have a value, we can't do a match,
9078 // but allow it at the lowest weight.
9079 if (!CallOperandVal)
9081 Type *type = CallOperandVal->getType();
9082 // Look at the constraint type.
9083 switch (*constraint) {
9085 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
9090 if (type->isFloatingPointTy() || type->isVectorTy())
9091 weight = CW_Register;
9094 weight = CW_Constant;
9097 if (parsePredicateConstraint(constraint) != PredicateConstraint::Invalid)
9098 weight = CW_Register;
9104 std::pair<unsigned, const TargetRegisterClass *>
9105 AArch64TargetLowering::getRegForInlineAsmConstraint(
9106 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
9107 if (Constraint.size() == 1) {
9108 switch (Constraint[0]) {
9110 if (VT.isScalableVector())
9111 return std::make_pair(0U, nullptr);
9112 if (Subtarget->hasLS64() && VT.getSizeInBits() == 512)
9113 return std::make_pair(0U, &AArch64::GPR64x8ClassRegClass);
9114 if (VT.getFixedSizeInBits() == 64)
9115 return std::make_pair(0U, &AArch64::GPR64commonRegClass);
9116 return std::make_pair(0U, &AArch64::GPR32commonRegClass);
9118 if (!Subtarget->hasFPARMv8())
9120 if (VT.isScalableVector()) {
9121 if (VT.getVectorElementType() != MVT::i1)
9122 return std::make_pair(0U, &AArch64::ZPRRegClass);
9123 return std::make_pair(0U, nullptr);
9125 uint64_t VTSize = VT.getFixedSizeInBits();
9127 return std::make_pair(0U, &AArch64::FPR16RegClass);
9129 return std::make_pair(0U, &AArch64::FPR32RegClass);
9131 return std::make_pair(0U, &AArch64::FPR64RegClass);
9133 return std::make_pair(0U, &AArch64::FPR128RegClass);
9136 // The instructions that this constraint is designed for can
9137 // only take 128-bit registers so just use that regclass.
9139 if (!Subtarget->hasFPARMv8())
9141 if (VT.isScalableVector())
9142 return std::make_pair(0U, &AArch64::ZPR_4bRegClass);
9143 if (VT.getSizeInBits() == 128)
9144 return std::make_pair(0U, &AArch64::FPR128_loRegClass);
9147 if (!Subtarget->hasFPARMv8())
9149 if (VT.isScalableVector())
9150 return std::make_pair(0U, &AArch64::ZPR_3bRegClass);
9154 PredicateConstraint PC = parsePredicateConstraint(Constraint);
9155 if (PC != PredicateConstraint::Invalid) {
9156 if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
9157 return std::make_pair(0U, nullptr);
9158 bool restricted = (PC == PredicateConstraint::Upl);
9159 return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass)
9160 : std::make_pair(0U, &AArch64::PPRRegClass);
9163 if (StringRef("{cc}").equals_insensitive(Constraint))
9164 return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
9166 // Use the default implementation in TargetLowering to convert the register
9167 // constraint into a member of a register class.
9168 std::pair<unsigned, const TargetRegisterClass *> Res;
9169 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9171 // Not found as a standard register?
9173 unsigned Size = Constraint.size();
9174 if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
9175 tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
9177 bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo);
9178 if (!Failed && RegNo >= 0 && RegNo <= 31) {
9179 // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size.
9180 // By default we'll emit v0-v31 for this unless there's a modifier where
9181 // we'll emit the correct register as well.
9182 if (VT != MVT::Other && VT.getSizeInBits() == 64) {
9183 Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
9184 Res.second = &AArch64::FPR64RegClass;
9186 Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
9187 Res.second = &AArch64::FPR128RegClass;
9193 if (Res.second && !Subtarget->hasFPARMv8() &&
9194 !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) &&
9195 !AArch64::GPR64allRegClass.hasSubClassEq(Res.second))
9196 return std::make_pair(0U, nullptr);
9201 EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
9203 bool AllowUnknown) const {
9204 if (Subtarget->hasLS64() && Ty->isIntegerTy(512))
9205 return EVT(MVT::i64x8);
9207 return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
9210 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
9211 /// vector. If it is invalid, don't add anything to Ops.
9212 void AArch64TargetLowering::LowerAsmOperandForConstraint(
9213 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
9214 SelectionDAG &DAG) const {
9217 // Currently only support length 1 constraints.
9218 if (Constraint.length() != 1)
9221 char ConstraintLetter = Constraint[0];
9222 switch (ConstraintLetter) {
9226 // This set of constraints deals with valid constants for various instructions.
9227 // Validate and return a target constant for them if we can.
9229 // 'z' maps to xzr or wzr so it needs an input of 0.
9230 if (!isNullConstant(Op))
9233 if (Op.getValueType() == MVT::i64)
9234 Result = DAG.getRegister(AArch64::XZR, MVT::i64);
9236 Result = DAG.getRegister(AArch64::WZR, MVT::i32);
9240 // An absolute symbolic address or label reference.
9241 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9242 Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
9243 GA->getValueType(0));
9244 } else if (const BlockAddressSDNode *BA =
9245 dyn_cast<BlockAddressSDNode>(Op)) {
9247 DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0));
9259 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
9263 // Grab the value and do some validation.
9264 uint64_t CVal = C->getZExtValue();
9265 switch (ConstraintLetter) {
9266 // The I constraint applies only to simple ADD or SUB immediate operands:
9267 // i.e. 0 to 4095 with optional shift by 12
9268 // The J constraint applies only to ADD or SUB immediates that would be
9269 // valid when negated, i.e. if [an add pattern] were to be output as a SUB
9270 // instruction [or vice versa], in other words -1 to -4095 with optional
9271 // left shift by 12.
9273 if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
9277 uint64_t NVal = -C->getSExtValue();
9278 if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
9279 CVal = C->getSExtValue();
9284 // The K and L constraints apply *only* to logical immediates, including
9285 // what used to be the MOVI alias for ORR (though the MOVI alias has now
9286 // been removed and MOV should be used). So these constraints have to
9287 // distinguish between bit patterns that are valid 32-bit or 64-bit
9288 // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but
9289 // not a valid bimm64 (L), where 0xaaaaaaaaaaaaaaaa would be valid, and vice versa.
9292 if (AArch64_AM::isLogicalImmediate(CVal, 32))
9296 if (AArch64_AM::isLogicalImmediate(CVal, 64))
9299 // The M and N constraints are a superset of K and L respectively, for use
9300 // with the MOV (immediate) alias. As well as the logical immediates they
9301 // also match 32 or 64-bit immediates that can be loaded either using a
9302 // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca
9303 // (M) or 64-bit 0x1234000000000000 (N) etc.
9304 // As a note some of this code is liberally stolen from the asm parser.
9306 if (!isUInt<32>(CVal))
9308 if (AArch64_AM::isLogicalImmediate(CVal, 32))
9310 if ((CVal & 0xFFFF) == CVal)
9312 if ((CVal & 0xFFFF0000ULL) == CVal)
9314 uint64_t NCVal = ~(uint32_t)CVal;
9315 if ((NCVal & 0xFFFFULL) == NCVal)
9317 if ((NCVal & 0xFFFF0000ULL) == NCVal)
9322 if (AArch64_AM::isLogicalImmediate(CVal, 64))
9324 if ((CVal & 0xFFFFULL) == CVal)
9326 if ((CVal & 0xFFFF0000ULL) == CVal)
9328 if ((CVal & 0xFFFF00000000ULL) == CVal)
9330 if ((CVal & 0xFFFF000000000000ULL) == CVal)
9332 uint64_t NCVal = ~CVal;
9333 if ((NCVal & 0xFFFFULL) == NCVal)
9335 if ((NCVal & 0xFFFF0000ULL) == NCVal)
9337 if ((NCVal & 0xFFFF00000000ULL) == NCVal)
9339 if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
9347 // All assembler immediates are 64-bit integers.
9348 Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64);
9352 if (Result.getNode()) {
9353 Ops.push_back(Result);
9357 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
9360 //===----------------------------------------------------------------------===//
9361 // AArch64 Advanced SIMD Support
9362 //===----------------------------------------------------------------------===//
9364 /// WidenVector - Given a value in the V64 register class, produce the
9365 /// equivalent value in the V128 register class.
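/// For example, a v2i32 value becomes the low half of a v4i32 value, with the
/// upper half left undefined (via INSERT_SUBVECTOR into UNDEF at index 0).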
9366 static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {
9367 EVT VT = V64Reg.getValueType();
9368 unsigned NarrowSize = VT.getVectorNumElements();
9369 MVT EltTy = VT.getVectorElementType().getSimpleVT();
9370 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
9373 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy),
9374 V64Reg, DAG.getConstant(0, DL, MVT::i64));
9377 /// getExtFactor - Determine the adjustment factor for the position when
9378 /// generating an "extract from vector registers" instruction.
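/// The EXT immediate is a byte offset, so for a vector of i16 elements this
/// returns 2 and, for example, a lane offset of 3 becomes an immediate of 6.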
9379 static unsigned getExtFactor(SDValue &V) {
9380 EVT EltType = V.getValueType().getVectorElementType();
9381 return EltType.getSizeInBits() / 8;
9384 /// NarrowVector - Given a value in the V128 register class, produce the
9385 /// equivalent value in the V64 register class.
9386 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
9387 EVT VT = V128Reg.getValueType();
9388 unsigned WideSize = VT.getVectorNumElements();
9389 MVT EltTy = VT.getVectorElementType().getSimpleVT();
9390 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
9393 return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg);
9396 // Gather data to see if the operation can be modelled as a
9397 // shuffle in combination with VEXTs.
9398 SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
9399 SelectionDAG &DAG) const {
9400 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
9401 LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
9403 EVT VT = Op.getValueType();
9404 assert(!VT.isScalableVector() &&
9405 "Scalable vectors cannot be used with ISD::BUILD_VECTOR");
9406 unsigned NumElts = VT.getVectorNumElements();
9408 struct ShuffleSourceInfo {
9413 // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
9414 // be compatible with the shuffle we intend to construct. As a result
9415 // ShuffleVec will be some sliding window into the original Vec.
9418 // Code should guarantee that element i in Vec starts at element "WindowBase
9419 // + i * WindowScale in ShuffleVec".
9423 ShuffleSourceInfo(SDValue Vec)
9424 : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0),
9425 ShuffleVec(Vec), WindowBase(0), WindowScale(1) {}
9427 bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
9430 // First gather all vectors used as an immediate source for this BUILD_VECTOR
9432 SmallVector<ShuffleSourceInfo, 2> Sources;
9433 for (unsigned i = 0; i < NumElts; ++i) {
9434 SDValue V = Op.getOperand(i);
9437 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9438 !isa<ConstantSDNode>(V.getOperand(1)) ||
9439 V.getOperand(0).getValueType().isScalableVector()) {
9441 dbgs() << "Reshuffle failed: "
9442 "a shuffle can only come from building a vector from "
9443 "various elements of other fixed-width vectors, provided "
9444 "their indices are constant\n");
9448 // Add this element source to the list if it's not already there.
9449 SDValue SourceVec = V.getOperand(0);
9450 auto Source = find(Sources, SourceVec);
9451 if (Source == Sources.end())
9452 Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
9454 // Update the minimum and maximum lane number seen.
9455 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
9456 Source->MinElt = std::min(Source->MinElt, EltNo);
9457 Source->MaxElt = std::max(Source->MaxElt, EltNo);
9460 // If we have 3 or 4 sources, try to generate a TBL, which will at least be
9461 // better than moving to/from gpr registers for larger vectors.
9462 if ((Sources.size() == 3 || Sources.size() == 4) && NumElts > 4) {
9463 // Construct a mask for the tbl. We may need to adjust the index for types larger than i8.
9465 SmallVector<unsigned, 16> Mask;
9466 unsigned OutputFactor = VT.getScalarSizeInBits() / 8;
9467 for (unsigned I = 0; I < NumElts; ++I) {
9468 SDValue V = Op.getOperand(I);
9470 for (unsigned OF = 0; OF < OutputFactor; OF++)
9474 // Set the Mask lanes adjusted for the size of the input and output
9475 // lanes. The Mask is always i8, so it will set OutputFactor lanes per
9476 // output element, adjusted in their positions per input and output types.
9477 unsigned Lane = V.getConstantOperandVal(1);
9478 for (unsigned S = 0; S < Sources.size(); S++) {
9479 if (V.getOperand(0) == Sources[S].Vec) {
9480 unsigned InputSize = Sources[S].Vec.getScalarValueSizeInBits();
9481 unsigned InputBase = 16 * S + Lane * InputSize / 8;
9482 for (unsigned OF = 0; OF < OutputFactor; OF++)
9483 Mask.push_back(InputBase + OF);
9489 // Construct the tbl3/tbl4 out of an intrinsic, the sources converted to
9490 // v16i8, and the TBLMask
9491 SmallVector<SDValue, 16> TBLOperands;
9492 TBLOperands.push_back(DAG.getConstant(Sources.size() == 3
9493 ? Intrinsic::aarch64_neon_tbl3
9494 : Intrinsic::aarch64_neon_tbl4,
9496 for (unsigned i = 0; i < Sources.size(); i++) {
9497 SDValue Src = Sources[i].Vec;
9498 EVT SrcVT = Src.getValueType();
9499 Src = DAG.getBitcast(SrcVT.is64BitVector() ? MVT::v8i8 : MVT::v16i8, Src);
9500 assert((SrcVT.is64BitVector() || SrcVT.is128BitVector()) &&
9501 "Expected a legally typed vector");
9502 if (SrcVT.is64BitVector())
9503 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, Src,
9504 DAG.getUNDEF(MVT::v8i8));
9505 TBLOperands.push_back(Src);
9508 SmallVector<SDValue, 16> TBLMask;
9509 for (unsigned i = 0; i < Mask.size(); i++)
9510 TBLMask.push_back(DAG.getConstant(Mask[i], dl, MVT::i32));
9511 assert((Mask.size() == 8 || Mask.size() == 16) &&
9512 "Expected a v8i8 or v16i8 Mask");
9513 TBLOperands.push_back(
9514 DAG.getBuildVector(Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, dl, TBLMask));
9517 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
9518 Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, TBLOperands);
9519 return DAG.getBitcast(VT, Shuffle);
9522 if (Sources.size() > 2) {
9523 LLVM_DEBUG(dbgs() << "Reshuffle failed: currently only do something "
9524 << "sensible when at most two source vectors are "
9529 // Find the smallest element size among the result and the two sources, and
9530 // use it as the element size to build the shuffle_vector.
9531 EVT SmallestEltTy = VT.getVectorElementType();
9532 for (auto &Source : Sources) {
9533 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
9534 if (SrcEltTy.bitsLT(SmallestEltTy)) {
9535 SmallestEltTy = SrcEltTy;
9538 unsigned ResMultiplier =
9539 VT.getScalarSizeInBits() / SmallestEltTy.getFixedSizeInBits();
9540 uint64_t VTSize = VT.getFixedSizeInBits();
9541 NumElts = VTSize / SmallestEltTy.getFixedSizeInBits();
9542 EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
9544 // If the source vector is too wide or too narrow, we may nevertheless be able
9545 // to construct a compatible shuffle either by concatenating it with UNDEF or
9546 // extracting a suitable range of elements.
9547 for (auto &Src : Sources) {
9548 EVT SrcVT = Src.ShuffleVec.getValueType();
9550 TypeSize SrcVTSize = SrcVT.getSizeInBits();
9551 if (SrcVTSize == TypeSize::Fixed(VTSize))
9554 // This stage of the search produces a source with the same element type as
9555 // the original, but with a total width matching the BUILD_VECTOR output.
9556 EVT EltVT = SrcVT.getVectorElementType();
9557 unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
9558 EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
9560 if (SrcVTSize.getFixedValue() < VTSize) {
9561 assert(2 * SrcVTSize == VTSize);
9562 // We can pad out the smaller vector for free, so if it's part of a
9565 DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
9566 DAG.getUNDEF(Src.ShuffleVec.getValueType()));
9570 if (SrcVTSize.getFixedValue() != 2 * VTSize) {
9572 dbgs() << "Reshuffle failed: result vector too small to extract\n");
9576 if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
9578 dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");
9582 if (Src.MinElt >= NumSrcElts) {
9583 // The extraction can just take the second half
9585 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9586 DAG.getConstant(NumSrcElts, dl, MVT::i64));
9587 Src.WindowBase = -NumSrcElts;
9588 } else if (Src.MaxElt < NumSrcElts) {
9589 // The extraction can just take the first half
9591 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9592 DAG.getConstant(0, dl, MVT::i64));
9594 // An actual VEXT is needed
9596 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9597 DAG.getConstant(0, dl, MVT::i64));
9599 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9600 DAG.getConstant(NumSrcElts, dl, MVT::i64));
9601 unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1);
9603 if (!SrcVT.is64BitVector()) {
9605 dbgs() << "Reshuffle failed: don't know how to lower AArch64ISD::EXT "
9606 "for SVE vectors.");
9610 Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1,
9612 DAG.getConstant(Imm, dl, MVT::i32));
9613 Src.WindowBase = -Src.MinElt;
9617 // Another possible incompatibility occurs from the vector element types. We
9618 // can fix this by bitcasting the source vectors to the same type we intend
9620 for (auto &Src : Sources) {
9621 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
9622 if (SrcEltTy == SmallestEltTy)
9624 assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
9625 Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
9627 SrcEltTy.getFixedSizeInBits() / SmallestEltTy.getFixedSizeInBits();
9628 Src.WindowBase *= Src.WindowScale;
9631 // Final check before we try to actually produce a shuffle.
9632 LLVM_DEBUG(for (auto Src
9634 assert(Src.ShuffleVec.getValueType() == ShuffleVT););
9636 // The stars all align; our next step is to produce the mask for the shuffle.
9637 SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
9638 int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
9639 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
9640 SDValue Entry = Op.getOperand(i);
9641 if (Entry.isUndef())
9644 auto Src = find(Sources, Entry.getOperand(0));
9645 int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
9647 // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
9648 // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
9650 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
9651 int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(),
9652 VT.getScalarSizeInBits());
9653 int LanesDefined = BitsDefined / BitsPerShuffleLane;
9655 // This source is expected to fill ResMultiplier lanes of the final shuffle,
9656 // starting at the appropriate offset.
9657 int *LaneMask = &Mask[i * ResMultiplier];
9659 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
9660 ExtractBase += NumElts * (Src - Sources.begin());
9661 for (int j = 0; j < LanesDefined; ++j)
9662 LaneMask[j] = ExtractBase + j;
9665 // Final check before we try to produce nonsense...
9666 if (!isShuffleMaskLegal(Mask, ShuffleVT)) {
9667 LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");
9671 SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
9672 for (unsigned i = 0; i < Sources.size(); ++i)
9673 ShuffleOps[i] = Sources[i].ShuffleVec;
9675 SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
9676 ShuffleOps[1], Mask);
9677 SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
9679 LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump();
9680 dbgs() << "Reshuffle, creating node: "; V.dump(););
9685 // Check if an EXT instruction can handle the shuffle mask when the
9686 // vector sources of the shuffle are the same.
9687 static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
9688 unsigned NumElts = VT.getVectorNumElements();
9690 // Assume that the first shuffle index is not UNDEF. Fail if it is.
9696 // If this is a VEXT shuffle, the immediate value is the index of the first
9697 // element. The other shuffle indices must be the successive elements after
9699 unsigned ExpectedElt = Imm;
9700 for (unsigned i = 1; i < NumElts; ++i) {
9701 // Increment the expected index. If it wraps around, just follow it
9702 // back to index zero and keep going.
9704 if (ExpectedElt == NumElts)
9708 continue; // ignore UNDEF indices
9709 if (ExpectedElt != static_cast<unsigned>(M[i]))
9716 // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
9717 // v4i32s. This is really a truncate, which we can construct out of (legal)
9718 // concats and truncate nodes.
9719 static SDValue ReconstructTruncateFromBuildVector(SDValue V, SelectionDAG &DAG) {
9720 if (V.getValueType() != MVT::v16i8)
9722 assert(V.getNumOperands() == 16 && "Expected 16 operands on the BUILDVECTOR");
9724 for (unsigned X = 0; X < 4; X++) {
9725 // Check the first item in each group is an extract from lane 0 of a v4i32 or v4i16.
9727 SDValue BaseExt = V.getOperand(X * 4);
9728 if (BaseExt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9729 (BaseExt.getOperand(0).getValueType() != MVT::v4i16 &&
9730 BaseExt.getOperand(0).getValueType() != MVT::v4i32) ||
9731 !isa<ConstantSDNode>(BaseExt.getOperand(1)) ||
9732 BaseExt.getConstantOperandVal(1) != 0)
9734 SDValue Base = BaseExt.getOperand(0);
9735 // And check the other items are extracts from the same vector.
9736 for (unsigned Y = 1; Y < 4; Y++) {
9737 SDValue Ext = V.getOperand(X * 4 + Y);
9738 if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9739 Ext.getOperand(0) != Base ||
9740 !isa<ConstantSDNode>(Ext.getOperand(1)) ||
9741 Ext.getConstantOperandVal(1) != Y)
9746 // Turn the buildvector into a series of truncates and concats, which will
9747 // become uzip1's. Any v4i32s we found get truncated to v4i16, which are
9748 // concatenated to produce 2 v8i16; these are both truncated and concatenated into the final v16i8.
9751 SDValue Trunc[4] = {
9752 V.getOperand(0).getOperand(0), V.getOperand(4).getOperand(0),
9753 V.getOperand(8).getOperand(0), V.getOperand(12).getOperand(0)};
9754 for (int I = 0; I < 4; I++)
9755 if (Trunc[I].getValueType() == MVT::v4i32)
9756 Trunc[I] = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i16, Trunc[I]);
9758 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[0], Trunc[1]);
9760 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[2], Trunc[3]);
9761 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat0);
9762 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat1);
9763 return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Trunc0, Trunc1);
9766 /// Check if a vector shuffle corresponds to a DUP instruction with a larger
9767 /// element width than the vector lane type. If that is the case, the function
9768 /// returns true and writes the value of the DUP instruction lane operand into DupLaneOp.
9770 static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
9771 unsigned &DupLaneOp) {
9772 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
9773 "Only possible block sizes for wide DUP are: 16, 32, 64");
9775 if (BlockSize <= VT.getScalarSizeInBits())
9777 if (BlockSize % VT.getScalarSizeInBits() != 0)
9779 if (VT.getSizeInBits() % BlockSize != 0)
9782 size_t SingleVecNumElements = VT.getVectorNumElements();
9783 size_t NumEltsPerBlock = BlockSize / VT.getScalarSizeInBits();
9784 size_t NumBlocks = VT.getSizeInBits() / BlockSize;
9786 // We are looking for masks like
9787 // [0, 1, 0, 1] or [2, 3, 2, 3] or [4, 5, 6, 7, 4, 5, 6, 7] where any element
9788 // might be replaced by 'undefined'. BlockIndices will eventually contain
9789 // lane indices of the duplicated block (i.e. [0, 1], [2, 3] and [4, 5, 6, 7]
9790 // for the above examples)
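// Illustrative example: for a v8i16 shuffle with mask [2, 3, 2, 3, 2, 3, 2, 3]
// and BlockSize 32, the duplicated block is [2, 3], so DupLaneOp should come
// out as 1 (a DUP from 32-bit lane 1).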
9791 SmallVector<int, 8> BlockElts(NumEltsPerBlock, -1);
9792 for (size_t BlockIndex = 0; BlockIndex < NumBlocks; BlockIndex++)
9793 for (size_t I = 0; I < NumEltsPerBlock; I++) {
9794 int Elt = M[BlockIndex * NumEltsPerBlock + I];
9797 // For now we don't support shuffles that use the second operand
9798 if ((unsigned)Elt >= SingleVecNumElements)
9800 if (BlockElts[I] < 0)
9802 else if (BlockElts[I] != Elt)
9806 // We found a candidate block (possibly with some undefs). It must be a
9807 // sequence of consecutive integers starting with a value divisible by
9808 // NumEltsPerBlock with some values possibly replaced by undef-s.
9810 // Find first non-undef element
9811 auto FirstRealEltIter = find_if(BlockElts, [](int Elt) { return Elt >= 0; });
9812 assert(FirstRealEltIter != BlockElts.end() &&
9813 "Shuffle with all-undefs must have been caught by previous cases, "
9815 if (FirstRealEltIter == BlockElts.end()) {
9820 // Index of FirstRealElt in BlockElts
9821 size_t FirstRealIndex = FirstRealEltIter - BlockElts.begin();
9823 if ((unsigned)*FirstRealEltIter < FirstRealIndex)
9825 // BlockElts[0] must have the following value if it isn't undef:
9826 size_t Elt0 = *FirstRealEltIter - FirstRealIndex;
9828 // Check the first element
9829 if (Elt0 % NumEltsPerBlock != 0)
9831 // Check that the sequence indeed consists of consecutive integers (modulo undefs).
9833 for (size_t I = 0; I < NumEltsPerBlock; I++)
9834 if (BlockElts[I] >= 0 && (unsigned)BlockElts[I] != Elt0 + I)
9837 DupLaneOp = Elt0 / NumEltsPerBlock;
9841 // Check if an EXT instruction can handle the shuffle mask when the
9842 // vector sources of the shuffle are different.
9843 static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
9845 // Look for the first non-undef element.
9846 const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
9848 // Benefit from APInt to handle overflow when calculating the expected element.
9849 unsigned NumElts = VT.getVectorNumElements();
9850 unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
9851 APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
9852 // The following shuffle indices must be the successive elements after the
9853 // first real element.
9854 bool FoundWrongElt = std::any_of(FirstRealElt + 1, M.end(), [&](int Elt) {
9855 return Elt != ExpectedElt++ && Elt != -1;
9860 // The index of an EXT is the first element if it is not UNDEF.
9861 // Watch out for the beginning UNDEFs. The EXT index should be the expected
9862 // value of the first element. E.g.
9863 // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
9864 // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
9865 // ExpectedElt is the last mask index plus 1.
9866 Imm = ExpectedElt.getZExtValue();
9868 // There are two different cases requiring us to reverse the input vectors.
9869 // For example, for vector <4 x i32> we have the following cases,
9870 // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
9871 // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
9872 // For both cases, we finally use mask <5, 6, 7, 0>, which requires
9873 // to reverse two input vectors.
9882 /// isREVMask - Check if a vector shuffle corresponds to a REV
9883 /// instruction with the specified blocksize. (The order of the elements
9884 /// within each block of the vector is reversed.)
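/// For example, a v8i16 shuffle with mask <3, 2, 1, 0, 7, 6, 5, 4> matches a
/// REV64: the 16-bit lanes within each 64-bit block are reversed.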
9885 static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
9886 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
9887 "Only possible block sizes for REV are: 16, 32, 64");
9889 unsigned EltSz = VT.getScalarSizeInBits();
9893 unsigned NumElts = VT.getVectorNumElements();
9894 unsigned BlockElts = M[0] + 1;
9895 // If the first shuffle index is UNDEF, be optimistic.
9897 BlockElts = BlockSize / EltSz;
9899 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
9902 for (unsigned i = 0; i < NumElts; ++i) {
9904 continue; // ignore UNDEF indices
9905 if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
9912 static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9913 unsigned NumElts = VT.getVectorNumElements();
9914 if (NumElts % 2 != 0)
9916 WhichResult = (M[0] == 0 ? 0 : 1);
9917 unsigned Idx = WhichResult * NumElts / 2;
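// For example (illustrative), a v4i32 mask <0, 4, 1, 5> matches with
// WhichResult == 0 (ZIP1) and <2, 6, 3, 7> with WhichResult == 1 (ZIP2).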
9918 for (unsigned i = 0; i != NumElts; i += 2) {
9919 if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
9920 (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
9928 static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9929 unsigned NumElts = VT.getVectorNumElements();
9930 WhichResult = (M[0] == 0 ? 0 : 1);
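// For example (illustrative), a v4i32 mask <0, 2, 4, 6> matches with
// WhichResult == 0 (UZP1) and <1, 3, 5, 7> with WhichResult == 1 (UZP2).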
9931 for (unsigned i = 0; i != NumElts; ++i) {
9933 continue; // ignore UNDEF indices
9934 if ((unsigned)M[i] != 2 * i + WhichResult)
9941 static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9942 unsigned NumElts = VT.getVectorNumElements();
9943 if (NumElts % 2 != 0)
9945 WhichResult = (M[0] == 0 ? 0 : 1);
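// For example (illustrative), a v4i32 mask <0, 4, 2, 6> matches with
// WhichResult == 0 (TRN1) and <1, 5, 3, 7> with WhichResult == 1 (TRN2).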
9946 for (unsigned i = 0; i < NumElts; i += 2) {
9947 if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
9948 (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult))
9954 /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of
9955 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9956 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
9957 static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9958 unsigned NumElts = VT.getVectorNumElements();
9959 if (NumElts % 2 != 0)
9961 WhichResult = (M[0] == 0 ? 0 : 1);
9962 unsigned Idx = WhichResult * NumElts / 2;
9963 for (unsigned i = 0; i != NumElts; i += 2) {
9964 if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
9965 (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx))
9973 /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of
9974 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9975 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
9976 static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9977 unsigned Half = VT.getVectorNumElements() / 2;
9978 WhichResult = (M[0] == 0 ? 0 : 1);
9979 for (unsigned j = 0; j != 2; ++j) {
9980 unsigned Idx = WhichResult;
9981 for (unsigned i = 0; i != Half; ++i) {
9982 int MIdx = M[i + j * Half];
9983 if (MIdx >= 0 && (unsigned)MIdx != Idx)
9992 /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of
9993 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9994 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
9995 static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9996 unsigned NumElts = VT.getVectorNumElements();
9997 if (NumElts % 2 != 0)
9999 WhichResult = (M[0] == 0 ? 0 : 1);
10000 for (unsigned i = 0; i < NumElts; i += 2) {
10001 if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
10002 (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult))
10008 static bool isINSMask(ArrayRef<int> M, int NumInputElements,
10009 bool &DstIsLeft, int &Anomaly) {
10010 if (M.size() != static_cast<size_t>(NumInputElements))
10013 int NumLHSMatch = 0, NumRHSMatch = 0;
10014 int LastLHSMismatch = -1, LastRHSMismatch = -1;
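// Illustrative example: for a v4i32 mask <0, 1, 6, 3>, three lanes come
// unchanged from the LHS, so this should report DstIsLeft == true with
// Anomaly == 2 (the single lane that is inserted from elsewhere).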
10016 for (int i = 0; i < NumInputElements; ++i) {
10026 LastLHSMismatch = i;
10028 if (M[i] == i + NumInputElements)
10031 LastRHSMismatch = i;
10034 if (NumLHSMatch == NumInputElements - 1) {
10036 Anomaly = LastLHSMismatch;
10038 } else if (NumRHSMatch == NumInputElements - 1) {
10040 Anomaly = LastRHSMismatch;
10047 static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) {
10048 if (VT.getSizeInBits() != 128)
10051 unsigned NumElts = VT.getVectorNumElements();
10053 for (int I = 0, E = NumElts / 2; I != E; I++) {
10058 int Offset = NumElts / 2;
10059 for (int I = NumElts / 2, E = NumElts; I != E; I++) {
10060 if (Mask[I] != I + SplitLHS * Offset)
10067 static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
10069 EVT VT = Op.getValueType();
10070 SDValue V0 = Op.getOperand(0);
10071 SDValue V1 = Op.getOperand(1);
10072 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
10074 if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() ||
10075 VT.getVectorElementType() != V1.getValueType().getVectorElementType())
10078 bool SplitV0 = V0.getValueSizeInBits() == 128;
10080 if (!isConcatMask(Mask, VT, SplitV0))
10083 EVT CastVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
10085 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
10086 DAG.getConstant(0, DL, MVT::i64));
10088 if (V1.getValueSizeInBits() == 128) {
10089 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
10090 DAG.getConstant(0, DL, MVT::i64));
10092 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
10095 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
10096 /// the specified operations to build the shuffle. ID is the perfect-shuffle
10097 /// ID, V1 and V2 are the original shuffle inputs. PFEntry is the perfect-shuffle
10098 /// table entry and LHS/RHS are the immediate inputs for this stage of the shuffle.
10100 static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
10101 SDValue V2, unsigned PFEntry, SDValue LHS,
10102 SDValue RHS, SelectionDAG &DAG,
10104 unsigned OpNum = (PFEntry >> 26) & 0x0F;
10105 unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
10106 unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
10109 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
10118 OP_VUZPL, // VUZP, left result
10119 OP_VUZPR, // VUZP, right result
10120 OP_VZIPL, // VZIP, left result
10121 OP_VZIPR, // VZIP, right result
10122 OP_VTRNL, // VTRN, left result
10123 OP_VTRNR, // VTRN, right result
10124 OP_MOVLANE // Move lane. RHSID is the lane to move into
10127 if (OpNum == OP_COPY) {
10128 if (LHSID == (1 * 9 + 2) * 9 + 3)
10130 assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!");
10134 if (OpNum == OP_MOVLANE) {
10135 // Decompose a PerfectShuffle ID to get the Mask for lane Elt
10136 auto getPFIDLane = [](unsigned ID, int Elt) -> int {
10137 assert(Elt < 4 && "Expected Perfect Lanes to be less than 4");
10143 return (ID % 9 == 8) ? -1 : ID % 9;
10146 // For OP_MOVLANE shuffles, the RHSID represents the lane to move into. The
10147 // lane to move from is taken from the PFID, which always refers to the
10148 // original vectors (V1 or V2).
10149 SDValue OpLHS = GeneratePerfectShuffle(
10150 LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
10151 EVT VT = OpLHS.getValueType();
10152 assert(RHSID < 8 && "Expected a lane index for RHSID!");
10153 unsigned ExtLane = 0;
10156 // OP_MOVLANE shuffles are either D movs (if bit 0x4 is set) or S movs. D movs
10157 // convert into a higher type.
10159 int MaskElt = getPFIDLane(ID, (RHSID & 0x01) << 1) >> 1;
10161 MaskElt = (getPFIDLane(ID, ((RHSID & 0x01) << 1) + 1) - 1) >> 1;
10162 assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
10163 ExtLane = MaskElt < 2 ? MaskElt : (MaskElt - 2);
10164 Input = MaskElt < 2 ? V1 : V2;
10165 if (VT.getScalarSizeInBits() == 16) {
10166 Input = DAG.getBitcast(MVT::v2f32, Input);
10167 OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
10169 assert(VT.getScalarSizeInBits() == 32 &&
10170 "Expected 16 or 32 bit shuffle elemements");
10171 Input = DAG.getBitcast(MVT::v2f64, Input);
10172 OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
10175 int MaskElt = getPFIDLane(ID, RHSID);
10176 assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
10177 ExtLane = MaskElt < 4 ? MaskElt : (MaskElt - 4);
10178 Input = MaskElt < 4 ? V1 : V2;
10179 // Be careful about creating illegal types. Use f16 instead of i16.
10180 if (VT == MVT::v4i16) {
10181 Input = DAG.getBitcast(MVT::v4f16, Input);
10182 OpLHS = DAG.getBitcast(MVT::v4f16, OpLHS);
10185 SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
10186 Input.getValueType().getVectorElementType(),
10187 Input, DAG.getVectorIdxConstant(ExtLane, dl));
10189 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Input.getValueType(), OpLHS,
10190 Ext, DAG.getVectorIdxConstant(RHSID & 0x3, dl));
10191 return DAG.getBitcast(VT, Ins);
10194 SDValue OpLHS, OpRHS;
10195 OpLHS = GeneratePerfectShuffle(LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS,
10197 OpRHS = GeneratePerfectShuffle(RHSID, V1, V2, PerfectShuffleTable[RHSID], LHS,
10199 EVT VT = OpLHS.getValueType();
10203 llvm_unreachable("Unknown shuffle opcode!");
10205 // VREV divides the vector in half and swaps within the half.
10206 if (VT.getVectorElementType() == MVT::i32 ||
10207 VT.getVectorElementType() == MVT::f32)
10208 return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS);
10209 // vrev <4 x i16> -> REV32
10210 if (VT.getVectorElementType() == MVT::i16 ||
10211 VT.getVectorElementType() == MVT::f16 ||
10212 VT.getVectorElementType() == MVT::bf16)
10213 return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS);
10214 // vrev <4 x i8> -> REV16
10215 assert(VT.getVectorElementType() == MVT::i8);
10216 return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS);
10221 EVT EltTy = VT.getVectorElementType();
10223 if (EltTy == MVT::i8)
10224 Opcode = AArch64ISD::DUPLANE8;
10225 else if (EltTy == MVT::i16 || EltTy == MVT::f16 || EltTy == MVT::bf16)
10226 Opcode = AArch64ISD::DUPLANE16;
10227 else if (EltTy == MVT::i32 || EltTy == MVT::f32)
10228 Opcode = AArch64ISD::DUPLANE32;
10229 else if (EltTy == MVT::i64 || EltTy == MVT::f64)
10230 Opcode = AArch64ISD::DUPLANE64;
10232 llvm_unreachable("Invalid vector element type?");
10234 if (VT.getSizeInBits() == 64)
10235 OpLHS = WidenVector(OpLHS, DAG);
10236 SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64);
10237 return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);
10242 unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS);
10243 return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS,
10244 DAG.getConstant(Imm, dl, MVT::i32));
10247 return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS,
10250 return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS,
10253 return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS,
10256 return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS,
10259 return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS,
10262 return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS,
10267 static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
10268 SelectionDAG &DAG) {
10269 // Check to see if we can use the TBL instruction.
10270 SDValue V1 = Op.getOperand(0);
10271 SDValue V2 = Op.getOperand(1);
10274 EVT EltVT = Op.getValueType().getVectorElementType();
10275 unsigned BytesPerElt = EltVT.getSizeInBits() / 8;
10278 if (V1.isUndef() || isZerosVector(V1.getNode())) {
10283 // If the V2 source is undef or zero then we can use a tbl1, as tbl1 will fill
10284 // out of range values with 0s. We do need to make sure that any out-of-range
10285 // values are really out-of-range for a v16i8 vector.
10286 bool IsUndefOrZero = V2.isUndef() || isZerosVector(V2.getNode());
10287 MVT IndexVT = MVT::v8i8;
10288 unsigned IndexLen = 8;
10289 if (Op.getValueSizeInBits() == 128) {
10290 IndexVT = MVT::v16i8;
10294 SmallVector<SDValue, 8> TBLMask;
10295 for (int Val : ShuffleMask) {
10296 for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
10297 unsigned Offset = Byte + Val * BytesPerElt;
10299 Offset = Offset < IndexLen ? Offset + IndexLen : Offset - IndexLen;
10300 if (IsUndefOrZero && Offset >= IndexLen)
10302 TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32));
10306 SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1);
10307 SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2);
10310 if (IsUndefOrZero) {
10312 V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
10313 Shuffle = DAG.getNode(
10314 ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10315 DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
10316 DAG.getBuildVector(IndexVT, DL,
10317 makeArrayRef(TBLMask.data(), IndexLen)));
10319 if (IndexLen == 8) {
10320 V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
10321 Shuffle = DAG.getNode(
10322 ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10323 DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
10324 DAG.getBuildVector(IndexVT, DL,
10325 makeArrayRef(TBLMask.data(), IndexLen)));
10327 // FIXME: We cannot, for the moment, emit a TBL2 instruction because we
10328 // cannot currently represent the register constraints on the input
10329 // table registers.
10330 // Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst,
10331 // DAG.getBuildVector(IndexVT, DL, &TBLMask[0],
10333 Shuffle = DAG.getNode(
10334 ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10335 DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst,
10336 V2Cst, DAG.getBuildVector(IndexVT, DL,
10337 makeArrayRef(TBLMask.data(), IndexLen)));
10340 return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
10343 static unsigned getDUPLANEOp(EVT EltType) {
10344 if (EltType == MVT::i8)
10345 return AArch64ISD::DUPLANE8;
10346 if (EltType == MVT::i16 || EltType == MVT::f16 || EltType == MVT::bf16)
10347 return AArch64ISD::DUPLANE16;
10348 if (EltType == MVT::i32 || EltType == MVT::f32)
10349 return AArch64ISD::DUPLANE32;
10350 if (EltType == MVT::i64 || EltType == MVT::f64)
10351 return AArch64ISD::DUPLANE64;
10353 llvm_unreachable("Invalid vector element type?");
10356 static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
10357 unsigned Opcode, SelectionDAG &DAG) {
10358 // Try to eliminate a bitcasted extract subvector before a DUPLANE.
10359 auto getScaledOffsetDup = [](SDValue BitCast, int &LaneC, MVT &CastVT) {
10360 // Match: dup (bitcast (extract_subv X, C)), LaneC
10361 if (BitCast.getOpcode() != ISD::BITCAST ||
10362 BitCast.getOperand(0).getOpcode() != ISD::EXTRACT_SUBVECTOR)
10365 // The extract index must align in the destination type. That may not
10366 // happen if the bitcast is from narrow to wide type.
10367 SDValue Extract = BitCast.getOperand(0);
10368 unsigned ExtIdx = Extract.getConstantOperandVal(1);
10369 unsigned SrcEltBitWidth = Extract.getScalarValueSizeInBits();
10370 unsigned ExtIdxInBits = ExtIdx * SrcEltBitWidth;
10371 unsigned CastedEltBitWidth = BitCast.getScalarValueSizeInBits();
10372 if (ExtIdxInBits % CastedEltBitWidth != 0)
10375 // Can't handle cases where vector size is not 128-bit
10376 if (!Extract.getOperand(0).getValueType().is128BitVector())
10379 // Update the lane value by offsetting with the scaled extract index.
10380 LaneC += ExtIdxInBits / CastedEltBitWidth;
10382 // Determine the casted vector type of the wide vector input.
10383 // dup (bitcast (extract_subv X, C)), LaneC --> dup (bitcast X), LaneC'
10385 // dup (bitcast (extract_subv v2f64 X, 1) to v2f32), 1 --> dup v4f32 X, 3
10386 // dup (bitcast (extract_subv v16i8 X, 8) to v4i16), 1 --> dup v8i16 X, 5
10387 unsigned SrcVecNumElts =
10388 Extract.getOperand(0).getValueSizeInBits() / CastedEltBitWidth;
10389 CastVT = MVT::getVectorVT(BitCast.getSimpleValueType().getScalarType(),
10394 if (getScaledOffsetDup(V, Lane, CastVT)) {
10395 V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
10396 } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
10397 V.getOperand(0).getValueType().is128BitVector()) {
10398 // The lane is incremented by the index of the extract.
10399 // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
10400 Lane += V.getConstantOperandVal(1);
10401 V = V.getOperand(0);
10402 } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
10403 // The lane is decremented if we are splatting from the 2nd operand.
10404 // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1
10405 unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2;
10406 Lane -= Idx * VT.getVectorNumElements() / 2;
10407 V = WidenVector(V.getOperand(Idx), DAG);
10408 } else if (VT.getSizeInBits() == 64) {
10409 // Widen the operand to 128-bit register with undef.
10410 V = WidenVector(V, DAG);
10412 return DAG.getNode(Opcode, dl, VT, V, DAG.getConstant(Lane, dl, MVT::i64));
10415 // Return true if we can derive a new (half-length) shuffle mask: every two
10416 // adjacent mask values must be consecutive and start from an even number
10417 // (undef entries are allowed).
10418 static bool isWideTypeMask(ArrayRef<int> M, EVT VT,
10419 SmallVectorImpl<int> &NewMask) {
10420 unsigned NumElts = VT.getVectorNumElements();
10421 if (NumElts % 2 != 0)
10425 for (unsigned i = 0; i < NumElts; i += 2) {
10429 // If both elements are undef, new mask is undef too.
10430 if (M0 == -1 && M1 == -1) {
10431 NewMask.push_back(-1);
10435 if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
10436 NewMask.push_back(M1 / 2);
10440 if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
10441 NewMask.push_back(M0 / 2);
10449 assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
10453 // Try to widen element type to get a new mask value for a better permutation
10454 // sequence, so that we can use NEON shuffle instructions, such as zip1/2,
10455 // UZP1/2, TRN1/2, REV, INS, etc.
10457 // shufflevector <4 x i32> %a, <4 x i32> %b,
10458 // <4 x i32> <i32 6, i32 7, i32 2, i32 3>
10459 // is equivalent to:
10460 // shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
10461 // Finally, we can get:
10462 // mov v0.d[0], v1.d[1]
10463 static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) {
10465 EVT VT = Op.getValueType();
10466 EVT ScalarVT = VT.getVectorElementType();
10467 unsigned ElementSize = ScalarVT.getFixedSizeInBits();
10468 SDValue V0 = Op.getOperand(0);
10469 SDValue V1 = Op.getOperand(1);
10470 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
10472 // When combining adjacent elements, like two i16's -> i32 or two i32's -> i64,
10473 // we need to make sure the wider element type is legal. Thus, ElementSize
10474 // should not be larger than 32 bits, and the i1 type should also be excluded.
10475 if (ElementSize > 32 || ElementSize == 1)
10478 SmallVector<int, 8> NewMask;
10479 if (isWideTypeMask(Mask, VT, NewMask)) {
10480 MVT NewEltVT = VT.isFloatingPoint()
10481 ? MVT::getFloatingPointVT(ElementSize * 2)
10482 : MVT::getIntegerVT(ElementSize * 2);
10483 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
10484 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
10485 V0 = DAG.getBitcast(NewVT, V0);
10486 V1 = DAG.getBitcast(NewVT, V1);
10487 return DAG.getBitcast(VT,
10488 DAG.getVectorShuffle(NewVT, DL, V0, V1, NewMask));
10495 SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
10496 SelectionDAG &DAG) const {
10498 EVT VT = Op.getValueType();
10500 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
10502 if (useSVEForFixedLengthVectorVT(VT))
10503 return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG);
10505 // Convert shuffles that are directly supported on NEON to target-specific
10506 // DAG nodes, instead of keeping them as shuffles and matching them again
10507 // during code selection. This is more efficient and avoids the possibility
10508 // of inconsistencies between legalization and selection.
10509 ArrayRef<int> ShuffleMask = SVN->getMask();
10511 SDValue V1 = Op.getOperand(0);
10512 SDValue V2 = Op.getOperand(1);
10514 assert(V1.getValueType() == VT && "Unexpected VECTOR_SHUFFLE type!");
10515 assert(ShuffleMask.size() == VT.getVectorNumElements() &&
10516 "Unexpected VECTOR_SHUFFLE mask size!");
10518 if (SVN->isSplat()) {
10519 int Lane = SVN->getSplatIndex();
10520 // If this is undef splat, generate it via "just" vdup, if possible.
10524 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
10525 return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(),
10527 // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non-
10528 // constant. If so, we can just reference the lane's definition directly.
10529 if (V1.getOpcode() == ISD::BUILD_VECTOR &&
10530 !isa<ConstantSDNode>(V1.getOperand(Lane)))
10531 return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane));
10533 // Otherwise, duplicate from the lane of the input vector.
10534 unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType());
10535 return constructDup(V1, Lane, dl, VT, Opcode, DAG);
10538 // Check if the mask matches a DUP for a wider element
10539 for (unsigned LaneSize : {64U, 32U, 16U}) {
10541 if (isWideDUPMask(ShuffleMask, VT, LaneSize, Lane)) {
10542 unsigned Opcode = LaneSize == 64 ? AArch64ISD::DUPLANE64
10543 : LaneSize == 32 ? AArch64ISD::DUPLANE32
10544 : AArch64ISD::DUPLANE16;
10545 // Cast V1 to an integer vector with required lane size
10546 MVT NewEltTy = MVT::getIntegerVT(LaneSize);
10547 unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
10548 MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
10549 V1 = DAG.getBitcast(NewVecTy, V1);
10550 // Construct the DUP instruction
10551 V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
10552 // Cast back to the original type
10553 return DAG.getBitcast(VT, V1);
10557 if (isREVMask(ShuffleMask, VT, 64))
10558 return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2);
10559 if (isREVMask(ShuffleMask, VT, 32))
10560 return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2);
10561 if (isREVMask(ShuffleMask, VT, 16))
10562 return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2);
10564 if (((VT.getVectorNumElements() == 8 && VT.getScalarSizeInBits() == 16) ||
10565 (VT.getVectorNumElements() == 16 && VT.getScalarSizeInBits() == 8)) &&
10566 ShuffleVectorInst::isReverseMask(ShuffleMask)) {
10567 SDValue Rev = DAG.getNode(AArch64ISD::REV64, dl, VT, V1);
10568 return DAG.getNode(AArch64ISD::EXT, dl, VT, Rev, Rev,
10569 DAG.getConstant(8, dl, MVT::i32));
10572 bool ReverseEXT = false;
10574 if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
10577 Imm *= getExtFactor(V1);
10578 return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
10579 DAG.getConstant(Imm, dl, MVT::i32));
10580 } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
10581 Imm *= getExtFactor(V1);
10582 return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
10583 DAG.getConstant(Imm, dl, MVT::i32));
10586 unsigned WhichResult;
10587 if (isZIPMask(ShuffleMask, VT, WhichResult)) {
10588 unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
10589 return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10591 if (isUZPMask(ShuffleMask, VT, WhichResult)) {
10592 unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
10593 return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10595 if (isTRNMask(ShuffleMask, VT, WhichResult)) {
10596 unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
10597 return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10600 if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10601 unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
10602 return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10604 if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10605 unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
10606 return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10608 if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10609 unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
10610 return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10613 if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG))
10618 int NumInputElements = V1.getValueType().getVectorNumElements();
10619 if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
10620 SDValue DstVec = DstIsLeft ? V1 : V2;
10621 SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64);
10623 SDValue SrcVec = V1;
10624 int SrcLane = ShuffleMask[Anomaly];
10625 if (SrcLane >= NumInputElements) {
10627 SrcLane -= VT.getVectorNumElements();
10629 SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64);
10631 EVT ScalarVT = VT.getVectorElementType();
10633 if (ScalarVT.getFixedSizeInBits() < 32 && ScalarVT.isInteger())
10634 ScalarVT = MVT::i32;
10636 return DAG.getNode(
10637 ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
10638 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV),
10642 if (SDValue NewSD = tryWidenMaskForShuffle(Op, DAG))
10645 // If the shuffle is not directly supported and it has 4 elements, use
10646 // the PerfectShuffle-generated table to synthesize it from other shuffles.
10647 unsigned NumElts = VT.getVectorNumElements();
10648 if (NumElts == 4) {
10649 unsigned PFIndexes[4];
10650 for (unsigned i = 0; i != 4; ++i) {
10651 if (ShuffleMask[i] < 0)
10654 PFIndexes[i] = ShuffleMask[i];
10657 // Compute the index in the perfect shuffle table.
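// (Illustrative) Each mask element is encoded base-9: lane indices 0-7, with
// 8 standing for an undef lane. A mask such as <1, 5, 3, 7> therefore maps to
// index 1*729 + 5*81 + 3*9 + 7.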
10658 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
10659 PFIndexes[2] * 9 + PFIndexes[3];
10660 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10661 return GeneratePerfectShuffle(PFTableIndex, V1, V2, PFEntry, V1, V2, DAG,
10665 return GenerateTBL(Op, ShuffleMask, DAG);
10668 SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
10669 SelectionDAG &DAG) const {
10670 EVT VT = Op.getValueType();
10672 if (useSVEForFixedLengthVectorVT(VT))
10673 return LowerToScalableOp(Op, DAG);
10675 assert(VT.isScalableVector() && VT.getVectorElementType() == MVT::i1 &&
10676 "Unexpected vector type!");
10678 // We can handle the constant cases during isel.
10679 if (isa<ConstantSDNode>(Op.getOperand(0)))
10682 // There isn't a natural way to handle the general i1 case, so we use some
10683 // trickery with whilelo.
10685 SDValue SplatVal = DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, MVT::i64);
10686 SplatVal = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, SplatVal,
10687 DAG.getValueType(MVT::i1));
10689 DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
10690 SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
10691 if (VT == MVT::nxv1i1)
10692 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::nxv1i1,
10693 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::nxv2i1, ID,
10696 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Zero, SplatVal);
10699 SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
10700 SelectionDAG &DAG) const {
10703 EVT VT = Op.getValueType();
10704 if (!isTypeLegal(VT) || !VT.isScalableVector())
10707 // Current lowering only supports the SVE-ACLE types.
10708 if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
10711 // The DUPQ operation is independent of element type so normalise to i64s.
10712 SDValue Idx128 = Op.getOperand(2);
10714 // DUPQ can be used when idx is in range.
10715 auto *CIdx = dyn_cast<ConstantSDNode>(Idx128);
10716 if (CIdx && (CIdx->getZExtValue() <= 3)) {
10717 SDValue CI = DAG.getTargetConstant(CIdx->getZExtValue(), DL, MVT::i64);
10718 return DAG.getNode(AArch64ISD::DUPLANE128, DL, VT, Op.getOperand(1), CI);
10721 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::nxv2i64, Op.getOperand(1));
10723 // The ACLE says this must produce the same result as:
10724 // svtbl(data, svadd_x(svptrue_b64(),
10725 // svand_x(svptrue_b64(), svindex_u64(0, 1), 1),
10727 SDValue One = DAG.getConstant(1, DL, MVT::i64);
10728 SDValue SplatOne = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, One);
10730 // create the vector 0,1,0,1,...
10731 SDValue SV = DAG.getStepVector(DL, MVT::nxv2i64);
10732 SV = DAG.getNode(ISD::AND, DL, MVT::nxv2i64, SV, SplatOne);
10734 // create the vector idx64,idx64+1,idx64,idx64+1,...
10735 SDValue Idx64 = DAG.getNode(ISD::ADD, DL, MVT::i64, Idx128, Idx128);
10736 SDValue SplatIdx64 = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Idx64);
10737 SDValue ShuffleMask = DAG.getNode(ISD::ADD, DL, MVT::nxv2i64, SV, SplatIdx64);
10739 // create the vector Val[idx64],Val[idx64+1],Val[idx64],Val[idx64+1],...
10740 SDValue TBL = DAG.getNode(AArch64ISD::TBL, DL, MVT::nxv2i64, V, ShuffleMask);
10741 return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
10745 static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
10746 APInt &UndefBits) {
10747 EVT VT = BVN->getValueType(0);
10748 APInt SplatBits, SplatUndef;
10749 unsigned SplatBitSize;
10751 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10752 unsigned NumSplats = VT.getSizeInBits() / SplatBitSize;
10754 for (unsigned i = 0; i < NumSplats; ++i) {
10755 CnstBits <<= SplatBitSize;
10756 UndefBits <<= SplatBitSize;
10757 CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits());
10758 UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits());
10767 // Try 64-bit splatted SIMD immediate.
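// ModImm type 10 covers 64-bit values in which each byte is either 0x00 or
// 0xff, e.g. (illustrative) 0xff00ff00ff00ff00, which a single 64-bit MOVI
// can materialize.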
10768 static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10769 const APInt &Bits) {
10770 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10771 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10772 EVT VT = Op.getValueType();
10773 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64;
10775 if (AArch64_AM::isAdvSIMDModImmType10(Value)) {
10776 Value = AArch64_AM::encodeAdvSIMDModImmType10(Value);
10779 SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10780 DAG.getConstant(Value, dl, MVT::i32));
10781 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10788 // Try 32-bit splatted SIMD immediate.
10789 static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10791 const SDValue *LHS = nullptr) {
10792 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10793 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10794 EVT VT = Op.getValueType();
10795 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
10796 bool isAdvSIMDModImm = false;
10799 if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) {
10800 Value = AArch64_AM::encodeAdvSIMDModImmType1(Value);
10803 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) {
10804 Value = AArch64_AM::encodeAdvSIMDModImmType2(Value);
10807 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) {
10808 Value = AArch64_AM::encodeAdvSIMDModImmType3(Value);
10811 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) {
10812 Value = AArch64_AM::encodeAdvSIMDModImmType4(Value);
10816 if (isAdvSIMDModImm) {
10821 Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
10822 DAG.getConstant(Value, dl, MVT::i32),
10823 DAG.getConstant(Shift, dl, MVT::i32));
10825 Mov = DAG.getNode(NewOp, dl, MovTy,
10826 DAG.getConstant(Value, dl, MVT::i32),
10827 DAG.getConstant(Shift, dl, MVT::i32));
10829 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10836 // Try 16-bit splatted SIMD immediate.
10837 static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10839 const SDValue *LHS = nullptr) {
10840 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10841 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10842 EVT VT = Op.getValueType();
10843 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
10844 bool isAdvSIMDModImm = false;
10847 if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) {
10848 Value = AArch64_AM::encodeAdvSIMDModImmType5(Value);
10851 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) {
10852 Value = AArch64_AM::encodeAdvSIMDModImmType6(Value);
10856 if (isAdvSIMDModImm) {
10861 Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
10862 DAG.getConstant(Value, dl, MVT::i32),
10863 DAG.getConstant(Shift, dl, MVT::i32));
10865 Mov = DAG.getNode(NewOp, dl, MovTy,
10866 DAG.getConstant(Value, dl, MVT::i32),
10867 DAG.getConstant(Shift, dl, MVT::i32));
10869 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10876 // Try 32-bit splatted SIMD immediate with shifted ones.
10877 static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
10878 SelectionDAG &DAG, const APInt &Bits) {
10879 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10880 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10881 EVT VT = Op.getValueType();
10882 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
10883 bool isAdvSIMDModImm = false;
10886 if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) {
10887 Value = AArch64_AM::encodeAdvSIMDModImmType7(Value);
10890 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) {
10891 Value = AArch64_AM::encodeAdvSIMDModImmType8(Value);
10895 if (isAdvSIMDModImm) {
10897 SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10898 DAG.getConstant(Value, dl, MVT::i32),
10899 DAG.getConstant(Shift, dl, MVT::i32));
10900 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10907 // Try 8-bit splatted SIMD immediate.
10908 static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10909 const APInt &Bits) {
10910 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10911 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10912 EVT VT = Op.getValueType();
10913 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
10915 if (AArch64_AM::isAdvSIMDModImmType9(Value)) {
10916 Value = AArch64_AM::encodeAdvSIMDModImmType9(Value);
10919 SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10920 DAG.getConstant(Value, dl, MVT::i32));
10921 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10928 // Try FP splatted SIMD immediate.
10929 static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10930 const APInt &Bits) {
10931 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10932 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10933 EVT VT = Op.getValueType();
10934 bool isWide = (VT.getSizeInBits() == 128);
10936 bool isAdvSIMDModImm = false;
10938 if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) {
10939 Value = AArch64_AM::encodeAdvSIMDModImmType11(Value);
10940 MovTy = isWide ? MVT::v4f32 : MVT::v2f32;
10943 (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) {
10944 Value = AArch64_AM::encodeAdvSIMDModImmType12(Value);
10945 MovTy = MVT::v2f64;
10948 if (isAdvSIMDModImm) {
10950 SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10951 DAG.getConstant(Value, dl, MVT::i32));
10952 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10959 // Specialized code to quickly find if PotentialBVec is a BuildVector that
10960 // consists of only the same constant int value, returned in reference arg ConstVal.
10962 static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
10963 uint64_t &ConstVal) {
10964 BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
10967 ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
10970 EVT VT = Bvec->getValueType(0);
10971 unsigned NumElts = VT.getVectorNumElements();
10972 for (unsigned i = 1; i < NumElts; ++i)
10973 if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt)
10975 ConstVal = FirstElt->getZExtValue();
10979 // Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
10980 // to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
10981 // BUILD_VECTORs with constant element C1, C2 is a constant, and:
10982 // - for the SLI case: C1 == ~(Ones(ElemSizeInBits) << C2)
10983 // - for the SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2)
10984 // The (or (lsl Y, C2), (and X, BvecC1)) case is also handled.
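// Illustrative example for v4i32 with C2 == 8: the SLI form requires
// C1 == 0x000000ff, so (or (and X, 0x000000ff), (shl Y, 8)) can become
// (SLI X, Y, 8); for the SRI form the mask must instead cover the high C2 bits.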
10985 static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
10986 EVT VT = N->getValueType(0);
10988 if (!VT.isVector())
10996 SDValue FirstOp = N->getOperand(0);
10997 unsigned FirstOpc = FirstOp.getOpcode();
10998 SDValue SecondOp = N->getOperand(1);
10999 unsigned SecondOpc = SecondOp.getOpcode();
11001 // Is one of the operands an AND or a BICi? The AND may have been optimised to
11002 // a BICi in order to use an immediate instead of a register.
11003 // Is the other operand a shl or lshr? This will have been turned into:
11004 // AArch64ISD::VSHL vector, #shift or AArch64ISD::VLSHR vector, #shift.
11005 if ((FirstOpc == ISD::AND || FirstOpc == AArch64ISD::BICi) &&
11006 (SecondOpc == AArch64ISD::VSHL || SecondOpc == AArch64ISD::VLSHR)) {
11010 } else if ((SecondOpc == ISD::AND || SecondOpc == AArch64ISD::BICi) &&
11011 (FirstOpc == AArch64ISD::VSHL || FirstOpc == AArch64ISD::VLSHR)) {
11017 bool IsAnd = And.getOpcode() == ISD::AND;
11018 bool IsShiftRight = Shift.getOpcode() == AArch64ISD::VLSHR;
11020 // Is the shift amount constant?
11021 ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
11027 // Is the and mask vector all constant?
11028 if (!isAllConstantBuildVector(And.getOperand(1), C1))
11031 // Reconstruct the corresponding AND immediate from the two BICi immediates.
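// (A BICi computes X & ~(Imm << Shift), so the AND mask it replaced is
// ~(Imm << Shift); that is what is rebuilt here.)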
11032 ConstantSDNode *C1nodeImm = dyn_cast<ConstantSDNode>(And.getOperand(1));
11033 ConstantSDNode *C1nodeShift = dyn_cast<ConstantSDNode>(And.getOperand(2));
11034 assert(C1nodeImm && C1nodeShift);
11035 C1 = ~(C1nodeImm->getZExtValue() << C1nodeShift->getZExtValue());
11038 // Is C1 == ~(Ones(ElemSizeInBits) << C2) or
11039 // C1 == ~(Ones(ElemSizeInBits) >> C2), taking into account
11040 // how much one can shift elements of a particular size?
11041 uint64_t C2 = C2node->getZExtValue();
11042 unsigned ElemSizeInBits = VT.getScalarSizeInBits();
11043 if (C2 > ElemSizeInBits)
11046 APInt C1AsAPInt(ElemSizeInBits, C1);
11047 APInt RequiredC1 = IsShiftRight ? APInt::getHighBitsSet(ElemSizeInBits, C2)
11048 : APInt::getLowBitsSet(ElemSizeInBits, C2);
11049 if (C1AsAPInt != RequiredC1)
11052 SDValue X = And.getOperand(0);
11053 SDValue Y = Shift.getOperand(0);
11055 unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
11056 SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Shift.getOperand(1));
11058 LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n");
11059 LLVM_DEBUG(N->dump(&DAG));
11060 LLVM_DEBUG(dbgs() << "into: \n");
11061 LLVM_DEBUG(ResultSLI->dump(&DAG));
11067 SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
11068 SelectionDAG &DAG) const {
11069 if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11070 return LowerToScalableOp(Op, DAG);
11072 // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
11073 if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG))
11076 EVT VT = Op.getValueType();
11078 SDValue LHS = Op.getOperand(0);
11079 BuildVectorSDNode *BVN =
11080 dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
11082 // OR commutes, so try swapping the operands.
11083 LHS = Op.getOperand(1);
11084 BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
11089 APInt DefBits(VT.getSizeInBits(), 0);
11090 APInt UndefBits(VT.getSizeInBits(), 0);
11091 if (resolveBuildVector(BVN, DefBits, UndefBits)) {
11094 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
11096 (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
11100 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
11101 UndefBits, &LHS)) ||
11102 (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
11107 // We can always fall back to a non-immediate OR.
11111 // Normalize the operands of BUILD_VECTOR. The value of constant operands will
11112 // be truncated to fit element width.
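// For example (an illustrative sketch): a v8i8 BUILD_VECTOR whose operands are
// i32 constants keeps only the low 8 bits of each constant (0x1ff becomes
// 0xff), undef lanes become i32 undefs, and other lanes are left alone.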
11113 static SDValue NormalizeBuildVector(SDValue Op,
11114 SelectionDAG &DAG) {
11115 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
11117 EVT VT = Op.getValueType();
11118 EVT EltTy = VT.getVectorElementType();
11120 if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16)
11123 SmallVector<SDValue, 16> Ops;
11124 for (SDValue Lane : Op->ops()) {
11125 // For integer vectors, type legalization would have promoted the
11126 // operands already. Otherwise, if Op is a floating-point splat
11127 // (with operands cast to integers), then the only possibilities
11128 // are constants and UNDEFs.
11129 if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
11130 APInt LowBits(EltTy.getSizeInBits(),
11131 CstLane->getZExtValue());
11132 Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
11133 } else if (Lane.getNode()->isUndef()) {
11134 Lane = DAG.getUNDEF(MVT::i32);
11136 assert(Lane.getValueType() == MVT::i32 &&
11137 "Unexpected BUILD_VECTOR operand type");
11139 Ops.push_back(Lane);
11141 return DAG.getBuildVector(VT, dl, Ops);
11144 static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) {
11145 EVT VT = Op.getValueType();
11147 APInt DefBits(VT.getSizeInBits(), 0);
11148 APInt UndefBits(VT.getSizeInBits(), 0);
11149 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
11150 if (resolveBuildVector(BVN, DefBits, UndefBits)) {
11152 if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
11153 (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11154 (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
11155 (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11156 (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
11157 (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
11160 DefBits = ~DefBits;
11161 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
11162 (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
11163 (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
11166 DefBits = UndefBits;
11167 if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
11168 (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11169 (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
11170 (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11171 (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
11172 (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
11175 DefBits = ~UndefBits;
11176 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
11177 (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
11178 (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
11185 SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
11186 SelectionDAG &DAG) const {
11187 EVT VT = Op.getValueType();
11189 if (useSVEForFixedLengthVectorVT(VT)) {
11190 if (auto SeqInfo = cast<BuildVectorSDNode>(Op)->isConstantSequence()) {
11192 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
11193 SDValue Start = DAG.getConstant(SeqInfo->first, DL, ContainerVT);
11194 SDValue Steps = DAG.getStepVector(DL, ContainerVT, SeqInfo->second);
11195 SDValue Seq = DAG.getNode(ISD::ADD, DL, ContainerVT, Start, Steps);
11196 return convertFromScalableVector(DAG, Op.getValueType(), Seq);
11199 // Revert to common legalisation for all other variants.
11203 // Try to build a simple constant vector.
11204 Op = NormalizeBuildVector(Op, DAG);
11205 if (VT.isInteger()) {
11206 // Certain vector constants, used to express things like logical NOT and
11207 // arithmetic NEG, are passed through unmodified. This allows special
11208 // patterns for these operations to match, which will lower these constants
11209 // to whatever is proven necessary.
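// For example, an all-zeros or all-ones splat is returned unmodified so that
// patterns such as (xor X, all-ones) can still be recognised as a vector NOT.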
11210 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
11211 if (BVN->isConstant())
11212 if (ConstantSDNode *Const = BVN->getConstantSplatNode()) {
11213 unsigned BitSize = VT.getVectorElementType().getSizeInBits();
11215 Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
11216 if (Val.isZero() || Val.isAllOnes())
11221 if (SDValue V = ConstantBuildVector(Op, DAG))
11224 // Scan through the operands to find some interesting properties we can
11225 // exploit:
11226 // 1) If only one value is used, we can use a DUP, or
11227 // 2) if only the low element is not undef, we can just insert that, or
11228 // 3) if only one constant value is used (w/ some non-constant lanes),
11229 // we can splat the constant value into the whole vector then fill
11230 // in the non-constant lanes.
11231 // 4) FIXME: If different constant values are used, but we can intelligently
11232 // select the values we'll be overwriting for the non-constant
11233 // lanes such that we can directly materialize the vector
11234 // some other way (MOVI, e.g.), we can be sneaky.
11235 // 5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP.
11237 unsigned NumElts = VT.getVectorNumElements();
11238 bool isOnlyLowElement = true;
11239 bool usesOnlyOneValue = true;
11240 bool usesOnlyOneConstantValue = true;
11241 bool isConstant = true;
11242 bool AllLanesExtractElt = true;
11243 unsigned NumConstantLanes = 0;
11244 unsigned NumDifferentLanes = 0;
11245 unsigned NumUndefLanes = 0;
11247 SDValue ConstantValue;
11248 for (unsigned i = 0; i < NumElts; ++i) {
11249 SDValue V = Op.getOperand(i);
11250 if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11251 AllLanesExtractElt = false;
11257 isOnlyLowElement = false;
11258 if (!isIntOrFPConstant(V))
11259 isConstant = false;
11261 if (isIntOrFPConstant(V)) {
11262 ++NumConstantLanes;
11263 if (!ConstantValue.getNode())
11265 else if (ConstantValue != V)
11266 usesOnlyOneConstantValue = false;
11269 if (!Value.getNode())
11271 else if (V != Value) {
11272 usesOnlyOneValue = false;
11273 ++NumDifferentLanes;
11277 if (!Value.getNode()) {
11279 dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");
11280 return DAG.getUNDEF(VT);
11283 // Convert BUILD_VECTOR where all elements but the lowest are undef into
11284 // SCALAR_TO_VECTOR, except for when we have a single-element constant vector
11285 // as SimplifyDemandedBits will just turn that back into BUILD_VECTOR.
11286 if (isOnlyLowElement && !(NumElts == 1 && isIntOrFPConstant(Value))) {
11287 LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
11288 "SCALAR_TO_VECTOR node\n");
11289 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
11292 if (AllLanesExtractElt) {
11293 SDNode *Vector = nullptr;
11296 // Check whether the extract elements match the Even pattern <0,2,4,...> or
11297 // the Odd pattern <1,3,5,...>.
11298 for (unsigned i = 0; i < NumElts; ++i) {
11299 SDValue V = Op.getOperand(i);
11300 const SDNode *N = V.getNode();
11301 if (!isa<ConstantSDNode>(N->getOperand(1)))
11303 SDValue N0 = N->getOperand(0);
11305 // All elements are extracted from the same vector.
11307 Vector = N0.getNode();
11308 // Check that the type of EXTRACT_VECTOR_ELT matches the type of
11309 // BUILD_VECTOR.
11310 if (VT.getVectorElementType() !=
11311 N0.getValueType().getVectorElementType())
11313 } else if (Vector != N0.getNode()) {
11319 // Extracted values are either at Even indices <0,2,4,...> or at Odd
11320 // indices <1,3,5,...>.
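// For example (an illustrative sketch): building a v4i16 from lanes <0,2,4,6>
// of a v8i16 source is UZP1 of the source's two halves; lanes <1,3,5,7> give
// UZP2.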
11321 uint64_t Val = N->getConstantOperandVal(1);
11322 if (Val == 2 * i) {
11326 if (Val - 1 == 2 * i) {
11331 // Something does not match: abort.
11338 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
11339 DAG.getConstant(0, dl, MVT::i64));
11341 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
11342 DAG.getConstant(NumElts, dl, MVT::i64));
11345 return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS,
11348 return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS,
11353 // Use DUP for non-constant splats. For f32 constant splats, reduce to
11354 // i32 and try again.
11355 if (usesOnlyOneValue) {
11357 if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11358 Value.getValueType() != VT) {
11360 dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");
11361 return DAG.getNode(AArch64ISD::DUP, dl, VT, Value);
11364 // This is actually a DUPLANExx operation, which keeps everything in vector registers.
11366 SDValue Lane = Value.getOperand(1);
11367 Value = Value.getOperand(0);
11368 if (Value.getValueSizeInBits() == 64) {
11370 dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
11372 Value = WidenVector(Value, DAG);
11375 unsigned Opcode = getDUPLANEOp(VT.getVectorElementType());
11376 return DAG.getNode(Opcode, dl, VT, Value, Lane);
11379 if (VT.getVectorElementType().isFloatingPoint()) {
11380 SmallVector<SDValue, 8> Ops;
11381 EVT EltTy = VT.getVectorElementType();
11382 assert((EltTy == MVT::f16 || EltTy == MVT::bf16 || EltTy == MVT::f32 ||
11383 EltTy == MVT::f64) && "Unsupported floating-point vector type");
11385 dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
11386 "BITCASTS, and try again\n");
11387 MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits());
11388 for (unsigned i = 0; i < NumElts; ++i)
11389 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
11390 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
11391 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
11392 LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
11394 Val = LowerBUILD_VECTOR(Val, DAG);
11396 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
11400 // If we need to insert a small number of different non-constant elements and
11401 // the vector width is sufficiently large, prefer using DUP with the common
11402 // value and INSERT_VECTOR_ELT for the different lanes. If DUP is preferred,
11403 // skip the constant lane handling below.
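// For example (an illustrative sketch): a v8i16 where seven lanes hold the
// same non-constant value and one lane differs is handled as a DUP of the
// common value plus a single INSERT_VECTOR_ELT.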
11404 bool PreferDUPAndInsert =
11405 !isConstant && NumDifferentLanes >= 1 &&
11406 NumDifferentLanes < ((NumElts - NumUndefLanes) / 2) &&
11407 NumDifferentLanes >= NumConstantLanes;
11409 // If there was only one constant value, used for more than one lane,
11410 // start by splatting that value, then replace the non-constant lanes. This
11411 // is better than the default, which will perform a separate initialization
11412 // for each lane.
11413 if (!PreferDUPAndInsert && NumConstantLanes > 0 && usesOnlyOneConstantValue) {
11414 // Firstly, try to materialize the splat constant.
11415 SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue),
11416 Val = ConstantBuildVector(Vec, DAG);
11418 // Otherwise, materialize the constant and splat it.
11419 Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue);
11420 DAG.ReplaceAllUsesWith(Vec.getNode(), &Val);
11423 // Now insert the non-constant lanes.
11424 for (unsigned i = 0; i < NumElts; ++i) {
11425 SDValue V = Op.getOperand(i);
11426 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
11427 if (!isIntOrFPConstant(V))
11428 // Note that type legalization likely mucked about with the VT of the
11429 // source operand, so we may have to convert it here before inserting.
11430 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx);
11435 // This will generate a load from the constant pool.
11438 dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default "
11443 // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
11444 // v4i32s. This is really a truncate, which we can construct out of (legal)
11445 // concats and truncate nodes.
11446 if (SDValue M = ReconstructTruncateFromBuildVector(Op, DAG))
11449 // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
11450 if (NumElts >= 4) {
11451 if (SDValue shuffle = ReconstructShuffle(Op, DAG))
11455 if (PreferDUPAndInsert) {
11456 // First, build a constant vector with the common element.
11457 SmallVector<SDValue, 8> Ops(NumElts, Value);
11458 SDValue NewVector = LowerBUILD_VECTOR(DAG.getBuildVector(VT, dl, Ops), DAG);
11459 // Next, insert the elements that do not match the common value.
11460 for (unsigned I = 0; I < NumElts; ++I)
11461 if (Op.getOperand(I) != Value)
11463 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, NewVector,
11464 Op.getOperand(I), DAG.getConstant(I, dl, MVT::i64));
11469 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
11470 // know the default expansion would otherwise fall back on something even
11471 // worse. For a vector with one or two non-undef values, that's
11472 // scalar_to_vector for the elements followed by a shuffle (provided the
11473 // shuffle is valid for the target) and materialization element by element
11474 // on the stack followed by a load for everything else.
11475 if (!isConstant && !usesOnlyOneValue) {
11477 dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence "
11478 "of INSERT_VECTOR_ELT\n");
11480 SDValue Vec = DAG.getUNDEF(VT);
11481 SDValue Op0 = Op.getOperand(0);
11484 // Use SCALAR_TO_VECTOR for lane zero to
11485 // a) Avoid a RMW dependency on the full vector register, and
11486 // b) Allow the register coalescer to fold away the copy if the
11487 // value is already in an S or D register, and we're forced to emit an
11488 // INSERT_SUBREG that we can't fold anywhere.
11490 // We also allow types like i8 and i16 which are illegal scalar but legal
11491 // vector element types. After type-legalization the inserted value is
11492 // extended (i32) and it is safe to cast them to the vector type by ignoring
11493 // the upper bits of the lowest lane (e.g. v8i8, v4i16).
11494 if (!Op0.isUndef()) {
11495 LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
11496 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
11499 LLVM_DEBUG(if (i < NumElts) dbgs()
11500 << "Creating nodes for the other vector elements:\n";);
11501 for (; i < NumElts; ++i) {
11502 SDValue V = Op.getOperand(i);
11505 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
11506 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
11512 dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find "
11513 "better alternative\n");
11517 SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
11518 SelectionDAG &DAG) const {
11519 if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11520 return LowerFixedLengthConcatVectorsToSVE(Op, DAG);
11522 assert(Op.getValueType().isScalableVector() &&
11523 isTypeLegal(Op.getValueType()) &&
11524 "Expected legal scalable vector type!");
11526 if (isTypeLegal(Op.getOperand(0).getValueType())) {
11527 unsigned NumOperands = Op->getNumOperands();
11528 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
11529 "Unexpected number of operands in CONCAT_VECTORS");
11531 if (NumOperands == 2)
11534 // Concat each pair of subvectors and pack into the lower half of the array.
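// For example, four operands A, B, C, D are first combined into (A:B) and
// (C:D), and a final iteration combines those into (A:B:C:D).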
11535 SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end());
11536 while (ConcatOps.size() > 1) {
11537 for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) {
11538 SDValue V1 = ConcatOps[I];
11539 SDValue V2 = ConcatOps[I + 1];
11540 EVT SubVT = V1.getValueType();
11541 EVT PairVT = SubVT.getDoubleNumVectorElementsVT(*DAG.getContext());
11543 DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), PairVT, V1, V2);
11545 ConcatOps.resize(ConcatOps.size() / 2);
11547 return ConcatOps[0];
11553 SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
11554 SelectionDAG &DAG) const {
11555 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
11557 if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11558 return LowerFixedLengthInsertVectorElt(Op, DAG);
11560 // Check for non-constant or out of range lane.
11561 EVT VT = Op.getOperand(0).getValueType();
11563 if (VT.getScalarType() == MVT::i1) {
11564 EVT VectorVT = getPromotedVTForPredicate(VT);
11566 SDValue ExtendedVector =
11567 DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, VectorVT);
11568 SDValue ExtendedValue =
11569 DAG.getAnyExtOrTrunc(Op.getOperand(1), DL,
11570 VectorVT.getScalarType().getSizeInBits() < 32
11572 : VectorVT.getScalarType());
11574 DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VectorVT, ExtendedVector,
11575 ExtendedValue, Op.getOperand(2));
11576 return DAG.getAnyExtOrTrunc(ExtendedVector, DL, VT);
11579 ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2));
11580 if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
11583 // Insertion/extraction are legal for V128 types.
11584 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11585 VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
11586 VT == MVT::v8f16 || VT == MVT::v8bf16)
11589 if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
11590 VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
11594 // For V64 types, we perform insertion by expanding the value
11595 // to a V128 type and performing the insertion on that.
11597 SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
11598 EVT WideTy = WideVec.getValueType();
11600 SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec,
11601 Op.getOperand(1), Op.getOperand(2));
11602 // Re-narrow the resultant vector.
11603 return NarrowVector(Node, DAG);
11607 AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
11608 SelectionDAG &DAG) const {
11609 assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!");
11610 EVT VT = Op.getOperand(0).getValueType();
11612 if (VT.getScalarType() == MVT::i1) {
11613 // We can't directly extract from an SVE predicate; extend it first.
11614 // (This isn't the only possible lowering, but it's straightforward.)
11615 EVT VectorVT = getPromotedVTForPredicate(VT);
11618 DAG.getNode(ISD::ANY_EXTEND, DL, VectorVT, Op.getOperand(0));
11619 MVT ExtractTy = VectorVT == MVT::nxv2i64 ? MVT::i64 : MVT::i32;
11620 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractTy,
11621 Extend, Op.getOperand(1));
11622 return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType());
11625 if (useSVEForFixedLengthVectorVT(VT))
11626 return LowerFixedLengthExtractVectorElt(Op, DAG);
11628 // Check for non-constant or out of range lane.
11629 ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1));
11630 if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
11633 // Insertion/extraction are legal for V128 types.
11634 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11635 VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
11636 VT == MVT::v8f16 || VT == MVT::v8bf16)
11639 if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
11640 VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
11644 // For V64 types, we perform extraction by expanding the value
11645 // to a V128 type and performing the extraction on that.
11647 SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
11648 EVT WideTy = WideVec.getValueType();
11650 EVT ExtrTy = WideTy.getVectorElementType();
11651 if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8)
11654 // For extractions, we just return the result directly.
11655 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec,
11659 SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
11660 SelectionDAG &DAG) const {
11661 assert(Op.getValueType().isFixedLengthVector() &&
11662 "Only cases that extract a fixed length vector are supported!");
11664 EVT InVT = Op.getOperand(0).getValueType();
11665 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
11666 unsigned Size = Op.getValueSizeInBits();
11668 // If we don't have legal types yet, do nothing
11669 if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
11672 if (InVT.isScalableVector()) {
11673 // This will be matched by custom code during ISelDAGToDAG.
11674 if (Idx == 0 && isPackedVectorType(InVT, DAG))
11680 // This will get lowered to an appropriate EXTRACT_SUBREG in ISel.
11681 if (Idx == 0 && InVT.getSizeInBits() <= 128)
11684 // If this is extracting the upper 64-bits of a 128-bit vector, we match
11685 // that directly.
11686 if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 &&
11687 InVT.getSizeInBits() == 128)
11690 if (useSVEForFixedLengthVectorVT(InVT)) {
11693 EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
11695 convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
11697 SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ContainerVT, NewInVec,
11698 NewInVec, DAG.getConstant(Idx, DL, MVT::i64));
11699 return convertFromScalableVector(DAG, Op.getValueType(), Splice);
11705 SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
11706 SelectionDAG &DAG) const {
11707 assert(Op.getValueType().isScalableVector() &&
11708 "Only expect to lower inserts into scalable vectors!");
11710 EVT InVT = Op.getOperand(1).getValueType();
11711 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
11713 SDValue Vec0 = Op.getOperand(0);
11714 SDValue Vec1 = Op.getOperand(1);
11716 EVT VT = Op.getValueType();
11718 if (InVT.isScalableVector()) {
11719 if (!isTypeLegal(VT))
11722 // Break down insert_subvector into simpler parts.
11723 if (VT.getVectorElementType() == MVT::i1) {
11724 unsigned NumElts = VT.getVectorMinNumElements();
11725 EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
11728 Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
11729 DAG.getVectorIdxConstant(0, DL));
11730 Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
11731 DAG.getVectorIdxConstant(NumElts / 2, DL));
11732 if (Idx < (NumElts / 2)) {
11733 SDValue NewLo = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Lo, Vec1,
11734 DAG.getVectorIdxConstant(Idx, DL));
11735 return DAG.getNode(AArch64ISD::UZP1, DL, VT, NewLo, Hi);
11738 DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Hi, Vec1,
11739 DAG.getVectorIdxConstant(Idx - (NumElts / 2), DL));
11740 return DAG.getNode(AArch64ISD::UZP1, DL, VT, Lo, NewHi);
11744 // Ensure the subvector is half the size of the main vector.
11745 if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2))
11748 // Here narrow and wide refer to the vector element types. After "casting",
11749 // both vectors must have the same bit length, so because the subvector has
11750 // fewer elements, those elements need to be bigger.
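// For example (an illustrative sketch): inserting an nxv2f32 subvector into an
// nxv4f32 vector is performed on an nxv4i32 (narrow) cast of the full vector
// and an nxv2i64 (wide) cast of the subvector.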
11751 EVT NarrowVT = getPackedSVEVectorVT(VT.getVectorElementCount());
11752 EVT WideVT = getPackedSVEVectorVT(InVT.getVectorElementCount());
11754 // NOP cast operands to the largest legal vector of the same element count.
11755 if (VT.isFloatingPoint()) {
11756 Vec0 = getSVESafeBitCast(NarrowVT, Vec0, DAG);
11757 Vec1 = getSVESafeBitCast(WideVT, Vec1, DAG);
11759 // Legal integer vectors are already their largest so Vec0 is fine as is.
11760 Vec1 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1);
11763 // To replace the top/bottom half of vector V with vector SubV we widen the
11764 // preserved half of V, concatenate this to SubV (the order depending on the
11765 // half being replaced) and then narrow the result.
11768 SDValue HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0);
11769 Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, Vec1, HiVec0);
11771 assert(Idx == InVT.getVectorMinNumElements() &&
11772 "Invalid subvector index!");
11773 SDValue LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0);
11774 Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, LoVec0, Vec1);
11777 return getSVESafeBitCast(VT, Narrow, DAG);
11780 if (Idx == 0 && isPackedVectorType(VT, DAG)) {
11781 // This will be matched by custom code during ISelDAGToDAG.
11782 if (Vec0.isUndef())
11785 Optional<unsigned> PredPattern =
11786 getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
11787 auto PredTy = VT.changeVectorElementType(MVT::i1);
11788 SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
11789 SDValue ScalableVec1 = convertToScalableVector(DAG, VT, Vec1);
11790 return DAG.getNode(ISD::VSELECT, DL, VT, PTrue, ScalableVec1, Vec0);
11796 static bool isPow2Splat(SDValue Op, uint64_t &SplatVal, bool &Negated) {
11797 if (Op.getOpcode() != AArch64ISD::DUP &&
11798 Op.getOpcode() != ISD::SPLAT_VECTOR &&
11799 Op.getOpcode() != ISD::BUILD_VECTOR)
11802 if (Op.getOpcode() == ISD::BUILD_VECTOR &&
11803 !isAllConstantBuildVector(Op, SplatVal))
11806 if (Op.getOpcode() != ISD::BUILD_VECTOR &&
11807 !isa<ConstantSDNode>(Op->getOperand(0)))
11810 SplatVal = Op->getConstantOperandVal(0);
11811 if (Op.getValueType().getVectorElementType() != MVT::i64)
11812 SplatVal = (int32_t)SplatVal;
11815 if (isPowerOf2_64(SplatVal))
11819 if (isPowerOf2_64(-SplatVal)) {
11820 SplatVal = -SplatVal;
11827 SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
11828 EVT VT = Op.getValueType();
11831 if (useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true))
11832 return LowerFixedLengthVectorIntDivideToSVE(Op, DAG);
11834 assert(VT.isScalableVector() && "Expected a scalable vector.");
11836 bool Signed = Op.getOpcode() == ISD::SDIV;
11837 unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
11841 if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
11842 SDValue Pg = getPredicateForScalableVector(DAG, dl, VT);
11844 DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, VT, Pg, Op->getOperand(0),
11845 DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32));
11847 Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res);
11852 if (VT == MVT::nxv4i32 || VT == MVT::nxv2i64)
11853 return LowerToPredicatedOp(Op, DAG, PredOpcode);
11855 // SVE doesn't have i8 and i16 DIV operations; widen them to 32-bit
11856 // operations, and truncate the result.
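// For example (an illustrative sketch): an nxv16i8 divide is unpacked into two
// nxv8i16 halves, each of which is widened again to nxv4i32 by this same path,
// and the two results are repacked (and truncated) with UZP1.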
11858 if (VT == MVT::nxv16i8)
11859 WidenedVT = MVT::nxv8i16;
11860 else if (VT == MVT::nxv8i16)
11861 WidenedVT = MVT::nxv4i32;
11863 llvm_unreachable("Unexpected Custom DIV operation");
11865 unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
11866 unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
11867 SDValue Op0Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(0));
11868 SDValue Op1Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(1));
11869 SDValue Op0Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(0));
11870 SDValue Op1Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(1));
11871 SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Lo, Op1Lo);
11872 SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Hi, Op1Hi);
11873 return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi);
11876 bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
11877 // Currently no fixed length shuffles that require SVE are legal.
11878 if (useSVEForFixedLengthVectorVT(VT))
11881 if (VT.getVectorNumElements() == 4 &&
11882 (VT.is128BitVector() || VT.is64BitVector())) {
11883 unsigned Cost = getPerfectShuffleCost(M);
11890 unsigned DummyUnsigned;
11892 return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
11893 isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
11894 isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
11895 // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
11896 isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
11897 isZIPMask(M, VT, DummyUnsigned) ||
11898 isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
11899 isUZP_v_undef_Mask(M, VT, DummyUnsigned) ||
11900 isZIP_v_undef_Mask(M, VT, DummyUnsigned) ||
11901 isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) ||
11902 isConcatMask(M, VT, VT.getSizeInBits() == 128));
11905 bool AArch64TargetLowering::isVectorClearMaskLegal(ArrayRef<int> M,
11907 // Just delegate to the generic legality check; clear masks aren't special.
11908 return isShuffleMaskLegal(M, VT);
11911 /// getVShiftImm - Check if this is a valid build_vector for the immediate
11912 /// operand of a vector shift operation, where all the elements of the
11913 /// build_vector must have the same constant integer value.
11914 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
11915 // Ignore bit_converts.
11916 while (Op.getOpcode() == ISD::BITCAST)
11917 Op = Op.getOperand(0);
11918 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
11919 APInt SplatBits, SplatUndef;
11920 unsigned SplatBitSize;
11922 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11923 HasAnyUndefs, ElementBits) ||
11924 SplatBitSize > ElementBits)
11926 Cnt = SplatBits.getSExtValue();
11930 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
11931 /// operand of a vector shift left operation. That value must be in the range:
11932 /// 0 <= Value < ElementBits for a left shift; or
11933 /// 0 <= Value <= ElementBits for a long left shift.
11934 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
11935 assert(VT.isVector() && "vector shift count is not a vector type");
11936 int64_t ElementBits = VT.getScalarSizeInBits();
11937 if (!getVShiftImm(Op, ElementBits, Cnt))
11939 return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
11942 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
11943 /// operand of a vector shift right operation. The value must be in the range:
11944 /// 1 <= Value <= ElementBits for a right shift; or
/// 1 <= Value <= ElementBits/2 for a narrow right shift.
11945 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
11946 assert(VT.isVector() && "vector shift count is not a vector type");
11947 int64_t ElementBits = VT.getScalarSizeInBits();
11948 if (!getVShiftImm(Op, ElementBits, Cnt))
11950 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
11953 SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
11954 SelectionDAG &DAG) const {
11955 EVT VT = Op.getValueType();
11957 if (VT.getScalarType() == MVT::i1) {
11958 // Lower i1 truncate to `(x & 1) != 0`.
11960 EVT OpVT = Op.getOperand(0).getValueType();
11961 SDValue Zero = DAG.getConstant(0, dl, OpVT);
11962 SDValue One = DAG.getConstant(1, dl, OpVT);
11963 SDValue And = DAG.getNode(ISD::AND, dl, OpVT, Op.getOperand(0), One);
11964 return DAG.getSetCC(dl, VT, And, Zero, ISD::SETNE);
11967 if (!VT.isVector() || VT.isScalableVector())
11970 if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
11971 return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
11976 SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
11977 SelectionDAG &DAG) const {
11978 EVT VT = Op.getValueType();
11982 if (!Op.getOperand(1).getValueType().isVector())
11984 unsigned EltSize = VT.getScalarSizeInBits();
11986 switch (Op.getOpcode()) {
11988 if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
11989 return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED);
11991 if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
11992 return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
11993 DAG.getConstant(Cnt, DL, MVT::i32));
11994 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
11995 DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
11997 Op.getOperand(0), Op.getOperand(1));
12000 if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) {
12001 unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED
12002 : AArch64ISD::SRL_PRED;
12003 return LowerToPredicatedOp(Op, DAG, Opc);
12006 // Right shift immediate
12007 if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) {
12009 (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
12010 return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
12011 DAG.getConstant(Cnt, DL, MVT::i32));
12014 // Right shift register. Note that there is no shift right register
12015 // instruction, but the shift left register instruction takes a signed
12016 // value, where negative numbers specify a right shift.
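// For example, (srl X, Y) becomes ushl(X, 0 - Y): the SUB below produces the
// negated per-lane shift amounts and USHL then shifts each lane right.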
12017 unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl
12018 : Intrinsic::aarch64_neon_ushl;
12019 // Negate the shift amount.
12020 SDValue NegShift = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
12022 SDValue NegShiftLeft =
12023 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
12024 DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0),
12026 return NegShiftLeft;
12029 llvm_unreachable("unexpected shift opcode");
12032 static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
12033 AArch64CC::CondCode CC, bool NoNans, EVT VT,
12034 const SDLoc &dl, SelectionDAG &DAG) {
12035 EVT SrcVT = LHS.getValueType();
12036 assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
12037 "function only supposed to emit natural comparisons");
12039 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
12040 APInt CnstBits(VT.getSizeInBits(), 0);
12041 APInt UndefBits(VT.getSizeInBits(), 0);
12042 bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits);
12043 bool IsZero = IsCnst && (CnstBits == 0);
12045 if (SrcVT.getVectorElementType().isFloatingPoint()) {
12049 case AArch64CC::NE: {
12052 Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
12054 Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
12055 return DAG.getNOT(dl, Fcmeq, VT);
12057 case AArch64CC::EQ:
12059 return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
12060 return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
12061 case AArch64CC::GE:
12063 return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS);
12064 return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS);
12065 case AArch64CC::GT:
12067 return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS);
12068 return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS);
12069 case AArch64CC::LE:
12072 // If we ignore NaNs then we can use the LS implementation.
12074 case AArch64CC::LS:
12076 return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
12077 return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS);
12078 case AArch64CC::LT:
12081 // If we ignore NaNs then we can use the MI implementation.
12083 case AArch64CC::MI:
12085 return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
12086 return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS);
12093 case AArch64CC::NE: {
12096 Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
12098 Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
12099 return DAG.getNOT(dl, Cmeq, VT);
12101 case AArch64CC::EQ:
12103 return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
12104 return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
12105 case AArch64CC::GE:
12107 return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS);
12108 return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS);
12109 case AArch64CC::GT:
12111 return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS);
12112 return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS);
12113 case AArch64CC::LE:
12115 return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS);
12116 return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS);
12117 case AArch64CC::LS:
12118 return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS);
12119 case AArch64CC::LO:
12120 return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS);
12121 case AArch64CC::LT:
12123 return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS);
12124 return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS);
12125 case AArch64CC::HI:
12126 return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS);
12127 case AArch64CC::HS:
12128 return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS);
12132 SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
12133 SelectionDAG &DAG) const {
12134 if (Op.getValueType().isScalableVector())
12135 return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO);
12137 if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
12138 return LowerFixedLengthVectorSetccToSVE(Op, DAG);
12140 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
12141 SDValue LHS = Op.getOperand(0);
12142 SDValue RHS = Op.getOperand(1);
12143 EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
12146 if (LHS.getValueType().getVectorElementType().isInteger()) {
12147 assert(LHS.getValueType() == RHS.getValueType());
12148 AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
12150 EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
12151 return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
12154 const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
12156 // Make v4f16 (only) fcmp operations utilise vector instructions;
12157 // v8f16 support will be a little more complicated.
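// For example (an illustrative sketch): a v4f16 setcc is FP_EXTENDed to v4f32,
// compared there, and the v4i32 mask is truncated back to v4i16 afterwards.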
12158 if (!FullFP16 && LHS.getValueType().getVectorElementType() == MVT::f16) {
12159 if (LHS.getValueType().getVectorNumElements() == 4) {
12160 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS);
12161 RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS);
12162 SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC);
12163 DAG.ReplaceAllUsesWith(Op, NewSetcc);
12164 CmpVT = MVT::v4i32;
12169 assert((!FullFP16 && LHS.getValueType().getVectorElementType() != MVT::f16) ||
12170 LHS.getValueType().getVectorElementType() != MVT::f128);
12172 // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
12173 // clean. Some of them require two branches to implement.
12174 AArch64CC::CondCode CC1, CC2;
12176 changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert);
12178 bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs();
12180 EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG);
12181 if (!Cmp.getNode())
12184 if (CC2 != AArch64CC::AL) {
12186 EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG);
12187 if (!Cmp2.getNode())
12190 Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2);
12193 Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
12196 Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());
12201 static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
12202 SelectionDAG &DAG) {
12203 SDValue VecOp = ScalarOp.getOperand(0);
12204 auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp);
12205 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx,
12206 DAG.getConstant(0, DL, MVT::i64));
12209 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
12210 SelectionDAG &DAG) const {
12211 SDValue Src = Op.getOperand(0);
12213 // Try to lower fixed length reductions to SVE.
12214 EVT SrcVT = Src.getValueType();
12215 bool OverrideNEON = Op.getOpcode() == ISD::VECREDUCE_AND ||
12216 Op.getOpcode() == ISD::VECREDUCE_OR ||
12217 Op.getOpcode() == ISD::VECREDUCE_XOR ||
12218 Op.getOpcode() == ISD::VECREDUCE_FADD ||
12219 (Op.getOpcode() != ISD::VECREDUCE_ADD &&
12220 SrcVT.getVectorElementType() == MVT::i64);
12221 if (SrcVT.isScalableVector() ||
12222 useSVEForFixedLengthVectorVT(
12223 SrcVT, OverrideNEON && Subtarget->useSVEForFixedLengthVectors())) {
12225 if (SrcVT.getVectorElementType() == MVT::i1)
12226 return LowerPredReductionToSVE(Op, DAG);
12228 switch (Op.getOpcode()) {
12229 case ISD::VECREDUCE_ADD:
12230 return LowerReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG);
12231 case ISD::VECREDUCE_AND:
12232 return LowerReductionToSVE(AArch64ISD::ANDV_PRED, Op, DAG);
12233 case ISD::VECREDUCE_OR:
12234 return LowerReductionToSVE(AArch64ISD::ORV_PRED, Op, DAG);
12235 case ISD::VECREDUCE_SMAX:
12236 return LowerReductionToSVE(AArch64ISD::SMAXV_PRED, Op, DAG);
12237 case ISD::VECREDUCE_SMIN:
12238 return LowerReductionToSVE(AArch64ISD::SMINV_PRED, Op, DAG);
12239 case ISD::VECREDUCE_UMAX:
12240 return LowerReductionToSVE(AArch64ISD::UMAXV_PRED, Op, DAG);
12241 case ISD::VECREDUCE_UMIN:
12242 return LowerReductionToSVE(AArch64ISD::UMINV_PRED, Op, DAG);
12243 case ISD::VECREDUCE_XOR:
12244 return LowerReductionToSVE(AArch64ISD::EORV_PRED, Op, DAG);
12245 case ISD::VECREDUCE_FADD:
12246 return LowerReductionToSVE(AArch64ISD::FADDV_PRED, Op, DAG);
12247 case ISD::VECREDUCE_FMAX:
12248 return LowerReductionToSVE(AArch64ISD::FMAXNMV_PRED, Op, DAG);
12249 case ISD::VECREDUCE_FMIN:
12250 return LowerReductionToSVE(AArch64ISD::FMINNMV_PRED, Op, DAG);
12252 llvm_unreachable("Unhandled fixed length reduction");
12256 // Lower NEON reductions.
12258 switch (Op.getOpcode()) {
12259 case ISD::VECREDUCE_ADD:
12260 return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
12261 case ISD::VECREDUCE_SMAX:
12262 return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
12263 case ISD::VECREDUCE_SMIN:
12264 return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG);
12265 case ISD::VECREDUCE_UMAX:
12266 return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG);
12267 case ISD::VECREDUCE_UMIN:
12268 return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG);
12269 case ISD::VECREDUCE_FMAX: {
12270 return DAG.getNode(
12271 ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
12272 DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32),
12275 case ISD::VECREDUCE_FMIN: {
12276 return DAG.getNode(
12277 ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
12278 DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32),
12282 llvm_unreachable("Unhandled reduction");
12286 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
12287 SelectionDAG &DAG) const {
12288 auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
12289 if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
12292 // LSE has an atomic load-add instruction, but not a load-sub.
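// For example, (atomicrmw sub ptr, X) is rewritten below as an atomic
// load-add of (0 - X) so that LDADD can be used.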
12294 MVT VT = Op.getSimpleValueType();
12295 SDValue RHS = Op.getOperand(2);
12296 AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
12297 RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS);
12298 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(),
12299 Op.getOperand(0), Op.getOperand(1), RHS,
12300 AN->getMemOperand());
12303 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
12304 SelectionDAG &DAG) const {
12305 auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
12306 if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
12309 // LSE has an atomic load-clear instruction, but not a load-and.
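// For example, (atomicrmw and ptr, X) is rewritten below as an atomic
// load-clear of ~X (the XOR with all-ones inverts the mask) so that LDCLR
// can be used.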
12311 MVT VT = Op.getSimpleValueType();
12312 SDValue RHS = Op.getOperand(2);
12313 AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
12314 RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS);
12315 return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(),
12316 Op.getOperand(0), Op.getOperand(1), RHS,
12317 AN->getMemOperand());
12320 SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(
12321 SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const {
12323 EVT PtrVT = getPointerTy(DAG.getDataLayout());
12324 SDValue Callee = DAG.getTargetExternalSymbol("__chkstk", PtrVT, 0);
12326 const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
12327 const uint32_t *Mask = TRI->getWindowsStackProbePreservedMask();
12328 if (Subtarget->hasCustomCallingConv())
12329 TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
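// Note: on Windows AArch64, __chkstk expects the allocation size in X15 in
// units of 16 bytes, hence the SRL by 4 before the call and the SHL by 4
// afterwards.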
12331 Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size,
12332 DAG.getConstant(4, dl, MVT::i64));
12333 Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue());
12335 DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue),
12336 Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64),
12337 DAG.getRegisterMask(Mask), Chain.getValue(1));
12338 // To match the actual intent better, we should read the output from X15 here
12339 // again (instead of potentially spilling it to the stack), but rereading Size
12340 // from X15 here doesn't work at -O0, since it thinks that X15 is undefined
12341 // here.
12343 Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size,
12344 DAG.getConstant(4, dl, MVT::i64));
12349 AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
12350 SelectionDAG &DAG) const {
12351 assert(Subtarget->isTargetWindows() &&
12352 "Only Windows alloca probing supported");
12355 SDNode *Node = Op.getNode();
12356 SDValue Chain = Op.getOperand(0);
12357 SDValue Size = Op.getOperand(1);
12359 cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
12360 EVT VT = Node->getValueType(0);
12362 if (DAG.getMachineFunction().getFunction().hasFnAttribute(
12363 "no-stack-arg-probe")) {
12364 SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
12365 Chain = SP.getValue(1);
12366 SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
12368 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
12369 DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
12370 Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
12371 SDValue Ops[2] = {SP, Chain};
12372 return DAG.getMergeValues(Ops, dl);
12375 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
12377 Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG);
12379 SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
12380 Chain = SP.getValue(1);
12381 SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
12383 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
12384 DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
12385 Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
12387 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
12388 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
12390 SDValue Ops[2] = {SP, Chain};
12391 return DAG.getMergeValues(Ops, dl);
12394 SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
12395 SelectionDAG &DAG) const {
12396 EVT VT = Op.getValueType();
12397 assert(VT != MVT::i64 && "Expected illegal VSCALE node");
12400 APInt MulImm = cast<ConstantSDNode>(Op.getOperand(0))->getAPIntValue();
12401 return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sext(64)), DL,
12405 /// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
12406 template <unsigned NumVecs>
12408 setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
12409 AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) {
12410 Info.opc = ISD::INTRINSIC_VOID;
12411 // Retrieve EC from first vector argument.
12412 const EVT VT = TLI.getMemValueType(DL, CI.getArgOperand(0)->getType());
12413 ElementCount EC = VT.getVectorElementCount();
12415 // Check the assumption that all input vectors are the same type.
12416 for (unsigned I = 0; I < NumVecs; ++I)
12417 assert(VT == TLI.getMemValueType(DL, CI.getArgOperand(I)->getType()) &&
12420 // memVT is `NumVecs * VT`.
12421 Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
12423 Info.ptrVal = CI.getArgOperand(CI.arg_size() - 1);
12425 Info.align.reset();
12426 Info.flags = MachineMemOperand::MOStore;
12430 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
12431 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
12432 /// specified in the intrinsic calls.
12433 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
12435 MachineFunction &MF,
12436 unsigned Intrinsic) const {
12437 auto &DL = I.getModule()->getDataLayout();
12438 switch (Intrinsic) {
12439 case Intrinsic::aarch64_sve_st2:
12440 return setInfoSVEStN<2>(*this, DL, Info, I);
12441 case Intrinsic::aarch64_sve_st3:
12442 return setInfoSVEStN<3>(*this, DL, Info, I);
12443 case Intrinsic::aarch64_sve_st4:
12444 return setInfoSVEStN<4>(*this, DL, Info, I);
12445 case Intrinsic::aarch64_neon_ld2:
12446 case Intrinsic::aarch64_neon_ld3:
12447 case Intrinsic::aarch64_neon_ld4:
12448 case Intrinsic::aarch64_neon_ld1x2:
12449 case Intrinsic::aarch64_neon_ld1x3:
12450 case Intrinsic::aarch64_neon_ld1x4:
12451 case Intrinsic::aarch64_neon_ld2lane:
12452 case Intrinsic::aarch64_neon_ld3lane:
12453 case Intrinsic::aarch64_neon_ld4lane:
12454 case Intrinsic::aarch64_neon_ld2r:
12455 case Intrinsic::aarch64_neon_ld3r:
12456 case Intrinsic::aarch64_neon_ld4r: {
12457 Info.opc = ISD::INTRINSIC_W_CHAIN;
12458 // Conservatively set memVT to the entire set of vectors loaded.
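// For example (an illustrative sketch): an aarch64.neon.ld3 of v2i64 values
// loads 3 x 128 bits, so memVT becomes v6i64 here.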
12459 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
12460 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12461 Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
12463 Info.align.reset();
12464 // Volatile loads with NEON intrinsics are not supported.
12465 Info.flags = MachineMemOperand::MOLoad;
12468 case Intrinsic::aarch64_neon_st2:
12469 case Intrinsic::aarch64_neon_st3:
12470 case Intrinsic::aarch64_neon_st4:
12471 case Intrinsic::aarch64_neon_st1x2:
12472 case Intrinsic::aarch64_neon_st1x3:
12473 case Intrinsic::aarch64_neon_st1x4:
12474 case Intrinsic::aarch64_neon_st2lane:
12475 case Intrinsic::aarch64_neon_st3lane:
12476 case Intrinsic::aarch64_neon_st4lane: {
12477 Info.opc = ISD::INTRINSIC_VOID;
12478 // Conservatively set memVT to the entire set of vectors stored.
12479 unsigned NumElts = 0;
12480 for (const Value *Arg : I.args()) {
12481 Type *ArgTy = Arg->getType();
12482 if (!ArgTy->isVectorTy())
12484 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
12486 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12487 Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
12489 Info.align.reset();
12490 // Volatile stores with NEON intrinsics are not supported.
12491 Info.flags = MachineMemOperand::MOStore;
12494 case Intrinsic::aarch64_ldaxr:
12495 case Intrinsic::aarch64_ldxr: {
12496 Type *ValTy = I.getParamElementType(0);
12497 Info.opc = ISD::INTRINSIC_W_CHAIN;
12498 Info.memVT = MVT::getVT(ValTy);
12499 Info.ptrVal = I.getArgOperand(0);
12501 Info.align = DL.getABITypeAlign(ValTy);
12502 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
12505 case Intrinsic::aarch64_stlxr:
12506 case Intrinsic::aarch64_stxr: {
12507 Type *ValTy = I.getParamElementType(1);
12508 Info.opc = ISD::INTRINSIC_W_CHAIN;
12509 Info.memVT = MVT::getVT(ValTy);
12510 Info.ptrVal = I.getArgOperand(1);
12512 Info.align = DL.getABITypeAlign(ValTy);
12513 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
12516 case Intrinsic::aarch64_ldaxp:
12517 case Intrinsic::aarch64_ldxp:
12518 Info.opc = ISD::INTRINSIC_W_CHAIN;
12519 Info.memVT = MVT::i128;
12520 Info.ptrVal = I.getArgOperand(0);
12522 Info.align = Align(16);
12523 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
12525 case Intrinsic::aarch64_stlxp:
12526 case Intrinsic::aarch64_stxp:
12527 Info.opc = ISD::INTRINSIC_W_CHAIN;
12528 Info.memVT = MVT::i128;
12529 Info.ptrVal = I.getArgOperand(2);
12531 Info.align = Align(16);
12532 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
12534 case Intrinsic::aarch64_sve_ldnt1: {
12535 Type *ElTy = cast<VectorType>(I.getType())->getElementType();
12536 Info.opc = ISD::INTRINSIC_W_CHAIN;
12537 Info.memVT = MVT::getVT(I.getType());
12538 Info.ptrVal = I.getArgOperand(1);
12540 Info.align = DL.getABITypeAlign(ElTy);
12541 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
12544 case Intrinsic::aarch64_sve_stnt1: {
12546 cast<VectorType>(I.getArgOperand(0)->getType())->getElementType();
12547 Info.opc = ISD::INTRINSIC_W_CHAIN;
12548 Info.memVT = MVT::getVT(I.getOperand(0)->getType());
12549 Info.ptrVal = I.getArgOperand(2);
12551 Info.align = DL.getABITypeAlign(ElTy);
12552 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
12555 case Intrinsic::aarch64_mops_memset_tag: {
12556 Value *Dst = I.getArgOperand(0);
12557 Value *Val = I.getArgOperand(1);
12558 Info.opc = ISD::INTRINSIC_W_CHAIN;
12559 Info.memVT = MVT::getVT(Val->getType());
12562 Info.align = I.getParamAlign(0).valueOrOne();
12563 Info.flags = MachineMemOperand::MOStore;
12564 // The size of the memory being operated on is unknown at this point
12565 Info.size = MemoryLocation::UnknownSize;
12575 bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
12576 ISD::LoadExtType ExtTy,
12578 // TODO: This may be worth removing. Check regression tests for diffs.
12579 if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
12582 // If we're reducing the load width in order to avoid having to use an extra
12583 // instruction to do extension then it's probably a good idea.
12584 if (ExtTy != ISD::NON_EXTLOAD)
12586 // Don't reduce load width if it would prevent us from combining a shift into
12587 // the offset.
12588 MemSDNode *Mem = dyn_cast<MemSDNode>(Load);
12590 const SDValue &Base = Mem->getBasePtr();
12591 if (Base.getOpcode() == ISD::ADD &&
12592 Base.getOperand(1).getOpcode() == ISD::SHL &&
12593 Base.getOperand(1).hasOneUse() &&
12594 Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
12595 // It's unknown whether a scalable vector has a power-of-2 bitwidth.
12596 if (Mem->getMemoryVT().isScalableVector())
12598 // The shift can be combined if it matches the size of the value being
12599 // loaded (and so reducing the width would make it not match).
12600 uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
12601 uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8;
12602 if (ShiftAmount == Log2_32(LoadBytes))
12605 // We have no reason to disallow reducing the load width, so allow it.
12609 // Truncations from 64-bit GPR to 32-bit GPR are free.
12610 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
12611 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12613 uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedSize();
12614 uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedSize();
12615 return NumBits1 > NumBits2;
12617 bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
12618 if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
12620 uint64_t NumBits1 = VT1.getFixedSizeInBits();
12621 uint64_t NumBits2 = VT2.getFixedSizeInBits();
12622 return NumBits1 > NumBits2;
12625 /// Check if it is profitable to hoist an instruction in then/else to if.
12626 /// Not profitable if I and its user can form an FMA instruction,
12627 /// because we prefer FMSUB/FMADD.
12628 bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
12629 if (I->getOpcode() != Instruction::FMul)
12632 if (!I->hasOneUse())
12635 Instruction *User = I->user_back();
12637 if (!(User->getOpcode() == Instruction::FSub ||
12638 User->getOpcode() == Instruction::FAdd))
12641 const TargetOptions &Options = getTargetMachine().Options;
12642 const Function *F = I->getFunction();
12643 const DataLayout &DL = F->getParent()->getDataLayout();
12644 Type *Ty = User->getOperand(0)->getType();
12646 return !(isFMAFasterThanFMulAndFAdd(*F, Ty) &&
12647 isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
12648 (Options.AllowFPOpFusion == FPOpFusion::Fast ||
12649 Options.UnsafeFPMath));
// All 32-bit GPR operations implicitly zero the high-half of the corresponding
// 64-bit GPR.
bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2)) {
    return true;
  }

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  // 8-, 16-, and 32-bit integer loads all implicitly zero-extend.
  return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() &&
          VT2.isSimple() && !VT2.isVector() && VT2.isInteger() &&
          VT1.getSizeInBits() <= 32);
}
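// Illustrative example: a 32-bit operation such as "add w0, w1, w2" already
// clears bits [63:32] of x0, so a following zext i32 -> i64 needs no code.
// Similarly "ldrb", "ldrh" and 32-bit "ldr" zero-extend into the full 64-bit
// register, which is why loads of width <= 32 are reported as free above.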
bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
  if (isa<FPExtInst>(Ext))
    return false;

  // Vector types are not free.
  if (Ext->getType()->isVectorTy())
    return false;

  for (const Use &U : Ext->uses()) {
    // The extension is free if we can fold it with a left shift in an
    // addressing mode or an arithmetic operation: add, sub, and cmp.

    // Is there a shift?
    const Instruction *Instr = cast<Instruction>(U.getUser());

    // Is this a constant shift?
    switch (Instr->getOpcode()) {
    case Instruction::Shl:
      if (!isa<ConstantInt>(Instr->getOperand(1)))
        return false;
      break;
    case Instruction::GetElementPtr: {
      gep_type_iterator GTI = gep_type_begin(Instr);
      auto &DL = Ext->getModule()->getDataLayout();
      std::advance(GTI, U.getOperandNo()-1);
      Type *IdxTy = GTI.getIndexedType();
      // This extension will end up with a shift because of the scaling factor.
      // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
      // Get the shift amount based on the scaling factor:
      // log2(sizeof(IdxTy)) - log2(8).
      uint64_t ShiftAmt =
          countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedSize()) - 3;
      // Is the constant foldable in the shift of the addressing mode?
      // I.e., shift amount is between 1 and 4 inclusive.
      if (ShiftAmt == 0 || ShiftAmt > 4)
        return false;
      break;
    }
    case Instruction::Trunc:
      // Check if this is a noop.
      // trunc(sext ty1 to ty2) to ty1.
      if (Instr->getType() == Ext->getOperand(0)->getType())
        continue;
      [[fallthrough]];
    default:
      return false;
    }

    // At this point we can use the bfm family, so this extension is free
    // for our purposes.
  }

  return true;
}
/// Check if both Op1 and Op2 are shufflevector extracts of either the lower
/// or upper half of the vector elements.
static bool areExtractShuffleVectors(Value *Op1, Value *Op2) {
  auto areTypesHalfed = [](Value *FullV, Value *HalfV) {
    auto *FullTy = FullV->getType();
    auto *HalfTy = HalfV->getType();
    return FullTy->getPrimitiveSizeInBits().getFixedSize() ==
           2 * HalfTy->getPrimitiveSizeInBits().getFixedSize();
  };

  auto extractHalf = [](Value *FullV, Value *HalfV) {
    auto *FullVT = cast<FixedVectorType>(FullV->getType());
    auto *HalfVT = cast<FixedVectorType>(HalfV->getType());
    return FullVT->getNumElements() == 2 * HalfVT->getNumElements();
  };

  ArrayRef<int> M1, M2;
  Value *S1Op1, *S2Op1;
  if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) ||
      !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
    return false;

  // Check that the operands are half as wide as the result and we extract
  // half of the elements of the input vectors.
  if (!areTypesHalfed(S1Op1, Op1) || !areTypesHalfed(S2Op1, Op2) ||
      !extractHalf(S1Op1, Op1) || !extractHalf(S2Op1, Op2))
    return false;

  // Check the mask extracts either the lower or upper half of vector
  // elements.
  int M1Start = 0;
  int M2Start = 0;
  int NumElements = cast<FixedVectorType>(Op1->getType())->getNumElements() * 2;
  if (!ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start) ||
      !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start) ||
      M1Start != M2Start || (M1Start != 0 && M2Start != (NumElements / 2)))
    return false;

  return true;
}
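// Illustrative example of what is accepted above: with a v8i16 source, the
// masks <0,1,2,3> and <4,5,6,7> extract the low and high halves respectively;
// both operands must extract the same half for the pair to qualify.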
/// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
/// of the vector elements.
static bool areExtractExts(Value *Ext1, Value *Ext2) {
  auto areExtDoubled = [](Instruction *Ext) {
    return Ext->getType()->getScalarSizeInBits() ==
           2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
  };

  if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
      !match(Ext2, m_ZExtOrSExt(m_Value())) ||
      !areExtDoubled(cast<Instruction>(Ext1)) ||
      !areExtDoubled(cast<Instruction>(Ext2)))
    return false;

  return true;
}

/// Check if Op could be used with vmull_high_p64 intrinsic.
static bool isOperandOfVmullHighP64(Value *Op) {
  Value *VectorOperand = nullptr;
  ConstantInt *ElementIndex = nullptr;
  return match(Op, m_ExtractElt(m_Value(VectorOperand),
                                m_ConstantInt(ElementIndex))) &&
         ElementIndex->getValue() == 1 &&
         isa<FixedVectorType>(VectorOperand->getType()) &&
         cast<FixedVectorType>(VectorOperand->getType())->getNumElements() == 2;
}

/// Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) {
  return isOperandOfVmullHighP64(Op1) && isOperandOfVmullHighP64(Op2);
}

static bool isSplatShuffle(Value *V) {
  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V))
    return is_splat(Shuf->getShuffleMask());
  return false;
}
12818 /// Check if sinking \p I's operands to I's basic block is profitable, because
12819 /// the operands can be folded into a target instruction, e.g.
12820 /// shufflevectors extracts and/or sext/zext can be folded into (u,s)subl(2).
12821 bool AArch64TargetLowering::shouldSinkOperands(
12822 Instruction *I, SmallVectorImpl<Use *> &Ops) const {
12823 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
12824 switch (II->getIntrinsicID()) {
12825 case Intrinsic::aarch64_neon_smull:
12826 case Intrinsic::aarch64_neon_umull:
12827 if (areExtractShuffleVectors(II->getOperand(0), II->getOperand(1))) {
12828 Ops.push_back(&II->getOperandUse(0));
12829 Ops.push_back(&II->getOperandUse(1));
12834 case Intrinsic::fma:
12835 if (isa<VectorType>(I->getType()) &&
12836 cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
12837 !Subtarget->hasFullFP16())
12840 case Intrinsic::aarch64_neon_sqdmull:
12841 case Intrinsic::aarch64_neon_sqdmulh:
12842 case Intrinsic::aarch64_neon_sqrdmulh:
12843 // Sink splats for index lane variants
12844 if (isSplatShuffle(II->getOperand(0)))
12845 Ops.push_back(&II->getOperandUse(0));
12846 if (isSplatShuffle(II->getOperand(1)))
12847 Ops.push_back(&II->getOperandUse(1));
12848 return !Ops.empty();
12849 case Intrinsic::aarch64_sve_ptest_first:
12850 case Intrinsic::aarch64_sve_ptest_last:
12851 if (auto *IIOp = dyn_cast<IntrinsicInst>(II->getOperand(0)))
12852 if (IIOp->getIntrinsicID() == Intrinsic::aarch64_sve_ptrue)
12853 Ops.push_back(&II->getOperandUse(0));
12854 return !Ops.empty();
12855 case Intrinsic::aarch64_sme_write_horiz:
12856 case Intrinsic::aarch64_sme_write_vert:
12857 case Intrinsic::aarch64_sme_writeq_horiz:
12858 case Intrinsic::aarch64_sme_writeq_vert: {
12859 auto *Idx = dyn_cast<Instruction>(II->getOperand(1));
12860 if (!Idx || Idx->getOpcode() != Instruction::Add)
12862 Ops.push_back(&II->getOperandUse(1));
12865 case Intrinsic::aarch64_sme_read_horiz:
12866 case Intrinsic::aarch64_sme_read_vert:
12867 case Intrinsic::aarch64_sme_readq_horiz:
12868 case Intrinsic::aarch64_sme_readq_vert:
12869 case Intrinsic::aarch64_sme_ld1b_vert:
12870 case Intrinsic::aarch64_sme_ld1h_vert:
12871 case Intrinsic::aarch64_sme_ld1w_vert:
12872 case Intrinsic::aarch64_sme_ld1d_vert:
12873 case Intrinsic::aarch64_sme_ld1q_vert:
12874 case Intrinsic::aarch64_sme_st1b_vert:
12875 case Intrinsic::aarch64_sme_st1h_vert:
12876 case Intrinsic::aarch64_sme_st1w_vert:
12877 case Intrinsic::aarch64_sme_st1d_vert:
12878 case Intrinsic::aarch64_sme_st1q_vert:
12879 case Intrinsic::aarch64_sme_ld1b_horiz:
12880 case Intrinsic::aarch64_sme_ld1h_horiz:
12881 case Intrinsic::aarch64_sme_ld1w_horiz:
12882 case Intrinsic::aarch64_sme_ld1d_horiz:
12883 case Intrinsic::aarch64_sme_ld1q_horiz:
12884 case Intrinsic::aarch64_sme_st1b_horiz:
12885 case Intrinsic::aarch64_sme_st1h_horiz:
12886 case Intrinsic::aarch64_sme_st1w_horiz:
12887 case Intrinsic::aarch64_sme_st1d_horiz:
12888 case Intrinsic::aarch64_sme_st1q_horiz: {
12889 auto *Idx = dyn_cast<Instruction>(II->getOperand(3));
12890 if (!Idx || Idx->getOpcode() != Instruction::Add)
12892 Ops.push_back(&II->getOperandUse(3));
12895 case Intrinsic::aarch64_neon_pmull:
12896 if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1)))
12898 Ops.push_back(&II->getOperandUse(0));
12899 Ops.push_back(&II->getOperandUse(1));
12901 case Intrinsic::aarch64_neon_pmull64:
12902 if (!areOperandsOfVmullHighP64(II->getArgOperand(0),
12903 II->getArgOperand(1)))
12905 Ops.push_back(&II->getArgOperandUse(0));
12906 Ops.push_back(&II->getArgOperandUse(1));
12913 if (!I->getType()->isVectorTy())
12916 switch (I->getOpcode()) {
12917 case Instruction::Sub:
12918 case Instruction::Add: {
12919 if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
12922 // If the exts' operands extract either the lower or upper elements, we
12923 // can sink them too.
12924 auto Ext1 = cast<Instruction>(I->getOperand(0));
12925 auto Ext2 = cast<Instruction>(I->getOperand(1));
12926 if (areExtractShuffleVectors(Ext1->getOperand(0), Ext2->getOperand(0))) {
12927 Ops.push_back(&Ext1->getOperandUse(0));
12928 Ops.push_back(&Ext2->getOperandUse(0));
12931 Ops.push_back(&I->getOperandUse(0));
12932 Ops.push_back(&I->getOperandUse(1));
12936 case Instruction::Mul: {
12937 bool IsProfitable = false;
12938 for (auto &Op : I->operands()) {
12939 // Make sure we are not already sinking this operand
12940 if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
12943 ShuffleVectorInst *Shuffle = dyn_cast<ShuffleVectorInst>(Op);
12944 if (!Shuffle || !Shuffle->isZeroEltSplat())
12947 Value *ShuffleOperand = Shuffle->getOperand(0);
12948 InsertElementInst *Insert = dyn_cast<InsertElementInst>(ShuffleOperand);
12952 Instruction *OperandInstr = dyn_cast<Instruction>(Insert->getOperand(1));
12956 ConstantInt *ElementConstant =
12957 dyn_cast<ConstantInt>(Insert->getOperand(2));
12958 // Check that the insertelement is inserting into element 0
12959 if (!ElementConstant || ElementConstant->getZExtValue() != 0)
12962 unsigned Opcode = OperandInstr->getOpcode();
12963 if (Opcode != Instruction::SExt && Opcode != Instruction::ZExt)
12966 Ops.push_back(&Shuffle->getOperandUse(0));
12967 Ops.push_back(&Op);
12968 IsProfitable = true;
12971 return IsProfitable;
bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
                                          Align &RequiredAligment) const {
  if (!LoadedType.isSimple() ||
      (!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
    return false;
  // Cyclone supports unaligned accesses.
  RequiredAligment = Align(1);
  unsigned NumBits = LoadedType.getSizeInBits();
  return NumBits == 32 || NumBits == 64;
}

/// A helper function for determining the number of interleaved accesses we
/// will generate when lowering accesses of the given type.
unsigned AArch64TargetLowering::getNumInterleavedAccesses(
    VectorType *VecTy, const DataLayout &DL, bool UseScalable) const {
  unsigned VecSize = UseScalable ? Subtarget->getMinSVEVectorSizeInBits() : 128;
  return std::max<unsigned>(1, (DL.getTypeSizeInBits(VecTy) + 127) / VecSize);
}
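// Worked example (assuming the 128-bit NEON container used above): a
// <16 x i32> access is 512 bits, so (512 + 127) / 128 = 4 interleaved
// accesses are generated; a 64-bit <4 x i16> access still rounds up to 1.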
MachineMemOperand::Flags
AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const {
  if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor &&
      I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr)
    return MOStridedAccess;
  return MachineMemOperand::MONone;
}
bool AArch64TargetLowering::isLegalInterleavedAccessType(
    VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const {
  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
  unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();

  UseScalable = false;

  // Ensure the number of vector elements is greater than 1.
  if (NumElements < 2)
    return false;

  // Ensure the element type is legal.
  if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
    return false;

  if (Subtarget->useSVEForFixedLengthVectors() &&
      (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
       (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
        isPowerOf2_32(NumElements) && VecSize > 128))) {
    UseScalable = true;
    return true;
  }

  // Ensure the total vector size is 64 or a multiple of 128. Types larger than
  // 128 will be split into multiple interleaved accesses.
  return VecSize == 64 || VecSize % 128 == 0;
}
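// Illustrative examples of the rule above (assuming the plain NEON path):
// <4 x i16> (64 bits) and <4 x i32> (128 bits) are legal as-is, <8 x i32>
// (256 bits) is legal but later split into two 128-bit accesses, while
// <3 x i32> (96 bits) is rejected.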
static ScalableVectorType *getSVEContainerIRType(FixedVectorType *VTy) {
  if (VTy->getElementType() == Type::getDoubleTy(VTy->getContext()))
    return ScalableVectorType::get(VTy->getElementType(), 2);

  if (VTy->getElementType() == Type::getFloatTy(VTy->getContext()))
    return ScalableVectorType::get(VTy->getElementType(), 4);

  if (VTy->getElementType() == Type::getBFloatTy(VTy->getContext()))
    return ScalableVectorType::get(VTy->getElementType(), 8);

  if (VTy->getElementType() == Type::getHalfTy(VTy->getContext()))
    return ScalableVectorType::get(VTy->getElementType(), 8);

  if (VTy->getElementType() == Type::getInt64Ty(VTy->getContext()))
    return ScalableVectorType::get(VTy->getElementType(), 2);

  if (VTy->getElementType() == Type::getInt32Ty(VTy->getContext()))
    return ScalableVectorType::get(VTy->getElementType(), 4);

  if (VTy->getElementType() == Type::getInt16Ty(VTy->getContext()))
    return ScalableVectorType::get(VTy->getElementType(), 8);

  if (VTy->getElementType() == Type::getInt8Ty(VTy->getContext()))
    return ScalableVectorType::get(VTy->getElementType(), 16);

  llvm_unreachable("Cannot handle input vector type");
}
13064 /// Lower an interleaved load into a ldN intrinsic.
13066 /// E.g. Lower an interleaved load (Factor = 2):
13067 /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
13068 /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements
13069 /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements
13072 /// %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr)
13073 /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
13074 /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
13075 bool AArch64TargetLowering::lowerInterleavedLoad(
13076 LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
13077 ArrayRef<unsigned> Indices, unsigned Factor) const {
13078 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13079 "Invalid interleave factor");
13080 assert(!Shuffles.empty() && "Empty shufflevector input");
13081 assert(Shuffles.size() == Indices.size() &&
13082 "Unmatched number of shufflevectors and indices");
13084 const DataLayout &DL = LI->getModule()->getDataLayout();
13086 VectorType *VTy = Shuffles[0]->getType();
13088 // Skip if we do not have NEON and skip illegal vector types. We can
13089 // "legalize" wide vector types into multiple interleaved accesses as long as
13090 // the vector types are divisible by 128.
13092 if (!Subtarget->hasNEON() ||
13093 !isLegalInterleavedAccessType(VTy, DL, UseScalable))
13096 unsigned NumLoads = getNumInterleavedAccesses(VTy, DL, UseScalable);
13098 auto *FVTy = cast<FixedVectorType>(VTy);
13100 // A pointer vector can not be the return type of the ldN intrinsics. Need to
13101 // load integer vectors first and then convert to pointer vectors.
13102 Type *EltTy = FVTy->getElementType();
13103 if (EltTy->isPointerTy())
13105 FixedVectorType::get(DL.getIntPtrType(EltTy), FVTy->getNumElements());
13107 // If we're going to generate more than one load, reset the sub-vector type
13108 // to something legal.
13109 FVTy = FixedVectorType::get(FVTy->getElementType(),
13110 FVTy->getNumElements() / NumLoads);
13113 UseScalable ? cast<VectorType>(getSVEContainerIRType(FVTy)) : FVTy;
13115 IRBuilder<> Builder(LI);
13117 // The base address of the load.
13118 Value *BaseAddr = LI->getPointerOperand();
13120 if (NumLoads > 1) {
13121 // We will compute the pointer operand of each load from the original base
13122 // address using GEPs. Cast the base address to a pointer to the scalar
13124 BaseAddr = Builder.CreateBitCast(
13126 LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
13131 ? LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())
13132 : LDVTy->getPointerTo(LI->getPointerAddressSpace());
13133 Type *PredTy = VectorType::get(Type::getInt1Ty(LDVTy->getContext()),
13134 LDVTy->getElementCount());
13136 static const Intrinsic::ID SVELoadIntrs[3] = {
13137 Intrinsic::aarch64_sve_ld2_sret, Intrinsic::aarch64_sve_ld3_sret,
13138 Intrinsic::aarch64_sve_ld4_sret};
13139 static const Intrinsic::ID NEONLoadIntrs[3] = {Intrinsic::aarch64_neon_ld2,
13140 Intrinsic::aarch64_neon_ld3,
13141 Intrinsic::aarch64_neon_ld4};
13144 LdNFunc = Intrinsic::getDeclaration(LI->getModule(),
13145 SVELoadIntrs[Factor - 2], {LDVTy});
13147 LdNFunc = Intrinsic::getDeclaration(
13148 LI->getModule(), NEONLoadIntrs[Factor - 2], {LDVTy, PtrTy});
13150 // Holds sub-vectors extracted from the load intrinsic return values. The
13151 // sub-vectors are associated with the shufflevector instructions they will
13153 DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
13155 Value *PTrue = nullptr;
13157 Optional<unsigned> PgPattern =
13158 getSVEPredPatternFromNumElements(FVTy->getNumElements());
13159 if (Subtarget->getMinSVEVectorSizeInBits() ==
13160 Subtarget->getMaxSVEVectorSizeInBits() &&
13161 Subtarget->getMinSVEVectorSizeInBits() == DL.getTypeSizeInBits(FVTy))
13162 PgPattern = AArch64SVEPredPattern::all;
13165 ConstantInt::get(Type::getInt32Ty(LDVTy->getContext()), *PgPattern);
13166 PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
13170 for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
13172 // If we're generating more than one load, compute the base address of
13173 // subsequent loads as an offset from the previous.
13175 BaseAddr = Builder.CreateConstGEP1_32(LDVTy->getElementType(), BaseAddr,
13176 FVTy->getNumElements() * Factor);
13180 LdN = Builder.CreateCall(
13181 LdNFunc, {PTrue, Builder.CreateBitCast(BaseAddr, PtrTy)}, "ldN");
13183 LdN = Builder.CreateCall(LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy),
13186 // Extract and store the sub-vectors returned by the load intrinsic.
13187 for (unsigned i = 0; i < Shuffles.size(); i++) {
13188 ShuffleVectorInst *SVI = Shuffles[i];
13189 unsigned Index = Indices[i];
13191 Value *SubVec = Builder.CreateExtractValue(LdN, Index);
13194 SubVec = Builder.CreateExtractVector(
13196 ConstantInt::get(Type::getInt64Ty(VTy->getContext()), 0));
13198 // Convert the integer vector to pointer vector if the element is pointer.
13199 if (EltTy->isPointerTy())
13200 SubVec = Builder.CreateIntToPtr(
13201 SubVec, FixedVectorType::get(SVI->getType()->getElementType(),
13202 FVTy->getNumElements()));
13204 SubVecs[SVI].push_back(SubVec);
13208 // Replace uses of the shufflevector instructions with the sub-vectors
13209 // returned by the load intrinsic. If a shufflevector instruction is
13210 // associated with more than one sub-vector, those sub-vectors will be
13211 // concatenated into a single wide vector.
13212 for (ShuffleVectorInst *SVI : Shuffles) {
13213 auto &SubVec = SubVecs[SVI];
13215 SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
13216 SVI->replaceAllUsesWith(WideVec);
13222 /// Lower an interleaved store into a stN intrinsic.
13224 /// E.g. Lower an interleaved store (Factor = 3):
13225 /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
13226 /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
13227 /// store <12 x i32> %i.vec, <12 x i32>* %ptr
13230 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
13231 /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
13232 /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
13233 /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
13235 /// Note that the new shufflevectors will be removed and we'll only generate one
13236 /// st3 instruction in CodeGen.
13238 /// Example for a more general valid mask (Factor 3). Lower:
13239 /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
13240 /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
13241 /// store <12 x i32> %i.vec, <12 x i32>* %ptr
13244 /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
13245 /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
13246 /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
13247 /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
13248 bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
13249 ShuffleVectorInst *SVI,
13250 unsigned Factor) const {
13251 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13252 "Invalid interleave factor");
13254 auto *VecTy = cast<FixedVectorType>(SVI->getType());
13255 assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
13257 unsigned LaneLen = VecTy->getNumElements() / Factor;
13258 Type *EltTy = VecTy->getElementType();
13259 auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
13261 const DataLayout &DL = SI->getModule()->getDataLayout();
13264 // Skip if we do not have NEON and skip illegal vector types. We can
13265 // "legalize" wide vector types into multiple interleaved accesses as long as
13266 // the vector types are divisible by 128.
13267 if (!Subtarget->hasNEON() ||
13268 !isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
13271 unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
13273 Value *Op0 = SVI->getOperand(0);
13274 Value *Op1 = SVI->getOperand(1);
13275 IRBuilder<> Builder(SI);
13277 // StN intrinsics don't support pointer vectors as arguments. Convert pointer
13278 // vectors to integer vectors.
13279 if (EltTy->isPointerTy()) {
13280 Type *IntTy = DL.getIntPtrType(EltTy);
13281 unsigned NumOpElts =
13282 cast<FixedVectorType>(Op0->getType())->getNumElements();
13284 // Convert to the corresponding integer vector.
13285 auto *IntVecTy = FixedVectorType::get(IntTy, NumOpElts);
13286 Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
13287 Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
13289 SubVecTy = FixedVectorType::get(IntTy, LaneLen);
13292 // If we're going to generate more than one store, reset the lane length
13293 // and sub-vector type to something legal.
13294 LaneLen /= NumStores;
13295 SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
13297 auto *STVTy = UseScalable ? cast<VectorType>(getSVEContainerIRType(SubVecTy))
13300 // The base address of the store.
13301 Value *BaseAddr = SI->getPointerOperand();
13303 if (NumStores > 1) {
13304 // We will compute the pointer operand of each store from the original base
13305 // address using GEPs. Cast the base address to a pointer to the scalar
13307 BaseAddr = Builder.CreateBitCast(
13309 SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
13312 auto Mask = SVI->getShuffleMask();
13316 ? STVTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())
13317 : STVTy->getPointerTo(SI->getPointerAddressSpace());
13318 Type *PredTy = VectorType::get(Type::getInt1Ty(STVTy->getContext()),
13319 STVTy->getElementCount());
13321 static const Intrinsic::ID SVEStoreIntrs[3] = {Intrinsic::aarch64_sve_st2,
13322 Intrinsic::aarch64_sve_st3,
13323 Intrinsic::aarch64_sve_st4};
13324 static const Intrinsic::ID NEONStoreIntrs[3] = {Intrinsic::aarch64_neon_st2,
13325 Intrinsic::aarch64_neon_st3,
13326 Intrinsic::aarch64_neon_st4};
13329 StNFunc = Intrinsic::getDeclaration(SI->getModule(),
13330 SVEStoreIntrs[Factor - 2], {STVTy});
13332 StNFunc = Intrinsic::getDeclaration(
13333 SI->getModule(), NEONStoreIntrs[Factor - 2], {STVTy, PtrTy});
13335 Value *PTrue = nullptr;
13337 Optional<unsigned> PgPattern =
13338 getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
13339 if (Subtarget->getMinSVEVectorSizeInBits() ==
13340 Subtarget->getMaxSVEVectorSizeInBits() &&
13341 Subtarget->getMinSVEVectorSizeInBits() ==
13342 DL.getTypeSizeInBits(SubVecTy))
13343 PgPattern = AArch64SVEPredPattern::all;
13346 ConstantInt::get(Type::getInt32Ty(STVTy->getContext()), *PgPattern);
13347 PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
13351 for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
13353 SmallVector<Value *, 5> Ops;
13355 // Split the shufflevector operands into sub vectors for the new stN call.
13356 for (unsigned i = 0; i < Factor; i++) {
13358 unsigned IdxI = StoreCount * LaneLen * Factor + i;
13359 if (Mask[IdxI] >= 0) {
13360 Shuffle = Builder.CreateShuffleVector(
13361 Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0));
13363 unsigned StartMask = 0;
13364 for (unsigned j = 1; j < LaneLen; j++) {
13365 unsigned IdxJ = StoreCount * LaneLen * Factor + j;
13366 if (Mask[IdxJ * Factor + IdxI] >= 0) {
13367 StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
13371 // Note: Filling undef gaps with random elements is ok, since
13372 // those elements were being written anyway (with undefs).
13373 // In the case of all undefs we're defaulting to using elems from 0
13374 // Note: StartMask cannot be negative, it's checked in
13375 // isReInterleaveMask
13376 Shuffle = Builder.CreateShuffleVector(
13377 Op0, Op1, createSequentialMask(StartMask, LaneLen, 0));
13381 Shuffle = Builder.CreateInsertVector(
13382 STVTy, UndefValue::get(STVTy), Shuffle,
13383 ConstantInt::get(Type::getInt64Ty(STVTy->getContext()), 0));
13385 Ops.push_back(Shuffle);
13389 Ops.push_back(PTrue);
    // If we're generating more than one store, we compute the base address of
13392 // subsequent stores as an offset from the previous.
13393 if (StoreCount > 0)
13394 BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
13395 BaseAddr, LaneLen * Factor);
13397 Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
13398 Builder.CreateCall(StNFunc, Ops);
13403 // Lower an SVE structured load intrinsic returning a tuple type to target
13404 // specific intrinsic taking the same input but returning a multi-result value
13405 // of the split tuple type.
13407 // E.g. Lowering an LD3:
13409 // call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32(
13410 // <vscale x 4 x i1> %pred,
13411 // <vscale x 4 x i32>* %addr)
13415 // t0: ch = EntryToken
13416 // t2: nxv4i1,ch = CopyFromReg t0, Register:nxv4i1 %0
13417 // t4: i64,ch = CopyFromReg t0, Register:i64 %1
13418 // t5: nxv4i32,nxv4i32,nxv4i32,ch = AArch64ISD::SVE_LD3 t0, t2, t4
13419 // t6: nxv12i32 = concat_vectors t5, t5:1, t5:2
13421 // This is called pre-legalization to avoid widening/splitting issues with
13422 // non-power-of-2 tuple types used for LD3, such as nxv12i32.
13423 SDValue AArch64TargetLowering::LowerSVEStructLoad(unsigned Intrinsic,
13424 ArrayRef<SDValue> LoadOps,
13425 EVT VT, SelectionDAG &DAG,
13426 const SDLoc &DL) const {
13427 assert(VT.isScalableVector() && "Can only lower scalable vectors");
13429 unsigned N, Opcode;
13430 static const std::pair<unsigned, std::pair<unsigned, unsigned>>
13432 {Intrinsic::aarch64_sve_ld2, {2, AArch64ISD::SVE_LD2_MERGE_ZERO}},
13433 {Intrinsic::aarch64_sve_ld3, {3, AArch64ISD::SVE_LD3_MERGE_ZERO}},
13434 {Intrinsic::aarch64_sve_ld4, {4, AArch64ISD::SVE_LD4_MERGE_ZERO}}};
13436 std::tie(N, Opcode) = llvm::find_if(IntrinsicMap, [&](auto P) {
13437 return P.first == Intrinsic;
13439 assert(VT.getVectorElementCount().getKnownMinValue() % N == 0 &&
13440 "invalid tuple vector type!");
13443 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
13444 VT.getVectorElementCount().divideCoefficientBy(N));
13445 assert(isTypeLegal(SplitVT));
13447 SmallVector<EVT, 5> VTs(N, SplitVT);
13448 VTs.push_back(MVT::Other); // Chain
13449 SDVTList NodeTys = DAG.getVTList(VTs);
13451 SDValue PseudoLoad = DAG.getNode(Opcode, DL, NodeTys, LoadOps);
13452 SmallVector<SDValue, 4> PseudoLoadOps;
13453 for (unsigned I = 0; I < N; ++I)
13454 PseudoLoadOps.push_back(SDValue(PseudoLoad.getNode(), I));
13455 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, PseudoLoadOps);
EVT AArch64TargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
  bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
  bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
  // Only use AdvSIMD to implement memset of 32-byte and above. It would have
  // taken one instruction to materialize the v2i64 zero and one store (with
  // restrictive addressing mode). Just do i64 stores.
  bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
  auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
    if (Op.isAligned(AlignCheck))
      return true;
    bool Fast;
    return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
                                          MachineMemOperand::MONone, &Fast) &&
           Fast;
  };

  if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
      AlignmentIsAcceptable(MVT::v16i8, Align(16)))
    return MVT::v16i8;
  if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
    return MVT::f128;
  if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
    return MVT::i64;
  if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
    return MVT::i32;
  return MVT::Other;
}

LLT AArch64TargetLowering::getOptimalMemOpLLT(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
  bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
  bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
  // Only use AdvSIMD to implement memset of 32-byte and above. It would have
  // taken one instruction to materialize the v2i64 zero and one store (with
  // restrictive addressing mode). Just do i64 stores.
  bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
  auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
    if (Op.isAligned(AlignCheck))
      return true;
    bool Fast;
    return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
                                          MachineMemOperand::MONone, &Fast) &&
           Fast;
  };

  if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
      AlignmentIsAcceptable(MVT::v2i64, Align(16)))
    return LLT::fixed_vector(2, 64);
  if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
    return LLT::scalar(128);
  if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
    return LLT::scalar(64);
  if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
    return LLT::scalar(32);
  return LLT();
}
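// For example (illustrative, assuming NEON is available and no
// noimplicitfloat attribute): a 64-byte, 16-byte-aligned memset picks the
// 128-bit vector type above, while memsets under 32 bytes deliberately stay
// on i64/i32 stores.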
// 12-bit optionally shifted immediates are legal for adds.
bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
  if (Immed == std::numeric_limits<int64_t>::min()) {
    LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
                      << ": avoid UB for INT64_MIN\n");
    return false;
  }
  // Same encoding for add/sub, just flip the sign.
  Immed = std::abs(Immed);
  bool IsLegal = ((Immed >> 12) == 0 ||
                  ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
  LLVM_DEBUG(dbgs() << "Is " << Immed
                    << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
  return IsLegal;
}
// Return false to prevent folding
// (mul (add x, c1), c2) -> (add (mul x, c2), c2*c1) in DAGCombine,
// if the folding leads to worse code.
bool AArch64TargetLowering::isMulAddWithConstProfitable(
    SDValue AddNode, SDValue ConstNode) const {
  // Let the DAGCombiner decide for vector types and large types.
  const EVT VT = AddNode.getValueType();
  if (VT.isVector() || VT.getScalarSizeInBits() > 64)
    return true;

  // It is worse if c1 is legal add immediate, while c1*c2 is not
  // and has to be composed by at least two instructions.
  const ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
  const ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
  const int64_t C1 = C1Node->getSExtValue();
  const APInt C1C2 = C1Node->getAPIntValue() * C2Node->getAPIntValue();
  if (!isLegalAddImmediate(C1) || isLegalAddImmediate(C1C2.getSExtValue()))
    return true;
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(C1C2.getZExtValue(), VT.getSizeInBits(), Insn);
  if (Insn.size() > 1)
    return false;

  // Default to true and let the DAGCombiner decide.
  return true;
}

// Integer comparisons are implemented with ADDS/SUBS, so the range of valid
// immediates is the same as for an add or a sub.
bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
  return isLegalAddImmediate(Immed);
}
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AM, Type *Ty,
                                                  unsigned AS, Instruction *I) const {
  // AArch64 has five basic addressing modes:
  //  reg
  //  reg + 9-bit signed offset
  //  reg + SIZE_IN_BYTES * 12-bit unsigned offset
  //  reg1 + reg2
  //  reg + SIZE_IN_BYTES * reg

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // No reg+reg+imm addressing.
  if (AM.HasBaseReg && AM.BaseOffs && AM.Scale)
    return false;

  // FIXME: Update this method to support scalable addressing modes.
  if (isa<ScalableVectorType>(Ty)) {
    uint64_t VecElemNumBytes =
        DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8;
    return AM.HasBaseReg && !AM.BaseOffs &&
           (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes);
  }

  // check reg + imm case:
  // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12
  uint64_t NumBytes = 0;
  if (Ty->isSized()) {
    uint64_t NumBits = DL.getTypeSizeInBits(Ty);
    NumBytes = NumBits / 8;
    if (!isPowerOf2_64(NumBits))
      NumBytes = 0;
  }

  if (!AM.Scale) {
    int64_t Offset = AM.BaseOffs;

    // 9-bit signed offset
    if (isInt<9>(Offset))
      return true;

    // 12-bit unsigned offset
    unsigned shift = Log2_64(NumBytes);
    if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
        // Must be a multiple of NumBytes (NumBytes is a power of 2)
        (Offset >> shift) << shift == Offset)
      return true;
    return false;
  }

  // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2

  return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
}

bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const {
  // Consider splitting large offset of struct or array.
  return true;
}
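// Illustrative examples for an i64 load/store (8-byte element): "[x0, #-256]"
// is accepted via the 9-bit signed offset, "[x0, #32760]" via the scaled
// 12-bit unsigned offset (4095 * 8), and "[x0, x1, lsl #3]" via Scale == 8;
// an offset like #32761 is not a multiple of 8 and is rejected.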
InstructionCost AArch64TargetLowering::getScalingFactorCost(
    const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const {
  // Scaling factors are not free at all.
  // Operands                     | Rt Latency
  // -------------------------------------------
  // Rt, [Xn, Xm]                 | 4
  // -------------------------------------------
  // Rt, [Xn, Xm, lsl #imm]       | Rn: 4 Rm: 5
  // Rt, [Xn, Wm, <extend> #imm]  |
  if (isLegalAddressingMode(DL, AM, Ty, AS))
    // Scale represents reg2 * scale, thus account for 1 if
    // it is not equal to 0 or 1.
    return AM.Scale != 0 && AM.Scale != 1;
  return -1;
}
bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget->hasFullFP16();
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
                                                       Type *Ty) const {
  switch (Ty->getScalarType()->getTypeID()) {
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  default:
    return false;
  }
}

bool AArch64TargetLowering::generateFMAsInMachineCombiner(
    EVT VT, CodeGenOpt::Level OptLevel) const {
  return (OptLevel >= CodeGenOpt::Aggressive) && !VT.isScalableVector() &&
         !useSVEForFixedLengthVectorVT(VT);
}

const MCPhysReg *
AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints.
  static const MCPhysReg ScratchRegs[] = {
    AArch64::X16, AArch64::X17, AArch64::LR, 0
  };
  return ScratchRegs;
}
13696 AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
13697 CombineLevel Level) const {
13698 assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
13699 N->getOpcode() == ISD::SRL) &&
13700 "Expected shift op");
13702 SDValue ShiftLHS = N->getOperand(0);
13703 EVT VT = N->getValueType(0);
13705 // If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not combine
13706 // it with shift 'N' to let it be lowered to UBFX.
13707 if (ShiftLHS.getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) &&
13708 isa<ConstantSDNode>(ShiftLHS.getOperand(1))) {
13709 uint64_t TruncMask = ShiftLHS.getConstantOperandVal(1);
13710 if (isMask_64(TruncMask) &&
13711 ShiftLHS.getOperand(0).getOpcode() == ISD::SRL &&
13712 isa<ConstantSDNode>(ShiftLHS.getOperand(0).getOperand(1)))
13718 bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
13719 const SDNode *N) const {
13720 assert(N->getOpcode() == ISD::XOR &&
13721 (N->getOperand(0).getOpcode() == ISD::SHL ||
13722 N->getOperand(0).getOpcode() == ISD::SRL) &&
13723 "Expected XOR(SHIFT) pattern");
13725 // Only commute if the entire NOT mask is a hidden shifted mask.
13726 auto *XorC = dyn_cast<ConstantSDNode>(N->getOperand(1));
13727 auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
13728 if (XorC && ShiftC) {
13729 unsigned MaskIdx, MaskLen;
13730 if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {
13731 unsigned ShiftAmt = ShiftC->getZExtValue();
13732 unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
13733 if (N->getOperand(0).getOpcode() == ISD::SHL)
13734 return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);
13735 return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);
13742 bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
13743 const SDNode *N, CombineLevel Level) const {
13744 assert(((N->getOpcode() == ISD::SHL &&
13745 N->getOperand(0).getOpcode() == ISD::SRL) ||
13746 (N->getOpcode() == ISD::SRL &&
13747 N->getOperand(0).getOpcode() == ISD::SHL)) &&
13748 "Expected shift-shift mask");
13749 // Don't allow multiuse shift folding with the same shift amount.
13750 if (!N->getOperand(0)->hasOneUse())
13753 // Only fold srl(shl(x,c1),c2) iff C1 >= C2 to prevent loss of UBFX patterns.
13754 EVT VT = N->getValueType(0);
13755 if (N->getOpcode() == ISD::SRL && (VT == MVT::i32 || VT == MVT::i64)) {
13756 auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
13757 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
13758 return (!C1 || !C2 || C1->getZExtValue() >= C2->getZExtValue());
bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                              Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return false;

  int64_t Val = Imm.getSExtValue();
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize))
    return true;

  if ((int64_t)Val < 0)
    Val = ~Val;
  if (BitSize == 32)
    Val &= (1LL << 32) - 1;

  unsigned LZ = countLeadingZeros((uint64_t)Val);
  unsigned Shift = (63 - LZ) / 16;
  // MOVZ is free so return true for one or fewer MOVK.
  return Shift < 3;
}

bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                    unsigned Index) const {
  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
    return false;

  return (Index == 0 || Index == ResVT.getVectorMinNumElements());
}
/// Turn vector tests of the signbit in the form of:
///   xor (sra X, elt_size(X)-1), -1
/// into:
///   cmge X, X, #0
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
                                         const AArch64Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!Subtarget->hasNEON() || !VT.isVector())
    return SDValue();

  // There must be a shift right algebraic before the xor, and the xor must be a
  // 'not' operation.
  SDValue Shift = N->getOperand(0);
  SDValue Ones = N->getOperand(1);
  if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() ||
      !ISD::isBuildVectorAllOnes(Ones.getNode()))
    return SDValue();

  // The shift should be smearing the sign bit across each vector element.
  auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
  EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
  if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
    return SDValue();

  return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0));
}
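// Illustrative DAG shape of the fold above, for v4i32:
//   xor (AArch64ISD::VASHR x, 31), (build_vector -1, -1, -1, -1)
//     --> AArch64ISD::CMGEz x
// i.e. "sign bit clear" lanes become all-ones via a single "cmge v.4s, v.4s, #0".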
13822 // Given a vecreduce_add node, detect the below pattern and convert it to the
13823 // node sequence with UABDL, [S|U]ADB and UADDLP.
13825 // i32 vecreduce_add(
13828 // v16i32 [sign|zero]_extend(v16i8 a), v16i32 [sign|zero]_extend(v16i8 b))))
13829 // =================>
13830 // i32 vecreduce_add(
13834 // v8i8 [S|U]ABD low8:v16i8 a, low8:v16i8 b
13836 // v8i8 [S|U]ABD high8:v16i8 a, high8:v16i8 b
13837 static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
13838 SelectionDAG &DAG) {
13839 // Assumed i32 vecreduce_add
13840 if (N->getValueType(0) != MVT::i32)
13843 SDValue VecReduceOp0 = N->getOperand(0);
13844 unsigned Opcode = VecReduceOp0.getOpcode();
13845 // Assumed v16i32 abs
13846 if (Opcode != ISD::ABS || VecReduceOp0->getValueType(0) != MVT::v16i32)
13849 SDValue ABS = VecReduceOp0;
13850 // Assumed v16i32 sub
13851 if (ABS->getOperand(0)->getOpcode() != ISD::SUB ||
13852 ABS->getOperand(0)->getValueType(0) != MVT::v16i32)
13855 SDValue SUB = ABS->getOperand(0);
13856 unsigned Opcode0 = SUB->getOperand(0).getOpcode();
13857 unsigned Opcode1 = SUB->getOperand(1).getOpcode();
13858 // Assumed v16i32 type
13859 if (SUB->getOperand(0)->getValueType(0) != MVT::v16i32 ||
13860 SUB->getOperand(1)->getValueType(0) != MVT::v16i32)
13863 // Assumed zext or sext
13864 bool IsZExt = false;
13865 if (Opcode0 == ISD::ZERO_EXTEND && Opcode1 == ISD::ZERO_EXTEND) {
13867 } else if (Opcode0 == ISD::SIGN_EXTEND && Opcode1 == ISD::SIGN_EXTEND) {
13872 SDValue EXT0 = SUB->getOperand(0);
13873 SDValue EXT1 = SUB->getOperand(1);
13874 // Assumed zext's operand has v16i8 type
13875 if (EXT0->getOperand(0)->getValueType(0) != MVT::v16i8 ||
13876 EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
  // Pattern is detected. Let's convert it to a sequence of nodes.
13882 // First, create the node pattern of UABD/SABD.
13883 SDValue UABDHigh8Op0 =
13884 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
13885 DAG.getConstant(8, DL, MVT::i64));
13886 SDValue UABDHigh8Op1 =
13887 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
13888 DAG.getConstant(8, DL, MVT::i64));
13889 SDValue UABDHigh8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
13890 UABDHigh8Op0, UABDHigh8Op1);
13891 SDValue UABDL = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDHigh8);
13893 // Second, create the node pattern of UABAL.
13894 SDValue UABDLo8Op0 =
13895 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
13896 DAG.getConstant(0, DL, MVT::i64));
13897 SDValue UABDLo8Op1 =
13898 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
13899 DAG.getConstant(0, DL, MVT::i64));
13900 SDValue UABDLo8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
13901 UABDLo8Op0, UABDLo8Op1);
13902 SDValue ZExtUABD = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDLo8);
13903 SDValue UABAL = DAG.getNode(ISD::ADD, DL, MVT::v8i16, UABDL, ZExtUABD);
13905 // Third, create the node of UADDLP.
13906 SDValue UADDLP = DAG.getNode(AArch64ISD::UADDLP, DL, MVT::v4i32, UABAL);
13908 // Fourth, create the node of VECREDUCE_ADD.
13909 return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP);
13912 // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce
13913 // vecreduce.add(ext(A)) to vecreduce.add(DOT(zero, A, one))
13914 // vecreduce.add(mul(ext(A), ext(B))) to vecreduce.add(DOT(zero, A, B))
13915 static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
13916 const AArch64Subtarget *ST) {
13917 if (!ST->hasDotProd())
13918 return performVecReduceAddCombineWithUADDLP(N, DAG);
13920 SDValue Op0 = N->getOperand(0);
13921 if (N->getValueType(0) != MVT::i32 ||
13922 Op0.getValueType().getVectorElementType() != MVT::i32)
13925 unsigned ExtOpcode = Op0.getOpcode();
13928 if (ExtOpcode == ISD::MUL) {
13929 A = Op0.getOperand(0);
13930 B = Op0.getOperand(1);
13931 if (A.getOpcode() != B.getOpcode() ||
13932 A.getOperand(0).getValueType() != B.getOperand(0).getValueType())
13934 ExtOpcode = A.getOpcode();
13936 if (ExtOpcode != ISD::ZERO_EXTEND && ExtOpcode != ISD::SIGN_EXTEND)
13939 EVT Op0VT = A.getOperand(0).getValueType();
13940 if (Op0VT != MVT::v8i8 && Op0VT != MVT::v16i8)
13944 // For non-mla reductions B can be set to 1. For MLA we take the operand of
13947 B = DAG.getConstant(1, DL, Op0VT);
13949 B = B.getOperand(0);
13952 DAG.getConstant(0, DL, Op0VT == MVT::v8i8 ? MVT::v2i32 : MVT::v4i32);
13954 (ExtOpcode == ISD::ZERO_EXTEND) ? AArch64ISD::UDOT : AArch64ISD::SDOT;
13955 SDValue Dot = DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros,
13956 A.getOperand(0), B);
13957 return DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
13960 // Given an (integer) vecreduce, we know the order of the inputs does not
13961 // matter. We can convert UADDV(add(zext(extract_lo(x)), zext(extract_hi(x))))
13962 // into UADDV(UADDLP(x)). This can also happen through an extra add, where we
13963 // transform UADDV(add(y, add(zext(extract_lo(x)), zext(extract_hi(x))))).
13964 static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) {
13965 auto DetectAddExtract = [&](SDValue A) {
13966 // Look for add(zext(extract_lo(x)), zext(extract_hi(x))), returning
13967 // UADDLP(x) if found.
13968 if (A.getOpcode() != ISD::ADD)
13970 EVT VT = A.getValueType();
13971 SDValue Op0 = A.getOperand(0);
13972 SDValue Op1 = A.getOperand(1);
13973 if (Op0.getOpcode() != Op0.getOpcode() ||
13974 (Op0.getOpcode() != ISD::ZERO_EXTEND &&
13975 Op0.getOpcode() != ISD::SIGN_EXTEND))
13977 SDValue Ext0 = Op0.getOperand(0);
13978 SDValue Ext1 = Op1.getOperand(0);
13979 if (Ext0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13980 Ext1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13981 Ext0.getOperand(0) != Ext1.getOperand(0))
13983 // Check that the type is twice the add types, and the extract are from
13984 // upper/lower parts of the same source.
13985 if (Ext0.getOperand(0).getValueType().getVectorNumElements() !=
13986 VT.getVectorNumElements() * 2)
13988 if ((Ext0.getConstantOperandVal(1) != 0 &&
13989 Ext1.getConstantOperandVal(1) != VT.getVectorNumElements()) &&
13990 (Ext1.getConstantOperandVal(1) != 0 &&
13991 Ext0.getConstantOperandVal(1) != VT.getVectorNumElements()))
13993 unsigned Opcode = Op0.getOpcode() == ISD::ZERO_EXTEND ? AArch64ISD::UADDLP
13994 : AArch64ISD::SADDLP;
13995 return DAG.getNode(Opcode, SDLoc(A), VT, Ext0.getOperand(0));
13998 SDValue A = N->getOperand(0);
13999 if (SDValue R = DetectAddExtract(A))
14000 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), R);
14001 if (A.getOpcode() == ISD::ADD) {
14002 if (SDValue R = DetectAddExtract(A.getOperand(0)))
14003 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
14004 DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
14006 if (SDValue R = DetectAddExtract(A.getOperand(1)))
14007 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
14008 DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const AArch64Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  return foldVectorXorShiftIntoCmp(N, DAG, Subtarget);
}

SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                     SelectionDAG &DAG,
                                     SmallVectorImpl<SDNode *> &Created) const {
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N,0); // Lower SDIV as SDIV

  EVT VT = N->getValueType(0);

  // For scalable and fixed types, mark them as cheap so we can handle it much
  // later. This allows us to handle larger than legal types.
  if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
    return SDValue(N, 0);

  // fold (sdiv X, pow2)
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  unsigned Lg2 = Divisor.countTrailingZeros();
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);

  // Add (N0 < 0) ? Pow2 - 1 : 0;
  SDValue CCVal;
  SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL);
  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
  SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp);

  Created.push_back(Cmp.getNode());
  Created.push_back(Add.getNode());
  Created.push_back(CSel.getNode());

  SDValue SRA =
      DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64));

  // If we're dividing by a positive value, we're done. Otherwise, we must
  // negate the result.
  if (Divisor.isNonNegative())
    return SRA;

  Created.push_back(SRA.getNode());
  return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
}
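// Illustrative result of the expansion above for "sdiv i32 %x, 8" (register
// allocation shown here is arbitrary):
//   cmp  w0, #0           ; Cmp
//   add  w8, w0, #7       ; Add  = x + (8 - 1)
//   csel w8, w8, w0, lt   ; CSel = x < 0 ? Add : x
//   asr  w0, w8, #3       ; SRA by log2(8)
// For a negated power of two the quotient is then subtracted from zero.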
14074 AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
14076 SmallVectorImpl<SDNode *> &Created) const {
14077 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
14078 if (isIntDivCheap(N->getValueType(0), Attr))
14079 return SDValue(N, 0); // Lower SREM as SREM
14081 EVT VT = N->getValueType(0);
14083 // For scalable and fixed types, mark them as cheap so we can handle it much
14084 // later. This allows us to handle larger than legal types.
14085 if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
14086 return SDValue(N, 0);
14088 // fold (srem X, pow2)
14089 if ((VT != MVT::i32 && VT != MVT::i64) ||
14090 !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
14093 unsigned Lg2 = Divisor.countTrailingZeros();
14098 SDValue N0 = N->getOperand(0);
14099 SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
14100 SDValue Zero = DAG.getConstant(0, DL, VT);
14101 SDValue CCVal, CSNeg;
14103 SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETGE, CCVal, DAG, DL);
14104 SDValue And = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
14105 CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, And, And, CCVal, Cmp);
14107 Created.push_back(Cmp.getNode());
14108 Created.push_back(And.getNode());
14110 SDValue CCVal = DAG.getConstant(AArch64CC::MI, DL, MVT_CC);
14111 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
14113 SDValue Negs = DAG.getNode(AArch64ISD::SUBS, DL, VTs, Zero, N0);
14114 SDValue AndPos = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
14115 SDValue AndNeg = DAG.getNode(ISD::AND, DL, VT, Negs, Pow2MinusOne);
14116 CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, AndPos, AndNeg, CCVal,
14119 Created.push_back(Negs.getNode());
14120 Created.push_back(AndPos.getNode());
14121 Created.push_back(AndNeg.getNode());
static bool IsSVECntIntrinsic(SDValue S) {
  switch(getIntrinsicID(S.getNode())) {
  default:
    break;
  case Intrinsic::aarch64_sve_cntb:
  case Intrinsic::aarch64_sve_cnth:
  case Intrinsic::aarch64_sve_cntw:
  case Intrinsic::aarch64_sve_cntd:
    return true;
  }
  return false;
}
/// Calculates what the pre-extend type is, based on the extension
/// operation node provided by \p Extend.
///
/// In the case that \p Extend is a SIGN_EXTEND or a ZERO_EXTEND, the
/// pre-extend type is pulled directly from the operand, while other extend
/// operations need a bit more inspection to get this information.
///
/// \param Extend The SDNode from the DAG that represents the extend operation
///
/// \returns The type representing the \p Extend source type, or \p MVT::Other
/// if no valid type can be determined
static EVT calculatePreExtendType(SDValue Extend) {
  switch (Extend.getOpcode()) {
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return Extend.getOperand(0).getValueType();
  case ISD::AssertSext:
  case ISD::AssertZext:
  case ISD::SIGN_EXTEND_INREG: {
    VTSDNode *TypeNode = dyn_cast<VTSDNode>(Extend.getOperand(1));
    if (!TypeNode)
      return MVT::Other;
    return TypeNode->getVT();
  }
  case ISD::AND: {
    ConstantSDNode *Constant =
        dyn_cast<ConstantSDNode>(Extend.getOperand(1).getNode());
    if (!Constant)
      return MVT::Other;

    uint32_t Mask = Constant->getZExtValue();

    if (Mask == UCHAR_MAX)
      return MVT::i8;
    else if (Mask == USHRT_MAX)
      return MVT::i16;
    else if (Mask == UINT_MAX)
      return MVT::i32;

    return MVT::Other;
  }
  default:
    return MVT::Other;
  }
}
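// For example (illustrative): an extend expressed as "and x, 0xffff" reports a
// pre-extend type of i16, and "sign_extend_inreg x, i8" reports i8, matching
// the cases handled above.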
14186 /// Combines a buildvector(sext/zext) or shuffle(sext/zext, undef) node pattern
14187 /// into sext/zext(buildvector) or sext/zext(shuffle) making use of the vector
14188 /// SExt/ZExt rather than the scalar SExt/ZExt
14189 static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
14190 EVT VT = BV.getValueType();
14191 if (BV.getOpcode() != ISD::BUILD_VECTOR &&
14192 BV.getOpcode() != ISD::VECTOR_SHUFFLE)
14195 // Use the first item in the buildvector/shuffle to get the size of the
14196 // extend, and make sure it looks valid.
14197 SDValue Extend = BV->getOperand(0);
14198 unsigned ExtendOpcode = Extend.getOpcode();
14199 bool IsSExt = ExtendOpcode == ISD::SIGN_EXTEND ||
14200 ExtendOpcode == ISD::SIGN_EXTEND_INREG ||
14201 ExtendOpcode == ISD::AssertSext;
14202 if (!IsSExt && ExtendOpcode != ISD::ZERO_EXTEND &&
14203 ExtendOpcode != ISD::AssertZext && ExtendOpcode != ISD::AND)
14205 // Shuffle inputs are vector, limit to SIGN_EXTEND and ZERO_EXTEND to ensure
14206 // calculatePreExtendType will work without issue.
14207 if (BV.getOpcode() == ISD::VECTOR_SHUFFLE &&
14208 ExtendOpcode != ISD::SIGN_EXTEND && ExtendOpcode != ISD::ZERO_EXTEND)
14211 // Restrict valid pre-extend data type
14212 EVT PreExtendType = calculatePreExtendType(Extend);
14213 if (PreExtendType == MVT::Other ||
14214 PreExtendType.getScalarSizeInBits() != VT.getScalarSizeInBits() / 2)
14217 // Make sure all other operands are equally extended
14218 for (SDValue Op : drop_begin(BV->ops())) {
14221 unsigned Opc = Op.getOpcode();
14222 bool OpcIsSExt = Opc == ISD::SIGN_EXTEND || Opc == ISD::SIGN_EXTEND_INREG ||
14223 Opc == ISD::AssertSext;
14224 if (OpcIsSExt != IsSExt || calculatePreExtendType(Op) != PreExtendType)
14230 if (BV.getOpcode() == ISD::BUILD_VECTOR) {
14231 EVT PreExtendVT = VT.changeVectorElementType(PreExtendType);
14232 EVT PreExtendLegalType =
14233 PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType;
14234 SmallVector<SDValue, 8> NewOps;
14235 for (SDValue Op : BV->ops())
14236 NewOps.push_back(Op.isUndef() ? DAG.getUNDEF(PreExtendLegalType)
14237 : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL,
14238 PreExtendLegalType));
14239 NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps);
14240 } else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE
14241 EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType());
14242 NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0),
14243 BV.getOperand(1).isUndef()
14244 ? DAG.getUNDEF(PreExtendVT)
14245 : BV.getOperand(1).getOperand(0),
14246 cast<ShuffleVectorSDNode>(BV)->getMask());
14248 return DAG.getNode(IsSExt ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT, NBV);
14251 /// Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup))
14252 /// making use of the vector SExt/ZExt rather than the scalar SExt/ZExt
14253 static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) {
14254 // If the value type isn't a vector, none of the operands are going to be dups
14255 EVT VT = Mul->getValueType(0);
14256 if (VT != MVT::v8i16 && VT != MVT::v4i32 && VT != MVT::v2i64)
14257 return SDValue();
14259 SDValue Op0 = performBuildShuffleExtendCombine(Mul->getOperand(0), DAG);
14260 SDValue Op1 = performBuildShuffleExtendCombine(Mul->getOperand(1), DAG);
14262 // Neither operand has been changed, don't make any further changes
14263 if (!Op0 && !Op1)
14264 return SDValue();
14266 SDLoc DL(Mul);
14267 return DAG.getNode(Mul->getOpcode(), DL, VT, Op0 ? Op0 : Mul->getOperand(0),
14268 Op1 ? Op1 : Mul->getOperand(1));
14269 }
14271 // Combine v4i32 Mul(And(Srl(X, 15), 0x10001), 0xffff) -> v8i16 CMLTz
14272 // Same for other types with equivalent constants.
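// For example, for v4i32: viewing each 32-bit lane as two 16-bit halves, the
// srl/and/mul sequence leaves a half equal to 0xffff exactly when that half of
// X is negative and 0 otherwise, which is what CMLTz produces on v8i16.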
14273 static SDValue performMulVectorCmpZeroCombine(SDNode *N, SelectionDAG &DAG) {
14274 EVT VT = N->getValueType(0);
14275 if (VT != MVT::v2i64 && VT != MVT::v1i64 && VT != MVT::v2i32 &&
14276 VT != MVT::v4i32 && VT != MVT::v4i16 && VT != MVT::v8i16)
14278 if (N->getOperand(0).getOpcode() != ISD::AND ||
14279 N->getOperand(0).getOperand(0).getOpcode() != ISD::SRL)
14282 SDValue And = N->getOperand(0);
14283 SDValue Srl = And.getOperand(0);
14285 APInt V1, V2, V3;
14286 if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), V1) ||
14287 !ISD::isConstantSplatVector(And.getOperand(1).getNode(), V2) ||
14288 !ISD::isConstantSplatVector(Srl.getOperand(1).getNode(), V3))
14291 unsigned HalfSize = VT.getScalarSizeInBits() / 2;
14292 if (!V1.isMask(HalfSize) || V2 != (1ULL | 1ULL << HalfSize) ||
14293 V3 != (HalfSize - 1))
14296 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(),
14297 EVT::getIntegerVT(*DAG.getContext(), HalfSize),
14298 VT.getVectorElementCount() * 2);
14300 SDLoc DL(N);
14301 SDValue In = DAG.getNode(AArch64ISD::NVCAST, DL, HalfVT, Srl.getOperand(0));
14302 SDValue CM = DAG.getNode(AArch64ISD::CMLTz, DL, HalfVT, In);
14303 return DAG.getNode(AArch64ISD::NVCAST, DL, VT, CM);
14306 static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
14307 TargetLowering::DAGCombinerInfo &DCI,
14308 const AArch64Subtarget *Subtarget) {
14310 if (SDValue Ext = performMulVectorExtendCombine(N, DAG))
14311 return Ext;
14312 if (SDValue Ext = performMulVectorCmpZeroCombine(N, DAG))
14313 return Ext;
14315 if (DCI.isBeforeLegalizeOps())
14316 return SDValue();
14318 // Canonicalize X*(Y+1) -> X*Y+X and (X+1)*Y -> X*Y+Y,
14319 // and in MachineCombiner pass, add+mul will be combined into madd.
14320 // Similarly, X*(1-Y) -> X - X*Y and (1-Y)*X -> X - Y*X.
14321 SDLoc DL(N);
14322 EVT VT = N->getValueType(0);
14323 SDValue N0 = N->getOperand(0);
14324 SDValue N1 = N->getOperand(1);
14325 SDValue MulOper;
14326 unsigned AddSubOpc;
14328 auto IsAddSubWith1 = [&](SDValue V) -> bool {
14329 AddSubOpc = V->getOpcode();
14330 if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) {
14331 SDValue Opnd = V->getOperand(1);
14332 MulOper = V->getOperand(0);
14333 if (AddSubOpc == ISD::SUB)
14334 std::swap(Opnd, MulOper);
14335 if (auto C = dyn_cast<ConstantSDNode>(Opnd))
14336 return C->isOne();
14337 }
14338 return false;
14339 };
14341 if (IsAddSubWith1(N0)) {
14342 SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N1, MulOper);
14343 return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
14346 if (IsAddSubWith1(N1)) {
14347 SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N0, MulOper);
14348 return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
14351 // The below optimizations require a constant RHS.
14352 if (!isa<ConstantSDNode>(N1))
14355 ConstantSDNode *C = cast<ConstantSDNode>(N1);
14356 const APInt &ConstValue = C->getAPIntValue();
14358 // Allow the scaling to be folded into the `cnt` instruction by preventing
14359 // the scaling to be obscured here. This makes it easier to pattern match.
14360 if (IsSVECntIntrinsic(N0) ||
14361 (N0->getOpcode() == ISD::TRUNCATE &&
14362 (IsSVECntIntrinsic(N0->getOperand(0)))))
14363 if (ConstValue.sge(1) && ConstValue.sle(16))
14366 // Multiplication of a power of two plus/minus one can be done more
14367 // cheaply as a shift+add/sub. For now, this is true unilaterally. If
14368 // future CPUs have a cheaper MADD instruction, this may need to be
14369 // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
14370 // 64-bit is 5 cycles, so this is always a win.
14371 // More aggressively, some multiplications N0 * C can be lowered to
14372 // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
14373 // e.g. 6=3*2=(2+1)*2.
14374 // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45
14375 // which equals to (1+2)*16-(1+2).
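// For example, C = 24 = (2^1 + 1) * 2^3:
//   (mul x, 24) => (shl (add (shl x, 1), x), 3)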
14377 // TrailingZeroes is used to test if the mul can be lowered to
14378 // shift+add+shift.
14379 unsigned TrailingZeroes = ConstValue.countTrailingZeros();
14380 if (TrailingZeroes) {
14381 // Conservatively do not lower to shift+add+shift if the mul might be
14382 // folded into smul or umul.
14383 if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
14384 isZeroExtended(N0.getNode(), DAG)))
14386 // Conservatively do not lower to shift+add+shift if the mul might be
14387 // folded into madd or msub.
14388 if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD ||
14389 N->use_begin()->getOpcode() == ISD::SUB))
14392 // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
14393 // and shift+add+shift.
14394 APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);
14396 unsigned ShiftAmt;
14397 // Is the shifted value the LHS operand of the add/sub?
14398 bool ShiftValUseIsN0 = true;
14399 // Do we need to negate the result?
14400 bool NegateResult = false;
14402 if (ConstValue.isNonNegative()) {
14403 // (mul x, 2^N + 1) => (add (shl x, N), x)
14404 // (mul x, 2^N - 1) => (sub (shl x, N), x)
14405 // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
14406 APInt SCVMinus1 = ShiftedConstValue - 1;
14407 APInt CVPlus1 = ConstValue + 1;
14408 if (SCVMinus1.isPowerOf2()) {
14409 ShiftAmt = SCVMinus1.logBase2();
14410 AddSubOpc = ISD::ADD;
14411 } else if (CVPlus1.isPowerOf2()) {
14412 ShiftAmt = CVPlus1.logBase2();
14413 AddSubOpc = ISD::SUB;
14414 } else
14415 return SDValue();
14416 } else {
14417 // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
14418 // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
14419 APInt CVNegPlus1 = -ConstValue + 1;
14420 APInt CVNegMinus1 = -ConstValue - 1;
14421 if (CVNegPlus1.isPowerOf2()) {
14422 ShiftAmt = CVNegPlus1.logBase2();
14423 AddSubOpc = ISD::SUB;
14424 ShiftValUseIsN0 = false;
14425 } else if (CVNegMinus1.isPowerOf2()) {
14426 ShiftAmt = CVNegMinus1.logBase2();
14427 AddSubOpc = ISD::ADD;
14428 NegateResult = true;
14429 } else
14430 return SDValue();
14431 }
14433 SDValue ShiftedVal = DAG.getNode(ISD::SHL, DL, VT, N0,
14434 DAG.getConstant(ShiftAmt, DL, MVT::i64));
14436 SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0;
14437 SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal;
14438 SDValue Res = DAG.getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1);
14439 assert(!(NegateResult && TrailingZeroes) &&
14440 "NegateResult and TrailingZeroes cannot both be true for now.");
14441 // Negate the result.
14442 if (NegateResult)
14443 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
14444 // Shift the result.
14445 if (TrailingZeroes)
14446 return DAG.getNode(ISD::SHL, DL, VT, Res,
14447 DAG.getConstant(TrailingZeroes, DL, MVT::i64));
14449 return Res;
14450 }
14451 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
14452 SelectionDAG &DAG) {
14453 // Take advantage of vector comparisons producing 0 or -1 in each lane to
14454 // optimize away operation when it's from a constant.
14456 // The general transformation is:
14457 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
14458 // AND(VECTOR_CMP(x,y), constant2)
14459 // constant2 = UNARYOP(constant)
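// For example, with UNARYOP == sint_to_fp:
//   (v4f32 (sint_to_fp (and (setcc a, b), <1,1,1,1>)))
// becomes
//   (v4f32 (bitcast (and (setcc a, b), (bitcast <1.0,1.0,1.0,1.0>))))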
14461 // Early exit if this isn't a vector operation, the operand of the
14462 // unary operation isn't a bitwise AND, or if the sizes of the operations
14463 // aren't the same.
14464 EVT VT = N->getValueType(0);
14465 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
14466 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
14467 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
14470 // Now check that the other operand of the AND is a constant. We could
14471 // make the transformation for non-constant splats as well, but it's unclear
14472 // that would be a benefit as it would not eliminate any operations, just
14473 // perform one more step in scalar code before moving to the vector unit.
14474 if (BuildVectorSDNode *BV =
14475 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
14476 // Bail out if the vector isn't a constant.
14477 if (!BV->isConstant())
14480 // Everything checks out. Build up the new and improved node.
14481 SDLoc DL(N);
14482 EVT IntVT = BV->getValueType(0);
14483 // Create a new constant of the appropriate type for the transformed
14485 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
14486 // The AND node needs bitcasts to/from an integer vector type around it.
14487 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
14488 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
14489 N->getOperand(0)->getOperand(0), MaskConst);
14490 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
14491 return Res;
14492 }
14494 return SDValue();
14495 }
14497 static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
14498 const AArch64Subtarget *Subtarget) {
14499 // First try to optimize away the conversion when it's conditionally from
14500 // a constant. Vectors only.
14501 if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
14504 EVT VT = N->getValueType(0);
14505 if (VT != MVT::f32 && VT != MVT::f64)
14508 // Only optimize when the source and destination types have the same width.
14509 if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
14512 // If the result of an integer load is only used by an integer-to-float
14513 // conversion, use an fp load and an AdvSIMD scalar {S|U}CVTF instead.
14514 // This eliminates an "integer-to-vector-move" UOP and improves throughput.
14515 SDValue N0 = N->getOperand(0);
14516 if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
14517 // Do not change the width of a volatile load.
14518 !cast<LoadSDNode>(N0)->isVolatile()) {
14519 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
14520 SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
14521 LN0->getPointerInfo(), LN0->getAlign(),
14522 LN0->getMemOperand()->getFlags());
14524 // Make sure successors of the original load stay after it by updating them
14525 // to use the new Chain.
14526 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1));
14528 unsigned Opcode =
14529 (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF : AArch64ISD::UITOF;
14530 return DAG.getNode(Opcode, SDLoc(N), VT, Load);
14536 /// Fold a floating-point multiply by power of two into floating-point to
14537 /// fixed-point conversion.
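/// For example:
///   (v4i32 (fp_to_sint (fmul (v4f32 x), <8.0,8.0,8.0,8.0>)))
/// can use the aarch64_neon_vcvtfp2fxs intrinsic with 3 fractional bits.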
14538 static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
14539 TargetLowering::DAGCombinerInfo &DCI,
14540 const AArch64Subtarget *Subtarget) {
14541 if (!Subtarget->hasNEON())
14544 if (!N->getValueType(0).isSimple())
14547 SDValue Op = N->getOperand(0);
14548 if (!Op.getValueType().isSimple() || Op.getOpcode() != ISD::FMUL)
14551 if (!Op.getValueType().is64BitVector() && !Op.getValueType().is128BitVector())
14554 SDValue ConstVec = Op->getOperand(1);
14555 if (!isa<BuildVectorSDNode>(ConstVec))
14558 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
14559 uint32_t FloatBits = FloatTy.getSizeInBits();
14560 if (FloatBits != 32 && FloatBits != 64 &&
14561 (FloatBits != 16 || !Subtarget->hasFullFP16()))
14564 MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
14565 uint32_t IntBits = IntTy.getSizeInBits();
14566 if (IntBits != 16 && IntBits != 32 && IntBits != 64)
14569 // Avoid conversions where iN is larger than the float (e.g., float -> i64).
14570 if (IntBits > FloatBits)
14573 BitVector UndefElements;
14574 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14575 int32_t Bits = IntBits == 64 ? 64 : 32;
14576 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1);
14577 if (C == -1 || C == 0 || C > Bits)
14580 EVT ResTy = Op.getValueType().changeVectorElementTypeToInteger();
14581 if (!DAG.getTargetLoweringInfo().isTypeLegal(ResTy))
14584 if (N->getOpcode() == ISD::FP_TO_SINT_SAT ||
14585 N->getOpcode() == ISD::FP_TO_UINT_SAT) {
14586 EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
14587 if (SatVT.getScalarSizeInBits() != IntBits || IntBits != FloatBits)
14588 return SDValue();
14589 }
14591 SDLoc DL(N);
14592 bool IsSigned = (N->getOpcode() == ISD::FP_TO_SINT ||
14593 N->getOpcode() == ISD::FP_TO_SINT_SAT);
14594 unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs
14595 : Intrinsic::aarch64_neon_vcvtfp2fxu;
14596 SDValue FixConv =
14597 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy,
14598 DAG.getConstant(IntrinsicOpcode, DL, MVT::i32),
14599 Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32));
14600 // We can handle smaller integers by generating an extra trunc.
14601 if (IntBits < FloatBits)
14602 FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv);
14604 return FixConv;
14607 /// Fold a floating-point divide by power of two into fixed-point to
14608 /// floating-point conversion.
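/// For example:
///   (v4f32 (fdiv (sint_to_fp (v4i32 x)), <16.0,16.0,16.0,16.0>))
/// can use the aarch64_neon_vcvtfxs2fp intrinsic with 4 fractional bits.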
14609 static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG,
14610 TargetLowering::DAGCombinerInfo &DCI,
14611 const AArch64Subtarget *Subtarget) {
14612 if (!Subtarget->hasNEON())
14615 SDValue Op = N->getOperand(0);
14616 unsigned Opc = Op->getOpcode();
14617 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
14618 !Op.getOperand(0).getValueType().isSimple() ||
14619 (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP))
14622 SDValue ConstVec = N->getOperand(1);
14623 if (!isa<BuildVectorSDNode>(ConstVec))
14626 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
14627 int32_t IntBits = IntTy.getSizeInBits();
14628 if (IntBits != 16 && IntBits != 32 && IntBits != 64)
14631 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
14632 int32_t FloatBits = FloatTy.getSizeInBits();
14633 if (FloatBits != 32 && FloatBits != 64)
14636 // Avoid conversions where iN is larger than the float (e.g., i64 -> float).
14637 if (IntBits > FloatBits)
14640 BitVector UndefElements;
14641 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14642 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1);
14643 if (C == -1 || C == 0 || C > FloatBits)
14644 return SDValue();
14646 MVT ResTy;
14647 unsigned NumLanes = Op.getValueType().getVectorNumElements();
14648 switch (NumLanes) {
14649 default:
14650 return SDValue();
14651 case 2:
14652 ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64;
14653 break;
14654 case 4:
14655 ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64;
14656 break;
14657 }
14659 if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps())
14660 return SDValue();
14662 SDLoc DL(N);
14663 SDValue ConvInput = Op.getOperand(0);
14664 bool IsSigned = Opc == ISD::SINT_TO_FP;
14665 if (IntBits < FloatBits)
14666 ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
14667 ResTy, ConvInput);
14669 unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp
14670 : Intrinsic::aarch64_neon_vcvtfxu2fp;
14671 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
14672 DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput,
14673 DAG.getConstant(C, DL, MVT::i32));
14676 /// An EXTR instruction is made up of two shifts, ORed together. This helper
14677 /// searches for and classifies those shifts.
14678 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
14679 bool &FromHi) {
14680 if (N.getOpcode() == ISD::SHL)
14681 FromHi = false;
14682 else if (N.getOpcode() == ISD::SRL)
14683 FromHi = true;
14684 else
14685 return false;
14687 if (!isa<ConstantSDNode>(N.getOperand(1)))
14688 return false;
14690 ShiftAmount = N->getConstantOperandVal(1);
14691 Src = N->getOperand(0);
14692 return true;
14693 }
14695 /// EXTR instruction extracts a contiguous chunk of bits from two existing
14696 /// registers viewed as a high/low pair. This function looks for the pattern:
14697 /// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it
14698 /// with an EXTR. Can't quite be done in TableGen because the two immediates
14699 /// aren't independent.
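/// For example, on i32:
///   (or (shl x, 12), (srl y, 20)) becomes (EXTR x, y, 20)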
14700 static SDValue tryCombineToEXTR(SDNode *N,
14701 TargetLowering::DAGCombinerInfo &DCI) {
14702 SelectionDAG &DAG = DCI.DAG;
14703 SDLoc DL(N);
14704 EVT VT = N->getValueType(0);
14706 assert(N->getOpcode() == ISD::OR && "Unexpected root");
14708 if (VT != MVT::i32 && VT != MVT::i64)
14709 return SDValue();
14711 SDValue LHS;
14712 uint32_t ShiftLHS = 0;
14713 bool LHSFromHi = false;
14714 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
14715 return SDValue();
14717 SDValue RHS;
14718 uint32_t ShiftRHS = 0;
14719 bool RHSFromHi = false;
14720 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
14721 return SDValue();
14723 // If they're both trying to come from the high part of the register, they're
14724 // not really an EXTR.
14725 if (LHSFromHi == RHSFromHi)
14726 return SDValue();
14728 if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
14729 return SDValue();
14731 if (LHSFromHi) {
14732 std::swap(LHS, RHS);
14733 std::swap(ShiftLHS, ShiftRHS);
14734 }
14736 return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS,
14737 DAG.getConstant(ShiftRHS, DL, MVT::i64));
14740 static SDValue tryCombineToBSL(SDNode *N,
14741 TargetLowering::DAGCombinerInfo &DCI) {
14742 EVT VT = N->getValueType(0);
14743 SelectionDAG &DAG = DCI.DAG;
14744 SDLoc DL(N);
14746 if (!VT.isVector())
14749 // The combining code currently only works for NEON vectors. In particular,
14750 // it does not work for SVE when dealing with vectors wider than 128 bits.
14751 if (!VT.is64BitVector() && !VT.is128BitVector())
14754 SDValue N0 = N->getOperand(0);
14755 if (N0.getOpcode() != ISD::AND)
14758 SDValue N1 = N->getOperand(1);
14759 if (N1.getOpcode() != ISD::AND)
14762 // InstCombine does (not (neg a)) => (add a -1).
14763 // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c)
14764 // Loop over all combinations of AND operands.
14765 for (int i = 1; i >= 0; --i) {
14766 for (int j = 1; j >= 0; --j) {
14767 SDValue O0 = N0->getOperand(i);
14768 SDValue O1 = N1->getOperand(j);
14769 SDValue Sub, Add, SubSibling, AddSibling;
14771 // Find a SUB and an ADD operand, one from each AND.
14772 if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) {
14773 Sub = O0;
14774 Add = O1;
14775 SubSibling = N0->getOperand(1 - i);
14776 AddSibling = N1->getOperand(1 - j);
14777 } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) {
14778 Add = O0;
14779 Sub = O1;
14780 AddSibling = N0->getOperand(1 - i);
14781 SubSibling = N1->getOperand(1 - j);
14782 } else
14783 continue;
14785 if (!ISD::isBuildVectorAllZeros(Sub.getOperand(0).getNode()))
14788 // Constant ones is always righthand operand of the Add.
14789 if (!ISD::isBuildVectorAllOnes(Add.getOperand(1).getNode()))
14792 if (Sub.getOperand(1) != Add.getOperand(0))
14795 return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling);
14799 // (or (and a b) (and (not a) c)) => (bsl a b c)
14800 // We only have to look for constant vectors here since the general, variable
14801 // case can be handled in TableGen.
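// For example, with a constant mask C and its complement ~C:
//   (or (and a, C), (and b, ~C)) => (bsl C, a, b)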
14802 unsigned Bits = VT.getScalarSizeInBits();
14803 uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
14804 for (int i = 1; i >= 0; --i)
14805 for (int j = 1; j >= 0; --j) {
14806 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i));
14807 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j));
14808 if (!BVN0 || !BVN1)
14811 bool FoundMatch = true;
14812 for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
14813 ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k));
14814 ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k));
14815 if (!CN0 || !CN1 ||
14816 CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
14817 FoundMatch = false;
14818 break;
14819 }
14820 }
14822 if (FoundMatch)
14823 return DAG.getNode(AArch64ISD::BSP, DL, VT, SDValue(BVN0, 0),
14824 N0->getOperand(1 - i), N1->getOperand(1 - j));
14830 // Given a tree of and/or(csel(0, 1, cc0), csel(0, 1, cc1)), we may be able to
14831 // convert to csel(ccmp(.., cc0)), depending on cc1:
14833 // (AND (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
14835 // (CSET cc1 (CCMP x1 y1 !cc1 cc0 cmp0))
14837 // (OR (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
14839 // (CSET cc1 (CCMP x1 y1 cc1 !cc0 cmp0))
14840 static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
14841 EVT VT = N->getValueType(0);
14842 SDValue CSel0 = N->getOperand(0);
14843 SDValue CSel1 = N->getOperand(1);
14845 if (CSel0.getOpcode() != AArch64ISD::CSEL ||
14846 CSel1.getOpcode() != AArch64ISD::CSEL)
14849 if (!CSel0->hasOneUse() || !CSel1->hasOneUse())
14852 if (!isNullConstant(CSel0.getOperand(0)) ||
14853 !isOneConstant(CSel0.getOperand(1)) ||
14854 !isNullConstant(CSel1.getOperand(0)) ||
14855 !isOneConstant(CSel1.getOperand(1)))
14858 SDValue Cmp0 = CSel0.getOperand(3);
14859 SDValue Cmp1 = CSel1.getOperand(3);
14860 AArch64CC::CondCode CC0 = (AArch64CC::CondCode)CSel0.getConstantOperandVal(2);
14861 AArch64CC::CondCode CC1 = (AArch64CC::CondCode)CSel1.getConstantOperandVal(2);
14862 if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
14864 if (Cmp1.getOpcode() != AArch64ISD::SUBS &&
14865 Cmp0.getOpcode() == AArch64ISD::SUBS) {
14866 std::swap(Cmp0, Cmp1);
14867 std::swap(CC0, CC1);
14870 if (Cmp1.getOpcode() != AArch64ISD::SUBS)
14871 return SDValue();
14873 SDLoc DL(N);
14874 SDValue CCmp;
14876 if (N->getOpcode() == ISD::AND) {
14877 AArch64CC::CondCode InvCC0 = AArch64CC::getInvertedCondCode(CC0);
14878 SDValue Condition = DAG.getConstant(InvCC0, DL, MVT_CC);
14879 unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1);
14880 SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
14881 CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
14882 Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
14883 } else {
14885 AArch64CC::CondCode InvCC1 = AArch64CC::getInvertedCondCode(CC1);
14886 SDValue Condition = DAG.getConstant(CC0, DL, MVT_CC);
14887 unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1);
14888 SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
14889 CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
14890 Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
14892 return DAG.getNode(AArch64ISD::CSEL, DL, VT, CSel0.getOperand(0),
14893 CSel0.getOperand(1), DAG.getConstant(CC1, DL, MVT::i32),
14894 CCmp);
14895 }
14897 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
14898 const AArch64Subtarget *Subtarget) {
14899 SelectionDAG &DAG = DCI.DAG;
14900 EVT VT = N->getValueType(0);
14902 if (SDValue R = performANDORCSELCombine(N, DAG))
14903 return R;
14905 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14906 return SDValue();
14908 // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N))
14909 if (SDValue Res = tryCombineToEXTR(N, DCI))
14910 return Res;
14912 if (SDValue Res = tryCombineToBSL(N, DCI))
14913 return Res;
14915 return SDValue();
14916 }
14918 static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
14919 if (!MemVT.getVectorElementType().isSimple())
14920 return false;
14922 uint64_t MaskForTy = 0ull;
14923 switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
14924 case MVT::i8:
14925 MaskForTy = 0xffull;
14926 break;
14927 case MVT::i16:
14928 MaskForTy = 0xffffull;
14929 break;
14930 case MVT::i32:
14931 MaskForTy = 0xffffffffull;
14932 break;
14933 default:
14934 return false;
14935 }
14938 if (N->getOpcode() == AArch64ISD::DUP || N->getOpcode() == ISD::SPLAT_VECTOR)
14939 if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0)))
14940 return Op0->getAPIntValue().getLimitedValue() == MaskForTy;
14945 static SDValue performSVEAndCombine(SDNode *N,
14946 TargetLowering::DAGCombinerInfo &DCI) {
14947 if (DCI.isBeforeLegalizeOps())
14950 SelectionDAG &DAG = DCI.DAG;
14951 SDValue Src = N->getOperand(0);
14952 unsigned Opc = Src->getOpcode();
14954 // Zero/any extend of an unsigned unpack
14955 if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
14956 SDValue UnpkOp = Src->getOperand(0);
14957 SDValue Dup = N->getOperand(1);
14959 if (Dup.getOpcode() != ISD::SPLAT_VECTOR)
14960 return SDValue();
14962 SDLoc DL(N);
14963 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0));
14964 if (!C)
14965 return SDValue();
14967 uint64_t ExtVal = C->getZExtValue();
14969 // If the mask is fully covered by the unpack, we don't need to push
14970 // a new AND onto the operand
14971 EVT EltTy = UnpkOp->getValueType(0).getVectorElementType();
14972 if ((ExtVal == 0xFF && EltTy == MVT::i8) ||
14973 (ExtVal == 0xFFFF && EltTy == MVT::i16) ||
14974 (ExtVal == 0xFFFFFFFF && EltTy == MVT::i32))
14975 return Src;
14977 // Truncate to prevent a DUP with an over wide constant
14978 APInt Mask = C->getAPIntValue().trunc(EltTy.getSizeInBits());
14980 // Otherwise, make sure we propagate the AND to the operand
14982 Dup = DAG.getNode(ISD::SPLAT_VECTOR, DL, UnpkOp->getValueType(0),
14983 DAG.getConstant(Mask.zextOrTrunc(32), DL, MVT::i32));
14985 SDValue And = DAG.getNode(ISD::AND, DL,
14986 UnpkOp->getValueType(0), UnpkOp, Dup);
14988 return DAG.getNode(Opc, DL, N->getValueType(0), And);
14991 if (!EnableCombineMGatherIntrinsics)
14994 SDValue Mask = N->getOperand(1);
14996 if (!Src.hasOneUse())
14997 return SDValue();
14999 EVT MemVT;
15001 // SVE load instructions perform an implicit zero-extend, which makes them
15002 // perfect candidates for combining.
15003 switch (Opc) {
15004 case AArch64ISD::LD1_MERGE_ZERO:
15005 case AArch64ISD::LDNF1_MERGE_ZERO:
15006 case AArch64ISD::LDFF1_MERGE_ZERO:
15007 MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
15009 case AArch64ISD::GLD1_MERGE_ZERO:
15010 case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
15011 case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
15012 case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
15013 case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
15014 case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
15015 case AArch64ISD::GLD1_IMM_MERGE_ZERO:
15016 case AArch64ISD::GLDFF1_MERGE_ZERO:
15017 case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
15018 case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
15019 case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
15020 case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
15021 case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
15022 case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
15023 case AArch64ISD::GLDNT1_MERGE_ZERO:
15024 MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
15025 break;
15026 default:
15027 return SDValue();
15028 }
15030 if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
15031 return Src;
15033 return SDValue();
15034 }
15036 static SDValue performANDCombine(SDNode *N,
15037 TargetLowering::DAGCombinerInfo &DCI) {
15038 SelectionDAG &DAG = DCI.DAG;
15039 SDValue LHS = N->getOperand(0);
15040 SDValue RHS = N->getOperand(1);
15041 EVT VT = N->getValueType(0);
15043 if (SDValue R = performANDORCSELCombine(N, DAG))
15046 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
15049 if (VT.isScalableVector())
15050 return performSVEAndCombine(N, DCI);
15052 // The combining code below works only for NEON vectors. In particular, it
15053 // does not work for SVE when dealing with vectors wider than 128 bits.
15054 if (!VT.is64BitVector() && !VT.is128BitVector())
15057 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
15061 // AND does not accept an immediate, so check if we can use a BIC immediate
15062 // instruction instead. We do this here instead of using a (and x, (mvni imm))
15063 // pattern in isel, because some immediates may be lowered to the preferred
15064 // (and x, (movi imm)) form, even though an mvni representation also exists.
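// For example, (v4i32 (and x, <0xffffff00,...>)) can be selected as a BIC with
// the byte immediate 0xff rather than an AND that needs the mask in a register.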
15065 APInt DefBits(VT.getSizeInBits(), 0);
15066 APInt UndefBits(VT.getSizeInBits(), 0);
15067 if (resolveBuildVector(BVN, DefBits, UndefBits)) {
15068 SDValue NewOp;
15070 DefBits = ~DefBits;
15071 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
15072 DefBits, &LHS)) ||
15073 (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
15074 DefBits, &LHS)))
15075 return NewOp;
15077 UndefBits = ~UndefBits;
15078 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
15079 UndefBits, &LHS)) ||
15080 (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
15081 UndefBits, &LHS)))
15082 return NewOp;
15083 }
15085 return SDValue();
15086 }
15088 static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {
15089 switch (Opcode) {
15090 case ISD::STRICT_FADD:
15091 case ISD::FADD:
15092 return (FullFP16 && VT == MVT::f16) || VT == MVT::f32 || VT == MVT::f64;
15093 case ISD::ADD:
15094 return VT == MVT::i64;
15095 default:
15096 return false;
15097 }
15098 }
15100 static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
15101 AArch64CC::CondCode Cond);
15103 static bool isPredicateCCSettingOp(SDValue N) {
15104 if ((N.getOpcode() == ISD::SETCC) ||
15105 (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15106 (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
15107 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
15108 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehi ||
15109 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehs ||
15110 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
15111 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
15112 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
15113 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
15114 // get_active_lane_mask is lowered to a whilelo instruction.
15115 N.getConstantOperandVal(0) == Intrinsic::get_active_lane_mask)))
15116 return true;
15118 return false;
15119 }
15121 // Materialize : i1 = extract_vector_elt t37, Constant:i64<0>
15122 // ... into: "ptrue p, all" + PTEST
15123 static SDValue
15124 performFirstTrueTestVectorCombine(SDNode *N,
15125 TargetLowering::DAGCombinerInfo &DCI,
15126 const AArch64Subtarget *Subtarget) {
15127 assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15128 // Make sure PTEST can be legalised with illegal types.
15129 if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
15132 SDValue N0 = N->getOperand(0);
15133 EVT VT = N0.getValueType();
15135 if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1 ||
15136 !isNullConstant(N->getOperand(1)))
15139 // Restricted the DAG combine to only cases where we're extracting from a
15140 // flag-setting operation.
15141 if (!isPredicateCCSettingOp(N0))
15144 // Extracts of lane 0 for SVE can be expressed as PTEST(Op, FIRST) ? 1 : 0
15145 SelectionDAG &DAG = DCI.DAG;
15146 SDValue Pg = getPTrue(DAG, SDLoc(N), VT, AArch64SVEPredPattern::all);
15147 return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::FIRST_ACTIVE);
15150 // Materialize : Idx = (add (mul vscale, NumEls), -1)
15151 // i1 = extract_vector_elt t37, Constant:i64<Idx>
15152 // ... into: "ptrue p, all" + PTEST
15153 static SDValue
15154 performLastTrueTestVectorCombine(SDNode *N,
15155 TargetLowering::DAGCombinerInfo &DCI,
15156 const AArch64Subtarget *Subtarget) {
15157 assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15158 // Make sure PTEST is legal types.
15159 if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
15162 SDValue N0 = N->getOperand(0);
15163 EVT OpVT = N0.getValueType();
15165 if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
15168 // Idx == (add (mul vscale, NumEls), -1)
15169 SDValue Idx = N->getOperand(1);
15170 if (Idx.getOpcode() != ISD::ADD || !isAllOnesConstant(Idx.getOperand(1)))
15173 SDValue VS = Idx.getOperand(0);
15174 if (VS.getOpcode() != ISD::VSCALE)
15177 unsigned NumEls = OpVT.getVectorElementCount().getKnownMinValue();
15178 if (VS.getConstantOperandVal(0) != NumEls)
15181 // Extracts of lane EC-1 for SVE can be expressed as PTEST(Op, LAST) ? 1 : 0
15182 SelectionDAG &DAG = DCI.DAG;
15183 SDValue Pg = getPTrue(DAG, SDLoc(N), OpVT, AArch64SVEPredPattern::all);
15184 return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE);
15187 static SDValue
15188 performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15189 const AArch64Subtarget *Subtarget) {
15190 assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15191 if (SDValue Res = performFirstTrueTestVectorCombine(N, DCI, Subtarget))
15193 if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget))
15196 SelectionDAG &DAG = DCI.DAG;
15197 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
15198 ConstantSDNode *ConstantN1 = dyn_cast<ConstantSDNode>(N1);
15200 EVT VT = N->getValueType(0);
15201 const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
15202 bool IsStrict = N0->isStrictFPOpcode();
15204 // extract(dup x) -> x
15205 if (N0.getOpcode() == AArch64ISD::DUP)
15206 return DAG.getZExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
15208 // Rewrite for pairwise fadd pattern
15209 // (f32 (extract_vector_elt
15210 // (fadd (vXf32 Other)
15211 // (vector_shuffle (vXf32 Other) undef <1,X,...> )) 0))
15213 // (f32 (fadd (extract_vector_elt (vXf32 Other) 0)
15214 // (extract_vector_elt (vXf32 Other) 1))
15215 // For strict_fadd we need to make sure the old strict_fadd can be deleted, so
15216 // we can only do this when it's used only by the extract_vector_elt.
15217 if (ConstantN1 && ConstantN1->getZExtValue() == 0 &&
15218 hasPairwiseAdd(N0->getOpcode(), VT, FullFP16) &&
15219 (!IsStrict || N0.hasOneUse())) {
15220 SDLoc DL(N0);
15221 SDValue N00 = N0->getOperand(IsStrict ? 1 : 0);
15222 SDValue N01 = N0->getOperand(IsStrict ? 2 : 1);
15224 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(N01);
15225 SDValue Other = N00;
15227 // And handle the commutative case.
15228 if (!Shuffle) {
15229 Shuffle = dyn_cast<ShuffleVectorSDNode>(N00);
15230 Other = N01;
15231 }
15233 if (Shuffle && Shuffle->getMaskElt(0) == 1 &&
15234 Other == Shuffle->getOperand(0)) {
15235 SDValue Extract1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
15236 DAG.getConstant(0, DL, MVT::i64));
15237 SDValue Extract2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
15238 DAG.getConstant(1, DL, MVT::i64));
15239 if (!IsStrict)
15240 return DAG.getNode(N0->getOpcode(), DL, VT, Extract1, Extract2);
15242 // For strict_fadd we need uses of the final extract_vector to be replaced
15243 // with the strict_fadd, but we also need uses of the chain output of the
15244 // original strict_fadd to use the chain output of the new strict_fadd as
15245 // otherwise it may not be deleted.
15246 SDValue Ret = DAG.getNode(N0->getOpcode(), DL,
15247 {VT, MVT::Other},
15248 {N0->getOperand(0), Extract1, Extract2});
15249 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Ret);
15250 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Ret.getValue(1));
15251 return SDValue(N, 0);
15258 static SDValue performConcatVectorsCombine(SDNode *N,
15259 TargetLowering::DAGCombinerInfo &DCI,
15260 SelectionDAG &DAG) {
15261 SDLoc dl(N);
15262 EVT VT = N->getValueType(0);
15263 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
15264 unsigned N0Opc = N0->getOpcode(), N1Opc = N1->getOpcode();
15266 if (VT.isScalableVector())
15269 // Optimize concat_vectors of truncated vectors, where the intermediate
15270 // type is illegal, to avoid said illegality, e.g.,
15271 // (v4i16 (concat_vectors (v2i16 (truncate (v2i64))),
15272 // (v2i16 (truncate (v2i64)))))
15274 // (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))),
15275 // (v4i32 (bitcast (v2i64))),
15277 // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed
15278 // on both input and result type, so we might generate worse code.
15279 // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8.
15280 if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE &&
15281 N1Opc == ISD::TRUNCATE) {
15282 SDValue N00 = N0->getOperand(0);
15283 SDValue N10 = N1->getOperand(0);
15284 EVT N00VT = N00.getValueType();
15286 if (N00VT == N10.getValueType() &&
15287 (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) &&
15288 N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) {
15289 MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16);
15290 SmallVector<int, 8> Mask(MidVT.getVectorNumElements());
15291 for (size_t i = 0; i < Mask.size(); ++i)
15292 Mask[i] = i * 2;
15293 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15294 DAG.getVectorShuffle(
15296 DAG.getNode(ISD::BITCAST, dl, MidVT, N00),
15297 DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask));
15301 if (N->getOperand(0).getValueType() == MVT::v4i8) {
15302 // If we have a concat of v4i8 loads, convert them to a buildvector of f32
15303 // loads to prevent having to go through the v4i8 load legalization that
15304 // needs to extend each element into a larger type.
15305 if (N->getNumOperands() % 2 == 0 && all_of(N->op_values(), [](SDValue V) {
15306 if (V.getValueType() != MVT::v4i8)
15307 return false;
15308 if (V.isUndef())
15309 return true;
15310 LoadSDNode *LD = dyn_cast<LoadSDNode>(V);
15311 return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() &&
15312 LD->getExtensionType() == ISD::NON_EXTLOAD;
15313 })) {
15314 EVT NVT =
15315 EVT::getVectorVT(*DAG.getContext(), MVT::f32, N->getNumOperands());
15316 SmallVector<SDValue> Ops;
15318 for (unsigned i = 0; i < N->getNumOperands(); i++) {
15319 SDValue V = N->getOperand(i);
15320 if (V.isUndef())
15321 Ops.push_back(DAG.getUNDEF(MVT::f32));
15322 else {
15323 LoadSDNode *LD = cast<LoadSDNode>(V);
15324 SDValue NewLoad =
15325 DAG.getLoad(MVT::f32, dl, LD->getChain(), LD->getBasePtr(),
15326 LD->getMemOperand());
15327 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLoad.getValue(1));
15328 Ops.push_back(NewLoad);
15331 return DAG.getBitcast(N->getValueType(0),
15332 DAG.getBuildVector(NVT, dl, Ops));
15337 // Wait 'til after everything is legalized to try this. That way we have
15338 // legal vector types and such.
15339 if (DCI.isBeforeLegalizeOps())
15342 // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use
15343 // extracted subvectors from the same original vectors. Combine these into a
15344 // single avg that operates on the two original vectors.
15345 // avgceil is the target independent name for rhadd, avgfloor is a hadd.
15347 // (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
15348 // extract_subvector (v16i8 OpB, <0>))),
15349 // (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
15350 // extract_subvector (v16i8 OpB, <8>)))))
15352 // (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
15353 if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
15354 (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
15355 N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
15356 SDValue N00 = N0->getOperand(0);
15357 SDValue N01 = N0->getOperand(1);
15358 SDValue N10 = N1->getOperand(0);
15359 SDValue N11 = N1->getOperand(1);
15361 EVT N00VT = N00.getValueType();
15362 EVT N10VT = N10.getValueType();
15364 if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15365 N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15366 N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15367 N11->getOpcode() == ISD::EXTRACT_SUBVECTOR && N00VT == N10VT) {
15368 SDValue N00Source = N00->getOperand(0);
15369 SDValue N01Source = N01->getOperand(0);
15370 SDValue N10Source = N10->getOperand(0);
15371 SDValue N11Source = N11->getOperand(0);
15373 if (N00Source == N10Source && N01Source == N11Source &&
15374 N00Source.getValueType() == VT && N01Source.getValueType() == VT) {
15375 assert(N0.getValueType() == N1.getValueType());
15377 uint64_t N00Index = N00.getConstantOperandVal(1);
15378 uint64_t N01Index = N01.getConstantOperandVal(1);
15379 uint64_t N10Index = N10.getConstantOperandVal(1);
15380 uint64_t N11Index = N11.getConstantOperandVal(1);
15382 if (N00Index == N01Index && N10Index == N11Index && N00Index == 0 &&
15383 N10Index == N00VT.getVectorNumElements())
15384 return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
15389 // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector
15390 // splat. The indexed instructions are going to be expecting a DUPLANE64, so
15391 // canonicalise to that.
15392 if (N->getNumOperands() == 2 && N0 == N1 && VT.getVectorNumElements() == 2) {
15393 assert(VT.getScalarSizeInBits() == 64);
15394 return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
15395 DAG.getConstant(0, dl, MVT::i64));
15398 // Canonicalise concat_vectors so that the right-hand vector has as few
15399 // bit-casts as possible before its real operation. The primary matching
15400 // destination for these operations will be the narrowing "2" instructions,
15401 // which depend on the operation being performed on this right-hand vector.
15403 // (concat_vectors LHS, (v1i64 (bitconvert (v4i16 RHS))))
15405 // (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS))
15407 if (N->getNumOperands() != 2 || N1Opc != ISD::BITCAST)
15409 SDValue RHS = N1->getOperand(0);
15410 MVT RHSTy = RHS.getValueType().getSimpleVT();
15411 // If the RHS is not a vector, this is not the pattern we're looking for.
15412 if (!RHSTy.isVector())
15413 return SDValue();
15415 LLVM_DEBUG(
15416 dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");
15418 MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(),
15419 RHSTy.getVectorNumElements() * 2);
15420 return DAG.getNode(ISD::BITCAST, dl, VT,
15421 DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy,
15422 DAG.getNode(ISD::BITCAST, dl, RHSTy, N0),
15423 RHS));
15424 }
15426 static SDValue
15427 performExtractSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15428 SelectionDAG &DAG) {
15429 if (DCI.isBeforeLegalizeOps())
15432 EVT VT = N->getValueType(0);
15433 if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
15436 SDValue V = N->getOperand(0);
15438 // NOTE: This combine exists in DAGCombiner, but that version's legality check
15439 // blocks this combine because the non-const case requires custom lowering.
15441 // ty1 extract_vector(ty2 splat(const))) -> ty1 splat(const)
15442 if (V.getOpcode() == ISD::SPLAT_VECTOR)
15443 if (isa<ConstantSDNode>(V.getOperand(0)))
15444 return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V.getOperand(0));
15449 static SDValue
15450 performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15451 SelectionDAG &DAG) {
15452 SDLoc DL(N);
15453 SDValue Vec = N->getOperand(0);
15454 SDValue SubVec = N->getOperand(1);
15455 uint64_t IdxVal = N->getConstantOperandVal(2);
15456 EVT VecVT = Vec.getValueType();
15457 EVT SubVT = SubVec.getValueType();
15459 // Only do this for legal fixed vector types.
15460 if (!VecVT.isFixedLengthVector() ||
15461 !DAG.getTargetLoweringInfo().isTypeLegal(VecVT) ||
15462 !DAG.getTargetLoweringInfo().isTypeLegal(SubVT))
15465 // Ignore widening patterns.
15466 if (IdxVal == 0 && Vec.isUndef())
15469 // Subvector must be half the width and an "aligned" insertion.
15470 unsigned NumSubElts = SubVT.getVectorNumElements();
15471 if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() ||
15472 (IdxVal != 0 && IdxVal != NumSubElts))
15475 // Fold insert_subvector -> concat_vectors
15476 // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi))
15477 // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub)
15478 SDValue Lo, Hi;
15479 if (IdxVal == 0) {
15480 Lo = SubVec;
15481 Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15482 DAG.getVectorIdxConstant(NumSubElts, DL));
15483 } else {
15484 Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15485 DAG.getVectorIdxConstant(0, DL));
15486 Hi = SubVec;
15487 }
15488 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi);
15491 static SDValue tryCombineFixedPointConvert(SDNode *N,
15492 TargetLowering::DAGCombinerInfo &DCI,
15493 SelectionDAG &DAG) {
15494 // Wait until after everything is legalized to try this. That way we have
15495 // legal vector types and such.
15496 if (DCI.isBeforeLegalizeOps())
15498 // Transform a scalar conversion of a value from a lane extract into a
15499 // lane extract of a vector conversion. E.g., from foo1 to foo2:
15500 // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); }
15501 // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; }
15503 // The second form interacts better with instruction selection and the
15504 // register allocator to avoid cross-class register copies that aren't
15505 // coalescable due to a lane reference.
15507 // Check the operand and see if it originates from a lane extract.
15508 SDValue Op1 = N->getOperand(1);
15509 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15512 // Yep, no additional predication needed. Perform the transform.
15513 SDValue IID = N->getOperand(0);
15514 SDValue Shift = N->getOperand(2);
15515 SDValue Vec = Op1.getOperand(0);
15516 SDValue Lane = Op1.getOperand(1);
15517 EVT ResTy = N->getValueType(0);
15518 EVT VecResTy;
15519 SDLoc DL(N);
15521 // The vector width should be 128 bits by the time we get here, even
15522 // if it started as 64 bits (the extract_vector handling will have
15523 // done so). Bail if it is not.
15524 if (Vec.getValueSizeInBits() != 128)
15527 if (Vec.getValueType() == MVT::v4i32)
15528 VecResTy = MVT::v4f32;
15529 else if (Vec.getValueType() == MVT::v2i64)
15530 VecResTy = MVT::v2f64;
15531 else
15532 llvm_unreachable("unexpected vector type!");
15534 SDValue Convert =
15535 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
15536 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
15539 // AArch64 high-vector "long" operations are formed by performing the non-high
15540 // version on an extract_subvector of each operand which gets the high half:
15542 // (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS))
15544 // However, there are cases which don't have an extract_high explicitly, but
15545 // have another operation that can be made compatible with one for free. For
15548 // (dupv64 scalar) --> (extract_high (dup128 scalar))
15550 // This routine does the actual conversion of such DUPs, once outer routines
15551 // have determined that everything else is in order.
15552 // It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold
15554 static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
15555 MVT VT = N.getSimpleValueType();
15556 if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15557 N.getConstantOperandVal(1) == 0)
15558 N = N.getOperand(0);
15560 switch (N.getOpcode()) {
15561 case AArch64ISD::DUP:
15562 case AArch64ISD::DUPLANE8:
15563 case AArch64ISD::DUPLANE16:
15564 case AArch64ISD::DUPLANE32:
15565 case AArch64ISD::DUPLANE64:
15566 case AArch64ISD::MOVI:
15567 case AArch64ISD::MOVIshift:
15568 case AArch64ISD::MOVIedit:
15569 case AArch64ISD::MOVImsl:
15570 case AArch64ISD::MVNIshift:
15571 case AArch64ISD::MVNImsl:
15572 break;
15573 default:
15574 // FMOV could be supported, but isn't very useful, as it would only occur
15575 // if you passed a bitcast floating point immediate to an eligible long
15576 // integer op (addl, smull, ...).
15577 return SDValue();
15578 }
15580 if (!VT.is64BitVector())
15581 return SDValue();
15583 SDLoc DL(N);
15584 unsigned NumElems = VT.getVectorNumElements();
15585 if (N.getValueType().is64BitVector()) {
15586 MVT ElementTy = VT.getVectorElementType();
15587 MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);
15588 N = DAG.getNode(N->getOpcode(), DL, NewVT, N->ops());
15591 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N,
15592 DAG.getConstant(NumElems, DL, MVT::i64));
15595 static bool isEssentiallyExtractHighSubvector(SDValue N) {
15596 if (N.getOpcode() == ISD::BITCAST)
15597 N = N.getOperand(0);
15598 if (N.getOpcode() != ISD::EXTRACT_SUBVECTOR)
15600 if (N.getOperand(0).getValueType().isScalableVector())
15602 return cast<ConstantSDNode>(N.getOperand(1))->getAPIntValue() ==
15603 N.getOperand(0).getValueType().getVectorNumElements() / 2;
15606 /// Helper structure to keep track of ISD::SET_CC operands.
15607 struct GenericSetCCInfo {
15608 const SDValue *Opnd0;
15609 const SDValue *Opnd1;
15610 ISD::CondCode CC;
15611 };
15613 /// Helper structure to keep track of a SET_CC lowered into AArch64 code.
15614 struct AArch64SetCCInfo {
15615 const SDValue *Cmp;
15616 AArch64CC::CondCode CC;
15617 };
15619 /// Helper structure to keep track of SetCC information.
15620 union SetCCInfo {
15621 GenericSetCCInfo Generic;
15622 AArch64SetCCInfo AArch64;
15623 };
15625 /// Helper structure to be able to read SetCC information. If set to
15626 /// true, IsAArch64 field, Info is a AArch64SetCCInfo, otherwise Info is a
15627 /// GenericSetCCInfo.
15628 struct SetCCInfoAndKind {
15629 SetCCInfo Info;
15630 bool IsAArch64;
15631 };
15633 /// Check whether or not \p Op is a SET_CC operation, either a generic or
15635 /// AArch64 lowered one.
15636 /// \p SetCCInfo is filled accordingly.
15637 /// \post SetCCInfo is meaningful only when this function returns true.
15638 /// \return True when Op is a kind of SET_CC operation.
15639 static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
15640 // If this is a setcc, this is straight forward.
15641 if (Op.getOpcode() == ISD::SETCC) {
15642 SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0);
15643 SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1);
15644 SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15645 SetCCInfo.IsAArch64 = false;
15646 return true;
15647 }
15648 // Otherwise, check if this is a matching csel instruction.
15651 // - csel 0, 1, !cc
15652 if (Op.getOpcode() != AArch64ISD::CSEL)
15654 // Set the information about the operands.
15655 // TODO: we want the operands of the Cmp not the csel
15656 SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
15657 SetCCInfo.IsAArch64 = true;
15658 SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
15659 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
15661 // Check that the operands matches the constraints:
15662 // (1) Both operands must be constants.
15663 // (2) One must be 1 and the other must be 0.
15664 ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0));
15665 ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1));
15668 if (!TValue || !FValue)
15672 if (!TValue->isOne()) {
15673 // Update the comparison when we are interested in !cc.
15674 std::swap(TValue, FValue);
15675 SetCCInfo.Info.AArch64.CC =
15676 AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC);
15678 return TValue->isOne() && FValue->isZero();
15681 // Returns true if Op is setcc or zext of setcc.
15682 static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) {
15683 if (isSetCC(Op, Info))
15685 return ((Op.getOpcode() == ISD::ZERO_EXTEND) &&
15686 isSetCC(Op->getOperand(0), Info));
15689 // The folding we want to perform is:
15690 // (add x, [zext] (setcc cc ...) )
15692 // (csel x, (add x, 1), !cc ...)
15694 // The latter will get matched to a CSINC instruction.
15695 static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
15696 assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!");
15697 SDValue LHS = Op->getOperand(0);
15698 SDValue RHS = Op->getOperand(1);
15699 SetCCInfoAndKind InfoAndKind;
15701 // If both operands are a SET_CC, then we don't want to perform this
15702 // folding and create another csel as this results in more instructions
15703 // (and higher register usage).
15704 if (isSetCCOrZExtSetCC(LHS, InfoAndKind) &&
15705 isSetCCOrZExtSetCC(RHS, InfoAndKind))
15708 // If neither operand is a SET_CC, give up.
15709 if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) {
15710 std::swap(LHS, RHS);
15711 if (!isSetCCOrZExtSetCC(LHS, InfoAndKind))
15715 // FIXME: This could be generalized to work for FP comparisons.
15716 EVT CmpVT = InfoAndKind.IsAArch64
15717 ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
15718 : InfoAndKind.Info.Generic.Opnd0->getValueType();
15719 if (CmpVT != MVT::i32 && CmpVT != MVT::i64)
15720 return SDValue();
15722 SDValue Cmp;
15723 SDValue CCVal;
15724 SDLoc dl(Op);
15725 if (InfoAndKind.IsAArch64) {
15726 CCVal = DAG.getConstant(
15727 AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl,
15728 MVT::i32);
15729 Cmp = *InfoAndKind.Info.AArch64.Cmp;
15730 } else
15731 Cmp = getAArch64Cmp(
15732 *InfoAndKind.Info.Generic.Opnd0, *InfoAndKind.Info.Generic.Opnd1,
15733 ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, CmpVT), CCVal, DAG,
15734 dl);
15736 EVT VT = Op->getValueType(0);
15737 LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT));
15738 return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
15741 // ADD(UADDV a, UADDV b) --> UADDV(ADD a, b)
15742 static SDValue performAddUADDVCombine(SDNode *N, SelectionDAG &DAG) {
15743 EVT VT = N->getValueType(0);
15744 // Only scalar integer and vector types.
15745 if (N->getOpcode() != ISD::ADD || !VT.isScalarInteger())
15748 SDValue LHS = N->getOperand(0);
15749 SDValue RHS = N->getOperand(1);
15750 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
15751 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || LHS.getValueType() != VT)
15754 auto *LHSN1 = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
15755 auto *RHSN1 = dyn_cast<ConstantSDNode>(RHS->getOperand(1));
15756 if (!LHSN1 || LHSN1 != RHSN1 || !RHSN1->isZero())
15759 SDValue Op1 = LHS->getOperand(0);
15760 SDValue Op2 = RHS->getOperand(0);
15761 EVT OpVT1 = Op1.getValueType();
15762 EVT OpVT2 = Op2.getValueType();
15763 if (Op1.getOpcode() != AArch64ISD::UADDV || OpVT1 != OpVT2 ||
15764 Op2.getOpcode() != AArch64ISD::UADDV ||
15765 OpVT1.getVectorElementType() != VT)
15768 SDValue Val1 = Op1.getOperand(0);
15769 SDValue Val2 = Op2.getOperand(0);
15770 EVT ValVT = Val1->getValueType(0);
15771 SDLoc DL(N);
15772 SDValue AddVal = DAG.getNode(ISD::ADD, DL, ValVT, Val1, Val2);
15773 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
15774 DAG.getNode(AArch64ISD::UADDV, DL, ValVT, AddVal),
15775 DAG.getConstant(0, DL, MVT::i64));
15778 /// Perform the scalar expression combine in the form of:
15779 /// CSEL(c, 1, cc) + b => CSINC(b+c, b, cc)
15780 /// CSNEG(c, -1, cc) + b => CSINC(b+c, b, cc)
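/// For example:
///   (add (csel 5, 1, cc), b) => (csinc (add b, 5), b, cc)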
15781 static SDValue performAddCSelIntoCSinc(SDNode *N, SelectionDAG &DAG) {
15782 EVT VT = N->getValueType(0);
15783 if (!VT.isScalarInteger() || N->getOpcode() != ISD::ADD)
15786 SDValue LHS = N->getOperand(0);
15787 SDValue RHS = N->getOperand(1);
15789 // Handle commutivity.
15790 if (LHS.getOpcode() != AArch64ISD::CSEL &&
15791 LHS.getOpcode() != AArch64ISD::CSNEG) {
15792 std::swap(LHS, RHS);
15793 if (LHS.getOpcode() != AArch64ISD::CSEL &&
15794 LHS.getOpcode() != AArch64ISD::CSNEG) {
15799 if (!LHS.hasOneUse())
15802 AArch64CC::CondCode AArch64CC =
15803 static_cast<AArch64CC::CondCode>(LHS.getConstantOperandVal(2));
15805 // The CSEL should include a const one operand, and the CSNEG should include
15806 // One or NegOne operand.
15807 ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(LHS.getOperand(0));
15808 ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
15809 if (!CTVal || !CFVal)
15812 if (!(LHS.getOpcode() == AArch64ISD::CSEL &&
15813 (CTVal->isOne() || CFVal->isOne())) &&
15814 !(LHS.getOpcode() == AArch64ISD::CSNEG &&
15815 (CTVal->isOne() || CFVal->isAllOnes())))
15816 return SDValue();
15817 SDLoc DL(N);
15818 // Switch CSEL(1, c, cc) to CSEL(c, 1, !cc)
15819 if (LHS.getOpcode() == AArch64ISD::CSEL && CTVal->isOne() &&
15820 !CFVal->isOne()) {
15821 std::swap(CTVal, CFVal);
15822 AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
15826 // Switch CSNEG(1, c, cc) to CSNEG(-c, -1, !cc)
15827 if (LHS.getOpcode() == AArch64ISD::CSNEG && CTVal->isOne() &&
15828 !CFVal->isAllOnes()) {
15829 APInt C = -1 * CFVal->getAPIntValue();
15830 CTVal = cast<ConstantSDNode>(DAG.getConstant(C, DL, VT));
15831 CFVal = cast<ConstantSDNode>(DAG.getAllOnesConstant(DL, VT));
15832 AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
15835 // It might be neutral for larger constants, as the immediate need to be
15836 // materialized in a register.
15837 APInt ADDC = CTVal->getAPIntValue();
15838 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15839 if (!TLI.isLegalAddImmediate(ADDC.getSExtValue()))
15842 assert(((LHS.getOpcode() == AArch64ISD::CSEL && CFVal->isOne()) ||
15843 (LHS.getOpcode() == AArch64ISD::CSNEG && CFVal->isAllOnes())) &&
15844 "Unexpected constant value");
15846 SDValue NewNode = DAG.getNode(ISD::ADD, DL, VT, RHS, SDValue(CTVal, 0));
15847 SDValue CCVal = DAG.getConstant(AArch64CC, DL, MVT::i32);
15848 SDValue Cmp = LHS.getOperand(3);
15850 return DAG.getNode(AArch64ISD::CSINC, DL, VT, NewNode, RHS, CCVal, Cmp);
15853 // ADD(UDOT(zero, x, y), A) --> UDOT(A, x, y)
15854 static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
15855 EVT VT = N->getValueType(0);
15856 if (N->getOpcode() != ISD::ADD)
15859 SDValue Dot = N->getOperand(0);
15860 SDValue A = N->getOperand(1);
15861 // Handle commutivity
15862 auto isZeroDot = [](SDValue Dot) {
15863 return (Dot.getOpcode() == AArch64ISD::UDOT ||
15864 Dot.getOpcode() == AArch64ISD::SDOT) &&
15865 isZerosVector(Dot.getOperand(0).getNode());
15866 };
15867 if (!isZeroDot(Dot))
15868 std::swap(Dot, A);
15869 if (!isZeroDot(Dot))
15870 return SDValue();
15872 return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
15873 Dot.getOperand(2));
15876 static bool isNegatedInteger(SDValue Op) {
15877 return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0));
15880 static SDValue getNegatedInteger(SDValue Op, SelectionDAG &DAG) {
15881 SDLoc DL(Op);
15882 EVT VT = Op.getValueType();
15883 SDValue Zero = DAG.getConstant(0, DL, VT);
15884 return DAG.getNode(ISD::SUB, DL, VT, Zero, Op);
15889 // (neg (csel X, Y)) -> (csel (neg X), (neg Y))
15891 // The folding helps csel to be matched with csneg without generating
15892 // redundant neg instruction, which includes negation of the csel expansion
15893 // of abs node lowered by lowerABS.
15894 static SDValue performNegCSelCombine(SDNode *N, SelectionDAG &DAG) {
15895 if (!isNegatedInteger(SDValue(N, 0)))
15898 SDValue CSel = N->getOperand(1);
15899 if (CSel.getOpcode() != AArch64ISD::CSEL || !CSel->hasOneUse())
15902 SDValue N0 = CSel.getOperand(0);
15903 SDValue N1 = CSel.getOperand(1);
15905 // If both of them is not negations, it's not worth the folding as it
15906 // introduces two additional negations while reducing one negation.
15907 if (!isNegatedInteger(N0) && !isNegatedInteger(N1))
15910 SDValue N0N = getNegatedInteger(N0, DAG);
15911 SDValue N1N = getNegatedInteger(N1, DAG);
15913 SDLoc DL(N);
15914 EVT VT = CSel.getValueType();
15915 return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0N, N1N, CSel.getOperand(2),
15916 CSel.getOperand(3));
15919 // The basic add/sub long vector instructions have variants with "2" on the end
15920 // which act on the high-half of their inputs. They are normally matched by
15923 // (add (zeroext (extract_high LHS)),
15924 // (zeroext (extract_high RHS)))
15925 // -> uaddl2 vD, vN, vM
15927 // However, if one of the extracts is something like a duplicate, this
15928 // instruction can still be used profitably. This function puts the DAG into a
15929 // more appropriate form for those patterns to trigger.
15930 static SDValue performAddSubLongCombine(SDNode *N,
15931 TargetLowering::DAGCombinerInfo &DCI,
15932 SelectionDAG &DAG) {
15933 if (DCI.isBeforeLegalizeOps())
15936 MVT VT = N->getSimpleValueType(0);
15937 if (!VT.is128BitVector()) {
15938 if (N->getOpcode() == ISD::ADD)
15939 return performSetccAddFolding(N, DAG);
15943 // Make sure both branches are extended in the same way.
15944 SDValue LHS = N->getOperand(0);
15945 SDValue RHS = N->getOperand(1);
15946 if ((LHS.getOpcode() != ISD::ZERO_EXTEND &&
15947 LHS.getOpcode() != ISD::SIGN_EXTEND) ||
15948 LHS.getOpcode() != RHS.getOpcode())
15951 unsigned ExtType = LHS.getOpcode();
15953 // It's only worth doing this if at least one of the inputs is already an
15954 // extract, but we don't know which one it will be, so we have to try both.
15955 if (isEssentiallyExtractHighSubvector(LHS.getOperand(0))) {
15956 RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG);
15957 if (!RHS.getNode())
15960 RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS);
15961 } else if (isEssentiallyExtractHighSubvector(RHS.getOperand(0))) {
15962 LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG);
15963 if (!LHS.getNode())
15966 LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS);
15969 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
15972 static bool isCMP(SDValue Op) {
15973 return Op.getOpcode() == AArch64ISD::SUBS &&
15974 !Op.getNode()->hasAnyUseOfValue(0);
15977 // (CSEL 1 0 CC Cond) => CC
15978 // (CSEL 0 1 CC Cond) => !CC
15979 static Optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) {
15980 if (Op.getOpcode() != AArch64ISD::CSEL)
15982 auto CC = static_cast<AArch64CC::CondCode>(Op.getConstantOperandVal(2));
15983 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
15985 SDValue OpLHS = Op.getOperand(0);
15986 SDValue OpRHS = Op.getOperand(1);
15987 if (isOneConstant(OpLHS) && isNullConstant(OpRHS))
15989 if (isNullConstant(OpLHS) && isOneConstant(OpRHS))
15990 return getInvertedCondCode(CC);
15995 // (ADC{S} l r (CMP (CSET HS carry) 1)) => (ADC{S} l r carry)
15996 // (SBC{S} l r (CMP 0 (CSET LO carry))) => (SBC{S} l r carry)
15997 static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) {
15998 SDValue CmpOp = Op->getOperand(2);
16003 if (!isOneConstant(CmpOp.getOperand(1)))
16006 if (!isNullConstant(CmpOp.getOperand(0)))
16010 SDValue CsetOp = CmpOp->getOperand(IsAdd ? 0 : 1);
16011 auto CC = getCSETCondCode(CsetOp);
16012 if (CC != (IsAdd ? AArch64CC::HS : AArch64CC::LO))
16015 return DAG.getNode(Op->getOpcode(), SDLoc(Op), Op->getVTList(),
16016 Op->getOperand(0), Op->getOperand(1),
16017 CsetOp.getOperand(3));
16020 // (ADC x 0 cond) => (CINC x HS cond)
16021 static SDValue foldADCToCINC(SDNode *N, SelectionDAG &DAG) {
16022 SDValue LHS = N->getOperand(0);
16023 SDValue RHS = N->getOperand(1);
16024 SDValue Cond = N->getOperand(2);
16026 if (!isNullConstant(RHS))
16029 EVT VT = N->getValueType(0);
16032 // (CINC x cc cond) <=> (CSINC x x !cc cond)
16033 SDValue CC = DAG.getConstant(AArch64CC::LO, DL, MVT::i32);
16034 return DAG.getNode(AArch64ISD::CSINC, DL, VT, LHS, LHS, CC, Cond);
16037 // Transform vector add(zext i8 to i32, zext i8 to i32)
16038 // into sext(add(zext(i8 to i16), zext(i8 to i16)) to i32)
16039 // This allows extra uses of saddl/uaddl at the lower vector widths, and fewer full-width extends overall.
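//
// For example (illustrative), a v8i32 add of two (zext v8i8) operands becomes
//   (sext (add (zext v8i8 to v8i16), (zext v8i8 to v8i16)) to v8i32)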
16041 static SDValue performVectorAddSubExtCombine(SDNode *N, SelectionDAG &DAG) {
16042 EVT VT = N->getValueType(0);
16043 if (!VT.isFixedLengthVector() || VT.getSizeInBits() <= 128 ||
16044 (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
16045 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND) ||
16046 (N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
16047 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND) ||
16048 N->getOperand(0).getOperand(0).getValueType() !=
16049 N->getOperand(1).getOperand(0).getValueType())
16052 SDValue N0 = N->getOperand(0).getOperand(0);
16053 SDValue N1 = N->getOperand(1).getOperand(0);
16054 EVT InVT = N0.getValueType();
16056 EVT S1 = InVT.getScalarType();
16057 EVT S2 = VT.getScalarType();
16058 if ((S2 == MVT::i32 && S1 == MVT::i8) ||
16059 (S2 == MVT::i64 && (S1 == MVT::i8 || S1 == MVT::i16))) {
16061 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(),
16062 S2.getHalfSizedIntegerVT(*DAG.getContext()),
16063 VT.getVectorElementCount());
16064 SDValue NewN0 = DAG.getNode(N->getOperand(0).getOpcode(), DL, HalfVT, N0);
16065 SDValue NewN1 = DAG.getNode(N->getOperand(1).getOpcode(), DL, HalfVT, N1);
16066 SDValue NewOp = DAG.getNode(N->getOpcode(), DL, HalfVT, NewN0, NewN1);
16067 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewOp);
16072 static SDValue performBuildVectorCombine(SDNode *N,
16073 TargetLowering::DAGCombinerInfo &DCI,
16074 SelectionDAG &DAG) {
16077 // A build vector of two extracted elements is equivalent to an
16078 // extract subvector where the inner vector is any-extended to the
16079 // extract_vector_elt VT.
16080 // (build_vector (extract_elt_iXX_to_i32 vec Idx+0)
16081 // (extract_elt_iXX_to_i32 vec Idx+1))
16082 // => (extract_subvector (anyext_iXX_to_i32 vec) Idx)
16084 // For now, only consider the v2i32 case, which arises as a result of legalization.
16086 if (N->getValueType(0) != MVT::v2i32)
16089 SDValue Elt0 = N->getOperand(0), Elt1 = N->getOperand(1);
16090 // Reminder, EXTRACT_VECTOR_ELT has the effect of any-extending to its VT.
16091 if (Elt0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
16092 Elt1->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
16094 isa<ConstantSDNode>(Elt0->getOperand(1)) &&
16095 isa<ConstantSDNode>(Elt1->getOperand(1)) &&
16096 // Both EXTRACT_VECTOR_ELT from same vector...
16097 Elt0->getOperand(0) == Elt1->getOperand(0) &&
16098 // ... and contiguous. First element's index +1 == second element's index.
16099 Elt0->getConstantOperandVal(1) + 1 == Elt1->getConstantOperandVal(1)) {
16100 SDValue VecToExtend = Elt0->getOperand(0);
16101 EVT ExtVT = VecToExtend.getValueType().changeVectorElementType(MVT::i32);
16102 if (!DAG.getTargetLoweringInfo().isTypeLegal(ExtVT))
16105 SDValue SubvectorIdx = DAG.getVectorIdxConstant(Elt0->getConstantOperandVal(1), DL);
16107 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, DL, ExtVT, VecToExtend);
16108 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Ext,
16115 static SDValue performAddSubCombine(SDNode *N,
16116 TargetLowering::DAGCombinerInfo &DCI,
16117 SelectionDAG &DAG) {
16118 // Try to change sum of two reductions.
16119 if (SDValue Val = performAddUADDVCombine(N, DAG))
16121 if (SDValue Val = performAddDotCombine(N, DAG))
16123 if (SDValue Val = performAddCSelIntoCSinc(N, DAG))
16125 if (SDValue Val = performNegCSelCombine(N, DAG))
16127 if (SDValue Val = performVectorAddSubExtCombine(N, DAG))
16130 return performAddSubLongCombine(N, DCI, DAG);
16133 // Massage DAGs which we can use the high-half "long" operations on into
16134 // something isel will recognize better. E.g.
16136 // (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) -->
16137 // (aarch64_neon_umull (extract_high (v2i64 vec)))
16138 // (extract_high (v2i64 (dup128 scalar)))))
16140 static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
16141 TargetLowering::DAGCombinerInfo &DCI,
16142 SelectionDAG &DAG) {
16143 if (DCI.isBeforeLegalizeOps())
16146 SDValue LHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 0 : 1);
16147 SDValue RHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 1 : 2);
16148 assert(LHS.getValueType().is64BitVector() &&
16149 RHS.getValueType().is64BitVector() &&
16150 "unexpected shape for long operation");
16152 // Either node could be a DUP, but it's not worth doing both of them (you'd
16153 // just as well use the non-high version) so look for a corresponding extract
16154 // operation on the other "wing".
16155 if (isEssentiallyExtractHighSubvector(LHS)) {
16156 RHS = tryExtendDUPToExtractHigh(RHS, DAG);
16157 if (!RHS.getNode())
16159 } else if (isEssentiallyExtractHighSubvector(RHS)) {
16160 LHS = tryExtendDUPToExtractHigh(LHS, DAG);
16161 if (!LHS.getNode())
16165 if (IID == Intrinsic::not_intrinsic)
16166 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS);
16168 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0),
16169 N->getOperand(0), LHS, RHS);
16172 static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
16173 MVT ElemTy = N->getSimpleValueType(0).getScalarType();
16174 unsigned ElemBits = ElemTy.getSizeInBits();
16176 int64_t ShiftAmount;
16177 if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) {
16178 APInt SplatValue, SplatUndef;
16179 unsigned SplatBitSize;
16181 if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
16182 HasAnyUndefs, ElemBits) ||
16183 SplatBitSize != ElemBits)
16186 ShiftAmount = SplatValue.getSExtValue();
16187 } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
16188 ShiftAmount = CVN->getSExtValue();
16196 llvm_unreachable("Unknown shift intrinsic");
16197 case Intrinsic::aarch64_neon_sqshl:
16198 Opcode = AArch64ISD::SQSHL_I;
16199 IsRightShift = false;
16201 case Intrinsic::aarch64_neon_uqshl:
16202 Opcode = AArch64ISD::UQSHL_I;
16203 IsRightShift = false;
16205 case Intrinsic::aarch64_neon_srshl:
16206 Opcode = AArch64ISD::SRSHR_I;
16207 IsRightShift = true;
16209 case Intrinsic::aarch64_neon_urshl:
16210 Opcode = AArch64ISD::URSHR_I;
16211 IsRightShift = true;
16213 case Intrinsic::aarch64_neon_sqshlu:
16214 Opcode = AArch64ISD::SQSHLU_I;
16215 IsRightShift = false;
16217 case Intrinsic::aarch64_neon_sshl:
16218 case Intrinsic::aarch64_neon_ushl:
16219 // For positive shift amounts we can use SHL, as ushl/sshl perform a regular
16220 // left shift for positive shift amounts. Below, we only replace the current
16221 // node with VSHL if this condition is met.
16222 Opcode = AArch64ISD::VSHL;
16223 IsRightShift = false;
16227 if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
16229 return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
16230 DAG.getConstant(-ShiftAmount, dl, MVT::i32));
16231 } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
16233 return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
16234 DAG.getConstant(ShiftAmount, dl, MVT::i32));
16240 // The CRC32[BH] instructions ignore the high bits of their data operand. Since
16241 // the intrinsics must be legal and take an i32, this means there's almost
16242 // certainly going to be a zext in the DAG which we can eliminate.
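//
// For example (illustrative):
//   crc32b(crc, (and x, 0xff)) --> crc32b(crc, x)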
16243 static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
16244 SDValue AndN = N->getOperand(2);
16245 if (AndN.getOpcode() != ISD::AND)
16248 ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1));
16249 if (!CMask || CMask->getZExtValue() != Mask)
16252 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32,
16253 N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
16256 static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
16257 SelectionDAG &DAG) {
16259 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0),
16260 DAG.getNode(Opc, dl,
16261 N->getOperand(1).getSimpleValueType(),
16263 DAG.getConstant(0, dl, MVT::i64));
16266 static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) {
16268 SDValue Op1 = N->getOperand(1);
16269 SDValue Op2 = N->getOperand(2);
16270 EVT ScalarTy = Op2.getValueType();
16271 if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
16272 ScalarTy = MVT::i32;
16274 // Lower index_vector(base, step) to mul(step, step_vector(1)) + splat(base).
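// For example (illustrative), index(2, 3) of type nxv4i32 yields
// <2, 5, 8, 11, ...>, computed as step_vector(1) * splat(3) + splat(2).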
16275 SDValue StepVector = DAG.getStepVector(DL, N->getValueType(0));
16276 SDValue Step = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op2);
16277 SDValue Mul = DAG.getNode(ISD::MUL, DL, N->getValueType(0), StepVector, Step);
16278 SDValue Base = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op1);
16279 return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Mul, Base);
16282 static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {
16284 SDValue Scalar = N->getOperand(3);
16285 EVT ScalarTy = Scalar.getValueType();
16287 if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
16288 Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
16290 SDValue Passthru = N->getOperand(1);
16291 SDValue Pred = N->getOperand(2);
16292 return DAG.getNode(AArch64ISD::DUP_MERGE_PASSTHRU, dl, N->getValueType(0),
16293 Pred, Scalar, Passthru);
16296 static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {
16298 LLVMContext &Ctx = *DAG.getContext();
16299 EVT VT = N->getValueType(0);
16301 assert(VT.isScalableVector() && "Expected a scalable vector.");
16303 // Current lowering only supports the SVE-ACLE types.
16304 if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
16307 unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8;
16308 unsigned ByteSize = VT.getSizeInBits().getKnownMinSize() / 8;
16310 EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize));
16312 // Convert everything to the domain of EXT (i.e. bytes).
16313 SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(1));
16314 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(2));
16315 SDValue Op2 = DAG.getNode(ISD::MUL, dl, MVT::i32, N->getOperand(3),
16316 DAG.getConstant(ElemSize, dl, MVT::i32));
16318 SDValue EXT = DAG.getNode(AArch64ISD::EXT, dl, ByteVT, Op0, Op1, Op2);
16319 return DAG.getNode(ISD::BITCAST, dl, VT, EXT);
16322 static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
16323 TargetLowering::DAGCombinerInfo &DCI,
16324 SelectionDAG &DAG) {
16325 if (DCI.isBeforeLegalize())
16328 SDValue Comparator = N->getOperand(3);
16329 if (Comparator.getOpcode() == AArch64ISD::DUP ||
16330 Comparator.getOpcode() == ISD::SPLAT_VECTOR) {
16331 unsigned IID = getIntrinsicID(N);
16332 EVT VT = N->getValueType(0);
16333 EVT CmpVT = N->getOperand(2).getValueType();
16334 SDValue Pred = N->getOperand(1);
16340 llvm_unreachable("Called with wrong intrinsic!");
16343 // Signed comparisons
16344 case Intrinsic::aarch64_sve_cmpeq_wide:
16345 case Intrinsic::aarch64_sve_cmpne_wide:
16346 case Intrinsic::aarch64_sve_cmpge_wide:
16347 case Intrinsic::aarch64_sve_cmpgt_wide:
16348 case Intrinsic::aarch64_sve_cmplt_wide:
16349 case Intrinsic::aarch64_sve_cmple_wide: {
16350 if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
16351 int64_t ImmVal = CN->getSExtValue();
16352 if (ImmVal >= -16 && ImmVal <= 15)
16353 Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
16359 // Unsigned comparisons
16360 case Intrinsic::aarch64_sve_cmphs_wide:
16361 case Intrinsic::aarch64_sve_cmphi_wide:
16362 case Intrinsic::aarch64_sve_cmplo_wide:
16363 case Intrinsic::aarch64_sve_cmpls_wide: {
16364 if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
16365 uint64_t ImmVal = CN->getZExtValue();
16367 Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
16378 SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
16379 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, VT, Pred,
16380 N->getOperand(2), Splat, DAG.getCondCode(CC));
16386 static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
16387 AArch64CC::CondCode Cond) {
16388 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16391 assert(Op.getValueType().isScalableVector() &&
16392 TLI.isTypeLegal(Op.getValueType()) &&
16393 "Expected legal scalable vector type!");
16394 assert(Op.getValueType() == Pg.getValueType() &&
16395 "Expected same type for PTEST operands");
16397 // Ensure target-specific opcodes are using a legal type.
16398 EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
16399 SDValue TVal = DAG.getConstant(1, DL, OutVT);
16400 SDValue FVal = DAG.getConstant(0, DL, OutVT);
16402 // Ensure operands have type nxv16i1.
16403 if (Op.getValueType() != MVT::nxv16i1) {
16404 if ((Cond == AArch64CC::ANY_ACTIVE || Cond == AArch64CC::NONE_ACTIVE) &&
16405 isZeroingInactiveLanes(Op))
16406 Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pg);
16408 Pg = getSVEPredicateBitCast(MVT::nxv16i1, Pg, DAG);
16409 Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Op);
16412 // Set condition code (CC) flags.
16413 SDValue Test = DAG.getNode(AArch64ISD::PTEST, DL, MVT::Other, Pg, Op);
16415 // Convert CC to integer based on requested condition.
16416 // NOTE: Cond is inverted to promote CSEL's removal when it feeds a compare.
16417 SDValue CC = DAG.getConstant(getInvertedCondCode(Cond), DL, MVT::i32);
16418 SDValue Res = DAG.getNode(AArch64ISD::CSEL, DL, OutVT, FVal, TVal, CC, Test);
16419 return DAG.getZExtOrTrunc(Res, DL, VT);
16422 static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc,
16423 SelectionDAG &DAG) {
16426 SDValue Pred = N->getOperand(1);
16427 SDValue VecToReduce = N->getOperand(2);
16429 // NOTE: The integer reduction's result type is not always linked to the
16430 // operand's element type so we construct it from the intrinsic's result type.
16431 EVT ReduceVT = getPackedSVEVectorVT(N->getValueType(0));
16432 SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
16434 // SVE reductions set the whole vector register with the first element
16435 // containing the reduction result, which we'll now extract.
16436 SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16437 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16441 static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc,
16442 SelectionDAG &DAG) {
16445 SDValue Pred = N->getOperand(1);
16446 SDValue VecToReduce = N->getOperand(2);
16448 EVT ReduceVT = VecToReduce.getValueType();
16449 SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
16451 // SVE reductions set the whole vector register with the first element
16452 // containing the reduction result, which we'll now extract.
16453 SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16454 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16458 static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
16459 SelectionDAG &DAG) {
16462 SDValue Pred = N->getOperand(1);
16463 SDValue InitVal = N->getOperand(2);
16464 SDValue VecToReduce = N->getOperand(3);
16465 EVT ReduceVT = VecToReduce.getValueType();
16467 // Ordered reductions use the first lane of the result vector as the
16468 // reduction's initial value.
16469 SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16470 InitVal = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ReduceVT,
16471 DAG.getUNDEF(ReduceVT), InitVal, Zero);
16473 SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, InitVal, VecToReduce);
16475 // SVE reductions set the whole vector register with the first element
16476 // containing the reduction result, which we'll now extract.
16477 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16481 static bool isAllInactivePredicate(SDValue N) {
16482 // Look through cast.
16483 while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
16484 N = N.getOperand(0);
16486 return ISD::isConstantSplatVectorAllZeros(N.getNode());
16489 static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
16490 unsigned NumElts = N.getValueType().getVectorMinNumElements();
16492 // Look through cast.
16493 while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
16494 N = N.getOperand(0);
16495 // When reinterpreting from a type with fewer elements the "new" elements
16496 // are not active, so bail if they're likely to be used.
16497 if (N.getValueType().getVectorMinNumElements() < NumElts)
16501 if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
16504 // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
16505 // or smaller than the implicit element type represented by N.
16506 // NOTE: A larger element count implies a smaller element type.
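// For example (illustrative), a (ptrue nxv16i1, all) reinterpreted for use as
// an nxv4i1 predicate is still all active, since 16 >= 4.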
16507 if (N.getOpcode() == AArch64ISD::PTRUE &&
16508 N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
16509 return N.getValueType().getVectorMinNumElements() >= NumElts;
16511 // If we're compiling for a specific vector-length, we can check if the
16512 // pattern's VL equals that of the scalable vector at runtime.
16513 if (N.getOpcode() == AArch64ISD::PTRUE) {
16514 const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
16515 unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
16516 unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
16517 if (MaxSVESize && MinSVESize == MaxSVESize) {
16518 unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
16519 unsigned PatNumElts =
16520 getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
16521 return PatNumElts == (NumElts * VScale);
16528 // If a merged operation has no inactive lanes we can relax it to a predicated
16529 // or unpredicated operation, which potentially allows better isel (perhaps
16530 // using immediate forms) or relaxing register reuse requirements.
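//
// For example (illustrative), with an all-active governing predicate,
// aarch64_sve_add(pg, x, y) can be lowered to a plain ISD::ADD (the UnpredOp
// case), while aarch64_sve_mul(pg, x, y) becomes a predicated
// AArch64ISD::MUL_PRED node.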
16531 static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc,
16532 SelectionDAG &DAG, bool UnpredOp = false,
16533 bool SwapOperands = false) {
16534 assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Expected intrinsic!");
16535 assert(N->getNumOperands() == 4 && "Expected 3 operand intrinsic!");
16536 SDValue Pg = N->getOperand(1);
16537 SDValue Op1 = N->getOperand(SwapOperands ? 3 : 2);
16538 SDValue Op2 = N->getOperand(SwapOperands ? 2 : 3);
16540 // ISD way to specify an all active predicate.
16541 if (isAllActivePredicate(DAG, Pg)) {
16543 return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op1, Op2);
16545 return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Pg, Op1, Op2);
16548 // FUTURE: SplatVector(true)
16552 static SDValue performIntrinsicCombine(SDNode *N,
16553 TargetLowering::DAGCombinerInfo &DCI,
16554 const AArch64Subtarget *Subtarget) {
16555 SelectionDAG &DAG = DCI.DAG;
16556 unsigned IID = getIntrinsicID(N);
16560 case Intrinsic::get_active_lane_mask: {
16561 SDValue Res = SDValue();
16562 EVT VT = N->getValueType(0);
16563 if (VT.isFixedLengthVector()) {
16564 // We can use the SVE whilelo instruction to lower this intrinsic by
16565 // creating the appropriate sequence of scalable vector operations and
16566 // then extracting a fixed-width subvector from the scalable vector.
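// For example (illustrative), for a v16i1 result: whilelo produces an
// nxv16i1 mask, which is sign-extended to nxv16i8, a v16i8 subvector is
// extracted, and the result is truncated back to v16i1.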
16570 DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
16572 EVT WhileVT = EVT::getVectorVT(
16573 *DAG.getContext(), MVT::i1,
16574 ElementCount::getScalable(VT.getVectorNumElements()));
16576 // Get promoted scalable vector VT, i.e. promote nxv4i1 -> nxv4i32.
16577 EVT PromVT = getPromotedVTForPredicate(WhileVT);
16579 // Get the fixed-width equivalent of PromVT for extraction.
16581 EVT::getVectorVT(*DAG.getContext(), PromVT.getVectorElementType(),
16582 VT.getVectorElementCount());
16584 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
16585 N->getOperand(1), N->getOperand(2));
16586 Res = DAG.getNode(ISD::SIGN_EXTEND, DL, PromVT, Res);
16587 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, Res,
16588 DAG.getConstant(0, DL, MVT::i64));
16589 Res = DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
16593 case Intrinsic::aarch64_neon_vcvtfxs2fp:
16594 case Intrinsic::aarch64_neon_vcvtfxu2fp:
16595 return tryCombineFixedPointConvert(N, DCI, DAG);
16596 case Intrinsic::aarch64_neon_saddv:
16597 return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG);
16598 case Intrinsic::aarch64_neon_uaddv:
16599 return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG);
16600 case Intrinsic::aarch64_neon_sminv:
16601 return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG);
16602 case Intrinsic::aarch64_neon_uminv:
16603 return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG);
16604 case Intrinsic::aarch64_neon_smaxv:
16605 return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG);
16606 case Intrinsic::aarch64_neon_umaxv:
16607 return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG);
16608 case Intrinsic::aarch64_neon_fmax:
16609 return DAG.getNode(ISD::FMAXIMUM, SDLoc(N), N->getValueType(0),
16610 N->getOperand(1), N->getOperand(2));
16611 case Intrinsic::aarch64_neon_fmin:
16612 return DAG.getNode(ISD::FMINIMUM, SDLoc(N), N->getValueType(0),
16613 N->getOperand(1), N->getOperand(2));
16614 case Intrinsic::aarch64_neon_fmaxnm:
16615 return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0),
16616 N->getOperand(1), N->getOperand(2));
16617 case Intrinsic::aarch64_neon_fminnm:
16618 return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0),
16619 N->getOperand(1), N->getOperand(2));
16620 case Intrinsic::aarch64_neon_smull:
16621 return DAG.getNode(AArch64ISD::SMULL, SDLoc(N), N->getValueType(0),
16622 N->getOperand(1), N->getOperand(2));
16623 case Intrinsic::aarch64_neon_umull:
16624 return DAG.getNode(AArch64ISD::UMULL, SDLoc(N), N->getValueType(0),
16625 N->getOperand(1), N->getOperand(2));
16626 case Intrinsic::aarch64_neon_pmull:
16627 case Intrinsic::aarch64_neon_sqdmull:
16628 return tryCombineLongOpWithDup(IID, N, DCI, DAG);
16629 case Intrinsic::aarch64_neon_sqshl:
16630 case Intrinsic::aarch64_neon_uqshl:
16631 case Intrinsic::aarch64_neon_sqshlu:
16632 case Intrinsic::aarch64_neon_srshl:
16633 case Intrinsic::aarch64_neon_urshl:
16634 case Intrinsic::aarch64_neon_sshl:
16635 case Intrinsic::aarch64_neon_ushl:
16636 return tryCombineShiftImm(IID, N, DAG);
16637 case Intrinsic::aarch64_crc32b:
16638 case Intrinsic::aarch64_crc32cb:
16639 return tryCombineCRC32(0xff, N, DAG);
16640 case Intrinsic::aarch64_crc32h:
16641 case Intrinsic::aarch64_crc32ch:
16642 return tryCombineCRC32(0xffff, N, DAG);
16643 case Intrinsic::aarch64_sve_saddv:
16644 // There is no i64 version of SADDV because the sign is irrelevant.
16645 if (N->getOperand(2)->getValueType(0).getVectorElementType() == MVT::i64)
16646 return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
16648 return combineSVEReductionInt(N, AArch64ISD::SADDV_PRED, DAG);
16649 case Intrinsic::aarch64_sve_uaddv:
16650 return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
16651 case Intrinsic::aarch64_sve_smaxv:
16652 return combineSVEReductionInt(N, AArch64ISD::SMAXV_PRED, DAG);
16653 case Intrinsic::aarch64_sve_umaxv:
16654 return combineSVEReductionInt(N, AArch64ISD::UMAXV_PRED, DAG);
16655 case Intrinsic::aarch64_sve_sminv:
16656 return combineSVEReductionInt(N, AArch64ISD::SMINV_PRED, DAG);
16657 case Intrinsic::aarch64_sve_uminv:
16658 return combineSVEReductionInt(N, AArch64ISD::UMINV_PRED, DAG);
16659 case Intrinsic::aarch64_sve_orv:
16660 return combineSVEReductionInt(N, AArch64ISD::ORV_PRED, DAG);
16661 case Intrinsic::aarch64_sve_eorv:
16662 return combineSVEReductionInt(N, AArch64ISD::EORV_PRED, DAG);
16663 case Intrinsic::aarch64_sve_andv:
16664 return combineSVEReductionInt(N, AArch64ISD::ANDV_PRED, DAG);
16665 case Intrinsic::aarch64_sve_index:
16666 return LowerSVEIntrinsicIndex(N, DAG);
16667 case Intrinsic::aarch64_sve_dup:
16668 return LowerSVEIntrinsicDUP(N, DAG);
16669 case Intrinsic::aarch64_sve_dup_x:
16670 return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0),
16672 case Intrinsic::aarch64_sve_ext:
16673 return LowerSVEIntrinsicEXT(N, DAG);
16674 case Intrinsic::aarch64_sve_mul:
16675 return convertMergedOpToPredOp(N, AArch64ISD::MUL_PRED, DAG);
16676 case Intrinsic::aarch64_sve_smulh:
16677 return convertMergedOpToPredOp(N, AArch64ISD::MULHS_PRED, DAG);
16678 case Intrinsic::aarch64_sve_umulh:
16679 return convertMergedOpToPredOp(N, AArch64ISD::MULHU_PRED, DAG);
16680 case Intrinsic::aarch64_sve_smin:
16681 return convertMergedOpToPredOp(N, AArch64ISD::SMIN_PRED, DAG);
16682 case Intrinsic::aarch64_sve_umin:
16683 return convertMergedOpToPredOp(N, AArch64ISD::UMIN_PRED, DAG);
16684 case Intrinsic::aarch64_sve_smax:
16685 return convertMergedOpToPredOp(N, AArch64ISD::SMAX_PRED, DAG);
16686 case Intrinsic::aarch64_sve_umax:
16687 return convertMergedOpToPredOp(N, AArch64ISD::UMAX_PRED, DAG);
16688 case Intrinsic::aarch64_sve_lsl:
16689 return convertMergedOpToPredOp(N, AArch64ISD::SHL_PRED, DAG);
16690 case Intrinsic::aarch64_sve_lsr:
16691 return convertMergedOpToPredOp(N, AArch64ISD::SRL_PRED, DAG);
16692 case Intrinsic::aarch64_sve_asr:
16693 return convertMergedOpToPredOp(N, AArch64ISD::SRA_PRED, DAG);
16694 case Intrinsic::aarch64_sve_fadd:
16695 return convertMergedOpToPredOp(N, AArch64ISD::FADD_PRED, DAG);
16696 case Intrinsic::aarch64_sve_fsub:
16697 return convertMergedOpToPredOp(N, AArch64ISD::FSUB_PRED, DAG);
16698 case Intrinsic::aarch64_sve_fmul:
16699 return convertMergedOpToPredOp(N, AArch64ISD::FMUL_PRED, DAG);
16700 case Intrinsic::aarch64_sve_add:
16701 return convertMergedOpToPredOp(N, ISD::ADD, DAG, true);
16702 case Intrinsic::aarch64_sve_sub:
16703 return convertMergedOpToPredOp(N, ISD::SUB, DAG, true);
16704 case Intrinsic::aarch64_sve_subr:
16705 return convertMergedOpToPredOp(N, ISD::SUB, DAG, true, true);
16706 case Intrinsic::aarch64_sve_and:
16707 return convertMergedOpToPredOp(N, ISD::AND, DAG, true);
16708 case Intrinsic::aarch64_sve_bic:
16709 return convertMergedOpToPredOp(N, AArch64ISD::BIC, DAG, true);
16710 case Intrinsic::aarch64_sve_eor:
16711 return convertMergedOpToPredOp(N, ISD::XOR, DAG, true);
16712 case Intrinsic::aarch64_sve_orr:
16713 return convertMergedOpToPredOp(N, ISD::OR, DAG, true);
16714 case Intrinsic::aarch64_sve_sabd:
16715 return convertMergedOpToPredOp(N, ISD::ABDS, DAG, true);
16716 case Intrinsic::aarch64_sve_uabd:
16717 return convertMergedOpToPredOp(N, ISD::ABDU, DAG, true);
16718 case Intrinsic::aarch64_sve_sqadd:
16719 return convertMergedOpToPredOp(N, ISD::SADDSAT, DAG, true);
16720 case Intrinsic::aarch64_sve_sqsub:
16721 return convertMergedOpToPredOp(N, ISD::SSUBSAT, DAG, true);
16722 case Intrinsic::aarch64_sve_uqadd:
16723 return convertMergedOpToPredOp(N, ISD::UADDSAT, DAG, true);
16724 case Intrinsic::aarch64_sve_uqsub:
16725 return convertMergedOpToPredOp(N, ISD::USUBSAT, DAG, true);
16726 case Intrinsic::aarch64_sve_sqadd_x:
16727 return DAG.getNode(ISD::SADDSAT, SDLoc(N), N->getValueType(0),
16728 N->getOperand(1), N->getOperand(2));
16729 case Intrinsic::aarch64_sve_sqsub_x:
16730 return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0),
16731 N->getOperand(1), N->getOperand(2));
16732 case Intrinsic::aarch64_sve_uqadd_x:
16733 return DAG.getNode(ISD::UADDSAT, SDLoc(N), N->getValueType(0),
16734 N->getOperand(1), N->getOperand(2));
16735 case Intrinsic::aarch64_sve_uqsub_x:
16736 return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0),
16737 N->getOperand(1), N->getOperand(2));
16738 case Intrinsic::aarch64_sve_asrd:
16739 return DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, SDLoc(N), N->getValueType(0),
16740 N->getOperand(1), N->getOperand(2), N->getOperand(3));
16741 case Intrinsic::aarch64_sve_cmphs:
16742 if (!N->getOperand(2).getValueType().isFloatingPoint())
16743 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16744 N->getValueType(0), N->getOperand(1), N->getOperand(2),
16745 N->getOperand(3), DAG.getCondCode(ISD::SETUGE));
16747 case Intrinsic::aarch64_sve_cmphi:
16748 if (!N->getOperand(2).getValueType().isFloatingPoint())
16749 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16750 N->getValueType(0), N->getOperand(1), N->getOperand(2),
16751 N->getOperand(3), DAG.getCondCode(ISD::SETUGT));
16753 case Intrinsic::aarch64_sve_fcmpge:
16754 case Intrinsic::aarch64_sve_cmpge:
16755 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16756 N->getValueType(0), N->getOperand(1), N->getOperand(2),
16757 N->getOperand(3), DAG.getCondCode(ISD::SETGE));
16759 case Intrinsic::aarch64_sve_fcmpgt:
16760 case Intrinsic::aarch64_sve_cmpgt:
16761 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16762 N->getValueType(0), N->getOperand(1), N->getOperand(2),
16763 N->getOperand(3), DAG.getCondCode(ISD::SETGT));
16765 case Intrinsic::aarch64_sve_fcmpeq:
16766 case Intrinsic::aarch64_sve_cmpeq:
16767 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16768 N->getValueType(0), N->getOperand(1), N->getOperand(2),
16769 N->getOperand(3), DAG.getCondCode(ISD::SETEQ));
16771 case Intrinsic::aarch64_sve_fcmpne:
16772 case Intrinsic::aarch64_sve_cmpne:
16773 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16774 N->getValueType(0), N->getOperand(1), N->getOperand(2),
16775 N->getOperand(3), DAG.getCondCode(ISD::SETNE));
16777 case Intrinsic::aarch64_sve_fcmpuo:
16778 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16779 N->getValueType(0), N->getOperand(1), N->getOperand(2),
16780 N->getOperand(3), DAG.getCondCode(ISD::SETUO));
16782 case Intrinsic::aarch64_sve_fadda:
16783 return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
16784 case Intrinsic::aarch64_sve_faddv:
16785 return combineSVEReductionFP(N, AArch64ISD::FADDV_PRED, DAG);
16786 case Intrinsic::aarch64_sve_fmaxnmv:
16787 return combineSVEReductionFP(N, AArch64ISD::FMAXNMV_PRED, DAG);
16788 case Intrinsic::aarch64_sve_fmaxv:
16789 return combineSVEReductionFP(N, AArch64ISD::FMAXV_PRED, DAG);
16790 case Intrinsic::aarch64_sve_fminnmv:
16791 return combineSVEReductionFP(N, AArch64ISD::FMINNMV_PRED, DAG);
16792 case Intrinsic::aarch64_sve_fminv:
16793 return combineSVEReductionFP(N, AArch64ISD::FMINV_PRED, DAG);
16794 case Intrinsic::aarch64_sve_sel:
16795 return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0),
16796 N->getOperand(1), N->getOperand(2), N->getOperand(3));
16797 case Intrinsic::aarch64_sve_cmpeq_wide:
16798 return tryConvertSVEWideCompare(N, ISD::SETEQ, DCI, DAG);
16799 case Intrinsic::aarch64_sve_cmpne_wide:
16800 return tryConvertSVEWideCompare(N, ISD::SETNE, DCI, DAG);
16801 case Intrinsic::aarch64_sve_cmpge_wide:
16802 return tryConvertSVEWideCompare(N, ISD::SETGE, DCI, DAG);
16803 case Intrinsic::aarch64_sve_cmpgt_wide:
16804 return tryConvertSVEWideCompare(N, ISD::SETGT, DCI, DAG);
16805 case Intrinsic::aarch64_sve_cmplt_wide:
16806 return tryConvertSVEWideCompare(N, ISD::SETLT, DCI, DAG);
16807 case Intrinsic::aarch64_sve_cmple_wide:
16808 return tryConvertSVEWideCompare(N, ISD::SETLE, DCI, DAG);
16809 case Intrinsic::aarch64_sve_cmphs_wide:
16810 return tryConvertSVEWideCompare(N, ISD::SETUGE, DCI, DAG);
16811 case Intrinsic::aarch64_sve_cmphi_wide:
16812 return tryConvertSVEWideCompare(N, ISD::SETUGT, DCI, DAG);
16813 case Intrinsic::aarch64_sve_cmplo_wide:
16814 return tryConvertSVEWideCompare(N, ISD::SETULT, DCI, DAG);
16815 case Intrinsic::aarch64_sve_cmpls_wide:
16816 return tryConvertSVEWideCompare(N, ISD::SETULE, DCI, DAG);
16817 case Intrinsic::aarch64_sve_ptest_any:
16818 return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16819 AArch64CC::ANY_ACTIVE);
16820 case Intrinsic::aarch64_sve_ptest_first:
16821 return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16822 AArch64CC::FIRST_ACTIVE);
16823 case Intrinsic::aarch64_sve_ptest_last:
16824 return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16825 AArch64CC::LAST_ACTIVE);
16830 static bool isCheapToExtend(const SDValue &N) {
16831 unsigned OC = N->getOpcode();
16832 return OC == ISD::LOAD || OC == ISD::MLOAD ||
16833 ISD::isConstantSplatVectorAllZeros(N.getNode());
16837 performSignExtendSetCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
16838 SelectionDAG &DAG) {
16839 // If we have (sext (setcc A B)) and A and B are cheap to extend,
16840 // we can move the sext into the arguments and have the same result. For
16841 // example, if A and B are both loads, we can make those extending loads and
16842 // avoid an extra instruction. This pattern appears often in VLS code
16843 // generation where the inputs to the setcc have a different size to the
16844 // instruction that wants to use the result of the setcc.
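// For example (illustrative):
//   (sext (setcc (load A), (load B), setlt)) to vNi32
//   --> (setcc (sext (load A) to vNi32), (sext (load B) to vNi32), setlt)
// where the sexts can then be folded into sign-extending loads.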
16845 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
16846 N->getOperand(0)->getOpcode() == ISD::SETCC);
16847 const SDValue SetCC = N->getOperand(0);
16849 const SDValue CCOp0 = SetCC.getOperand(0);
16850 const SDValue CCOp1 = SetCC.getOperand(1);
16851 if (!CCOp0->getValueType(0).isInteger() ||
16852 !CCOp1->getValueType(0).isInteger())
16855 ISD::CondCode Code =
16856 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get();
16858 ISD::NodeType ExtType =
16859 isSignedIntSetCC(Code) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
16861 if (isCheapToExtend(SetCC.getOperand(0)) &&
16862 isCheapToExtend(SetCC.getOperand(1))) {
16863 const SDValue Ext1 =
16864 DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp0);
16865 const SDValue Ext2 =
16866 DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp1);
16868 return DAG.getSetCC(
16869 SDLoc(SetCC), N->getValueType(0), Ext1, Ext2,
16870 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get());
16876 static SDValue performExtendCombine(SDNode *N,
16877 TargetLowering::DAGCombinerInfo &DCI,
16878 SelectionDAG &DAG) {
16879 // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then
16880 // we can convert that DUP into another extract_high (of a bigger DUP), which
16881 // helps the backend to decide that an sabdl2 would be useful, saving a real
16882 // extract_high operation.
16883 if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
16884 (N->getOperand(0).getOpcode() == ISD::ABDU ||
16885 N->getOperand(0).getOpcode() == ISD::ABDS)) {
16886 SDNode *ABDNode = N->getOperand(0).getNode();
16888 tryCombineLongOpWithDup(Intrinsic::not_intrinsic, ABDNode, DCI, DAG);
16889 if (!NewABD.getNode())
16892 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), NewABD);
16895 if (N->getValueType(0).isFixedLengthVector() &&
16896 N->getOpcode() == ISD::SIGN_EXTEND &&
16897 N->getOperand(0)->getOpcode() == ISD::SETCC)
16898 return performSignExtendSetCCCombine(N, DCI, DAG);
16903 static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
16904 SDValue SplatVal, unsigned NumVecElts) {
16905 assert(!St.isTruncatingStore() && "cannot split truncating vector store");
16906 Align OrigAlignment = St.getAlign();
16907 unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8;
16909 // Create scalar stores. This is at least as good as the code sequence for a
16910 // split unaligned store which is a dup.s, ext.b, and two stores.
16911 // Most of the time the three stores should be replaced by store pair
16912 // instructions (stp).
16914 SDValue BasePtr = St.getBasePtr();
16915 uint64_t BaseOffset = 0;
16917 const MachinePointerInfo &PtrInfo = St.getPointerInfo();
16919 DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
16920 OrigAlignment, St.getMemOperand()->getFlags());
16922 // As this is in ISel, we will not merge this add, which may degrade results.
16923 if (BasePtr->getOpcode() == ISD::ADD &&
16924 isa<ConstantSDNode>(BasePtr->getOperand(1))) {
16925 BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
16926 BasePtr = BasePtr->getOperand(0);
16929 unsigned Offset = EltOffset;
16930 while (--NumVecElts) {
16931 Align Alignment = commonAlignment(OrigAlignment, Offset);
16932 SDValue OffsetPtr =
16933 DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
16934 DAG.getConstant(BaseOffset + Offset, DL, MVT::i64));
16935 NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
16936 PtrInfo.getWithOffset(Offset), Alignment,
16937 St.getMemOperand()->getFlags());
16938 Offset += EltOffset;
16943 // Returns an SVE type that ContentTy can be trivially sign- or zero-extended
16944 // into.
16945 static MVT getSVEContainerType(EVT ContentTy) {
16946 assert(ContentTy.isSimple() && "No SVE containers for extended types");
16948 switch (ContentTy.getSimpleVT().SimpleTy) {
16950 llvm_unreachable("No known SVE container for this MVT type");
16957 return MVT::nxv2i64;
16962 return MVT::nxv4i32;
16966 case MVT::nxv8bf16:
16967 return MVT::nxv8i16;
16969 return MVT::nxv16i8;
16973 static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {
16975 EVT VT = N->getValueType(0);
16977 if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
16980 EVT ContainerVT = VT;
16981 if (ContainerVT.isInteger())
16982 ContainerVT = getSVEContainerType(ContainerVT);
16984 SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other);
16985 SDValue Ops[] = { N->getOperand(0), // Chain
16986 N->getOperand(2), // Pg
16987 N->getOperand(3), // Base
16988 DAG.getValueType(VT) };
16990 SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
16991 SDValue LoadChain = SDValue(Load.getNode(), 1);
16993 if (ContainerVT.isInteger() && (VT != ContainerVT))
16994 Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0));
16996 return DAG.getMergeValues({ Load, LoadChain }, DL);
16999 static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
17001 EVT VT = N->getValueType(0);
17002 EVT PtrTy = N->getOperand(3).getValueType();
17005 if (VT.isFloatingPoint())
17006 LoadVT = VT.changeTypeToInteger();
17008 auto *MINode = cast<MemIntrinsicSDNode>(N);
17009 SDValue PassThru = DAG.getConstant(0, DL, LoadVT);
17010 SDValue L = DAG.getMaskedLoad(LoadVT, DL, MINode->getChain(),
17011 MINode->getOperand(3), DAG.getUNDEF(PtrTy),
17012 MINode->getOperand(2), PassThru,
17013 MINode->getMemoryVT(), MINode->getMemOperand(),
17014 ISD::UNINDEXED, ISD::NON_EXTLOAD, false);
17016 if (VT.isFloatingPoint()) {
17017 SDValue Ops[] = { DAG.getNode(ISD::BITCAST, DL, VT, L), L.getValue(1) };
17018 return DAG.getMergeValues(Ops, DL);
17024 template <unsigned Opcode>
17025 static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) {
17026 static_assert(Opcode == AArch64ISD::LD1RQ_MERGE_ZERO ||
17027 Opcode == AArch64ISD::LD1RO_MERGE_ZERO,
17028 "Unsupported opcode.");
17030 EVT VT = N->getValueType(0);
17033 if (VT.isFloatingPoint())
17034 LoadVT = VT.changeTypeToInteger();
17036 SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)};
17037 SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops);
17038 SDValue LoadChain = SDValue(Load.getNode(), 1);
17040 if (VT.isFloatingPoint())
17041 Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0));
17043 return DAG.getMergeValues({Load, LoadChain}, DL);
17046 static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
17048 SDValue Data = N->getOperand(2);
17049 EVT DataVT = Data.getValueType();
17050 EVT HwSrcVt = getSVEContainerType(DataVT);
17051 SDValue InputVT = DAG.getValueType(DataVT);
17053 if (DataVT.isFloatingPoint())
17054 InputVT = DAG.getValueType(HwSrcVt);
17057 if (Data.getValueType().isFloatingPoint())
17058 SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Data);
17060 SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Data);
17062 SDValue Ops[] = { N->getOperand(0), // Chain
17064 N->getOperand(4), // Base
17065 N->getOperand(3), // Pg
17069 return DAG.getNode(AArch64ISD::ST1_PRED, DL, N->getValueType(0), Ops);
17072 static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
17075 SDValue Data = N->getOperand(2);
17076 EVT DataVT = Data.getValueType();
17077 EVT PtrTy = N->getOperand(4).getValueType();
17079 if (DataVT.isFloatingPoint())
17080 Data = DAG.getNode(ISD::BITCAST, DL, DataVT.changeTypeToInteger(), Data);
17082 auto *MINode = cast<MemIntrinsicSDNode>(N);
17083 return DAG.getMaskedStore(MINode->getChain(), DL, Data, MINode->getOperand(4),
17084 DAG.getUNDEF(PtrTy), MINode->getOperand(3),
17085 MINode->getMemoryVT(), MINode->getMemOperand(),
17086 ISD::UNINDEXED, false, false);
17089 /// Replace a store of a zero vector splat with scalar stores of WZR/XZR. The
17090 /// load store optimizer pass will merge them to store pair stores. This should
17091 /// be better than a movi to create the vector zero followed by a vector store
17092 /// if the zero constant is not re-used, since one instruction and one register
17093 /// live range will be removed.
17095 /// For example, the final generated code should be:
17097 /// stp xzr, xzr, [x0]
17104 static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
17105 SDValue StVal = St.getValue();
17106 EVT VT = StVal.getValueType();
17108 // Avoid scalarizing zero splat stores for scalable vectors.
17109 if (VT.isScalableVector())
17112 // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or
17113 // 2, 3 or 4 i32 elements.
17114 int NumVecElts = VT.getVectorNumElements();
17115 if (!(((NumVecElts == 2 || NumVecElts == 3) &&
17116 VT.getVectorElementType().getSizeInBits() == 64) ||
17117 ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) &&
17118 VT.getVectorElementType().getSizeInBits() == 32)))
17121 if (StVal.getOpcode() != ISD::BUILD_VECTOR)
17124 // If the zero constant has more than one use then the vector store could be
17125 // better since the constant mov will be amortized and stp q instructions
17126 // should be able to be formed.
17127 if (!StVal.hasOneUse())
17130 // If the store is truncating then it's going down to i16 or smaller, which
17131 // means it can be implemented in a single store anyway.
17132 if (St.isTruncatingStore())
17135 // If the immediate offset of the address operand is too large for the stp
17136 // instruction, then bail out.
17137 if (DAG.isBaseWithConstantOffset(St.getBasePtr())) {
17138 int64_t Offset = St.getBasePtr()->getConstantOperandVal(1);
17139 if (Offset < -512 || Offset > 504)
17143 for (int I = 0; I < NumVecElts; ++I) {
17144 SDValue EltVal = StVal.getOperand(I);
17145 if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal))
17149 // Use a CopyFromReg WZR/XZR here to prevent
17150 // DAGCombiner::MergeConsecutiveStores from undoing this transformation.
17154 if (VT.getVectorElementType().getSizeInBits() == 32) {
17155 ZeroReg = AArch64::WZR;
17158 ZeroReg = AArch64::XZR;
17162 DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT);
17163 return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
17166 /// Replace a store of a scalar splat vector with scalar stores of the scalar
17167 /// value. The load store optimizer pass will merge them to store pair stores.
17168 /// This has better performance than a splat of the scalar followed by a split
17169 /// vector store. Even if the stores are not merged it is four stores vs a dup,
17170 /// followed by an ext.b and two stores.
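///
/// For example (illustrative), storing a v4i32 splat of w1 becomes four
/// "str w1" stores, which the load/store optimizer can usually merge into two
/// "stp w1, w1" instructions.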
17171 static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
17172 SDValue StVal = St.getValue();
17173 EVT VT = StVal.getValueType();
17175 // Don't replace floating point stores; they possibly won't be transformed to
17176 // stp because of the store pair suppress pass.
17177 if (VT.isFloatingPoint())
17180 // We can express a splat as store pair(s) for 2 or 4 elements.
17181 unsigned NumVecElts = VT.getVectorNumElements();
17182 if (NumVecElts != 4 && NumVecElts != 2)
17185 // If the store is truncating then it's going down to i16 or smaller, which
17186 // means it can be implemented in a single store anyway.
17187 if (St.isTruncatingStore())
17190 // Check that this is a splat.
17191 // Make sure that each of the relevant vector element locations are inserted
17192 // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32.
17193 std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1);
17195 for (unsigned I = 0; I < NumVecElts; ++I) {
17196 // Check for insert vector elements.
17197 if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT)
17200 // Check that same value is inserted at each vector element.
17202 SplatVal = StVal.getOperand(1);
17203 else if (StVal.getOperand(1) != SplatVal)
17206 // Check insert element index.
17207 ConstantSDNode *CIndex = dyn_cast<ConstantSDNode>(StVal.getOperand(2));
17210 uint64_t IndexVal = CIndex->getZExtValue();
17211 if (IndexVal >= NumVecElts)
17213 IndexNotInserted.reset(IndexVal);
17215 StVal = StVal.getOperand(0);
17217 // Check that all vector element locations were inserted to.
17218 if (IndexNotInserted.any())
17221 return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
17224 static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
17226 const AArch64Subtarget *Subtarget) {
17228 StoreSDNode *S = cast<StoreSDNode>(N);
17229 if (S->isVolatile() || S->isIndexed())
17232 SDValue StVal = S->getValue();
17233 EVT VT = StVal.getValueType();
17235 if (!VT.isFixedLengthVector())
17238 // If we get a splat of zeros, convert this vector store to a store of
17239 // scalars. They will be merged into store pairs of xzr thereby removing one
17240 // instruction and one register.
17241 if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S))
17242 return ReplacedZeroSplat;
17244 // FIXME: The logic for deciding if an unaligned store should be split should
17245 // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be
17246 // a call to that function here.
17248 if (!Subtarget->isMisaligned128StoreSlow())
17251 // Don't split at -Oz.
17252 if (DAG.getMachineFunction().getFunction().hasMinSize())
17255 // Don't split v2i64 vectors. Memcpy lowering produces those and splitting
17256 // those up regresses performance on micro-benchmarks and olden/bh.
17257 if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64)
17260 // Split unaligned 16B stores. They are terrible for performance.
17261 // Don't split stores with alignment of 1 or 2. Code that uses clang vector
17262 // extensions can use this to mark that it does not want splitting to happen
17263 // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of
17264 // eliminating alignment hazards is only 1 in 8 for alignment of 2.
17265 if (VT.getSizeInBits() != 128 || S->getAlign() >= Align(16) ||
17266 S->getAlign() <= Align(2))
17269 // If we get a splat of a scalar convert this vector store to a store of
17270 // scalars. They will be merged into store pairs, thereby removing two
17271 // instructions.
17272 if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S))
17273 return ReplacedSplat;
17277 // Split VT into two.
17278 EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
17279 unsigned NumElts = HalfVT.getVectorNumElements();
17280 SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
17281 DAG.getConstant(0, DL, MVT::i64));
17282 SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
17283 DAG.getConstant(NumElts, DL, MVT::i64));
17284 SDValue BasePtr = S->getBasePtr();
17286 DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
17287 S->getAlign(), S->getMemOperand()->getFlags());
17288 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
17289 DAG.getConstant(8, DL, MVT::i64));
17290 return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
17291 S->getPointerInfo(), S->getAlign(),
17292 S->getMemOperand()->getFlags());
17295 static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
17296 assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");
17298 // splice(pg, op1, undef) -> op1
17299 if (N->getOperand(2).isUndef())
17300 return N->getOperand(1);
17305 static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
17306 const AArch64Subtarget *Subtarget) {
17307 assert((N->getOpcode() == AArch64ISD::UUNPKHI ||
17308 N->getOpcode() == AArch64ISD::UUNPKLO) &&
17309 "Unexpected Opcode!");
17311 // uunpklo/hi undef -> undef
17312 if (N->getOperand(0).isUndef())
17313 return DAG.getUNDEF(N->getValueType(0));
17315 // If this is a masked load followed by an UUNPKLO, fold this into a masked
17316 // extending load. We can do this even if this is already a masked
17317 // zero- or any-extending load.
17318 if (N->getOperand(0).getOpcode() == ISD::MLOAD &&
17319 N->getOpcode() == AArch64ISD::UUNPKLO) {
17320 MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N->getOperand(0));
17321 SDValue Mask = MLD->getMask();
17324 if (MLD->isUnindexed() && MLD->getExtensionType() != ISD::SEXTLOAD &&
17325 SDValue(MLD, 0).hasOneUse() && Mask->getOpcode() == AArch64ISD::PTRUE &&
17326 (MLD->getPassThru()->isUndef() ||
17327 isZerosVector(MLD->getPassThru().getNode()))) {
17328 unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
17329 unsigned PgPattern = Mask->getConstantOperandVal(0);
17330 EVT VT = N->getValueType(0);
17332 // Ensure we can double the size of the predicate pattern
17333 unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
17335 NumElts * VT.getVectorElementType().getSizeInBits() <= MinSVESize) {
17337 getPTrue(DAG, DL, VT.changeVectorElementType(MVT::i1), PgPattern);
17338 SDValue PassThru = DAG.getConstant(0, DL, VT);
17339 SDValue NewLoad = DAG.getMaskedLoad(
17340 VT, DL, MLD->getChain(), MLD->getBasePtr(), MLD->getOffset(), Mask,
17341 PassThru, MLD->getMemoryVT(), MLD->getMemOperand(),
17342 MLD->getAddressingMode(), ISD::ZEXTLOAD);
17344 DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), NewLoad.getValue(1));
17354 static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG) {
17356 SDValue Op0 = N->getOperand(0);
17357 SDValue Op1 = N->getOperand(1);
17358 EVT ResVT = N->getValueType(0);
17360 // uzp1(x, undef) -> concat(truncate(x), undef)
17361 if (Op1.getOpcode() == ISD::UNDEF) {
17362 EVT BCVT = MVT::Other, HalfVT = MVT::Other;
17363 switch (ResVT.getSimpleVT().SimpleTy) {
17368 HalfVT = MVT::v8i8;
17372 HalfVT = MVT::v4i16;
17376 HalfVT = MVT::v2i32;
17379 if (BCVT != MVT::Other) {
17380 SDValue BC = DAG.getBitcast(BCVT, Op0);
17381 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, BC);
17382 return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Trunc,
17383 DAG.getUNDEF(HalfVT));
17387 // uzp1(unpklo(uzp1(x, y)), z) => uzp1(x, z)
17388 if (Op0.getOpcode() == AArch64ISD::UUNPKLO) {
17389 if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
17390 SDValue X = Op0.getOperand(0).getOperand(0);
17391 return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1);
17395 // uzp1(x, unpkhi(uzp1(y, z))) => uzp1(x, z)
17396 if (Op1.getOpcode() == AArch64ISD::UUNPKHI) {
17397 if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
17398 SDValue Z = Op1.getOperand(0).getOperand(1);
17399 return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z);
17406 static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) {
17407 unsigned Opc = N->getOpcode();
17409 assert(((Opc >= AArch64ISD::GLD1_MERGE_ZERO && // unsigned gather loads
17410 Opc <= AArch64ISD::GLD1_IMM_MERGE_ZERO) ||
17411 (Opc >= AArch64ISD::GLD1S_MERGE_ZERO && // signed gather loads
17412 Opc <= AArch64ISD::GLD1S_IMM_MERGE_ZERO)) &&
17413 "Invalid opcode.");
17415 const bool Scaled = Opc == AArch64ISD::GLD1_SCALED_MERGE_ZERO ||
17416 Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
17417 const bool Signed = Opc == AArch64ISD::GLD1S_MERGE_ZERO ||
17418 Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
17419 const bool Extended = Opc == AArch64ISD::GLD1_SXTW_MERGE_ZERO ||
17420 Opc == AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO ||
17421 Opc == AArch64ISD::GLD1_UXTW_MERGE_ZERO ||
17422 Opc == AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO;
17425 SDValue Chain = N->getOperand(0);
17426 SDValue Pg = N->getOperand(1);
17427 SDValue Base = N->getOperand(2);
17428 SDValue Offset = N->getOperand(3);
17429 SDValue Ty = N->getOperand(4);
17431 EVT ResVT = N->getValueType(0);
17433 const auto OffsetOpc = Offset.getOpcode();
17434 const bool OffsetIsZExt =
17435 OffsetOpc == AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU;
17436 const bool OffsetIsSExt =
17437 OffsetOpc == AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU;
17439 // Fold sign/zero extensions of vector offsets into GLD1 nodes where possible.
17440 if (!Extended && (OffsetIsSExt || OffsetIsZExt)) {
17441 SDValue ExtPg = Offset.getOperand(0);
17442 VTSDNode *ExtFrom = cast<VTSDNode>(Offset.getOperand(2).getNode());
17443 EVT ExtFromEVT = ExtFrom->getVT().getVectorElementType();
17445 // If the predicate for the sign- or zero-extended offset is the
17446 // same as the predicate used for this load and the sign-/zero-extension
17447 // was from a 32-bits...
17448 if (ExtPg == Pg && ExtFromEVT == MVT::i32) {
17449 SDValue UnextendedOffset = Offset.getOperand(1);
17451 unsigned NewOpc = getGatherVecOpcode(Scaled, OffsetIsSExt, true);
17453 NewOpc = getSignExtendedGatherOpcode(NewOpc);
17455 return DAG.getNode(NewOpc, DL, {ResVT, MVT::Other},
17456 {Chain, Pg, Base, UnextendedOffset, Ty});
17463 /// Optimize a vector shift instruction and its operand if shifted out
17464 /// bits are not used.
17465 static SDValue performVectorShiftCombine(SDNode *N,
17466 const AArch64TargetLowering &TLI,
17467 TargetLowering::DAGCombinerInfo &DCI) {
17468 assert(N->getOpcode() == AArch64ISD::VASHR ||
17469 N->getOpcode() == AArch64ISD::VLSHR);
17471 SDValue Op = N->getOperand(0);
17472 unsigned OpScalarSize = Op.getScalarValueSizeInBits();
17474 unsigned ShiftImm = N->getConstantOperandVal(1);
17475 assert(OpScalarSize > ShiftImm && "Invalid shift imm");
17477 APInt ShiftedOutBits = APInt::getLowBitsSet(OpScalarSize, ShiftImm);
17478 APInt DemandedMask = ~ShiftedOutBits;
17480 if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI))
17481 return SDValue(N, 0);
17486 static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) {
17487 // sunpklo(sext(pred)) -> sext(extract_low_half(pred))
17488 // This transform works in partnership with performSetCCPunpkCombine to
17489 // remove unnecessary transfer of predicates into standard registers and back
17490 if (N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND &&
17491 N->getOperand(0)->getOperand(0)->getValueType(0).getScalarType() ==
17493 SDValue CC = N->getOperand(0)->getOperand(0);
17494 auto VT = CC->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext());
17495 SDValue Unpk = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT, CC,
17496 DAG.getVectorIdxConstant(0, SDLoc(N)));
return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), N->getValueType(0), Unpk);
  }

  return SDValue();
}
17503 /// Target-specific DAG combine function for post-increment LD1 (lane) and
17504 /// post-increment LD1R.
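/// A sketch of the pattern being matched (illustrative):
///   x = load(addr); v = insert_elt(vec, x, lane); addr' = add(addr, #size)
/// which becomes a single LD1LANEpost (or LD1DUPpost for the dup case) that
/// produces both the vector result and the incremented address.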
static SDValue performPostLD1Combine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     bool IsLaneOp) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();
17511 SelectionDAG &DAG = DCI.DAG;
17512 EVT VT = N->getValueType(0);
17514 if (!VT.is128BitVector() && !VT.is64BitVector())
17517 unsigned LoadIdx = IsLaneOp ? 1 : 0;
17518 SDNode *LD = N->getOperand(LoadIdx).getNode();
// If it is not a LOAD, we cannot do this combine.
  if (LD->getOpcode() != ISD::LOAD)
    return SDValue();

  // The vector lane must be a constant in the LD1LANE opcode.
  SDValue Lane;
  if (IsLaneOp) {
    Lane = N->getOperand(2);
    auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
    if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
      return SDValue();
  }
17532 LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
17533 EVT MemVT = LoadSDN->getMemoryVT();
17534 // Check if memory operand is the same type as the vector element.
if (MemVT != VT.getVectorElementType())
    return SDValue();
17538 // Check if there are other uses. If so, do not combine as it will introduce
17540 for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE;
17542 if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result.
17548 SDValue Addr = LD->getOperand(1);
17549 SDValue Vector = N->getOperand(0);
17550 // Search for a use of the address operand that is an increment.
17551 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE =
17552 Addr.getNode()->use_end(); UI != UE; ++UI) {
17553 SDNode *User = *UI;
17554 if (User->getOpcode() != ISD::ADD
17555 || UI.getUse().getResNo() != Addr.getResNo())
17558 // If the increment is a constant, it must match the memory ref size.
17559 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
17560 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
17561 uint32_t IncVal = CInc->getZExtValue();
17562 unsigned NumBytes = VT.getScalarSizeInBits() / 8;
17563 if (IncVal != NumBytes)
17565 Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
17568 // To avoid cycle construction make sure that neither the load nor the add
17569 // are predecessors to each other or the Vector.
17570 SmallPtrSet<const SDNode *, 32> Visited;
17571 SmallVector<const SDNode *, 16> Worklist;
17572 Visited.insert(Addr.getNode());
17573 Worklist.push_back(User);
17574 Worklist.push_back(LD);
17575 Worklist.push_back(Vector.getNode());
17576 if (SDNode::hasPredecessorHelper(LD, Visited, Worklist) ||
17577 SDNode::hasPredecessorHelper(User, Visited, Worklist))
17580 SmallVector<SDValue, 8> Ops;
17581 Ops.push_back(LD->getOperand(0)); // Chain
17583 Ops.push_back(Vector); // The vector to be inserted
17584 Ops.push_back(Lane); // The lane to be inserted in the vector
17586 Ops.push_back(Addr);
17587 Ops.push_back(Inc);
17589 EVT Tys[3] = { VT, MVT::i64, MVT::Other };
17590 SDVTList SDTys = DAG.getVTList(Tys);
17591 unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost;
17592 SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops,
17594 LoadSDN->getMemOperand());
17596 // Update the uses.
17597 SDValue NewResults[] = {
17598 SDValue(LD, 0), // The result of load
17599 SDValue(UpdN.getNode(), 2) // Chain
17601 DCI.CombineTo(LD, NewResults);
17602 DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result
DCI.CombineTo(User, SDValue(UpdN.getNode(), 1)); // Write back register
    break;
  }
  return SDValue();
}
17610 /// Simplify ``Addr`` given that the top byte of it is ignored by HW during
17611 /// address translation.
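/// With TBI the top byte (bits 63:56) of an address is ignored, so, for
/// example (illustrative), an AND that only clears those bits can be removed
/// when the value is used purely as a memory address.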
17612 static bool performTBISimplification(SDValue Addr,
17613 TargetLowering::DAGCombinerInfo &DCI,
17614 SelectionDAG &DAG) {
APInt DemandedMask = APInt::getLowBitsSet(64, 56);
  KnownBits Known;
17617 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
17618 !DCI.isBeforeLegalizeOps());
17619 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17620 if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) {
DCI.CommitTargetLoweringOpt(TLO);
    return true;
  }
  return false;
}
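// Fold a truncating store of an extended value back into a plain store of the
// original narrow value, e.g. (illustrative):
//   truncstore<i16> (zext i16 %v to i32), %p   -->   store i16 %v, %p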
17627 static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) {
17628 assert((N->getOpcode() == ISD::STORE || N->getOpcode() == ISD::MSTORE) &&
17629 "Expected STORE dag node in input!");
if (auto Store = dyn_cast<StoreSDNode>(N)) {
    if (!Store->isTruncatingStore() || Store->isIndexed())
      return SDValue();
    SDValue Ext = Store->getValue();
    auto ExtOpCode = Ext.getOpcode();
    if (ExtOpCode != ISD::ZERO_EXTEND && ExtOpCode != ISD::SIGN_EXTEND &&
        ExtOpCode != ISD::ANY_EXTEND)
      return SDValue();
    SDValue Orig = Ext->getOperand(0);
    if (Store->getMemoryVT() != Orig.getValueType())
      return SDValue();
    return DAG.getStore(Store->getChain(), SDLoc(Store), Orig,
                        Store->getBasePtr(), Store->getMemOperand());
  }

  return SDValue();
}
static SDValue performSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   SelectionDAG &DAG,
                                   const AArch64Subtarget *Subtarget) {
17653 StoreSDNode *ST = cast<StoreSDNode>(N);
17654 SDValue Chain = ST->getChain();
17655 SDValue Value = ST->getValue();
17656 SDValue Ptr = ST->getBasePtr();
17658 // If this is an FP_ROUND followed by a store, fold this into a truncating
17659 // store. We can do this even if this is already a truncstore.
17660 // We purposefully don't care about legality of the nodes here as we know
17661 // they can be split down into something legal.
17662 if (DCI.isBeforeLegalizeOps() && Value.getOpcode() == ISD::FP_ROUND &&
17663 Value.getNode()->hasOneUse() && ST->isUnindexed() &&
17664 Subtarget->useSVEForFixedLengthVectors() &&
17665 Value.getValueType().isFixedLengthVector() &&
17666 Value.getValueType().getFixedSizeInBits() >=
17667 Subtarget->getMinSVEVectorSizeInBits())
17668 return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
17669 ST->getMemoryVT(), ST->getMemOperand());
if (SDValue Split = splitStores(N, DCI, DAG, Subtarget))
    return Split;
17674 if (Subtarget->supportsAddressTopByteIgnored() &&
17675 performTBISimplification(N->getOperand(2), DCI, DAG))
17676 return SDValue(N, 0);
if (SDValue Store = foldTruncStoreOfExt(DAG, N))
    return Store;

  return SDValue();
}
static SDValue performMSTORECombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG,
                                    const AArch64Subtarget *Subtarget) {
  MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
  SDValue Value = MST->getValue();
  SDValue Mask = MST->getMask();
  SDLoc DL(N);
// If this is a UZP1 followed by a masked store, fold this into a masked
  // truncating store. We can do this even if this is already a masked
  // truncstore.
  if (Value.getOpcode() == AArch64ISD::UZP1 && Value->hasOneUse() &&
17697 MST->isUnindexed() && Mask->getOpcode() == AArch64ISD::PTRUE &&
17698 Value.getValueType().isInteger()) {
17699 Value = Value.getOperand(0);
17700 if (Value.getOpcode() == ISD::BITCAST) {
EVT HalfVT =
          Value.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
17703 EVT InVT = Value.getOperand(0).getValueType();
17705 if (HalfVT.widenIntegerVectorElementType(*DAG.getContext()) == InVT) {
17706 unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
17707 unsigned PgPattern = Mask->getConstantOperandVal(0);
17709 // Ensure we can double the size of the predicate pattern
17710 unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
if (NumElts && NumElts * InVT.getVectorElementType().getSizeInBits() <=
                           MinSVESize) {
          Mask = getPTrue(DAG, DL, InVT.changeVectorElementType(MVT::i1),
                          PgPattern);
17715 return DAG.getMaskedStore(MST->getChain(), DL, Value.getOperand(0),
17716 MST->getBasePtr(), MST->getOffset(), Mask,
17717 MST->getMemoryVT(), MST->getMemOperand(),
17718 MST->getAddressingMode(),
/*IsTruncating=*/true);
        }
      }
    }
  }

  return SDValue();
}
17728 /// \return true if part of the index was folded into the Base.
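/// For example (illustrative): Base = %p, Index = (add %idx, splat(4)) with
/// Scale = 8 can become Base = (%p + 32), Index = %idx.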
17729 static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale,
17730 SDLoc DL, SelectionDAG &DAG) {
17731 // This function assumes a vector of i64 indices.
17732 EVT IndexVT = Index.getValueType();
if (!IndexVT.isVector() || IndexVT.getVectorElementType() != MVT::i64)
    return false;
// Simplify:
  //    BasePtr = Ptr
  //    Index = X + splat(Offset)
  // To:
  //    BasePtr = Ptr + Offset * scale
  //    Index = X
  if (Index.getOpcode() == ISD::ADD) {
    if (auto Offset = DAG.getSplatValue(Index.getOperand(1))) {
      Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
      BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
      Index = Index.getOperand(0);
      return true;
    }
  }
// Simplify:
  //    BasePtr = Ptr
  //    Index = (X + splat(Offset)) << splat(Shift)
  // To:
  //    BasePtr = Ptr + (Offset << Shift) * scale
  //    Index = X << splat(Shift)
17757 if (Index.getOpcode() == ISD::SHL &&
17758 Index.getOperand(0).getOpcode() == ISD::ADD) {
17759 SDValue Add = Index.getOperand(0);
17760 SDValue ShiftOp = Index.getOperand(1);
17761 SDValue OffsetOp = Add.getOperand(1);
17762 if (auto Shift = DAG.getSplatValue(ShiftOp))
17763 if (auto Offset = DAG.getSplatValue(OffsetOp)) {
17764 Offset = DAG.getNode(ISD::SHL, DL, MVT::i64, Offset, Shift);
17765 Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
17766 BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
17767 Index = DAG.getNode(ISD::SHL, DL, Index.getValueType(),
Add.getOperand(0), ShiftOp);
        return true;
      }
  }

  return false;
}
17776 // Analyse the specified address returning true if a more optimal addressing
17777 // mode is available. When returning true all parameters are updated to reflect
17778 // their recommended values.
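// For example (illustrative): an nxv2i64 index of step_vector(4) whose scaled
// extent is known to fit in a signed 32-bit range can be replaced by an
// nxv2i32 step_vector(4), which is cheaper to legalise.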
17779 static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
17780 SDValue &BasePtr, SDValue &Index,
17781 SelectionDAG &DAG) {
17782 // Try to iteratively fold parts of the index into the base pointer to
17783 // simplify the index as much as possible.
17784 bool Changed = false;
while (foldIndexIntoBase(BasePtr, Index, N->getScale(), SDLoc(N), DAG))
    Changed = true;
17788 // Only consider element types that are pointer sized as smaller types can
17789 // be easily promoted.
17790 EVT IndexVT = Index.getValueType();
if (IndexVT.getVectorElementType() != MVT::i64 || IndexVT == MVT::nxv2i64)
    return Changed;
17795 // Index = step(const)
17796 int64_t Stride = 0;
17797 if (Index.getOpcode() == ISD::STEP_VECTOR)
17798 Stride = cast<ConstantSDNode>(Index.getOperand(0))->getSExtValue();
17801 // Index = step(const) << shift(const)
17802 else if (Index.getOpcode() == ISD::SHL &&
17803 Index.getOperand(0).getOpcode() == ISD::STEP_VECTOR) {
17804 SDValue RHS = Index.getOperand(1);
if (auto *Shift =
            dyn_cast_or_null<ConstantSDNode>(DAG.getSplatValue(RHS))) {
17807 int64_t Step = (int64_t)Index.getOperand(0).getConstantOperandVal(1);
Stride = Step << Shift->getZExtValue();
    }
  }

  // Return early because no supported pattern is found.
  if (Stride == 0)
    return Changed;

  if (Stride < std::numeric_limits<int32_t>::min() ||
      Stride > std::numeric_limits<int32_t>::max())
    return Changed;
17820 const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
17821 unsigned MaxVScale =
17822 Subtarget.getMaxSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
17823 int64_t LastElementOffset =
17824 IndexVT.getVectorMinNumElements() * Stride * MaxVScale;
17826 if (LastElementOffset < std::numeric_limits<int32_t>::min() ||
17827 LastElementOffset > std::numeric_limits<int32_t>::max())
17830 EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
17831 // Stride does not scale explicitly by 'Scale', because it happens in
17832 // the gather/scatter addressing mode.
17833 Index = DAG.getNode(ISD::STEP_VECTOR, SDLoc(N), NewIndexVT,
DAG.getTargetConstant(Stride, SDLoc(N), MVT::i32));

  return true;
}
17838 static SDValue performMaskedGatherScatterCombine(
17839 SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) {
17840 MaskedGatherScatterSDNode *MGS = cast<MaskedGatherScatterSDNode>(N);
17841 assert(MGS && "Can only combine gather load or scatter store nodes");
if (!DCI.isBeforeLegalize())
    return SDValue();

  SDLoc DL(MGS);
17847 SDValue Chain = MGS->getChain();
17848 SDValue Scale = MGS->getScale();
17849 SDValue Index = MGS->getIndex();
17850 SDValue Mask = MGS->getMask();
17851 SDValue BasePtr = MGS->getBasePtr();
17852 ISD::MemIndexType IndexType = MGS->getIndexType();
17854 if (!findMoreOptimalIndexType(MGS, BasePtr, Index, DAG))
17857 // Here we catch such cases early and change MGATHER's IndexType to allow
17858 // the use of an Index that's more legalisation friendly.
17859 if (auto *MGT = dyn_cast<MaskedGatherSDNode>(MGS)) {
17860 SDValue PassThru = MGT->getPassThru();
17861 SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
17862 return DAG.getMaskedGather(
17863 DAG.getVTList(N->getValueType(0), MVT::Other), MGT->getMemoryVT(), DL,
17864 Ops, MGT->getMemOperand(), IndexType, MGT->getExtensionType());
17866 auto *MSC = cast<MaskedScatterSDNode>(MGS);
17867 SDValue Data = MSC->getValue();
17868 SDValue Ops[] = {Chain, Data, Mask, BasePtr, Index, Scale};
17869 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), MSC->getMemoryVT(), DL,
17870 Ops, MSC->getMemOperand(), IndexType,
17871 MSC->isTruncatingStore());
17874 /// Target-specific DAG combine function for NEON load/store intrinsics
17875 /// to merge base address updates.
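/// e.g. (illustrative): an aarch64_neon_ld2 from x0 followed by
/// "add x0, x0, #32" can merge into the post-indexed LD2post form that yields
/// the loaded registers plus the updated base address.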
17876 static SDValue performNEONPostLDSTCombine(SDNode *N,
17877 TargetLowering::DAGCombinerInfo &DCI,
17878 SelectionDAG &DAG) {
17879 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
17882 unsigned AddrOpIdx = N->getNumOperands() - 1;
17883 SDValue Addr = N->getOperand(AddrOpIdx);
17885 // Search for a use of the address operand that is an increment.
17886 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
17887 UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
17888 SDNode *User = *UI;
17889 if (User->getOpcode() != ISD::ADD ||
17890 UI.getUse().getResNo() != Addr.getResNo())
17893 // Check that the add is independent of the load/store. Otherwise, folding
17894 // it would create a cycle.
17895 SmallPtrSet<const SDNode *, 32> Visited;
17896 SmallVector<const SDNode *, 16> Worklist;
17897 Visited.insert(Addr.getNode());
17898 Worklist.push_back(N);
17899 Worklist.push_back(User);
17900 if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
17901 SDNode::hasPredecessorHelper(User, Visited, Worklist))
17904 // Find the new opcode for the updating load/store.
17905 bool IsStore = false;
17906 bool IsLaneOp = false;
17907 bool IsDupOp = false;
17908 unsigned NewOpc = 0;
17909 unsigned NumVecs = 0;
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default: llvm_unreachable("unexpected intrinsic for Neon base update");
17913 case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post;
17914 NumVecs = 2; break;
17915 case Intrinsic::aarch64_neon_ld3: NewOpc = AArch64ISD::LD3post;
17916 NumVecs = 3; break;
17917 case Intrinsic::aarch64_neon_ld4: NewOpc = AArch64ISD::LD4post;
17918 NumVecs = 4; break;
17919 case Intrinsic::aarch64_neon_st2: NewOpc = AArch64ISD::ST2post;
17920 NumVecs = 2; IsStore = true; break;
17921 case Intrinsic::aarch64_neon_st3: NewOpc = AArch64ISD::ST3post;
17922 NumVecs = 3; IsStore = true; break;
17923 case Intrinsic::aarch64_neon_st4: NewOpc = AArch64ISD::ST4post;
17924 NumVecs = 4; IsStore = true; break;
17925 case Intrinsic::aarch64_neon_ld1x2: NewOpc = AArch64ISD::LD1x2post;
17926 NumVecs = 2; break;
17927 case Intrinsic::aarch64_neon_ld1x3: NewOpc = AArch64ISD::LD1x3post;
17928 NumVecs = 3; break;
17929 case Intrinsic::aarch64_neon_ld1x4: NewOpc = AArch64ISD::LD1x4post;
17930 NumVecs = 4; break;
17931 case Intrinsic::aarch64_neon_st1x2: NewOpc = AArch64ISD::ST1x2post;
17932 NumVecs = 2; IsStore = true; break;
17933 case Intrinsic::aarch64_neon_st1x3: NewOpc = AArch64ISD::ST1x3post;
17934 NumVecs = 3; IsStore = true; break;
17935 case Intrinsic::aarch64_neon_st1x4: NewOpc = AArch64ISD::ST1x4post;
17936 NumVecs = 4; IsStore = true; break;
17937 case Intrinsic::aarch64_neon_ld2r: NewOpc = AArch64ISD::LD2DUPpost;
17938 NumVecs = 2; IsDupOp = true; break;
17939 case Intrinsic::aarch64_neon_ld3r: NewOpc = AArch64ISD::LD3DUPpost;
17940 NumVecs = 3; IsDupOp = true; break;
17941 case Intrinsic::aarch64_neon_ld4r: NewOpc = AArch64ISD::LD4DUPpost;
17942 NumVecs = 4; IsDupOp = true; break;
17943 case Intrinsic::aarch64_neon_ld2lane: NewOpc = AArch64ISD::LD2LANEpost;
17944 NumVecs = 2; IsLaneOp = true; break;
17945 case Intrinsic::aarch64_neon_ld3lane: NewOpc = AArch64ISD::LD3LANEpost;
17946 NumVecs = 3; IsLaneOp = true; break;
17947 case Intrinsic::aarch64_neon_ld4lane: NewOpc = AArch64ISD::LD4LANEpost;
17948 NumVecs = 4; IsLaneOp = true; break;
17949 case Intrinsic::aarch64_neon_st2lane: NewOpc = AArch64ISD::ST2LANEpost;
17950 NumVecs = 2; IsStore = true; IsLaneOp = true; break;
17951 case Intrinsic::aarch64_neon_st3lane: NewOpc = AArch64ISD::ST3LANEpost;
17952 NumVecs = 3; IsStore = true; IsLaneOp = true; break;
17953 case Intrinsic::aarch64_neon_st4lane: NewOpc = AArch64ISD::ST4LANEpost;
17954 NumVecs = 4; IsStore = true; IsLaneOp = true; break;
}

    EVT VecTy;
    if (IsStore)
      VecTy = N->getOperand(2).getValueType();
    else
      VecTy = N->getValueType(0);
17963 // If the increment is a constant, it must match the memory ref size.
17964 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
17965 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
17966 uint32_t IncVal = CInc->getZExtValue();
17967 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
17968 if (IsLaneOp || IsDupOp)
17969 NumBytes /= VecTy.getVectorNumElements();
17970 if (IncVal != NumBytes)
17972 Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
17974 SmallVector<SDValue, 8> Ops;
17975 Ops.push_back(N->getOperand(0)); // Incoming chain
17976 // Load lane and store have vector list as input.
17977 if (IsLaneOp || IsStore)
17978 for (unsigned i = 2; i < AddrOpIdx; ++i)
17979 Ops.push_back(N->getOperand(i));
17980 Ops.push_back(Addr); // Base register
17981 Ops.push_back(Inc);
17985 unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
17987 for (n = 0; n < NumResultVecs; ++n)
17989 Tys[n++] = MVT::i64; // Type of write back register
17990 Tys[n] = MVT::Other; // Type of the chain
17991 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));
17993 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
17994 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops,
17995 MemInt->getMemoryVT(),
17996 MemInt->getMemOperand());
17998 // Update the uses.
17999 std::vector<SDValue> NewResults;
18000 for (unsigned i = 0; i < NumResultVecs; ++i) {
18001 NewResults.push_back(SDValue(UpdN.getNode(), i));
18003 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
18004 DCI.CombineTo(N, NewResults);
DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));

    break;
  }
  return SDValue();
}
18012 // Checks to see if the value is the prescribed width and returns information
18013 // about its extension mode.
18015 bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
18016 ExtType = ISD::NON_EXTLOAD;
switch(V.getNode()->getOpcode()) {
  default:
    return false;
  case ISD::LOAD: {
    LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
    if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8)
        || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) {
      ExtType = LoadNode->getExtensionType();
      return true;
    }
    return false;
  }
  case ISD::AssertSext: {
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8 && width == 8)
        || (TypeNode->getVT() == MVT::i16 && width == 16)) {
      ExtType = ISD::SEXTLOAD;
      return true;
    }
    return false;
  }
  case ISD::AssertZext: {
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8 && width == 8)
        || (TypeNode->getVT() == MVT::i16 && width == 16)) {
      ExtType = ISD::ZEXTLOAD;
      return true;
    }
    return false;
  }
  case ISD::Constant:
  case ISD::TargetConstant: {
    return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
           1LL << (width - 1);
  }
  }

  return true;
}
18057 // This function does a whole lot of voodoo to determine if the tests are
18058 // equivalent without and with a mask. Essentially what happens is that given a
// DAG resembling:
//
//   (CMP (AND (ADD Input, AddConstant), 0xff), CompConstant), tested with CC
//
// i.e. the Input is adjusted by AddConstant, masked down to the low bits, and
// then compared against CompConstant under the condition code CC.
18082 // The AND node may be safely removed for some combinations of inputs. In
18083 // particular we need to take into account the extension type of the Input,
18084 // the exact values of AddConstant, CompConstant, and CC, along with the nominal
// width of the input (this can work for any width of input; the example above
// is specific to 8 bits).
//
// The specific equations were worked out by generating output tables for each
// AArch64CC value in terms of AddConstant (w1) and CompConstant (w2). The
18090 // problem was simplified by working with 4 bit inputs, which means we only
18091 // needed to reason about 24 distinct bit patterns: 8 patterns unique to zero
18092 // extension (8,15), 8 patterns unique to sign extensions (-8,-1), and 8
18093 // patterns present in both extensions (0,7). For every distinct set of
18094 // AddConstant and CompConstants bit patterns we can consider the masked and
18095 // unmasked versions to be equivalent if the result of this function is true for
// all 16 distinct bit patterns for the current extension type of Input (w0).
// Roughly, the harness compared the masked and unmasked computations, e.g.:
//   and   w10, w8, #0x0f
//   cmp   w10, <CompConstant>
//   cset  w9, <AArch64CC>
//   cmp   w8, <CompConstant>
//   cset  w11, <AArch64CC>
// and checked that w9 and w11 agree for every input w8.
18108 // Since the above function shows when the outputs are equivalent it defines
18109 // when it is safe to remove the AND. Unfortunately it only runs on AArch64 and
18110 // would be expensive to run during compiles. The equations below were written
// in a test harness that confirmed they gave outputs equivalent to the above
// function for all inputs, so they can be used to determine whether the
// removal is safe.
//
// isEquivalentMaskless() is the code for testing if the AND can be removed,
// factored out of the DAG recognition as the DAG can take several forms.
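// Worked example (illustrative, width = 8): with a zero-extended input the
// value already fits in 8 bits, so "(Input + AddConstant) & 0xff" and
// "Input + AddConstant" can only differ once the add overflows 8 bits; the
// per-CC cases below encode when such a difference cannot change the result
// of the flag test.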
18118 static bool isEquivalentMaskless(unsigned CC, unsigned width,
18119 ISD::LoadExtType ExtType, int AddConstant,
18120 int CompConstant) {
// By being careful about our equations and only writing them in terms of
  // symbolic values and well known constants (0, 1, -1, MaxUInt) we can
18123 // make them generally applicable to all bit widths.
18124 int MaxUInt = (1 << width);
18126 // For the purposes of these comparisons sign extending the type is
18127 // equivalent to zero extending the add and displacing it by half the integer
18128 // width. Provided we are careful and make sure our equations are valid over
18129 // the whole range we can just adjust the input and avoid writing equations
18130 // for sign extended inputs.
18131 if (ExtType == ISD::SEXTLOAD)
AddConstant -= (1 << (width-1));

  switch(CC) {
18135 case AArch64CC::LE:
18136 case AArch64CC::GT:
18137 if ((AddConstant == 0) ||
18138 (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
18139 (AddConstant >= 0 && CompConstant < 0) ||
18140 (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
18143 case AArch64CC::LT:
18144 case AArch64CC::GE:
18145 if ((AddConstant == 0) ||
18146 (AddConstant >= 0 && CompConstant <= 0) ||
18147 (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
18150 case AArch64CC::HI:
18151 case AArch64CC::LS:
18152 if ((AddConstant >= 0 && CompConstant < 0) ||
18153 (AddConstant <= 0 && CompConstant >= -1 &&
18154 CompConstant < AddConstant + MaxUInt))
18157 case AArch64CC::PL:
18158 case AArch64CC::MI:
18159 if ((AddConstant == 0) ||
18160 (AddConstant > 0 && CompConstant <= 0) ||
18161 (AddConstant < 0 && CompConstant <= AddConstant))
18164 case AArch64CC::LO:
18165 case AArch64CC::HS:
18166 if ((AddConstant >= 0 && CompConstant <= 0) ||
18167 (AddConstant <= 0 && CompConstant >= 0 &&
18168 CompConstant <= AddConstant + MaxUInt))
18171 case AArch64CC::EQ:
18172 case AArch64CC::NE:
18173 if ((AddConstant > 0 && CompConstant < 0) ||
18174 (AddConstant < 0 && CompConstant >= 0 &&
18175 CompConstant < AddConstant + MaxUInt) ||
18176 (AddConstant >= 0 && CompConstant >= 0 &&
18177 CompConstant >= AddConstant) ||
18178 (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
18181 case AArch64CC::VS:
18182 case AArch64CC::VC:
18183 case AArch64CC::AL:
case AArch64CC::NV:
    return true;
  case AArch64CC::Invalid:
    break;
  }

  return false;
}
18194 SDValue performCONDCombine(SDNode *N,
18195 TargetLowering::DAGCombinerInfo &DCI,
18196 SelectionDAG &DAG, unsigned CCIndex,
18197 unsigned CmpIndex) {
18198 unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue();
18199 SDNode *SubsNode = N->getOperand(CmpIndex).getNode();
18200 unsigned CondOpcode = SubsNode->getOpcode();
18202 if (CondOpcode != AArch64ISD::SUBS)
18205 // There is a SUBS feeding this condition. Is it fed by a mask we can
18208 SDNode *AndNode = SubsNode->getOperand(0).getNode();
18209 unsigned MaskBits = 0;
18211 if (AndNode->getOpcode() != ISD::AND)
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(AndNode->getOperand(1))) {
    uint32_t CNV = CN->getZExtValue();
    if (CNV == 255)
      MaskBits = 8;
    else if (CNV == 65535)
      MaskBits = 16;
  }

  if (!MaskBits)
    return SDValue();

  SDValue AddValue = AndNode->getOperand(0);
18227 if (AddValue.getOpcode() != ISD::ADD)
18230 // The basic dag structure is correct, grab the inputs and validate them.
18232 SDValue AddInputValue1 = AddValue.getNode()->getOperand(0);
18233 SDValue AddInputValue2 = AddValue.getNode()->getOperand(1);
18234 SDValue SubsInputValue = SubsNode->getOperand(1);
18236 // The mask is present and the provenance of all the values is a smaller type,
// let's see if the mask is superfluous.
18239 if (!isa<ConstantSDNode>(AddInputValue2.getNode()) ||
18240 !isa<ConstantSDNode>(SubsInputValue.getNode()))
18243 ISD::LoadExtType ExtType;
18245 if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) ||
18246 !checkValueWidth(AddInputValue2, MaskBits, ExtType) ||
18247 !checkValueWidth(AddInputValue1, MaskBits, ExtType) )
18250 if(!isEquivalentMaskless(CC, MaskBits, ExtType,
18251 cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(),
18252 cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue()))
18255 // The AND is not necessary, remove it.
18257 SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0),
18258 SubsNode->getValueType(1));
18259 SDValue Ops[] = { AddValue, SubsNode->getOperand(1) };
18261 SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops);
18262 DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode());
18264 return SDValue(N, 0);
18267 // Optimize compare with zero and branch.
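// e.g. (illustrative): "subs wzr, w0, #0" followed by "b.eq label" can be
// folded into a single "cbz w0, label".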
18268 static SDValue performBRCONDCombine(SDNode *N,
18269 TargetLowering::DAGCombinerInfo &DCI,
18270 SelectionDAG &DAG) {
18271 MachineFunction &MF = DAG.getMachineFunction();
// Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
  // will not be produced, as they are conditional branch instructions that do
  // not set flags.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    return SDValue();

  if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
    return NV;
18280 SDValue Chain = N->getOperand(0);
18281 SDValue Dest = N->getOperand(1);
18282 SDValue CCVal = N->getOperand(2);
18283 SDValue Cmp = N->getOperand(3);
18285 assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
18286 unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
18287 if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
18290 unsigned CmpOpc = Cmp.getOpcode();
18291 if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS)
// Only attempt folding if there is only one use of the flag and no use of the
  // value.
  if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
    return SDValue();
18299 SDValue LHS = Cmp.getOperand(0);
18300 SDValue RHS = Cmp.getOperand(1);
18302 assert(LHS.getValueType() == RHS.getValueType() &&
18303 "Expected the value type to be the same for both operands!");
18304 if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
18307 if (isNullConstant(LHS))
18308 std::swap(LHS, RHS);
18310 if (!isNullConstant(RHS))
18313 if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA ||
18314 LHS.getOpcode() == ISD::SRL)
18317 // Fold the compare into the branch instruction.
SDValue BR;
  if (CC == AArch64CC::EQ)
18320 BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
18322 BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
18324 // Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, BR, false);

  return SDValue(N, 0);
}
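// Fold a CSEL of a CTTZ against zero into an AND with (bitwidth - 1). For
// example (illustrative, i32): cttz(x) is 32 exactly when x is 0, so
//   csel 0, cttz(x), eq(x, 0)  ==  cttz(x) & 31.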
18330 static SDValue foldCSELofCTTZ(SDNode *N, SelectionDAG &DAG) {
18331 unsigned CC = N->getConstantOperandVal(2);
18332 SDValue SUBS = N->getOperand(3);
18333 SDValue Zero, CTTZ;
18335 if (CC == AArch64CC::EQ && SUBS.getOpcode() == AArch64ISD::SUBS) {
18336 Zero = N->getOperand(0);
18337 CTTZ = N->getOperand(1);
18338 } else if (CC == AArch64CC::NE && SUBS.getOpcode() == AArch64ISD::SUBS) {
18339 Zero = N->getOperand(1);
18340 CTTZ = N->getOperand(0);
18344 if ((CTTZ.getOpcode() != ISD::CTTZ && CTTZ.getOpcode() != ISD::TRUNCATE) ||
18345 (CTTZ.getOpcode() == ISD::TRUNCATE &&
18346 CTTZ.getOperand(0).getOpcode() != ISD::CTTZ))
18349 assert((CTTZ.getValueType() == MVT::i32 || CTTZ.getValueType() == MVT::i64) &&
18350 "Illegal type in CTTZ folding");
18352 if (!isNullConstant(Zero) || !isNullConstant(SUBS.getOperand(1)))
18355 SDValue X = CTTZ.getOpcode() == ISD::TRUNCATE
18356 ? CTTZ.getOperand(0).getOperand(0)
18357 : CTTZ.getOperand(0);
18359 if (X != SUBS.getOperand(0))
18362 unsigned BitWidth = CTTZ.getOpcode() == ISD::TRUNCATE
18363 ? CTTZ.getOperand(0).getValueSizeInBits()
18364 : CTTZ.getValueSizeInBits();
18365 SDValue BitWidthMinusOne =
18366 DAG.getConstant(BitWidth - 1, SDLoc(N), CTTZ.getValueType());
18367 return DAG.getNode(ISD::AND, SDLoc(N), CTTZ.getValueType(), CTTZ,
18371 // Optimize CSEL instructions
18372 static SDValue performCSELCombine(SDNode *N,
18373 TargetLowering::DAGCombinerInfo &DCI,
18374 SelectionDAG &DAG) {
18375 // CSEL x, x, cc -> x
18376 if (N->getOperand(0) == N->getOperand(1))
18377 return N->getOperand(0);
18379 // CSEL 0, cttz(X), eq(X, 0) -> AND cttz bitwidth-1
18380 // CSEL cttz(X), 0, ne(X, 0) -> AND cttz bitwidth-1
18381 if (SDValue Folded = foldCSELofCTTZ(N, DAG))
18384 return performCONDCombine(N, DCI, DAG, 2, 3);
// Try to re-use an already extended operand of a vector SetCC feeding an
// extended select. Doing so avoids requiring another full extension of the
// SET_CC result when lowering the select.
18390 static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {
18391 EVT Op0MVT = Op->getOperand(0).getValueType();
18392 if (!Op0MVT.isVector() || Op->use_empty())
18395 // Make sure that all uses of Op are VSELECTs with result matching types where
18396 // the result type has a larger element type than the SetCC operand.
18397 SDNode *FirstUse = *Op->use_begin();
18398 if (FirstUse->getOpcode() != ISD::VSELECT)
18400 EVT UseMVT = FirstUse->getValueType(0);
18401 if (UseMVT.getScalarSizeInBits() <= Op0MVT.getScalarSizeInBits())
18403 if (any_of(Op->uses(), [&UseMVT](const SDNode *N) {
18404 return N->getOpcode() != ISD::VSELECT || N->getValueType(0) != UseMVT;
}))
    return SDValue();

  APInt V;
  if (!ISD::isConstantSplatVector(Op->getOperand(1).getNode(), V))
    return SDValue();

  SDLoc DL(Op);
  SDValue Op0ExtV;
  SDValue Op1ExtV;
  ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(2))->get();
18416 // Check if the first operand of the SET_CC is already extended. If it is,
18417 // split the SET_CC and re-use the extended version of the operand.
18418 SDNode *Op0SExt = DAG.getNodeIfExists(ISD::SIGN_EXTEND, DAG.getVTList(UseMVT),
18419 Op->getOperand(0));
18420 SDNode *Op0ZExt = DAG.getNodeIfExists(ISD::ZERO_EXTEND, DAG.getVTList(UseMVT),
18421 Op->getOperand(0));
18422 if (Op0SExt && (isSignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
18423 Op0ExtV = SDValue(Op0SExt, 0);
18424 Op1ExtV = DAG.getNode(ISD::SIGN_EXTEND, DL, UseMVT, Op->getOperand(1));
18425 } else if (Op0ZExt && (isUnsignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
18426 Op0ExtV = SDValue(Op0ZExt, 0);
18427 Op1ExtV = DAG.getNode(ISD::ZERO_EXTEND, DL, UseMVT, Op->getOperand(1));
18431 return DAG.getNode(ISD::SETCC, DL, UseMVT.changeVectorElementType(MVT::i1),
18432 Op0ExtV, Op1ExtV, Op->getOperand(2));
18435 static SDValue performSETCCCombine(SDNode *N,
18436 TargetLowering::DAGCombinerInfo &DCI,
18437 SelectionDAG &DAG) {
18438 assert(N->getOpcode() == ISD::SETCC && "Unexpected opcode!");
18439 SDValue LHS = N->getOperand(0);
18440 SDValue RHS = N->getOperand(1);
18441 ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
18443 EVT VT = N->getValueType(0);
18445 if (SDValue V = tryToWidenSetCCOperands(N, DAG))
18448 // setcc (csel 0, 1, cond, X), 1, ne ==> csel 0, 1, !cond, X
18449 if (Cond == ISD::SETNE && isOneConstant(RHS) &&
18450 LHS->getOpcode() == AArch64ISD::CSEL &&
18451 isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) &&
18452 LHS->hasOneUse()) {
18453 // Invert CSEL's condition.
18454 auto *OpCC = cast<ConstantSDNode>(LHS.getOperand(2));
18455 auto OldCond = static_cast<AArch64CC::CondCode>(OpCC->getZExtValue());
18456 auto NewCond = getInvertedCondCode(OldCond);
18458 // csel 0, 1, !cond, X
18460 DAG.getNode(AArch64ISD::CSEL, DL, LHS.getValueType(), LHS.getOperand(0),
18461 LHS.getOperand(1), DAG.getConstant(NewCond, DL, MVT::i32),
18462 LHS.getOperand(3));
18463 return DAG.getZExtOrTrunc(CSEL, DL, VT);
18466 // setcc (srl x, imm), 0, ne ==> setcc (and x, (-1 << imm)), 0, ne
18467 if (Cond == ISD::SETNE && isNullConstant(RHS) &&
18468 LHS->getOpcode() == ISD::SRL && isa<ConstantSDNode>(LHS->getOperand(1)) &&
18469 LHS->hasOneUse()) {
18470 EVT TstVT = LHS->getValueType(0);
18471 if (TstVT.isScalarInteger() && TstVT.getFixedSizeInBits() <= 64) {
// This pattern is optimized better by emitComparison.
18473 uint64_t TstImm = -1ULL << LHS->getConstantOperandVal(1);
18474 SDValue TST = DAG.getNode(ISD::AND, DL, TstVT, LHS->getOperand(0),
18475 DAG.getConstant(TstImm, DL, TstVT));
18476 return DAG.getNode(ISD::SETCC, DL, VT, TST, RHS, N->getOperand(2));
18480 // setcc (iN (bitcast (vNi1 X))), 0, (eq|ne)
18481 // ==> setcc (iN (zext (i1 (vecreduce_or (vNi1 X))))), 0, (eq|ne)
18482 if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
18483 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && isNullConstant(RHS) &&
18484 LHS->getOpcode() == ISD::BITCAST) {
18485 EVT ToVT = LHS->getValueType(0);
18486 EVT FromVT = LHS->getOperand(0).getValueType();
18487 if (FromVT.isFixedLengthVector() &&
18488 FromVT.getVectorElementType() == MVT::i1) {
18489 LHS = DAG.getNode(ISD::VECREDUCE_OR, DL, MVT::i1, LHS->getOperand(0));
18490 LHS = DAG.getNode(ISD::ZERO_EXTEND, DL, ToVT, LHS);
18491 return DAG.getSetCC(DL, VT, LHS, RHS, Cond);
18498 // Replace a flag-setting operator (eg ANDS) with the generic version
18499 // (eg AND) if the flag is unused.
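// e.g. (illustrative): (ANDS x, y) whose NZCV result has no users becomes
// (AND x, y); an already existing identical (AND x, y) is instead CSE'd onto
// the ANDS value result.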
18500 static SDValue performFlagSettingCombine(SDNode *N,
18501 TargetLowering::DAGCombinerInfo &DCI,
18502 unsigned GenericOpcode) {
SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
18505 SDValue RHS = N->getOperand(1);
18506 EVT VT = N->getValueType(0);
18508 // If the flag result isn't used, convert back to a generic opcode.
18509 if (!N->hasAnyUseOfValue(1)) {
18510 SDValue Res = DCI.DAG.getNode(GenericOpcode, DL, VT, N->ops());
18511 return DCI.DAG.getMergeValues({Res, DCI.DAG.getConstant(0, DL, MVT::i32)},
18515 // Combine identical generic nodes into this node, re-using the result.
18516 if (SDNode *Generic = DCI.DAG.getNodeIfExists(
18517 GenericOpcode, DCI.DAG.getVTList(VT), {LHS, RHS}))
18518 DCI.CombineTo(Generic, SDValue(N, 0));
18523 static SDValue performSetCCPunpkCombine(SDNode *N, SelectionDAG &DAG) {
18524 // setcc_merge_zero pred
18525 // (sign_extend (extract_subvector (setcc_merge_zero ... pred ...))), 0, ne
18526 // => extract_subvector (inner setcc_merge_zero)
18527 SDValue Pred = N->getOperand(0);
18528 SDValue LHS = N->getOperand(1);
18529 SDValue RHS = N->getOperand(2);
18530 ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
18532 if (Cond != ISD::SETNE || !isZerosVector(RHS.getNode()) ||
18533 LHS->getOpcode() != ISD::SIGN_EXTEND)
18536 SDValue Extract = LHS->getOperand(0);
18537 if (Extract->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
18538 Extract->getValueType(0) != N->getValueType(0) ||
18539 Extract->getConstantOperandVal(1) != 0)
18542 SDValue InnerSetCC = Extract->getOperand(0);
18543 if (InnerSetCC->getOpcode() != AArch64ISD::SETCC_MERGE_ZERO)
18546 // By this point we've effectively got
18547 // zero_inactive_lanes_and_trunc_i1(sext_i1(A)). If we can prove A's inactive
18548 // lanes are already zero then the trunc(sext()) sequence is redundant and we
18549 // can operate on A directly.
18550 SDValue InnerPred = InnerSetCC.getOperand(0);
18551 if (Pred.getOpcode() == AArch64ISD::PTRUE &&
18552 InnerPred.getOpcode() == AArch64ISD::PTRUE &&
18553 Pred.getConstantOperandVal(0) == InnerPred.getConstantOperandVal(0) &&
18554 Pred->getConstantOperandVal(0) >= AArch64SVEPredPattern::vl1 &&
Pred->getConstantOperandVal(0) <= AArch64SVEPredPattern::vl256)
    return Extract;

  return SDValue();
}

static SDValue
18562 performSetccMergeZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
18563 assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
18564 "Unexpected opcode!");
18566 SelectionDAG &DAG = DCI.DAG;
18567 SDValue Pred = N->getOperand(0);
18568 SDValue LHS = N->getOperand(1);
18569 SDValue RHS = N->getOperand(2);
18570 ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
18572 if (SDValue V = performSetCCPunpkCombine(N, DAG))
18575 if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
18576 LHS->getOpcode() == ISD::SIGN_EXTEND &&
18577 LHS->getOperand(0)->getValueType(0) == N->getValueType(0)) {
18578 // setcc_merge_zero(
18579 // pred, extend(setcc_merge_zero(pred, ...)), != splat(0))
18580 // => setcc_merge_zero(pred, ...)
18581 if (LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
18582 LHS->getOperand(0)->getOperand(0) == Pred)
18583 return LHS->getOperand(0);
18585 // setcc_merge_zero(
18586 // all_active, extend(nxvNi1 ...), != splat(0))
18588 if (isAllActivePredicate(DAG, Pred))
18589 return LHS->getOperand(0);
18591 // setcc_merge_zero(
18592 // pred, extend(nxvNi1 ...), != splat(0))
18593 // -> nxvNi1 and(pred, ...)
18594 if (DCI.isAfterLegalizeDAG())
18595 // Do this after legalization to allow more folds on setcc_merge_zero
18596 // to be recognized.
18597 return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
18598 LHS->getOperand(0), Pred);
18604 // Optimize some simple tbz/tbnz cases. Returns the new operand and bit to test
18605 // as well as whether the test should be inverted. This code is required to
18606 // catch these cases (as opposed to standard dag combines) because
18607 // AArch64ISD::TBZ is matched during legalization.
18608 static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
18609 SelectionDAG &DAG) {
18611 if (!Op->hasOneUse())
18614 // We don't handle undef/constant-fold cases below, as they should have
18615 // already been taken care of (e.g. and of 0, test of undefined shifted bits,
18618 // (tbz (trunc x), b) -> (tbz x, b)
18619 // This case is just here to enable more of the below cases to be caught.
18620 if (Op->getOpcode() == ISD::TRUNCATE &&
18621 Bit < Op->getValueType(0).getSizeInBits()) {
18622 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18625 // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
18626 if (Op->getOpcode() == ISD::ANY_EXTEND &&
18627 Bit < Op->getOperand(0).getValueSizeInBits()) {
18628 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18631 if (Op->getNumOperands() != 2)
18634 auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1));
18638 switch (Op->getOpcode()) {
18642 // (tbz (and x, m), b) -> (tbz x, b)
18644 if ((C->getZExtValue() >> Bit) & 1)
18645 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18648 // (tbz (shl x, c), b) -> (tbz x, b-c)
18650 if (C->getZExtValue() <= Bit &&
18651 (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
18652 Bit = Bit - C->getZExtValue();
18653 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18657 // (tbz (sra x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits in x
18659 Bit = Bit + C->getZExtValue();
18660 if (Bit >= Op->getValueType(0).getSizeInBits())
18661 Bit = Op->getValueType(0).getSizeInBits() - 1;
18662 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18664 // (tbz (srl x, c), b) -> (tbz x, b+c)
18666 if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
18667 Bit = Bit + C->getZExtValue();
18668 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18672 // (tbz (xor x, -1), b) -> (tbnz x, b)
if ((C->getZExtValue() >> Bit) & 1)
      Invert = !Invert;
    return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
  }
}
18680 // Optimize test single bit zero/non-zero and branch.
18681 static SDValue performTBZCombine(SDNode *N,
18682 TargetLowering::DAGCombinerInfo &DCI,
18683 SelectionDAG &DAG) {
18684 unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
18685 bool Invert = false;
18686 SDValue TestSrc = N->getOperand(1);
18687 SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG);
if (TestSrc == NewTestSrc)
    return SDValue();

  unsigned NewOpc = N->getOpcode();
  if (Invert) {
    if (NewOpc == AArch64ISD::TBZ)
      NewOpc = AArch64ISD::TBNZ;
    else {
      assert(NewOpc == AArch64ISD::TBNZ);
      NewOpc = AArch64ISD::TBZ;
    }
  }

  SDLoc DL(N);
  return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc,
18704 DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3));
// Swap vselect operands where it may allow a predicated operation to achieve
// the `sel`.
//
18710 // (vselect (setcc ( condcode) (_) (_)) (a) (op (a) (b)))
18711 // => (vselect (setcc (!condcode) (_) (_)) (op (a) (b)) (a))
18712 static SDValue trySwapVSelectOperands(SDNode *N, SelectionDAG &DAG) {
18713 auto SelectA = N->getOperand(1);
18714 auto SelectB = N->getOperand(2);
18715 auto NTy = N->getValueType(0);
18717 if (!NTy.isScalableVector())
18719 SDValue SetCC = N->getOperand(0);
18720 if (SetCC.getOpcode() != ISD::SETCC || !SetCC.hasOneUse())
18723 switch (SelectB.getOpcode()) {
18731 if (SelectA != SelectB.getOperand(0))
18734 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
18735 ISD::CondCode InverseCC =
18736 ISD::getSetCCInverse(CC, SetCC.getOperand(0).getValueType());
18737 auto InverseSetCC =
18738 DAG.getSetCC(SDLoc(SetCC), SetCC.getValueType(), SetCC.getOperand(0),
18739 SetCC.getOperand(1), InverseCC);
18741 return DAG.getNode(ISD::VSELECT, SDLoc(N), NTy,
18742 {InverseSetCC, SelectB, SelectA});
18745 // vselect (v1i1 setcc) ->
18746 // vselect (v1iXX setcc) (XX is the size of the compared operand type)
18747 // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as
18748 // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine
18750 static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {
18751 if (auto SwapResult = trySwapVSelectOperands(N, DAG))
18754 SDValue N0 = N->getOperand(0);
18755 EVT CCVT = N0.getValueType();
18757 if (isAllActivePredicate(DAG, N0))
18758 return N->getOperand(1);
18760 if (isAllInactivePredicate(N0))
18761 return N->getOperand(2);
18763 // Check for sign pattern (VSELECT setgt, iN lhs, -1, 1, -1) and transform
18764 // into (OR (ASR lhs, N-1), 1), which requires less instructions for the
18765 // supported types.
18766 SDValue SetCC = N->getOperand(0);
18767 if (SetCC.getOpcode() == ISD::SETCC &&
18768 SetCC.getOperand(2) == DAG.getCondCode(ISD::SETGT)) {
18769 SDValue CmpLHS = SetCC.getOperand(0);
18770 EVT VT = CmpLHS.getValueType();
18771 SDNode *CmpRHS = SetCC.getOperand(1).getNode();
18772 SDNode *SplatLHS = N->getOperand(1).getNode();
18773 SDNode *SplatRHS = N->getOperand(2).getNode();
APInt SplatLHSVal;
    if (CmpLHS.getValueType() == N->getOperand(1).getValueType() &&
        VT.isSimple() &&
        is_contained(
            makeArrayRef({MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
18779 MVT::v2i32, MVT::v4i32, MVT::v2i64}),
18780 VT.getSimpleVT().SimpleTy) &&
18781 ISD::isConstantSplatVector(SplatLHS, SplatLHSVal) &&
18782 SplatLHSVal.isOne() && ISD::isConstantSplatVectorAllOnes(CmpRHS) &&
18783 ISD::isConstantSplatVectorAllOnes(SplatRHS)) {
18784 unsigned NumElts = VT.getVectorNumElements();
18785 SmallVector<SDValue, 8> Ops(
18786 NumElts, DAG.getConstant(VT.getScalarSizeInBits() - 1, SDLoc(N),
18787 VT.getScalarType()));
18788 SDValue Val = DAG.getBuildVector(VT, SDLoc(N), Ops);
18790 auto Shift = DAG.getNode(ISD::SRA, SDLoc(N), VT, CmpLHS, Val);
18791 auto Or = DAG.getNode(ISD::OR, SDLoc(N), VT, Shift, N->getOperand(1));
18796 if (N0.getOpcode() != ISD::SETCC ||
18797 CCVT.getVectorElementCount() != ElementCount::getFixed(1) ||
18798 CCVT.getVectorElementType() != MVT::i1)
18801 EVT ResVT = N->getValueType(0);
18802 EVT CmpVT = N0.getOperand(0).getValueType();
// Only combine when the result type is of the same size as the compared
  // operands.
  if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
    return SDValue();
18808 SDValue IfTrue = N->getOperand(1);
18809 SDValue IfFalse = N->getOperand(2);
18810 SetCC = DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
18811 N0.getOperand(0), N0.getOperand(1),
18812 cast<CondCodeSDNode>(N0.getOperand(2))->get());
18813 return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
18817 /// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with
18818 /// the compare-mask instructions rather than going via NZCV, even if LHS and
18819 /// RHS are really scalar. This replaces any scalar setcc in the above pattern
18820 /// with a vector one followed by a DUP shuffle on the result.
18821 static SDValue performSelectCombine(SDNode *N,
18822 TargetLowering::DAGCombinerInfo &DCI) {
18823 SelectionDAG &DAG = DCI.DAG;
18824 SDValue N0 = N->getOperand(0);
18825 EVT ResVT = N->getValueType(0);
18827 if (N0.getOpcode() != ISD::SETCC)
18830 if (ResVT.isScalableVector())
18833 // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered
18834 // scalar SetCCResultType. We also don't expect vectors, because we assume
18835 // that selects fed by vector SETCCs are canonicalized to VSELECT.
18836 assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) &&
18837 "Scalar-SETCC feeding SELECT has unexpected result type!");
18839 // If NumMaskElts == 0, the comparison is larger than select result. The
18840 // largest real NEON comparison is 64-bits per lane, which means the result is
18841 // at most 32-bits and an illegal vector. Just bail out for now.
18842 EVT SrcVT = N0.getOperand(0).getValueType();
18844 // Don't try to do this optimization when the setcc itself has i1 operands.
18845 // There are no legal vectors of i1, so this would be pointless.
18846 if (SrcVT == MVT::i1)
18849 int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits();
18850 if (!ResVT.isVector() || NumMaskElts == 0)
18853 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts);
18854 EVT CCVT = SrcVT.changeVectorElementTypeToInteger();
18856 // Also bail out if the vector CCVT isn't the same size as ResVT.
18857 // This can happen if the SETCC operand size doesn't divide the ResVT size
18858 // (e.g., f64 vs v3f32).
18859 if (CCVT.getSizeInBits() != ResVT.getSizeInBits())
18862 // Make sure we didn't create illegal types, if we're not supposed to.
18863 assert(DCI.isBeforeLegalize() ||
18864 DAG.getTargetLoweringInfo().isTypeLegal(SrcVT));
// First perform a vector comparison, where lane 0 is the one we're interested
  // in.
  SDLoc DL(N0);
  SDValue LHS =
      DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0));
  SDValue RHS =
      DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1));
18873 SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2));
18875 // Now duplicate the comparison mask we want across all other lanes.
18876 SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0);
18877 SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask);
18878 Mask = DAG.getNode(ISD::BITCAST, DL,
18879 ResVT.changeVectorElementTypeToInteger(), Mask);
18881 return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2));
18884 static SDValue performDUPCombine(SDNode *N,
18885 TargetLowering::DAGCombinerInfo &DCI) {
18886 EVT VT = N->getValueType(0);
18887 // If "v2i32 DUP(x)" and "v4i32 DUP(x)" both exist, use an extract from the
18888 // 128bit vector version.
18889 if (VT.is64BitVector() && DCI.isAfterLegalizeDAG()) {
18890 EVT LVT = VT.getDoubleNumVectorElementsVT(*DCI.DAG.getContext());
18891 if (SDNode *LN = DCI.DAG.getNodeIfExists(
18892 N->getOpcode(), DCI.DAG.getVTList(LVT), {N->getOperand(0)})) {
18894 return DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SDValue(LN, 0),
18895 DCI.DAG.getConstant(0, DL, MVT::i64));
18899 return performPostLD1Combine(N, DCI, false);
18902 /// Get rid of unnecessary NVCASTs (that don't change the type).
18903 static SDValue performNVCASTCombine(SDNode *N) {
18904 if (N->getValueType(0) == N->getOperand(0).getValueType())
18905 return N->getOperand(0);
18910 // If all users of the globaladdr are of the form (globaladdr + constant), find
18911 // the smallest constant, fold it into the globaladdr's offset and rewrite the
18912 // globaladdr as (globaladdr + constant) - constant.
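// For example (illustrative): if every use is (gaddr + 12) or (gaddr + 20),
// fold 12 into the global's offset and emit ((gaddr + 12) - 12), so both uses
// become small adds off one materialised address.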
18913 static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG,
18914 const AArch64Subtarget *Subtarget,
18915 const TargetMachine &TM) {
18916 auto *GN = cast<GlobalAddressSDNode>(N);
18917 if (Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) !=
18918 AArch64II::MO_NO_FLAG)
18921 uint64_t MinOffset = -1ull;
for (SDNode *N : GN->uses()) {
    if (N->getOpcode() != ISD::ADD)
      return SDValue();
    auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0));
    if (!C)
      C = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!C)
      return SDValue();
    MinOffset = std::min(MinOffset, C->getZExtValue());
  }
18932 uint64_t Offset = MinOffset + GN->getOffset();
18934 // Require that the new offset is larger than the existing one. Otherwise, we
18935 // can end up oscillating between two possible DAGs, for example,
18936 // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1).
18937 if (Offset <= uint64_t(GN->getOffset()))
18940 // Check whether folding this offset is legal. It must not go out of bounds of
18941 // the referenced object to avoid violating the code model, and must be
18942 // smaller than 2^20 because this is the largest offset expressible in all
18943 // object formats. (The IMAGE_REL_ARM64_PAGEBASE_REL21 relocation in COFF
18944 // stores an immediate signed 21 bit offset.)
18946 // This check also prevents us from folding negative offsets, which will end
18947 // up being treated in the same way as large positive ones. They could also
18948 // cause code model violations, and aren't really common enough to matter.
18949 if (Offset >= (1 << 20))
18952 const GlobalValue *GV = GN->getGlobal();
18953 Type *T = GV->getValueType();
18954 if (!T->isSized() ||
Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T))
    return SDValue();

  SDLoc DL(GN);
18959 SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset);
18960 return DAG.getNode(ISD::SUB, DL, MVT::i64, Result,
18961 DAG.getConstant(MinOffset, DL, MVT::i64));
// Turns the vector of indices into a vector of byte offsets by scaling Offset
18965 // by (BitWidth / 8).
18966 static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset,
18967 SDLoc DL, unsigned BitWidth) {
18968 assert(Offset.getValueType().isScalableVector() &&
18969 "This method is only for scalable vectors of offsets");
18971 SDValue Shift = DAG.getConstant(Log2_32(BitWidth / 8), DL, MVT::i64);
18972 SDValue SplatShift = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Shift);
18974 return DAG.getNode(ISD::SHL, DL, MVT::nxv2i64, Offset, SplatShift);
18977 /// Check if the value of \p OffsetInBytes can be used as an immediate for
18978 /// the gather load/prefetch and scatter store instructions with vector base and
18979 /// immediate offset addressing mode:
18981 /// [<Zn>.[S|D]{, #<imm>}]
18983 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
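/// For example (illustrative), with 4-byte elements any byte offset in
/// {0, 4, 8, ..., 124} is encodable.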
18984 inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes,
18985 unsigned ScalarSizeInBytes) {
18986 // The immediate is not a multiple of the scalar size.
if (OffsetInBytes % ScalarSizeInBytes)
    return false;

  // The immediate is out of range.
  if (OffsetInBytes / ScalarSizeInBytes > 31)
    return false;

  return true;
}
18997 /// Check if the value of \p Offset represents a valid immediate for the SVE
/// gather load/prefetch and scatter store instructions with vector base and
18999 /// immediate offset addressing mode:
19001 /// [<Zn>.[S|D]{, #<imm>}]
19003 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
19004 static bool isValidImmForSVEVecImmAddrMode(SDValue Offset,
19005 unsigned ScalarSizeInBytes) {
19006 ConstantSDNode *OffsetConst = dyn_cast<ConstantSDNode>(Offset.getNode());
19007 return OffsetConst && isValidImmForSVEVecImmAddrMode(
19008 OffsetConst->getZExtValue(), ScalarSizeInBytes);
19011 static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG,
unsigned Opcode,
                                          bool OnlyPackedOffsets = true) {
19014 const SDValue Src = N->getOperand(2);
19015 const EVT SrcVT = Src->getValueType(0);
19016 assert(SrcVT.isScalableVector() &&
19017 "Scatter stores are only possible for SVE vectors");
SDLoc DL(N);
  MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT();
19022 // Make sure that source data will fit into an SVE register
19023 if (SrcVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
19026 // For FPs, ACLE only supports _packed_ single and double precision types.
19027 if (SrcElVT.isFloatingPoint())
19028 if ((SrcVT != MVT::nxv4f32) && (SrcVT != MVT::nxv2f64))
19031 // Depending on the addressing mode, this is either a pointer or a vector of
19032 // pointers (that fits into one register)
19033 SDValue Base = N->getOperand(4);
19034 // Depending on the addressing mode, this is either a single offset or a
19035 // vector of offsets (that fits into one register)
19036 SDValue Offset = N->getOperand(5);
19038 // For "scalar + vector of indices", just scale the indices. This only
// applies to non-temporal scatters because there's no instruction that takes
  // indices.
  if (Opcode == AArch64ISD::SSTNT1_INDEX_PRED) {
    Offset =
        getScaledOffsetForBitWidth(DAG, Offset, DL, SrcElVT.getSizeInBits());
19044 Opcode = AArch64ISD::SSTNT1_PRED;
// In the case of non-temporal scatter stores there's only one SVE instruction
19048 // per data-size: "scalar + vector", i.e.
19049 // * stnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0]
19050 // Since we do have intrinsics that allow the arguments to be in a different
19051 // order, we may need to swap them to match the spec.
19052 if (Opcode == AArch64ISD::SSTNT1_PRED && Offset.getValueType().isVector())
19053 std::swap(Base, Offset);
19055 // SST1_IMM requires that the offset is an immediate that is:
19056 // * a multiple of #SizeInBytes,
19057 // * in the range [0, 31 x #SizeInBytes],
19058 // where #SizeInBytes is the size in bytes of the stored items. For
19059 // immediates outside that range and non-immediate scalar offsets use SST1 or
19060 // SST1_UXTW instead.
19061 if (Opcode == AArch64ISD::SST1_IMM_PRED) {
19062 if (!isValidImmForSVEVecImmAddrMode(Offset,
19063 SrcVT.getScalarSizeInBits() / 8)) {
19064 if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
19065 Opcode = AArch64ISD::SST1_UXTW_PRED;
19067 Opcode = AArch64ISD::SST1_PRED;
19069 std::swap(Base, Offset);
19073 auto &TLI = DAG.getTargetLoweringInfo();
19074 if (!TLI.isTypeLegal(Base.getValueType()))
19077 // Some scatter store variants allow unpacked offsets, but only as nxv2i32
19078 // vectors. These are implicitly sign (sxtw) or zero (zxtw) extend to
19079 // nxv2i64. Legalize accordingly.
19080 if (!OnlyPackedOffsets &&
19081 Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
19082 Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
19084 if (!TLI.isTypeLegal(Offset.getValueType()))
19087 // Source value type that is representable in hardware
19088 EVT HwSrcVt = getSVEContainerType(SrcVT);
19090 // Keep the original type of the input data to store - this is needed to be
19091 // able to select the correct instruction, e.g. ST1B, ST1H, ST1W and ST1D. For
19092 // FP values we want the integer equivalent, so just use HwSrcVt.
19093 SDValue InputVT = DAG.getValueType(SrcVT);
19094 if (SrcVT.isFloatingPoint())
19095 InputVT = DAG.getValueType(HwSrcVt);
19097 SDVTList VTs = DAG.getVTList(MVT::Other);
SDValue SrcNew;
  if (Src.getValueType().isFloatingPoint())
    SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Src);
  else
    SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Src);

  SDValue Ops[] = {N->getOperand(0), // Chain
                   SrcNew,
                   N->getOperand(3), // Pg
                   Base,
                   Offset,
                   InputVT};

  return DAG.getNode(Opcode, DL, VTs, Ops);
}
19115 static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG,
19117 bool OnlyPackedOffsets = true) {
19118 const EVT RetVT = N->getValueType(0);
19119 assert(RetVT.isScalableVector() &&
19120 "Gather loads are only possible for SVE vectors");
19124 // Make sure that the loaded data will fit into an SVE register
19125 if (RetVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
19128 // Depending on the addressing mode, this is either a pointer or a vector of
19129 // pointers (that fits into one register)
19130 SDValue Base = N->getOperand(3);
19131 // Depending on the addressing mode, this is either a single offset or a
19132 // vector of offsets (that fits into one register)
19133 SDValue Offset = N->getOperand(4);
19135 // For "scalar + vector of indices", just scale the indices. This only
19136 // applies to non-temporal gathers because there's no instruction that takes
19137 // indices.
19138 if (Opcode == AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) {
19139 Offset = getScaledOffsetForBitWidth(DAG, Offset, DL,
19140 RetVT.getScalarSizeInBits());
19141 Opcode = AArch64ISD::GLDNT1_MERGE_ZERO;
19144 // In the case of non-temporal gather loads there's only one SVE instruction
19145 // per data-size: "scalar + vector", i.e.
19146 // * ldnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0]
19147 // Since we do have intrinsics that allow the arguments to be in a different
19148 // order, we may need to swap them to match the spec.
19149 if (Opcode == AArch64ISD::GLDNT1_MERGE_ZERO &&
19150 Offset.getValueType().isVector())
19151 std::swap(Base, Offset);
19153 // GLD{FF}1_IMM requires that the offset is an immediate that is:
19154 // * a multiple of #SizeInBytes,
19155 // * in the range [0, 31 x #SizeInBytes],
19156 // where #SizeInBytes is the size in bytes of the loaded items. For
19157 // immediates outside that range and non-immediate scalar offsets use
19158 // GLD1_MERGE_ZERO or GLD1_UXTW_MERGE_ZERO instead.
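// For example (illustrative): a 64-bit element gather of the form
//   ld1d { z0.d }, p0/z, [z1.d, #imm]
// only encodes #imm as a multiple of 8 in [0, 248]; any other offset is
// lowered via the "vector base + scalar offset" forms selected below.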
19159 if (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO ||
19160 Opcode == AArch64ISD::GLDFF1_IMM_MERGE_ZERO) {
19161 if (!isValidImmForSVEVecImmAddrMode(Offset,
19162 RetVT.getScalarSizeInBits() / 8)) {
19163 if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
19164 Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
19165 ? AArch64ISD::GLD1_UXTW_MERGE_ZERO
19166 : AArch64ISD::GLDFF1_UXTW_MERGE_ZERO;
19167 else
19168 Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
19169 ? AArch64ISD::GLD1_MERGE_ZERO
19170 : AArch64ISD::GLDFF1_MERGE_ZERO;
19172 std::swap(Base, Offset);
19176 auto &TLI = DAG.getTargetLoweringInfo();
19177 if (!TLI.isTypeLegal(Base.getValueType()))
19180 // Some gather load variants allow unpacked offsets, but only as nxv2i32
19181 // vectors. These are implicitly sign (sxtw) or zero (uxtw) extended to
19182 // nxv2i64. Legalize accordingly.
19183 if (!OnlyPackedOffsets &&
19184 Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
19185 Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
19187 // Return value type that is representable in hardware
19188 EVT HwRetVt = getSVEContainerType(RetVT);
19190 // Keep the original output value type around - this is needed to be able to
19191 // select the correct instruction, e.g. LD1B, LD1H, LD1W and LD1D. For FP
19192 // values we want the integer equivalent, so just use HwRetVT.
19193 SDValue OutVT = DAG.getValueType(RetVT);
19194 if (RetVT.isFloatingPoint())
19195 OutVT = DAG.getValueType(HwRetVt);
19197 SDVTList VTs = DAG.getVTList(HwRetVt, MVT::Other);
19198 SDValue Ops[] = {N->getOperand(0), // Chain
19199 N->getOperand(2), // Pg
19200 Base, Offset, OutVT};
19202 SDValue Load = DAG.getNode(Opcode, DL, VTs, Ops);
19203 SDValue LoadChain = SDValue(Load.getNode(), 1);
19205 if (RetVT.isInteger() && (RetVT != HwRetVt))
19206 Load = DAG.getNode(ISD::TRUNCATE, DL, RetVT, Load.getValue(0));
19208 // If the original return value was FP, bitcast accordingly. Doing it here
19209 // means that we can avoid adding TableGen patterns for FPs.
19210 if (RetVT.isFloatingPoint())
19211 Load = DAG.getNode(ISD::BITCAST, DL, RetVT, Load.getValue(0));
19213 return DAG.getMergeValues({Load, LoadChain}, DL);
19217 performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
19218 SelectionDAG &DAG) {
19220 SDValue Src = N->getOperand(0);
19221 unsigned Opc = Src->getOpcode();
19223 // Sign extend of an unsigned unpack -> signed unpack
19224 if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
19226 unsigned SOpc = Opc == AArch64ISD::UUNPKHI ? AArch64ISD::SUNPKHI
19227 : AArch64ISD::SUNPKLO;
19229 // Push the sign extend to the operand of the unpack
19230 // This is necessary where, for example, the operand of the unpack
19231 // is another unpack:
19232 // 4i32 sign_extend_inreg (4i32 uunpklo(8i16 uunpklo (16i8 opnd)), from 4i8)
19234 // -> 4i32 sunpklo (8i16 sign_extend_inreg(8i16 uunpklo (16i8 opnd), from 8i8))
19236 // -> 4i32 sunpklo(8i16 sunpklo(16i8 opnd))
19237 SDValue ExtOp = Src->getOperand(0);
19238 auto VT = cast<VTSDNode>(N->getOperand(1))->getVT();
19239 EVT EltTy = VT.getVectorElementType();
19242 assert((EltTy == MVT::i8 || EltTy == MVT::i16 || EltTy == MVT::i32) &&
19243 "Sign extending from an invalid type");
19245 EVT ExtVT = VT.getDoubleNumVectorElementsVT(*DAG.getContext());
19247 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ExtOp.getValueType(),
19248 ExtOp, DAG.getValueType(ExtVT));
19250 return DAG.getNode(SOpc, DL, N->getValueType(0), Ext);
19253 if (DCI.isBeforeLegalizeOps())
19256 if (!EnableCombineMGatherIntrinsics)
19259 // SVE load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates
19260 // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes.
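// For example (illustrative, in DAG shorthand):
//   t0: nxv2i64,ch = GLD1_MERGE_ZERO ch, pg, base, offsets, nxv2i8
//   t1: nxv2i64    = sign_extend_inreg t0, nxv2i8
// folds to a single sign-extending gather:
//   t1: nxv2i64,ch = GLD1S_MERGE_ZERO ch, pg, base, offsets, nxv2i8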
19262 unsigned MemVTOpNum = 4;
19264 case AArch64ISD::LD1_MERGE_ZERO:
19265 NewOpc = AArch64ISD::LD1S_MERGE_ZERO;
19268 case AArch64ISD::LDNF1_MERGE_ZERO:
19269 NewOpc = AArch64ISD::LDNF1S_MERGE_ZERO;
19272 case AArch64ISD::LDFF1_MERGE_ZERO:
19273 NewOpc = AArch64ISD::LDFF1S_MERGE_ZERO;
19276 case AArch64ISD::GLD1_MERGE_ZERO:
19277 NewOpc = AArch64ISD::GLD1S_MERGE_ZERO;
19279 case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
19280 NewOpc = AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
19282 case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
19283 NewOpc = AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
19285 case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
19286 NewOpc = AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
19288 case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
19289 NewOpc = AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
19291 case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
19292 NewOpc = AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
19294 case AArch64ISD::GLD1_IMM_MERGE_ZERO:
19295 NewOpc = AArch64ISD::GLD1S_IMM_MERGE_ZERO;
19297 case AArch64ISD::GLDFF1_MERGE_ZERO:
19298 NewOpc = AArch64ISD::GLDFF1S_MERGE_ZERO;
19300 case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
19301 NewOpc = AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO;
19303 case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
19304 NewOpc = AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO;
19306 case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
19307 NewOpc = AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO;
19309 case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
19310 NewOpc = AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO;
19312 case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
19313 NewOpc = AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO;
19315 case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
19316 NewOpc = AArch64ISD::GLDFF1S_IMM_MERGE_ZERO;
19318 case AArch64ISD::GLDNT1_MERGE_ZERO:
19319 NewOpc = AArch64ISD::GLDNT1S_MERGE_ZERO;
19325 EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
19326 EVT SrcMemVT = cast<VTSDNode>(Src->getOperand(MemVTOpNum))->getVT();
19328 if ((SignExtSrcVT != SrcMemVT) || !Src.hasOneUse())
19331 EVT DstVT = N->getValueType(0);
19332 SDVTList VTs = DAG.getVTList(DstVT, MVT::Other);
19334 SmallVector<SDValue, 5> Ops;
19335 for (unsigned I = 0; I < Src->getNumOperands(); ++I)
19336 Ops.push_back(Src->getOperand(I));
19338 SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
19339 DCI.CombineTo(N, ExtLoad);
19340 DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1));
19342 // Return N so it doesn't get rechecked
19343 return SDValue(N, 0);
19346 /// Legalize the gather prefetch (scalar + vector addressing mode) when the
19347 /// offset vector is an unpacked 32-bit scalable vector. The other cases (Offset
19348 /// != nxv2i32) do not need legalization.
19349 static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {
19350 const unsigned OffsetPos = 4;
19351 SDValue Offset = N->getOperand(OffsetPos);
19353 // Not an unpacked vector, bail out.
19354 if (Offset.getValueType().getSimpleVT().SimpleTy != MVT::nxv2i32)
19357 // Extend the unpacked offset vector to 64-bit lanes.
19359 Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset);
19360 SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
19361 // Replace the offset operand with the 64-bit one.
19362 Ops[OffsetPos] = Offset;
19364 return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
19367 /// Combines a node carrying the intrinsic
19368 /// `aarch64_sve_prf<T>_gather_scalar_offset` into a node that uses
19369 /// `aarch64_sve_prfb_gather_uxtw_index` when the scalar offset passed to
19370 /// `aarch64_sve_prf<T>_gather_scalar_offset` is not a valid immediate for the
19371 /// SVE gather prefetch instruction with vector plus immediate addressing mode.
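/// For example (illustrative): if the scalar offset of an
/// aarch64_sve_prfb_gather_scalar_offset node is 40, it cannot be encoded as a
/// vector-plus-immediate prefetch (prfb only accepts immediates 0-31), so the
/// scalar and the vector of bases are swapped and the intrinsic is remapped to
/// aarch64_sve_prfb_gather_uxtw_index.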
19372 static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
19373 unsigned ScalarSizeInBytes) {
19374 const unsigned ImmPos = 4, OffsetPos = 3;
19375 // No need to combine the node if the immediate is valid...
19376 if (isValidImmForSVEVecImmAddrMode(N->getOperand(ImmPos), ScalarSizeInBytes))
19380 // ...otherwise swap the vector base with the scalar offset...
19380 SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
19381 std::swap(Ops[ImmPos], Ops[OffsetPos]);
19382 // ...and remap the intrinsic `aarch64_sve_prf<T>_gather_scalar_offset` to
19383 // `aarch64_sve_prfb_gather_uxtw_index`.
19385 Ops[1] = DAG.getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index, DL,
19388 return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
19391 // Return true if the vector operation can guarantee that only the first lane
19392 // of its result contains data, with all bits in other lanes set to zero.
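// For example (illustrative): a predicated reduction such as
//   t0: nxv2i64 = UADDV_PRED pg, z0
// materialises its scalar result in lane 0 and leaves lanes 1-N as zero.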
19393 static bool isLanes1toNKnownZero(SDValue Op) {
19394 switch (Op.getOpcode()) {
19397 case AArch64ISD::ANDV_PRED:
19398 case AArch64ISD::EORV_PRED:
19399 case AArch64ISD::FADDA_PRED:
19400 case AArch64ISD::FADDV_PRED:
19401 case AArch64ISD::FMAXNMV_PRED:
19402 case AArch64ISD::FMAXV_PRED:
19403 case AArch64ISD::FMINNMV_PRED:
19404 case AArch64ISD::FMINV_PRED:
19405 case AArch64ISD::ORV_PRED:
19406 case AArch64ISD::SADDV_PRED:
19407 case AArch64ISD::SMAXV_PRED:
19408 case AArch64ISD::SMINV_PRED:
19409 case AArch64ISD::UADDV_PRED:
19410 case AArch64ISD::UMAXV_PRED:
19411 case AArch64ISD::UMINV_PRED:
19416 static SDValue removeRedundantInsertVectorElt(SDNode *N) {
19417 assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
19418 SDValue InsertVec = N->getOperand(0);
19419 SDValue InsertElt = N->getOperand(1);
19420 SDValue InsertIdx = N->getOperand(2);
19422 // We only care about inserts into the first element...
19423 if (!isNullConstant(InsertIdx))
19425 // ...of a zero'd vector...
19426 if (!ISD::isConstantSplatVectorAllZeros(InsertVec.getNode()))
19428 // ...where the inserted data was previously extracted...
19429 if (InsertElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
19432 SDValue ExtractVec = InsertElt.getOperand(0);
19433 SDValue ExtractIdx = InsertElt.getOperand(1);
19435 // ...from the first element of a vector.
19436 if (!isNullConstant(ExtractIdx))
19439 // If we get here we are effectively trying to zero lanes 1-N of a vector.
19441 // Ensure there's no type conversion going on.
19442 if (N->getValueType(0) != ExtractVec.getValueType())
19445 if (!isLanes1toNKnownZero(ExtractVec))
19448 // The explicit zeroing is redundant.
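// For example (illustrative, in DAG shorthand):
//   t1: nxv2i64 = UADDV_PRED pg, z0
//   t2: i64     = extract_vector_elt t1, 0
//   t3: nxv2i64 = insert_vector_elt <zero vector>, t2, 0
// t3 can simply be replaced by t1, since lanes 1-N of t1 are already zero.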
19453 performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
19454 if (SDValue Res = removeRedundantInsertVectorElt(N))
19457 return performPostLD1Combine(N, DCI, true);
19460 static SDValue performSVESpliceCombine(SDNode *N, SelectionDAG &DAG) {
19461 EVT Ty = N->getValueType(0);
19462 if (Ty.isInteger())
19465 EVT IntTy = Ty.changeVectorElementTypeToInteger();
19466 EVT ExtIntTy = getPackedSVEVectorVT(IntTy.getVectorElementCount());
19467 if (ExtIntTy.getVectorElementType().getScalarSizeInBits() <
19468 IntTy.getVectorElementType().getScalarSizeInBits())
19472 SDValue LHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(0)),
19474 SDValue RHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(1)),
19476 SDValue Idx = N->getOperand(2);
19477 SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ExtIntTy, LHS, RHS, Idx);
19478 SDValue Trunc = DAG.getAnyExtOrTrunc(Splice, DL, IntTy);
19479 return DAG.getBitcast(Ty, Trunc);
19482 static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
19483 TargetLowering::DAGCombinerInfo &DCI,
19484 const AArch64Subtarget *Subtarget) {
19485 SDValue N0 = N->getOperand(0);
19486 EVT VT = N->getValueType(0);
19488 // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
19489 if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND)
19492 // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
19493 // We purposefully don't care about legality of the nodes here as we know
19494 // they can be split down into something legal.
19495 if (DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(N0.getNode()) &&
19496 N0.hasOneUse() && Subtarget->useSVEForFixedLengthVectors() &&
19497 VT.isFixedLengthVector() &&
19498 VT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits()) {
19499 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
19500 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
19501 LN0->getChain(), LN0->getBasePtr(),
19502 N0.getValueType(), LN0->getMemOperand());
19503 DCI.CombineTo(N, ExtLoad);
19504 DCI.CombineTo(N0.getNode(),
19505 DAG.getNode(ISD::FP_ROUND, SDLoc(N0), N0.getValueType(),
19506 ExtLoad, DAG.getIntPtrConstant(1, SDLoc(N0))),
19507 ExtLoad.getValue(1));
19508 return SDValue(N, 0); // Return N so it doesn't get rechecked!
19514 static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
19515 const AArch64Subtarget *Subtarget,
19516 bool fixedSVEVectorVT) {
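// A minimal sketch of the expansion performed below (illustrative):
//   BSP(mask, in1, in2)  ==>  (in1 & mask) | (in2 & ~mask)
// i.e. result bits come from in1 where the mask is set and from in2 elsewhere.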
19517 EVT VT = N->getValueType(0);
19519 // Don't expand for SVE2
19520 if (!VT.isScalableVector() || Subtarget->hasSVE2() || Subtarget->hasSME())
19523 // Don't expand for NEON
19524 if (VT.isFixedLengthVector() && !fixedSVEVectorVT)
19529 SDValue Mask = N->getOperand(0);
19530 SDValue In1 = N->getOperand(1);
19531 SDValue In2 = N->getOperand(2);
19533 SDValue InvMask = DAG.getNOT(DL, Mask, VT);
19534 SDValue Sel = DAG.getNode(ISD::AND, DL, VT, Mask, In1);
19535 SDValue SelInv = DAG.getNode(ISD::AND, DL, VT, InvMask, In2);
19536 return DAG.getNode(ISD::OR, DL, VT, Sel, SelInv);
19539 static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) {
19540 EVT VT = N->getValueType(0);
19542 SDValue Insert = N->getOperand(0);
19543 if (Insert.getOpcode() != ISD::INSERT_SUBVECTOR)
19546 if (!Insert.getOperand(0).isUndef())
19549 uint64_t IdxInsert = Insert.getConstantOperandVal(2);
19550 uint64_t IdxDupLane = N->getConstantOperandVal(1);
19551 if (IdxInsert != 0 || IdxDupLane != 0)
19554 SDValue Bitcast = Insert.getOperand(1);
19555 if (Bitcast.getOpcode() != ISD::BITCAST)
19558 SDValue Subvec = Bitcast.getOperand(0);
19559 EVT SubvecVT = Subvec.getValueType();
19560 if (!SubvecVT.is128BitVector())
19562 EVT NewSubvecVT =
19563 getPackedSVEVectorVT(Subvec.getValueType().getVectorElementType());
19566 SDValue NewInsert =
19567 DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewSubvecVT,
19568 DAG.getUNDEF(NewSubvecVT), Subvec, Insert->getOperand(2));
19569 SDValue NewDuplane128 = DAG.getNode(AArch64ISD::DUPLANE128, DL, NewSubvecVT,
19570 NewInsert, N->getOperand(1));
19571 return DAG.getNode(ISD::BITCAST, DL, VT, NewDuplane128);
19574 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
19575 DAGCombinerInfo &DCI) const {
19576 SelectionDAG &DAG = DCI.DAG;
19577 switch (N->getOpcode()) {
19579 LLVM_DEBUG(dbgs() << "Custom combining: skipping\n");
19583 return performAddSubCombine(N, DCI, DAG);
19584 case ISD::BUILD_VECTOR:
19585 return performBuildVectorCombine(N, DCI, DAG);
19586 case AArch64ISD::ANDS:
19587 return performFlagSettingCombine(N, DCI, ISD::AND);
19588 case AArch64ISD::ADC:
19589 if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
19591 return foldADCToCINC(N, DAG);
19592 case AArch64ISD::SBC:
19593 return foldOverflowCheck(N, DAG, /* IsAdd */ false);
19594 case AArch64ISD::ADCS:
19595 if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
19597 return performFlagSettingCombine(N, DCI, AArch64ISD::ADC);
19598 case AArch64ISD::SBCS:
19599 if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ false))
19601 return performFlagSettingCombine(N, DCI, AArch64ISD::SBC);
19603 return performXorCombine(N, DAG, DCI, Subtarget);
19605 return performMulCombine(N, DAG, DCI, Subtarget);
19606 case ISD::SINT_TO_FP:
19607 case ISD::UINT_TO_FP:
19608 return performIntToFpCombine(N, DAG, Subtarget);
19609 case ISD::FP_TO_SINT:
19610 case ISD::FP_TO_UINT:
19611 case ISD::FP_TO_SINT_SAT:
19612 case ISD::FP_TO_UINT_SAT:
19613 return performFpToIntCombine(N, DAG, DCI, Subtarget);
19615 return performFDivCombine(N, DAG, DCI, Subtarget);
19617 return performORCombine(N, DCI, Subtarget);
19619 return performANDCombine(N, DCI);
19620 case ISD::INTRINSIC_WO_CHAIN:
19621 return performIntrinsicCombine(N, DCI, Subtarget);
19622 case ISD::ANY_EXTEND:
19623 case ISD::ZERO_EXTEND:
19624 case ISD::SIGN_EXTEND:
19625 return performExtendCombine(N, DCI, DAG);
19626 case ISD::SIGN_EXTEND_INREG:
19627 return performSignExtendInRegCombine(N, DCI, DAG);
19628 case ISD::CONCAT_VECTORS:
19629 return performConcatVectorsCombine(N, DCI, DAG);
19630 case ISD::EXTRACT_SUBVECTOR:
19631 return performExtractSubvectorCombine(N, DCI, DAG);
19632 case ISD::INSERT_SUBVECTOR:
19633 return performInsertSubvectorCombine(N, DCI, DAG);
19635 return performSelectCombine(N, DCI);
19637 return performVSelectCombine(N, DCI.DAG);
19639 return performSETCCCombine(N, DCI, DAG);
19641 if (performTBISimplification(N->getOperand(1), DCI, DAG))
19642 return SDValue(N, 0);
19645 return performSTORECombine(N, DCI, DAG, Subtarget);
19647 return performMSTORECombine(N, DCI, DAG, Subtarget);
19649 case ISD::MSCATTER:
19650 return performMaskedGatherScatterCombine(N, DCI, DAG);
19651 case ISD::VECTOR_SPLICE:
19652 return performSVESpliceCombine(N, DAG);
19653 case ISD::FP_EXTEND:
19654 return performFPExtendCombine(N, DAG, DCI, Subtarget);
19655 case AArch64ISD::BRCOND:
19656 return performBRCONDCombine(N, DCI, DAG);
19657 case AArch64ISD::TBNZ:
19658 case AArch64ISD::TBZ:
19659 return performTBZCombine(N, DCI, DAG);
19660 case AArch64ISD::CSEL:
19661 return performCSELCombine(N, DCI, DAG);
19662 case AArch64ISD::DUP:
19663 return performDUPCombine(N, DCI);
19664 case AArch64ISD::DUPLANE128:
19665 return performDupLane128Combine(N, DAG);
19666 case AArch64ISD::NVCAST:
19667 return performNVCASTCombine(N);
19668 case AArch64ISD::SPLICE:
19669 return performSpliceCombine(N, DAG);
19670 case AArch64ISD::UUNPKLO:
19671 case AArch64ISD::UUNPKHI:
19672 return performUnpackCombine(N, DAG, Subtarget);
19673 case AArch64ISD::UZP1:
19674 return performUzpCombine(N, DAG);
19675 case AArch64ISD::SETCC_MERGE_ZERO:
19676 return performSetccMergeZeroCombine(N, DCI);
19677 case AArch64ISD::GLD1_MERGE_ZERO:
19678 case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
19679 case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
19680 case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
19681 case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
19682 case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
19683 case AArch64ISD::GLD1_IMM_MERGE_ZERO:
19684 case AArch64ISD::GLD1S_MERGE_ZERO:
19685 case AArch64ISD::GLD1S_SCALED_MERGE_ZERO:
19686 case AArch64ISD::GLD1S_UXTW_MERGE_ZERO:
19687 case AArch64ISD::GLD1S_SXTW_MERGE_ZERO:
19688 case AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO:
19689 case AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO:
19690 case AArch64ISD::GLD1S_IMM_MERGE_ZERO:
19691 return performGLD1Combine(N, DAG);
19692 case AArch64ISD::VASHR:
19693 case AArch64ISD::VLSHR:
19694 return performVectorShiftCombine(N, *this, DCI);
19695 case AArch64ISD::SUNPKLO:
19696 return performSunpkloCombine(N, DAG);
19697 case AArch64ISD::BSP:
19698 return performBSPExpandForSVE(
19699 N, DAG, Subtarget, useSVEForFixedLengthVectorVT(N->getValueType(0)));
19700 case ISD::INSERT_VECTOR_ELT:
19701 return performInsertVectorEltCombine(N, DCI);
19702 case ISD::EXTRACT_VECTOR_ELT:
19703 return performExtractVectorEltCombine(N, DCI, Subtarget);
19704 case ISD::VECREDUCE_ADD:
19705 return performVecReduceAddCombine(N, DCI.DAG, Subtarget);
19706 case AArch64ISD::UADDV:
19707 return performUADDVCombine(N, DAG);
19708 case AArch64ISD::SMULL:
19709 case AArch64ISD::UMULL:
19710 return tryCombineLongOpWithDup(Intrinsic::not_intrinsic, N, DCI, DAG);
19711 case ISD::INTRINSIC_VOID:
19712 case ISD::INTRINSIC_W_CHAIN:
19713 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
19714 case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
19715 return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/);
19716 case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
19717 return combineSVEPrefetchVecBaseImmOff(N, DAG, 2 /*=ScalarSizeInBytes*/);
19718 case Intrinsic::aarch64_sve_prfw_gather_scalar_offset:
19719 return combineSVEPrefetchVecBaseImmOff(N, DAG, 4 /*=ScalarSizeInBytes*/);
19720 case Intrinsic::aarch64_sve_prfd_gather_scalar_offset:
19721 return combineSVEPrefetchVecBaseImmOff(N, DAG, 8 /*=ScalarSizeInBytes*/);
19722 case Intrinsic::aarch64_sve_prfb_gather_uxtw_index:
19723 case Intrinsic::aarch64_sve_prfb_gather_sxtw_index:
19724 case Intrinsic::aarch64_sve_prfh_gather_uxtw_index:
19725 case Intrinsic::aarch64_sve_prfh_gather_sxtw_index:
19726 case Intrinsic::aarch64_sve_prfw_gather_uxtw_index:
19727 case Intrinsic::aarch64_sve_prfw_gather_sxtw_index:
19728 case Intrinsic::aarch64_sve_prfd_gather_uxtw_index:
19729 case Intrinsic::aarch64_sve_prfd_gather_sxtw_index:
19730 return legalizeSVEGatherPrefetchOffsVec(N, DAG);
19731 case Intrinsic::aarch64_neon_ld2:
19732 case Intrinsic::aarch64_neon_ld3:
19733 case Intrinsic::aarch64_neon_ld4:
19734 case Intrinsic::aarch64_neon_ld1x2:
19735 case Intrinsic::aarch64_neon_ld1x3:
19736 case Intrinsic::aarch64_neon_ld1x4:
19737 case Intrinsic::aarch64_neon_ld2lane:
19738 case Intrinsic::aarch64_neon_ld3lane:
19739 case Intrinsic::aarch64_neon_ld4lane:
19740 case Intrinsic::aarch64_neon_ld2r:
19741 case Intrinsic::aarch64_neon_ld3r:
19742 case Intrinsic::aarch64_neon_ld4r:
19743 case Intrinsic::aarch64_neon_st2:
19744 case Intrinsic::aarch64_neon_st3:
19745 case Intrinsic::aarch64_neon_st4:
19746 case Intrinsic::aarch64_neon_st1x2:
19747 case Intrinsic::aarch64_neon_st1x3:
19748 case Intrinsic::aarch64_neon_st1x4:
19749 case Intrinsic::aarch64_neon_st2lane:
19750 case Intrinsic::aarch64_neon_st3lane:
19751 case Intrinsic::aarch64_neon_st4lane:
19752 return performNEONPostLDSTCombine(N, DCI, DAG);
19753 case Intrinsic::aarch64_sve_ldnt1:
19754 return performLDNT1Combine(N, DAG);
19755 case Intrinsic::aarch64_sve_ld1rq:
19756 return performLD1ReplicateCombine<AArch64ISD::LD1RQ_MERGE_ZERO>(N, DAG);
19757 case Intrinsic::aarch64_sve_ld1ro:
19758 return performLD1ReplicateCombine<AArch64ISD::LD1RO_MERGE_ZERO>(N, DAG);
19759 case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
19760 return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19761 case Intrinsic::aarch64_sve_ldnt1_gather:
19762 return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19763 case Intrinsic::aarch64_sve_ldnt1_gather_index:
19764 return performGatherLoadCombine(N, DAG,
19765 AArch64ISD::GLDNT1_INDEX_MERGE_ZERO);
19766 case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
19767 return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19768 case Intrinsic::aarch64_sve_ld1:
19769 return performLD1Combine(N, DAG, AArch64ISD::LD1_MERGE_ZERO);
19770 case Intrinsic::aarch64_sve_ldnf1:
19771 return performLD1Combine(N, DAG, AArch64ISD::LDNF1_MERGE_ZERO);
19772 case Intrinsic::aarch64_sve_ldff1:
19773 return performLD1Combine(N, DAG, AArch64ISD::LDFF1_MERGE_ZERO);
19774 case Intrinsic::aarch64_sve_st1:
19775 return performST1Combine(N, DAG);
19776 case Intrinsic::aarch64_sve_stnt1:
19777 return performSTNT1Combine(N, DAG);
19778 case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
19779 return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19780 case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
19781 return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19782 case Intrinsic::aarch64_sve_stnt1_scatter:
19783 return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19784 case Intrinsic::aarch64_sve_stnt1_scatter_index:
19785 return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_INDEX_PRED);
19786 case Intrinsic::aarch64_sve_ld1_gather:
19787 return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_MERGE_ZERO);
19788 case Intrinsic::aarch64_sve_ld1_gather_index:
19789 return performGatherLoadCombine(N, DAG,
19790 AArch64ISD::GLD1_SCALED_MERGE_ZERO);
19791 case Intrinsic::aarch64_sve_ld1_gather_sxtw:
19792 return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SXTW_MERGE_ZERO,
19793 /*OnlyPackedOffsets=*/false);
19794 case Intrinsic::aarch64_sve_ld1_gather_uxtw:
19795 return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_UXTW_MERGE_ZERO,
19796 /*OnlyPackedOffsets=*/false);
19797 case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
19798 return performGatherLoadCombine(N, DAG,
19799 AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO,
19800 /*OnlyPackedOffsets=*/false);
19801 case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
19802 return performGatherLoadCombine(N, DAG,
19803 AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO,
19804 /*OnlyPackedOffsets=*/false);
19805 case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
19806 return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_IMM_MERGE_ZERO);
19807 case Intrinsic::aarch64_sve_ldff1_gather:
19808 return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_MERGE_ZERO);
19809 case Intrinsic::aarch64_sve_ldff1_gather_index:
19810 return performGatherLoadCombine(N, DAG,
19811 AArch64ISD::GLDFF1_SCALED_MERGE_ZERO);
19812 case Intrinsic::aarch64_sve_ldff1_gather_sxtw:
19813 return performGatherLoadCombine(N, DAG,
19814 AArch64ISD::GLDFF1_SXTW_MERGE_ZERO,
19815 /*OnlyPackedOffsets=*/false);
19816 case Intrinsic::aarch64_sve_ldff1_gather_uxtw:
19817 return performGatherLoadCombine(N, DAG,
19818 AArch64ISD::GLDFF1_UXTW_MERGE_ZERO,
19819 /*OnlyPackedOffsets=*/false);
19820 case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index:
19821 return performGatherLoadCombine(N, DAG,
19822 AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO,
19823 /*OnlyPackedOffsets=*/false);
19824 case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index:
19825 return performGatherLoadCombine(N, DAG,
19826 AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO,
19827 /*OnlyPackedOffsets=*/false);
19828 case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset:
19829 return performGatherLoadCombine(N, DAG,
19830 AArch64ISD::GLDFF1_IMM_MERGE_ZERO);
19831 case Intrinsic::aarch64_sve_st1_scatter:
19832 return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_PRED);
19833 case Intrinsic::aarch64_sve_st1_scatter_index:
19834 return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SCALED_PRED);
19835 case Intrinsic::aarch64_sve_st1_scatter_sxtw:
19836 return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SXTW_PRED,
19837 /*OnlyPackedOffsets=*/false);
19838 case Intrinsic::aarch64_sve_st1_scatter_uxtw:
19839 return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_UXTW_PRED,
19840 /*OnlyPackedOffsets=*/false);
19841 case Intrinsic::aarch64_sve_st1_scatter_sxtw_index:
19842 return performScatterStoreCombine(N, DAG,
19843 AArch64ISD::SST1_SXTW_SCALED_PRED,
19844 /*OnlyPackedOffsets=*/false);
19845 case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
19846 return performScatterStoreCombine(N, DAG,
19847 AArch64ISD::SST1_UXTW_SCALED_PRED,
19848 /*OnlyPackedOffsets=*/false);
19849 case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
19850 return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED);
19851 case Intrinsic::aarch64_sve_tuple_get: {
19853 SDValue Chain = N->getOperand(0);
19854 SDValue Src1 = N->getOperand(2);
19855 SDValue Idx = N->getOperand(3);
19857 uint64_t IdxConst = cast<ConstantSDNode>(Idx)->getZExtValue();
19858 EVT ResVT = N->getValueType(0);
19859 uint64_t NumLanes = ResVT.getVectorElementCount().getKnownMinValue();
19860 SDValue ExtIdx = DAG.getVectorIdxConstant(IdxConst * NumLanes, DL);
19861 SDValue Val =
19862 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Src1, ExtIdx);
19863 return DAG.getMergeValues({Val, Chain}, DL);
19865 case Intrinsic::aarch64_sve_tuple_set: {
19867 SDValue Chain = N->getOperand(0);
19868 SDValue Tuple = N->getOperand(2);
19869 SDValue Idx = N->getOperand(3);
19870 SDValue Vec = N->getOperand(4);
19872 EVT TupleVT = Tuple.getValueType();
19873 uint64_t TupleLanes = TupleVT.getVectorElementCount().getKnownMinValue();
19875 uint64_t IdxConst = cast<ConstantSDNode>(Idx)->getZExtValue();
19876 uint64_t NumLanes =
19877 Vec.getValueType().getVectorElementCount().getKnownMinValue();
19879 if ((TupleLanes % NumLanes) != 0)
19880 report_fatal_error("invalid tuple vector!");
19882 uint64_t NumVecs = TupleLanes / NumLanes;
19884 SmallVector<SDValue, 4> Opnds;
19885 for (unsigned I = 0; I < NumVecs; ++I) {
19887 Opnds.push_back(Vec);
19889 SDValue ExtIdx = DAG.getVectorIdxConstant(I * NumLanes, DL);
19890 Opnds.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
19891 Vec.getValueType(), Tuple, ExtIdx));
19895 DAG.getNode(ISD::CONCAT_VECTORS, DL, Tuple.getValueType(), Opnds);
19896 return DAG.getMergeValues({Concat, Chain}, DL);
19898 case Intrinsic::aarch64_sve_tuple_create2:
19899 case Intrinsic::aarch64_sve_tuple_create3:
19900 case Intrinsic::aarch64_sve_tuple_create4: {
19902 SDValue Chain = N->getOperand(0);
19904 SmallVector<SDValue, 4> Opnds;
19905 for (unsigned I = 2; I < N->getNumOperands(); ++I)
19906 Opnds.push_back(N->getOperand(I));
19908 EVT VT = Opnds[0].getValueType();
19909 EVT EltVT = VT.getVectorElementType();
19910 EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
19911 VT.getVectorElementCount() *
19912 (N->getNumOperands() - 2));
19913 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, DestVT, Opnds);
19914 return DAG.getMergeValues({Concat, Chain}, DL);
19916 case Intrinsic::aarch64_sve_ld2:
19917 case Intrinsic::aarch64_sve_ld3:
19918 case Intrinsic::aarch64_sve_ld4: {
19920 SDValue Chain = N->getOperand(0);
19921 SDValue Mask = N->getOperand(2);
19922 SDValue BasePtr = N->getOperand(3);
19923 SDValue LoadOps[] = {Chain, Mask, BasePtr};
19924 unsigned IntrinsicID =
19925 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19926 SDValue Result =
19927 LowerSVEStructLoad(IntrinsicID, LoadOps, N->getValueType(0), DAG, DL);
19928 return DAG.getMergeValues({Result, Chain}, DL);
19930 case Intrinsic::aarch64_rndr:
19931 case Intrinsic::aarch64_rndrrs: {
19932 unsigned IntrinsicID =
19933 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19934 auto Register =
19935 (IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR
19936 : AArch64SysReg::RNDRRS);
19938 SDValue A = DAG.getNode(
19939 AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
19940 N->getOperand(0), DAG.getConstant(Register, DL, MVT::i64));
19941 SDValue B = DAG.getNode(
19942 AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32),
19943 DAG.getConstant(0, DL, MVT::i32),
19944 DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1));
19945 return DAG.getMergeValues(
19946 {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL);
19952 case ISD::GlobalAddress:
19953 return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine());
19958 // Check whether the return value is used only as a return value, as otherwise
19959 // we can't perform a tail call. In particular, we need to check for
19960 // target ISD nodes that are returns and any other "odd" constructs
19961 // that the generic analysis code won't necessarily catch.
19962 bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
19963 SDValue &Chain) const {
19964 if (N->getNumValues() != 1)
19966 if (!N->hasNUsesOfValue(1, 0))
19969 SDValue TCChain = Chain;
19970 SDNode *Copy = *N->use_begin();
19971 if (Copy->getOpcode() == ISD::CopyToReg) {
19972 // If the copy has a glue operand, we conservatively assume it isn't safe to
19973 // perform a tail call.
19974 if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
19977 TCChain = Copy->getOperand(0);
19978 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
19981 bool HasRet = false;
19982 for (SDNode *Node : Copy->uses()) {
19983 if (Node->getOpcode() != AArch64ISD::RET_FLAG)
19995 // Return whether an instruction can potentially be optimized to a tail
19996 // call. This will cause the optimizers to attempt to move, or duplicate,
19997 // return instructions to help enable tail call optimizations for this
19998 // instruction.
19999 bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
20000 return CI->isTailCall();
20003 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
20005 ISD::MemIndexedMode &AM,
20007 SelectionDAG &DAG) const {
20008 if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
20011 Base = Op->getOperand(0);
20012 // All of the indexed addressing mode instructions take a signed
20013 // 9 bit immediate offset.
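// For example (illustrative):
//   ldr x0, [x1, #16]!   ; pre-indexed load, base updated before the access
//   str x0, [x1], #-16   ; post-indexed store, base updated after the access
// In both cases the immediate must fit in a signed 9-bit field, i.e. [-256, 255].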
20014 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
20015 int64_t RHSC = RHS->getSExtValue();
20016 if (Op->getOpcode() == ISD::SUB)
20017 RHSC = -(uint64_t)RHSC;
20018 if (!isInt<9>(RHSC))
20020 IsInc = (Op->getOpcode() == ISD::ADD);
20021 Offset = Op->getOperand(1);
20027 bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
20029 ISD::MemIndexedMode &AM,
20030 SelectionDAG &DAG) const {
20033 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
20034 VT = LD->getMemoryVT();
20035 Ptr = LD->getBasePtr();
20036 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
20037 VT = ST->getMemoryVT();
20038 Ptr = ST->getBasePtr();
20043 if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
20045 AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
20049 bool AArch64TargetLowering::getPostIndexedAddressParts(
20050 SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
20051 ISD::MemIndexedMode &AM, SelectionDAG &DAG) const {
20054 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
20055 VT = LD->getMemoryVT();
20056 Ptr = LD->getBasePtr();
20057 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
20058 VT = ST->getMemoryVT();
20059 Ptr = ST->getBasePtr();
20064 if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
20066 // Post-indexing updates the base, so it's not a valid transform
20067 // if that's not the same as the load's pointer.
20070 AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
20074 void AArch64TargetLowering::ReplaceBITCASTResults(
20075 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
20077 SDValue Op = N->getOperand(0);
20078 EVT VT = N->getValueType(0);
20079 EVT SrcVT = Op.getValueType();
20081 if (VT.isScalableVector() && !isTypeLegal(VT) && isTypeLegal(SrcVT)) {
20082 assert(!VT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
20083 "Expected fp->int bitcast!");
20085 // Bitcasting between unpacked vector types of different element counts is
20086 // not a NOP because the live elements are laid out differently.
20088 // e.g. nxv2i32 = XX??XX??
20089 // nxv4f16 = X?X?X?X?
20090 if (VT.getVectorElementCount() != SrcVT.getVectorElementCount())
20093 SDValue CastResult = getSVESafeBitCast(getSVEContainerType(VT), Op, DAG);
20094 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, CastResult));
20098 if (VT != MVT::i16 || (SrcVT != MVT::f16 && SrcVT != MVT::bf16))
20102 DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
20103 DAG.getUNDEF(MVT::i32), Op,
20104 DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
20106 Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op);
20107 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op));
20110 static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
20112 const AArch64Subtarget *Subtarget) {
20113 EVT VT = N->getValueType(0);
20114 if (!VT.is256BitVector() ||
20115 (VT.getScalarType().isFloatingPoint() &&
20116 !N->getFlags().hasAllowReassociation()) ||
20117 (VT.getScalarType() == MVT::f16 && !Subtarget->hasFullFP16()))
20120 SDValue X = N->getOperand(0);
20121 auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(1));
20123 Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0));
20124 X = N->getOperand(1);
20129 if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef())
20132 // Check the mask is 1,0,3,2,5,4,...
20133 ArrayRef<int> Mask = Shuf->getMask();
20134 for (int I = 0, E = Mask.size(); I < E; I++)
20135 if (Mask[I] != (I % 2 == 0 ? I + 1 : I - 1))
20139 auto LoHi = DAG.SplitVector(X, DL);
20140 assert(LoHi.first.getValueType() == LoHi.second.getValueType());
20141 SDValue Addp = DAG.getNode(AArch64ISD::ADDP, N, LoHi.first.getValueType(),
20142 LoHi.first, LoHi.second);
20144 // Shuffle the elements back into order.
20145 SmallVector<int> NMask;
20146 for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I < E; I++) {
20147 NMask.push_back(I);
20148 NMask.push_back(I);
20151 DAG.getVectorShuffle(VT, DL,
20152 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Addp,
20153 DAG.getUNDEF(LoHi.first.getValueType())),
20154 DAG.getUNDEF(VT), NMask));
20157 static void ReplaceReductionResults(SDNode *N,
20158 SmallVectorImpl<SDValue> &Results,
20159 SelectionDAG &DAG, unsigned InterOp,
20160 unsigned AcrossOp) {
20164 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
20165 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
20166 SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi);
20167 SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal);
20168 Results.push_back(SplitVal);
20171 static std::pair<SDValue, SDValue> splitInt128(SDValue N, SelectionDAG &DAG) {
20173 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N);
20174 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64,
20175 DAG.getNode(ISD::SRL, DL, MVT::i128, N,
20176 DAG.getConstant(64, DL, MVT::i64)));
20177 return std::make_pair(Lo, Hi);
20180 void AArch64TargetLowering::ReplaceExtractSubVectorResults(
20181 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
20182 SDValue In = N->getOperand(0);
20183 EVT InVT = In.getValueType();
20185 // Common code will handle these just fine.
20186 if (!InVT.isScalableVector() || !InVT.isInteger())
20190 EVT VT = N->getValueType(0);
20192 // The following checks bail if this is not a halving operation.
20194 ElementCount ResEC = VT.getVectorElementCount();
20196 if (InVT.getVectorElementCount() != (ResEC * 2))
20199 auto *CIndex = dyn_cast<ConstantSDNode>(N->getOperand(1));
20203 unsigned Index = CIndex->getZExtValue();
20204 if ((Index != 0) && (Index != ResEC.getKnownMinValue()))
20207 unsigned Opcode = (Index == 0) ? AArch64ISD::UUNPKLO : AArch64ISD::UUNPKHI;
20208 EVT ExtendedHalfVT = VT.widenIntegerVectorElementType(*DAG.getContext());
20210 SDValue Half = DAG.getNode(Opcode, DL, ExtendedHalfVT, N->getOperand(0));
20211 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half));
20214 // Create an even/odd pair of X registers holding integer value V.
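// For example (illustrative): the i128 compare and store values of a CASP are
// each placed in a sequential X register pair (e.g. X0/X1 and X2/X3) via
// REG_SEQUENCE, so the result can be encoded as:
//   caspal x0, x1, x2, x3, [x4]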
20215 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
20216 SDLoc dl(V.getNode());
20217 SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64);
20218 SDValue VHi = DAG.getAnyExtOrTrunc(
20219 DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)),
20221 if (DAG.getDataLayout().isBigEndian())
20222 std::swap (VLo, VHi);
20224 DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32);
20225 SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32);
20226 SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32);
20227 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
20229 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
20232 static void ReplaceCMP_SWAP_128Results(SDNode *N,
20233 SmallVectorImpl<SDValue> &Results,
20235 const AArch64Subtarget *Subtarget) {
20236 assert(N->getValueType(0) == MVT::i128 &&
20237 "AtomicCmpSwap on types less than 128 should be legal");
20239 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
20240 if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) {
20241 // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type,
20242 // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG.
20244 createGPRPairNode(DAG, N->getOperand(2)), // Compare value
20245 createGPRPairNode(DAG, N->getOperand(3)), // Store value
20246 N->getOperand(1), // Ptr
20247 N->getOperand(0), // Chain in
20251 switch (MemOp->getMergedOrdering()) {
20252 case AtomicOrdering::Monotonic:
20253 Opcode = AArch64::CASPX;
20255 case AtomicOrdering::Acquire:
20256 Opcode = AArch64::CASPAX;
20258 case AtomicOrdering::Release:
20259 Opcode = AArch64::CASPLX;
20261 case AtomicOrdering::AcquireRelease:
20262 case AtomicOrdering::SequentiallyConsistent:
20263 Opcode = AArch64::CASPALX;
20266 llvm_unreachable("Unexpected ordering!");
20269 MachineSDNode *CmpSwap = DAG.getMachineNode(
20270 Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops);
20271 DAG.setNodeMemRefs(CmpSwap, {MemOp});
20273 unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64;
20274 if (DAG.getDataLayout().isBigEndian())
20275 std::swap(SubReg1, SubReg2);
20276 SDValue Lo = DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64,
20277 SDValue(CmpSwap, 0));
20278 SDValue Hi = DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64,
20279 SDValue(CmpSwap, 0));
20281 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, Lo, Hi));
20282 Results.push_back(SDValue(CmpSwap, 1)); // Chain out
20287 switch (MemOp->getMergedOrdering()) {
20288 case AtomicOrdering::Monotonic:
20289 Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
20291 case AtomicOrdering::Acquire:
20292 Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
20294 case AtomicOrdering::Release:
20295 Opcode = AArch64::CMP_SWAP_128_RELEASE;
20297 case AtomicOrdering::AcquireRelease:
20298 case AtomicOrdering::SequentiallyConsistent:
20299 Opcode = AArch64::CMP_SWAP_128;
20302 llvm_unreachable("Unexpected ordering!");
20305 auto Desired = splitInt128(N->getOperand(2), DAG);
20306 auto New = splitInt128(N->getOperand(3), DAG);
20307 SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second,
20308 New.first, New.second, N->getOperand(0)};
20309 SDNode *CmpSwap = DAG.getMachineNode(
20310 Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other),
20312 DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
20314 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
20315 SDValue(CmpSwap, 0), SDValue(CmpSwap, 1)));
20316 Results.push_back(SDValue(CmpSwap, 3));
20319 void AArch64TargetLowering::ReplaceNodeResults(
20320 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
20321 switch (N->getOpcode()) {
20323 llvm_unreachable("Don't know how to custom expand this");
20325 ReplaceBITCASTResults(N, Results, DAG);
20327 case ISD::VECREDUCE_ADD:
20328 case ISD::VECREDUCE_SMAX:
20329 case ISD::VECREDUCE_SMIN:
20330 case ISD::VECREDUCE_UMAX:
20331 case ISD::VECREDUCE_UMIN:
20332 Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG));
20336 ReplaceAddWithADDP(N, Results, DAG, Subtarget);
20341 if (SDValue Result = LowerCTPOP_PARITY(SDValue(N, 0), DAG))
20342 Results.push_back(Result);
20344 case AArch64ISD::SADDV:
20345 ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV);
20347 case AArch64ISD::UADDV:
20348 ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV);
20350 case AArch64ISD::SMINV:
20351 ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV);
20353 case AArch64ISD::UMINV:
20354 ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV);
20356 case AArch64ISD::SMAXV:
20357 ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV);
20359 case AArch64ISD::UMAXV:
20360 ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV);
20362 case ISD::FP_TO_UINT:
20363 case ISD::FP_TO_SINT:
20364 case ISD::STRICT_FP_TO_SINT:
20365 case ISD::STRICT_FP_TO_UINT:
20366 assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion");
20367 // Let normal code take care of it by not adding anything to Results.
20369 case ISD::ATOMIC_CMP_SWAP:
20370 ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget);
20372 case ISD::ATOMIC_LOAD:
20374 assert(SDValue(N, 0).getValueType() == MVT::i128 &&
20375 "unexpected load's value type");
20376 MemSDNode *LoadNode = cast<MemSDNode>(N);
20377 if ((!LoadNode->isVolatile() && !LoadNode->isAtomic()) ||
20378 LoadNode->getMemoryVT() != MVT::i128) {
20379 // Non-volatile, non-atomic loads are optimized later in AArch64's
20380 // load/store optimizer.
20384 SDValue Result = DAG.getMemIntrinsicNode(
20385 AArch64ISD::LDP, SDLoc(N),
20386 DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}),
20387 {LoadNode->getChain(), LoadNode->getBasePtr()}, LoadNode->getMemoryVT(),
20388 LoadNode->getMemOperand());
20390 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
20391 Result.getValue(0), Result.getValue(1));
20392 Results.append({Pair, Result.getValue(2) /* Chain */});
20395 case ISD::EXTRACT_SUBVECTOR:
20396 ReplaceExtractSubVectorResults(N, Results, DAG);
20398 case ISD::INSERT_SUBVECTOR:
20399 case ISD::CONCAT_VECTORS:
20400 // Custom lowering has been requested for INSERT_SUBVECTOR and
20401 // CONCAT_VECTORS -- but delegate to common code for result type
20402 // legalisation.
20404 case ISD::INTRINSIC_WO_CHAIN: {
20405 EVT VT = N->getValueType(0);
20406 assert((VT == MVT::i8 || VT == MVT::i16) &&
20407 "custom lowering for unexpected type");
20409 ConstantSDNode *CN = cast<ConstantSDNode>(N->getOperand(0));
20410 Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
20414 case Intrinsic::aarch64_sve_clasta_n: {
20416 auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
20417 auto V = DAG.getNode(AArch64ISD::CLASTA_N, DL, MVT::i32,
20418 N->getOperand(1), Op2, N->getOperand(3));
20419 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20422 case Intrinsic::aarch64_sve_clastb_n: {
20424 auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
20425 auto V = DAG.getNode(AArch64ISD::CLASTB_N, DL, MVT::i32,
20426 N->getOperand(1), Op2, N->getOperand(3));
20427 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20430 case Intrinsic::aarch64_sve_lasta: {
20432 auto V = DAG.getNode(AArch64ISD::LASTA, DL, MVT::i32,
20433 N->getOperand(1), N->getOperand(2));
20434 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20437 case Intrinsic::aarch64_sve_lastb: {
20439 auto V = DAG.getNode(AArch64ISD::LASTB, DL, MVT::i32,
20440 N->getOperand(1), N->getOperand(2));
20441 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20449 bool AArch64TargetLowering::useLoadStackGuardNode() const {
20450 if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia())
20451 return TargetLowering::useLoadStackGuardNode();
20455 unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
20456 // Combine multiple FDIVs with the same divisor into multiple FMULs by the
20457 // reciprocal if there are three or more FDIVs.
20461 TargetLoweringBase::LegalizeTypeAction
20462 AArch64TargetLowering::getPreferredVectorAction(MVT VT) const {
20463 // During type legalization, we prefer to widen v1i8, v1i16, v1i32 to v8i8,
20464 // v4i16, v2i32 instead of to promote.
20465 if (VT == MVT::v1i8 || VT == MVT::v1i16 || VT == MVT::v1i32 ||
20467 return TypeWidenVector;
20469 return TargetLoweringBase::getPreferredVectorAction(VT);
20472 // In v8.4a, ldp and stp instructions are guaranteed to be single-copy atomic
20473 // provided the address is 16-byte aligned.
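// For example (illustrative): with LSE2, a 16-byte aligned `load atomic i128`
// can be selected as a single
//   ldp x0, x1, [x2]
// without an LL/SC loop or an atomic libcall.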
20474 bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const {
20475 if (!Subtarget->hasLSE2())
20478 if (auto LI = dyn_cast<LoadInst>(I))
20479 return LI->getType()->getPrimitiveSizeInBits() == 128 &&
20480 LI->getAlign() >= Align(16);
20482 if (auto SI = dyn_cast<StoreInst>(I))
20483 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 128 &&
20484 SI->getAlign() >= Align(16);
20489 bool AArch64TargetLowering::shouldInsertFencesForAtomic(
20490 const Instruction *I) const {
20491 return isOpSuitableForLDPSTP(I);
20494 // Loads and stores less than 128 bits are already atomic; ones above that
20495 // are doomed anyway, so defer to the default libcall and blame the OS when
20496 // things go wrong.
20497 TargetLoweringBase::AtomicExpansionKind
20498 AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
20499 unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
20500 if (Size != 128 || isOpSuitableForLDPSTP(SI))
20501 return AtomicExpansionKind::None;
20502 return AtomicExpansionKind::Expand;
20505 // Loads and stores less than 128 bits are already atomic; ones above that
20506 // are doomed anyway, so defer to the default libcall and blame the OS when
20507 // things go wrong.
20508 TargetLowering::AtomicExpansionKind
20509 AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
20510 unsigned Size = LI->getType()->getPrimitiveSizeInBits();
20512 if (Size != 128 || isOpSuitableForLDPSTP(LI))
20513 return AtomicExpansionKind::None;
20515 // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20516 // implement atomicrmw without spilling. If the target address is also on the
20517 // stack and close enough to the spill slot, this can lead to a situation
20518 // where the monitor always gets cleared and the atomic operation can never
20519 // succeed. So at -O0 lower this operation to a CAS loop.
20520 if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20521 return AtomicExpansionKind::CmpXChg;
20523 return AtomicExpansionKind::LLSC;
20526 // For the real atomic operations, we have ldxr/stxr up to 128 bits.
20527 TargetLowering::AtomicExpansionKind
20528 AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
20529 if (AI->isFloatingPointOperation())
20530 return AtomicExpansionKind::CmpXChg;
20532 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
20533 if (Size > 128) return AtomicExpansionKind::None;
20535 // Nand is not supported in LSE.
20536 // Leave 128 bits to LLSC or CmpXChg.
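// For example (illustrative): with LSE available, an `atomicrmw add i64 ...
// seq_cst` is selected directly as LDADDAL rather than an LDXR/STXR loop,
// which is why no IR-level expansion is requested here.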
20537 if (AI->getOperation() != AtomicRMWInst::Nand && Size < 128) {
20538 if (Subtarget->hasLSE())
20539 return AtomicExpansionKind::None;
20540 if (Subtarget->outlineAtomics()) {
20541 // [U]Min/[U]Max RMW atomics are used in __sync_fetch_ libcalls so far.
20542 // Don't outline them unless
20543 // (1) high level <atomic> support approved:
20544 // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0493r1.pdf
20545 // (2) low level libgcc and compiler-rt support implemented by:
20546 // min/max outline atomics helpers
20547 if (AI->getOperation() != AtomicRMWInst::Min &&
20548 AI->getOperation() != AtomicRMWInst::Max &&
20549 AI->getOperation() != AtomicRMWInst::UMin &&
20550 AI->getOperation() != AtomicRMWInst::UMax) {
20551 return AtomicExpansionKind::None;
20556 // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20557 // implement atomicrmw without spilling. If the target address is also on the
20558 // stack and close enough to the spill slot, this can lead to a situation
20559 // where the monitor always gets cleared and the atomic operation can never
20560 // succeed. So at -O0 lower this operation to a CAS loop.
20561 if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20562 return AtomicExpansionKind::CmpXChg;
20564 return AtomicExpansionKind::LLSC;
20567 TargetLowering::AtomicExpansionKind
20568 AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
20569 AtomicCmpXchgInst *AI) const {
20570 // If subtarget has LSE, leave cmpxchg intact for codegen.
20571 if (Subtarget->hasLSE() || Subtarget->outlineAtomics())
20572 return AtomicExpansionKind::None;
20573 // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20574 // implement cmpxchg without spilling. If the address being exchanged is also
20575 // on the stack and close enough to the spill slot, this can lead to a
20576 // situation where the monitor always gets cleared and the atomic operation
20577 // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
20578 if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20579 return AtomicExpansionKind::None;
20581 // 128-bit atomic cmpxchg is weird; AtomicExpand doesn't know how to expand
20582 // it.
20583 unsigned Size = AI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
20585 return AtomicExpansionKind::None;
20587 return AtomicExpansionKind::LLSC;
20590 Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
20591 Type *ValueTy, Value *Addr,
20592 AtomicOrdering Ord) const {
20593 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20594 bool IsAcquire = isAcquireOrStronger(Ord);
20596 // Since i128 isn't legal and intrinsics don't get type-lowered, the ldxp
20597 // intrinsic must return {i64, i64} and we have to recombine them into a
20598 // single i128 here.
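// For example (illustrative), the acquire flavour emits IR along the lines of:
//   %lohi = call { i64, i64 } @llvm.aarch64.ldaxp(i8* %addr)
//   %lo   = extractvalue { i64, i64 } %lohi, 0
//   %hi   = extractvalue { i64, i64 } %lohi, 1
// which is recombined into an i128 below.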
20599 if (ValueTy->getPrimitiveSizeInBits() == 128) {
20600 Intrinsic::ID Int =
20601 IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
20602 Function *Ldxr = Intrinsic::getDeclaration(M, Int);
20604 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
20605 Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
20607 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
20608 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
20609 Lo = Builder.CreateZExt(Lo, ValueTy, "lo64");
20610 Hi = Builder.CreateZExt(Hi, ValueTy, "hi64");
20611 return Builder.CreateOr(
20612 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 64)), "val64");
20615 Type *Tys[] = { Addr->getType() };
20616 Intrinsic::ID Int =
20617 IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
20618 Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys);
20620 const DataLayout &DL = M->getDataLayout();
20621 IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
20622 CallInst *CI = Builder.CreateCall(Ldxr, Addr);
20624 0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy));
20625 Value *Trunc = Builder.CreateTrunc(CI, IntEltTy);
20627 return Builder.CreateBitCast(Trunc, ValueTy);
20630 void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
20631 IRBuilderBase &Builder) const {
20632 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20633 Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex));
20636 Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
20637 Value *Val, Value *Addr,
20638 AtomicOrdering Ord) const {
20639 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20640 bool IsRelease = isReleaseOrStronger(Ord);
20642 // Since the intrinsics must have legal type, the i128 intrinsics take two
20643 // parameters: "i64, i64". We must marshal Val into the appropriate form
20644 // before the call.
20645 if (Val->getType()->getPrimitiveSizeInBits() == 128) {
20646 Intrinsic::ID Int =
20647 IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
20648 Function *Stxr = Intrinsic::getDeclaration(M, Int);
20649 Type *Int64Ty = Type::getInt64Ty(M->getContext());
20651 Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo");
20652 Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi");
20653 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
20654 return Builder.CreateCall(Stxr, {Lo, Hi, Addr});
20657 Intrinsic::ID Int =
20658 IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
20659 Type *Tys[] = { Addr->getType() };
20660 Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys);
20662 const DataLayout &DL = M->getDataLayout();
20663 IntegerType *IntValTy = Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
20664 Val = Builder.CreateBitCast(Val, IntValTy);
20666 CallInst *CI = Builder.CreateCall(
20667 Stxr, {Builder.CreateZExtOrBitCast(
20668 Val, Stxr->getFunctionType()->getParamType(0)),
20670 CI->addParamAttr(1, Attribute::get(Builder.getContext(),
20671 Attribute::ElementType, Val->getType()));
20675 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
20676 Type *Ty, CallingConv::ID CallConv, bool isVarArg,
20677 const DataLayout &DL) const {
20678 if (!Ty->isArrayTy()) {
20679 const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
20680 return TySize.isScalable() && TySize.getKnownMinSize() > 128;
  }
20683 // All non aggregate members of the type must have the same type
20684 SmallVector<EVT> ValueVTs;
20685 ComputeValueVTs(*this, DL, Ty, ValueVTs);
20686 return is_splat(ValueVTs);
}
20689 bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
                                                            EVT) const {
  return false;
}
20694 static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {
20695 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
20696 Function *ThreadPointerFunc =
20697 Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
20698 return IRB.CreatePointerCast(
20699 IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
                             Offset),
20701 IRB.getInt8PtrTy()->getPointerTo(0));
}
20704 Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
20705 // Android provides a fixed TLS slot for the stack cookie. See the definition
20706 // of TLS_SLOT_STACK_GUARD in
20707 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
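  // In effect the cookie is read from a fixed offset off the thread pointer,
  // roughly equivalent to "*(void **)(__builtin_thread_pointer() + 0x28)".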
20708 if (Subtarget->isTargetAndroid())
20709 return UseTlsOffset(IRB, 0x28);
20711 // Fuchsia is similar.
20712 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
20713 if (Subtarget->isTargetFuchsia())
20714 return UseTlsOffset(IRB, -0x10);
20716 return TargetLowering::getIRStackGuard(IRB);
}
20719 void AArch64TargetLowering::insertSSPDeclarations(Module &M) const {
20720 // MSVC CRT provides functionalities for stack protection.
20721 if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) {
20722 // MSVC CRT has a global variable holding security cookie.
20723 M.getOrInsertGlobal("__security_cookie",
20724 Type::getInt8PtrTy(M.getContext()));
20726 // MSVC CRT has a function to validate security cookie.
20727 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
20728 "__security_check_cookie", Type::getVoidTy(M.getContext()),
20729 Type::getInt8PtrTy(M.getContext()));
20730 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
20731 F->setCallingConv(CallingConv::Win64);
20732 F->addParamAttr(0, Attribute::AttrKind::InReg);
    }
    return;
  }
20736 TargetLowering::insertSSPDeclarations(M);
}
20739 Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const {
20740 // MSVC CRT has a global variable holding security cookie.
20741 if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
20742 return M.getGlobalVariable("__security_cookie");
20743 return TargetLowering::getSDagStackGuard(M);
}
20746 Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const {
20747 // MSVC CRT has a function to validate security cookie.
20748 if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
20749 return M.getFunction("__security_check_cookie");
20750 return TargetLowering::getSSPStackGuardCheck(M);
}

Value *
20754 AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
20755 // Android provides a fixed TLS slot for the SafeStack pointer. See the
20756 // definition of TLS_SLOT_SAFESTACK in
20757 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
20758 if (Subtarget->isTargetAndroid())
20759 return UseTlsOffset(IRB, 0x48);
20761 // Fuchsia is similar.
20762 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
20763 if (Subtarget->isTargetFuchsia())
20764 return UseTlsOffset(IRB, -0x8);
20766 return TargetLowering::getSafeStackPointerLocation(IRB);
}
20769 bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
20770 const Instruction &AndI) const {
20771 // Only sink 'and' mask to cmp use block if it is masking a single bit, since
  // this is likely to fold the and/cmp/br into a single tbz instruction. It
20773 // may be beneficial to sink in other cases, but we would have to check that
  // the cmp would not get folded into the br to form a cbz for these to be
  // beneficial.
  ConstantInt* Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
  if (!Mask)
    return false;
  return Mask->getValue().isPowerOf2();
}
20782 bool AArch64TargetLowering::
20783 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
20784 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
20785 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
20786 SelectionDAG &DAG) const {
20787 // Does baseline recommend not to perform the fold by default?
20788 if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
20789 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
    return false;
20791 // Else, if this is a vector shift, prefer 'shl'.
20792 return X.getValueType().isScalarInteger() || NewShiftOpcode == ISD::SHL;
}
bool AArch64TargetLowering::shouldExpandShift(SelectionDAG &DAG,
                                              SDValue N) const {
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      !Subtarget->isTargetWindows() && !Subtarget->isTargetDarwin())
    return false;
  return true;
}
20803 void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in AArch64FunctionInfo.
20805 AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>();
20806 AFI->setIsSplitCSR(true);
}
20809 void AArch64TargetLowering::insertCopiesSplitCSR(
20810 MachineBasicBlock *Entry,
20811 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
20812 const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
20813 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;
20817 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20818 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
20819 MachineBasicBlock::iterator MBBI = Entry->begin();
20820 for (const MCPhysReg *I = IStart; *I; ++I) {
20821 const TargetRegisterClass *RC = nullptr;
20822 if (AArch64::GPR64RegClass.contains(*I))
20823 RC = &AArch64::GPR64RegClass;
20824 else if (AArch64::FPR64RegClass.contains(*I))
20825 RC = &AArch64::FPR64RegClass;
    else
20827 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
20829 Register NewVR = MRI->createVirtualRegister(RC);
20830 // Create copy from CSR to a virtual register.
20831 // FIXME: this currently does not emit CFI pseudo-instructions, it works
20832 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
20833 // nounwind. If we want to generalize this later, we may need to emit
20834 // CFI pseudo-instructions.
20835 assert(Entry->getParent()->getFunction().hasFnAttribute(
20836 Attribute::NoUnwind) &&
20837 "Function should be nounwind in insertCopiesSplitCSR!");
20838 Entry->addLiveIn(*I);
20839 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);
20842 // Insert the copy-back instructions right before the terminator.
20843 for (auto *Exit : Exits)
20844 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
20845 TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}
20850 bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
20851 // Integer division on AArch64 is expensive. However, when aggressively
20852 // optimizing for code size, we prefer to use a div instruction, as it is
20853 // usually smaller than the alternative sequence.
20854 // The exception to this is vector division. Since AArch64 doesn't have vector
20855 // integer division, leaving the division as-is is a loss even in terms of
20856 // size, because it will have to be scalarized, while the alternative code
20857 // sequence can be performed in vector form.
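  // For example, under minsize a scalar "sdiv i32 %x, 10" is kept as a single
  // SDIV rather than the usual multiply/shift expansion, but a v4i32 divide
  // would have to be scalarized into four divides, so there the expansion in
  // vector form is still the smaller option.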
20858 bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
20859 return OptSize && !VT.isVector();
}
20862 bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
20863 // We want inc-of-add for scalars and sub-of-not for vectors.
20864 return VT.isScalarInteger();
}
20867 bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                                 EVT VT) const {
  // v8f16 without fp16 need to be extended to v8f32, which is more difficult to
  // legalize.
  if (FPVT == MVT::v8f16 && !Subtarget->hasFullFP16())
    return false;
  return TargetLowering::shouldConvertFpToSat(Op, FPVT, VT);
}
20876 bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
20877 return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint();
}

unsigned
20881 AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
20882 if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
20883 return getPointerTy(DL).getSizeInBits();
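  // AAPCS64 va_list is a struct of three pointers (__stack, __gr_top,
  // __vr_top) plus two 32-bit offsets (__gr_offs, __vr_offs), which is what
  // the expression below adds up.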
20885 return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
}
20888 void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
20889 MachineFrameInfo &MFI = MF.getFrameInfo();
20890 // If we have any vulnerable SVE stack objects then the stack protector
20891 // needs to be placed at the top of the SVE stack area, as the SVE locals
20892 // are placed above the other locals, so we allocate it as if it were a
20893 // scalable vector.
20894 // FIXME: It may be worthwhile having a specific interface for this rather
20895 // than doing it here in finalizeLowering.
20896 if (MFI.hasStackProtectorIndex()) {
20897 for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
20898 if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
20899 MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
20900 MFI.setStackID(MFI.getStackProtectorIndex(),
20901 TargetStackID::ScalableVector);
20902 MFI.setObjectAlignment(MFI.getStackProtectorIndex(), Align(16));
        break;
      }
    }
  }
20907 MFI.computeMaxCallFrameSize(MF);
20908 TargetLoweringBase::finalizeLowering(MF);
}
20911 // Unlike X86, we let frame lowering assign offsets to all catch objects.
20912 bool AArch64TargetLowering::needsFixedCatchObjects() const {
  return false;
}
20916 bool AArch64TargetLowering::shouldLocalize(
20917 const MachineInstr &MI, const TargetTransformInfo *TTI) const {
20918 auto &MF = *MI.getMF();
20919 auto &MRI = MF.getRegInfo();
20920 auto maxUses = [](unsigned RematCost) {
20921 // A cost of 1 means remats are basically free.
20922 if (RematCost == 1)
20923 return std::numeric_limits<unsigned>::max();
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;

    llvm_unreachable("Unexpected remat cost");
  };
20933 switch (MI.getOpcode()) {
20934 case TargetOpcode::G_GLOBAL_VALUE: {
20935 // On Darwin, TLS global vars get selected into function calls, which
    // we don't want localized, as they can get moved into the middle of
20937 // another call sequence.
20938 const GlobalValue &GV = *MI.getOperand(1).getGlobal();
20939 if (GV.isThreadLocal() && Subtarget->isTargetMachO())
      return false;
    break;
  }
20943 case TargetOpcode::G_CONSTANT: {
20944 auto *CI = MI.getOperand(1).getCImm();
20945 APInt Imm = CI->getValue();
20946 InstructionCost Cost = TTI->getIntImmCost(
20947 Imm, CI->getType(), TargetTransformInfo::TCK_CodeSize);
20948 assert(Cost.isValid() && "Expected a valid imm cost");
20950 unsigned RematCost = *Cost.getValue();
20951 Register Reg = MI.getOperand(0).getReg();
20952 unsigned MaxUses = maxUses(RematCost);
    // Don't pass UINT_MAX sentinel value to hasAtMostUserInstrs().
    if (MaxUses == std::numeric_limits<unsigned>::max())
      --MaxUses;
    return MRI.hasAtMostUserInstrs(Reg, MaxUses);
  }
  // If we legalized G_GLOBAL_VALUE into ADRP + G_ADD_LOW, mark both as being
  // localizable.
  case AArch64::ADRP:
  case AArch64::G_ADD_LOW:
    return true;
  default:
    break;
  }
  return TargetLoweringBase::shouldLocalize(MI, TTI);
}
20969 bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
20970 if (isa<ScalableVectorType>(Inst.getType()))
    return true;
20973 for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
20974 if (isa<ScalableVectorType>(Inst.getOperand(i)->getType()))
      return true;
20977 if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
20978 if (isa<ScalableVectorType>(AI->getAllocatedType()))
      return true;
  }

  return false;
}
20985 // Return the largest legal scalable vector type that matches VT's element type.
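// For example, any legal fixed-length vector with i32 elements (v2i32, v4i32,
// v8i32, ...) is mapped onto the packed scalable container nxv4i32.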
20986 static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {
20987 assert(VT.isFixedLengthVector() &&
20988 DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20989 "Expected legal fixed length vector!");
  switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for SVE container");
  case MVT::i8:
    return EVT(MVT::nxv16i8);
  case MVT::i16:
    return EVT(MVT::nxv8i16);
  case MVT::i32:
    return EVT(MVT::nxv4i32);
  case MVT::i64:
    return EVT(MVT::nxv2i64);
  case MVT::f16:
    return EVT(MVT::nxv8f16);
  case MVT::f32:
    return EVT(MVT::nxv4f32);
  case MVT::f64:
    return EVT(MVT::nxv2f64);
  }
}
21010 // Return a PTRUE with active lanes corresponding to the extent of VT.
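// For example, a fixed v4i32 operand yields PTRUE with the VL4 pattern on an
// nxv4i1 predicate, so exactly the first four container lanes are active.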
21011 static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
                                                EVT VT) {
21013 assert(VT.isFixedLengthVector() &&
21014 DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
21015 "Expected legal fixed length vector!");
21017 Optional<unsigned> PgPattern =
21018 getSVEPredPatternFromNumElements(VT.getVectorNumElements());
21019 assert(PgPattern && "Unexpected element count for SVE predicate");
21021 // For vectors that are exactly getMaxSVEVectorSizeInBits big, we can use
21022 // AArch64SVEPredPattern::all, which can enable the use of unpredicated
21023 // variants of instructions when available.
21024 const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
21025 unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
21026 unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
21027 if (MaxSVESize && MinSVESize == MaxSVESize &&
21028 MaxSVESize == VT.getSizeInBits())
21029 PgPattern = AArch64SVEPredPattern::all;
  MVT MaskVT;
  switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for SVE predicate");
  case MVT::i8:
    MaskVT = MVT::nxv16i1;
    break;
  case MVT::i16:
  case MVT::f16:
    MaskVT = MVT::nxv8i1;
    break;
  case MVT::i32:
  case MVT::f32:
    MaskVT = MVT::nxv4i1;
    break;
  case MVT::i64:
  case MVT::f64:
    MaskVT = MVT::nxv2i1;
    break;
  }
21052 return getPTrue(DAG, DL, MaskVT, *PgPattern);
}
21055 static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
                                             EVT VT) {
21057 assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
21058 "Expected legal scalable vector!");
21059 auto PredTy = VT.changeVectorElementType(MVT::i1);
21060 return getPTrue(DAG, DL, PredTy, AArch64SVEPredPattern::all);
}
21063 static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {
21064 if (VT.isFixedLengthVector())
21065 return getPredicateForFixedLengthVector(DAG, DL, VT);
21067 return getPredicateForScalableVector(DAG, DL, VT);
}
21070 // Grow V to consume an entire SVE register.
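// For example, a fixed v4i32 value ends up occupying the low 128 bits of an
// nxv4i32 register, with the lanes beyond the fixed width left undef.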
21071 static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
21072 assert(VT.isScalableVector() &&
21073 "Expected to convert into a scalable vector!");
21074 assert(V.getValueType().isFixedLengthVector() &&
21075 "Expected a fixed length vector operand!");
  SDLoc DL(V);
21077 SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
21078 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}
21081 // Shrink V so it's just big enough to maintain a VT's worth of data.
21082 static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
21083 assert(VT.isFixedLengthVector() &&
21084 "Expected to convert into a fixed length vector!");
21085 assert(V.getValueType().isScalableVector() &&
21086 "Expected a scalable vector operand!");
  SDLoc DL(V);
21088 SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
21089 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}
21092 // Convert all fixed length vector loads larger than NEON to masked_loads.
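// For example (assuming a hypothetical 256-bit SVE register for illustration),
// a v8f32 load becomes a masked load of nxv4f32 whose predicate enables the
// first eight lanes, leaving the rest of the container untouched.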
21093 SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
21094 SDValue Op, SelectionDAG &DAG) const {
21095 auto Load = cast<LoadSDNode>(Op);
  SDLoc DL(Op);
21098 EVT VT = Op.getValueType();
21099 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21100 EVT LoadVT = ContainerVT;
21101 EVT MemVT = Load->getMemoryVT();
21103 auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
21105 if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
21106 LoadVT = ContainerVT.changeTypeToInteger();
21107 MemVT = MemVT.changeTypeToInteger();
  }
21110 SDValue NewLoad = DAG.getMaskedLoad(
21111 LoadVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(), Pg,
21112 DAG.getUNDEF(LoadVT), MemVT, Load->getMemOperand(),
21113 Load->getAddressingMode(), Load->getExtensionType());
21115 SDValue Result = NewLoad;
21116 if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
21117 EVT ExtendVT = ContainerVT.changeVectorElementType(
21118 Load->getMemoryVT().getVectorElementType());
21120 Result = getSVESafeBitCast(ExtendVT, Result, DAG);
21121 Result = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
21122 Pg, Result, DAG.getUNDEF(ContainerVT));
  }
21125 Result = convertFromScalableVector(DAG, VT, Result);
21126 SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
21127 return DAG.getMergeValues(MergedValues, DL);
}
21130 static SDValue convertFixedMaskToScalableVector(SDValue Mask,
21131 SelectionDAG &DAG) {
  SDLoc DL(Mask);
21133 EVT InVT = Mask.getValueType();
21134 EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21136 auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
21138 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
    return Pg;
21141 auto Op1 = convertToScalableVector(DAG, ContainerVT, Mask);
21142 auto Op2 = DAG.getConstant(0, DL, ContainerVT);
21144 return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, Pg.getValueType(),
21145 {Pg, Op1, Op2, DAG.getCondCode(ISD::SETNE)});
}
21148 // Convert all fixed length vector loads larger than NEON to masked_loads.
21149 SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
21150 SDValue Op, SelectionDAG &DAG) const {
21151 auto Load = cast<MaskedLoadSDNode>(Op);
  SDLoc DL(Op);
21154 EVT VT = Op.getValueType();
21155 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21157 SDValue Mask = convertFixedMaskToScalableVector(Load->getMask(), DAG);
  SDValue PassThru;
21160 bool IsPassThruZeroOrUndef = false;
21162 if (Load->getPassThru()->isUndef()) {
21163 PassThru = DAG.getUNDEF(ContainerVT);
    IsPassThruZeroOrUndef = true;
  } else {
    if (ContainerVT.isInteger())
      PassThru = DAG.getConstant(0, DL, ContainerVT);
    else
      PassThru = DAG.getConstantFP(0, DL, ContainerVT);
    if (isZerosVector(Load->getPassThru().getNode()))
      IsPassThruZeroOrUndef = true;
  }
21174 SDValue NewLoad = DAG.getMaskedLoad(
21175 ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(),
21176 Mask, PassThru, Load->getMemoryVT(), Load->getMemOperand(),
21177 Load->getAddressingMode(), Load->getExtensionType());
21179 SDValue Result = NewLoad;
21180 if (!IsPassThruZeroOrUndef) {
21181 SDValue OldPassThru =
21182 convertToScalableVector(DAG, ContainerVT, Load->getPassThru());
21183 Result = DAG.getSelect(DL, ContainerVT, Mask, Result, OldPassThru);
  }
21186 Result = convertFromScalableVector(DAG, VT, Result);
21187 SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
21188 return DAG.getMergeValues(MergedValues, DL);
}
21191 // Convert all fixed length vector stores larger than NEON to masked_stores.
21192 SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
21193 SDValue Op, SelectionDAG &DAG) const {
21194 auto Store = cast<StoreSDNode>(Op);
  SDLoc DL(Op);
21197 EVT VT = Store->getValue().getValueType();
21198 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21199 EVT MemVT = Store->getMemoryVT();
21201 auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
21202 auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
21204 if (VT.isFloatingPoint() && Store->isTruncatingStore()) {
21205 EVT TruncVT = ContainerVT.changeVectorElementType(
21206 Store->getMemoryVT().getVectorElementType());
21207 MemVT = MemVT.changeTypeToInteger();
21208 NewValue = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, TruncVT, Pg,
21209 NewValue, DAG.getTargetConstant(0, DL, MVT::i64),
                           DAG.getUNDEF(TruncVT));
    NewValue =
        getSVESafeBitCast(ContainerVT.changeTypeToInteger(), NewValue, DAG);
  }
21215 return DAG.getMaskedStore(Store->getChain(), DL, NewValue,
21216 Store->getBasePtr(), Store->getOffset(), Pg, MemVT,
21217 Store->getMemOperand(), Store->getAddressingMode(),
21218 Store->isTruncatingStore());
}
21221 SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
21222 SDValue Op, SelectionDAG &DAG) const {
21223 auto *Store = cast<MaskedStoreSDNode>(Op);
  SDLoc DL(Op);
21226 EVT VT = Store->getValue().getValueType();
21227 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21229 auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
21230 SDValue Mask = convertFixedMaskToScalableVector(Store->getMask(), DAG);
21232 return DAG.getMaskedStore(
21233 Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
21234 Mask, Store->getMemoryVT(), Store->getMemOperand(),
21235 Store->getAddressingMode(), Store->isTruncatingStore());
}
21238 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
21239 SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
21241 EVT VT = Op.getValueType();
21242 EVT EltVT = VT.getVectorElementType();
21244 bool Signed = Op.getOpcode() == ISD::SDIV;
21245 unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
  bool Negated;
  uint64_t SplatVal;
21249 if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
21250 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21251 SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
21252 SDValue Op2 = DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32);
21254 SDValue Pg = getPredicateForFixedLengthVector(DAG, dl, VT);
21255 SDValue Res = DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, ContainerVT, Pg, Op1, Op2);
    if (Negated)
21257 Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res);
21259 return convertFromScalableVector(DAG, VT, Res);
  }
21262 // Scalable vector i32/i64 DIV is supported.
21263 if (EltVT == MVT::i32 || EltVT == MVT::i64)
21264 return LowerToPredicatedOp(Op, DAG, PredOpcode);
21266 // Scalable vector i8/i16 DIV is not supported. Promote it to i32.
21267 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21268 EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
21269 EVT FixedWidenedVT = HalfVT.widenIntegerVectorElementType(*DAG.getContext());
21270 EVT ScalableWidenedVT = getContainerForFixedLengthVector(DAG, FixedWidenedVT);
21272 // If this is not a full vector, extend, div, and truncate it.
21273 EVT WidenedVT = VT.widenIntegerVectorElementType(*DAG.getContext());
21274 if (DAG.getTargetLoweringInfo().isTypeLegal(WidenedVT)) {
21275 unsigned ExtendOpcode = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
21276 SDValue Op0 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(0));
21277 SDValue Op1 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(1));
21278 SDValue Div = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0, Op1);
21279 return DAG.getNode(ISD::TRUNCATE, dl, VT, Div);
  }
21282 // Convert the operands to scalable vectors.
21283 SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
21284 SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
21286 // Extend the scalable operands.
21287 unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
21288 unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
21289 SDValue Op0Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op0);
21290 SDValue Op1Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op1);
21291 SDValue Op0Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op0);
21292 SDValue Op1Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op1);
21294 // Convert back to fixed vectors so the DIV can be further lowered.
21295 Op0Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op0Lo);
21296 Op1Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op1Lo);
21297 Op0Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op0Hi);
21298 Op1Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op1Hi);
  SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
                                 Op0Lo, Op1Lo);
  SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
                                 Op0Hi, Op1Hi);
21304 // Convert again to scalable vectors to truncate.
21305 ResultLo = convertToScalableVector(DAG, ScalableWidenedVT, ResultLo);
21306 ResultHi = convertToScalableVector(DAG, ScalableWidenedVT, ResultHi);
21307 SDValue ScalableResult = DAG.getNode(AArch64ISD::UZP1, dl, ContainerVT,
21308 ResultLo, ResultHi);
21310 return convertFromScalableVector(DAG, VT, ScalableResult);
}
21313 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
21314 SDValue Op, SelectionDAG &DAG) const {
21315 EVT VT = Op.getValueType();
21316 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
  SDLoc DL(Op);
21319 SDValue Val = Op.getOperand(0);
21320 EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
21321 Val = convertToScalableVector(DAG, ContainerVT, Val);
21323 bool Signed = Op.getOpcode() == ISD::SIGN_EXTEND;
21324 unsigned ExtendOpc = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
21326 // Repeatedly unpack Val until the result is of the desired element type.
  switch (ContainerVT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unimplemented container type");
  case MVT::nxv16i8:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
    if (VT.getVectorElementType() == MVT::i16)
      break;
    LLVM_FALLTHROUGH;
  case MVT::nxv8i16:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
    if (VT.getVectorElementType() == MVT::i32)
      break;
    LLVM_FALLTHROUGH;
  case MVT::nxv4i32:
    Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
    assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
    break;
  }
21346 return convertFromScalableVector(DAG, VT, Val);
}
21349 SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
21350 SDValue Op, SelectionDAG &DAG) const {
21351 EVT VT = Op.getValueType();
21352 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
  SDLoc DL(Op);
21355 SDValue Val = Op.getOperand(0);
21356 EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
21357 Val = convertToScalableVector(DAG, ContainerVT, Val);
21359 // Repeatedly truncate Val until the result is of the desired element type.
  switch (ContainerVT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unimplemented container type");
  case MVT::nxv2i64:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv4i32, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
    if (VT.getVectorElementType() == MVT::i32)
      break;
    LLVM_FALLTHROUGH;
  case MVT::nxv4i32:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
    if (VT.getVectorElementType() == MVT::i16)
      break;
    LLVM_FALLTHROUGH;
  case MVT::nxv8i16:
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
    Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);
    assert(VT.getVectorElementType() == MVT::i8 && "Unexpected element type!");
    break;
  }
21382 return convertFromScalableVector(DAG, VT, Val);
}
21385 SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
21386 SDValue Op, SelectionDAG &DAG) const {
21387 EVT VT = Op.getValueType();
21388 EVT InVT = Op.getOperand(0).getValueType();
21389 assert(InVT.isFixedLengthVector() && "Expected fixed length vector type!");
  SDLoc DL(Op);
21392 EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21393 SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
21395 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Op.getOperand(1));
}
21398 SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
21399 SDValue Op, SelectionDAG &DAG) const {
21400 EVT VT = Op.getValueType();
21401 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
  SDLoc DL(Op);
21404 EVT InVT = Op.getOperand(0).getValueType();
21405 EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21406 SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
21408 auto ScalableRes = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Op0,
21409 Op.getOperand(1), Op.getOperand(2));
21411 return convertFromScalableVector(DAG, VT, ScalableRes);
}
21414 // Convert vector operation 'Op' to an equivalent predicated operation whereby
21415 // the original operation's type is used to construct a suitable predicate.
21416 // NOTE: The results for inactive lanes are undefined.
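// For example, an ISD::FADD on v4f32 can be rewritten as the corresponding
// predicated SVE node on nxv4f32 under a PTRUE VL4 predicate, with an undef
// passthru operand appended for the *_MERGE_PASSTHRU opcodes.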
21417 SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
                                                   SelectionDAG &DAG,
21419 unsigned NewOp) const {
21420 EVT VT = Op.getValueType();
  SDLoc DL(Op);
21422 auto Pg = getPredicateForVector(DAG, DL, VT);
21424 if (VT.isFixedLengthVector()) {
21425 assert(isTypeLegal(VT) && "Expected only legal fixed-width types");
21426 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21428 // Create list of operands by converting existing ones to scalable types.
21429 SmallVector<SDValue, 4> Operands = {Pg};
21430 for (const SDValue &V : Op->op_values()) {
21431 if (isa<CondCodeSDNode>(V)) {
21432 Operands.push_back(V);
        continue;
      }
21436 if (const VTSDNode *VTNode = dyn_cast<VTSDNode>(V)) {
21437 EVT VTArg = VTNode->getVT().getVectorElementType();
21438 EVT NewVTArg = ContainerVT.changeVectorElementType(VTArg);
21439 Operands.push_back(DAG.getValueType(NewVTArg));
        continue;
      }
21443 assert(isTypeLegal(V.getValueType()) &&
21444 "Expected only legal fixed-width types");
21445 Operands.push_back(convertToScalableVector(DAG, ContainerVT, V));
    }
21448 if (isMergePassthruOpcode(NewOp))
21449 Operands.push_back(DAG.getUNDEF(ContainerVT));
21451 auto ScalableRes = DAG.getNode(NewOp, DL, ContainerVT, Operands);
21452 return convertFromScalableVector(DAG, VT, ScalableRes);
  }
21455 assert(VT.isScalableVector() && "Only expect to lower scalable vector op!");
21457 SmallVector<SDValue, 4> Operands = {Pg};
21458 for (const SDValue &V : Op->op_values()) {
21459 assert((!V.getValueType().isVector() ||
21460 V.getValueType().isScalableVector()) &&
21461 "Only scalable vectors are supported!");
21462 Operands.push_back(V);
  }
21465 if (isMergePassthruOpcode(NewOp))
21466 Operands.push_back(DAG.getUNDEF(VT));
21468 return DAG.getNode(NewOp, DL, VT, Operands, Op->getFlags());
}
21471 // If a fixed length vector operation has no side effects when applied to
21472 // undefined elements, we can safely use scalable vectors to perform the same
21473 // operation without needing to worry about predication.
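// For example, a bitwise AND of two v16i8 values can simply be performed as an
// AND of nxv16i8 containers: whatever the lanes beyond the fixed width compute
// is never observed once the result is shrunk back to the original type.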
21474 SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
21475 SelectionDAG &DAG) const {
21476 EVT VT = Op.getValueType();
21477 assert(useSVEForFixedLengthVectorVT(VT) &&
21478 "Only expected to lower fixed length vector operation!");
21479 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21481 // Create list of operands by converting existing ones to scalable types.
21482 SmallVector<SDValue, 4> Ops;
21483 for (const SDValue &V : Op->op_values()) {
21484 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
21486 // Pass through non-vector operands.
21487 if (!V.getValueType().isVector()) {
      Ops.push_back(V);
      continue;
    }
21492 // "cast" fixed length vector to a scalable vector.
21493 assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
21494 "Only fixed length vectors are supported!");
21495 Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
  }
21498 auto ScalableRes = DAG.getNode(Op.getOpcode(), SDLoc(Op), ContainerVT, Ops);
21499 return convertFromScalableVector(DAG, VT, ScalableRes);
}
21502 SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
21503 SelectionDAG &DAG) const {
21504 SDLoc DL(ScalarOp);
21505 SDValue AccOp = ScalarOp.getOperand(0);
21506 SDValue VecOp = ScalarOp.getOperand(1);
21507 EVT SrcVT = VecOp.getValueType();
21508 EVT ResVT = SrcVT.getVectorElementType();
21510 EVT ContainerVT = SrcVT;
21511 if (SrcVT.isFixedLengthVector()) {
21512 ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
21513 VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
  }
21516 SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
21517 SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
21519 // Convert operands to Scalable.
21520 AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
21521 DAG.getUNDEF(ContainerVT), AccOp, Zero);
21523 // Perform reduction.
21524 SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
                            Pg, AccOp, VecOp);
21527 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
}
21530 SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
21531 SelectionDAG &DAG) const {
21532 SDLoc DL(ReduceOp);
21533 SDValue Op = ReduceOp.getOperand(0);
21534 EVT OpVT = Op.getValueType();
21535 EVT VT = ReduceOp.getValueType();
21537 if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
    return SDValue();
21540 SDValue Pg = getPredicateForVector(DAG, DL, OpVT);
21542 switch (ReduceOp.getOpcode()) {
  default:
    return SDValue();
21545 case ISD::VECREDUCE_OR:
21546 if (isAllActivePredicate(DAG, Pg) && OpVT == MVT::nxv16i1)
21547 // The predicate can be 'Op' because
21548 // vecreduce_or(Op & <all true>) <=> vecreduce_or(Op).
21549 return getPTest(DAG, VT, Op, Op, AArch64CC::ANY_ACTIVE);
21551 return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
21552 case ISD::VECREDUCE_AND: {
21553 Op = DAG.getNode(ISD::XOR, DL, OpVT, Op, Pg);
    return getPTest(DAG, VT, Pg, Op, AArch64CC::NONE_ACTIVE);
  }
  case ISD::VECREDUCE_XOR: {
    SDValue ID =
        DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64);
21559 if (OpVT == MVT::nxv1i1) {
21560 // Emulate a CNTP on .Q using .D and a different governing predicate.
21561 Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Pg);
      Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Op);
    }
    SDValue Cntp =
        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64, ID, Pg, Op);
    return DAG.getAnyExtOrTrunc(Cntp, DL, VT);
  }
  }

  return SDValue();
}
21573 SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
                                                    SDValue ScalarOp,
21575 SelectionDAG &DAG) const {
21576 SDLoc DL(ScalarOp);
21577 SDValue VecOp = ScalarOp.getOperand(0);
21578 EVT SrcVT = VecOp.getValueType();
21580 if (useSVEForFixedLengthVectorVT(
          SrcVT,
21582 /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
21583 EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
21584 VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
  }
21587 // UADDV always returns an i64 result.
21588 EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
21589 SrcVT.getVectorElementType();
  EVT RdxVT = SrcVT;
21591 if (SrcVT.isFixedLengthVector() || Opcode == AArch64ISD::UADDV_PRED)
21592 RdxVT = getPackedSVEVectorVT(ResVT);
21594 SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
21595 SDValue Rdx = DAG.getNode(Opcode, DL, RdxVT, Pg, VecOp);
21596 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
21597 Rdx, DAG.getConstant(0, DL, MVT::i64));
21599 // The VEC_REDUCE nodes expect an element size result.
21600 if (ResVT != ScalarOp.getValueType())
21601 Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());
  return Res;
}

SDValue
21607 AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
21608 SelectionDAG &DAG) const {
21609 EVT VT = Op.getValueType();
  SDLoc DL(Op);
21612 EVT InVT = Op.getOperand(1).getValueType();
21613 EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21614 SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(1));
21615 SDValue Op2 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(2));
  // Convert the mask to a predicate (NOTE: We don't need to worry about
  // inactive lanes since VSELECT is safe when given undefined elements).
21619 EVT MaskVT = Op.getOperand(0).getValueType();
21620 EVT MaskContainerVT = getContainerForFixedLengthVector(DAG, MaskVT);
21621 auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0));
21622 Mask = DAG.getNode(ISD::TRUNCATE, DL,
21623 MaskContainerVT.changeVectorElementType(MVT::i1), Mask);
21625 auto ScalableRes = DAG.getNode(ISD::VSELECT, DL, ContainerVT,
                                 Mask, Op1, Op2);
21628 return convertFromScalableVector(DAG, VT, ScalableRes);
}
21631 SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
21632 SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
21634 EVT InVT = Op.getOperand(0).getValueType();
21635 EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21637 assert(useSVEForFixedLengthVectorVT(InVT) &&
21638 "Only expected to lower fixed length vector operation!");
21639 assert(Op.getValueType() == InVT.changeTypeToInteger() &&
21640 "Expected integer result of the same bit length as the inputs!");
21642 auto Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
21643 auto Op2 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
21644 auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
21646 EVT CmpVT = Pg.getValueType();
21647 auto Cmp = DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT,
21648 {Pg, Op1, Op2, Op.getOperand(2)});
21650 EVT PromoteVT = ContainerVT.changeTypeToInteger();
21651 auto Promote = DAG.getBoolExtOrTrunc(Cmp, DL, PromoteVT, InVT);
21652 return convertFromScalableVector(DAG, Op.getValueType(), Promote);
}

SDValue
21656 AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
21657 SelectionDAG &DAG) const {
  SDLoc DL(Op);
21659 auto SrcOp = Op.getOperand(0);
21660 EVT VT = Op.getValueType();
21661 EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21662 EVT ContainerSrcVT =
21663 getContainerForFixedLengthVector(DAG, SrcOp.getValueType());
21665 SrcOp = convertToScalableVector(DAG, ContainerSrcVT, SrcOp);
21666 Op = DAG.getNode(ISD::BITCAST, DL, ContainerDstVT, SrcOp);
21667 return convertFromScalableVector(DAG, VT, Op);
}
21670 SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
21671 SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
21673 unsigned NumOperands = Op->getNumOperands();
21675 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
21676 "Unexpected number of operands in CONCAT_VECTORS");
21678 auto SrcOp1 = Op.getOperand(0);
21679 auto SrcOp2 = Op.getOperand(1);
21680 EVT VT = Op.getValueType();
21681 EVT SrcVT = SrcOp1.getValueType();
21683 if (NumOperands > 2) {
21684 SmallVector<SDValue, 4> Ops;
21685 EVT PairVT = SrcVT.getDoubleNumVectorElementsVT(*DAG.getContext());
21686 for (unsigned I = 0; I < NumOperands; I += 2)
21687 Ops.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, PairVT,
21688 Op->getOperand(I), Op->getOperand(I + 1)));
21690 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
  }
21693 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21695 SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
21696 SrcOp1 = convertToScalableVector(DAG, ContainerVT, SrcOp1);
21697 SrcOp2 = convertToScalableVector(DAG, ContainerVT, SrcOp2);
21699 Op = DAG.getNode(AArch64ISD::SPLICE, DL, ContainerVT, Pg, SrcOp1, SrcOp2);
21701 return convertFromScalableVector(DAG, VT, Op);
}

SDValue
21705 AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
21706 SelectionDAG &DAG) const {
21707 EVT VT = Op.getValueType();
21708 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
  SDLoc DL(Op);
21711 SDValue Val = Op.getOperand(0);
21712 SDValue Pg = getPredicateForVector(DAG, DL, VT);
21713 EVT SrcVT = Val.getValueType();
21714 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21715 EVT ExtendVT = ContainerVT.changeVectorElementType(
21716 SrcVT.getVectorElementType());
21718 Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
21719 Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT.changeTypeToInteger(), Val);
21721 Val = convertToScalableVector(DAG, ContainerVT.changeTypeToInteger(), Val);
21722 Val = getSVESafeBitCast(ExtendVT, Val, DAG);
21723 Val = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
21724 Pg, Val, DAG.getUNDEF(ContainerVT));
21726 return convertFromScalableVector(DAG, VT, Val);
}

SDValue
21730 AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
21731 SelectionDAG &DAG) const {
21732 EVT VT = Op.getValueType();
21733 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
  SDLoc DL(Op);
21736 SDValue Val = Op.getOperand(0);
21737 EVT SrcVT = Val.getValueType();
21738 EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21739 EVT RoundVT = ContainerSrcVT.changeVectorElementType(
21740 VT.getVectorElementType());
21741 SDValue Pg = getPredicateForVector(DAG, DL, RoundVT);
21743 Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21744 Val = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, RoundVT, Pg, Val,
21745 Op.getOperand(1), DAG.getUNDEF(RoundVT));
21746 Val = getSVESafeBitCast(ContainerSrcVT.changeTypeToInteger(), Val, DAG);
21747 Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
21749 Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
21750 return DAG.getNode(ISD::BITCAST, DL, VT, Val);
}

SDValue
21754 AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
21755 SelectionDAG &DAG) const {
21756 EVT VT = Op.getValueType();
21757 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21759 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
21760 unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
21761 : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
  SDLoc DL(Op);
21764 SDValue Val = Op.getOperand(0);
21765 EVT SrcVT = Val.getValueType();
21766 EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21767 EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21769 if (ContainerSrcVT.getVectorElementType().getSizeInBits() <=
21770 ContainerDstVT.getVectorElementType().getSizeInBits()) {
21771 SDValue Pg = getPredicateForVector(DAG, DL, VT);
21773 Val = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
21774 VT.changeTypeToInteger(), Val);
21776 Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21777 Val = getSVESafeBitCast(ContainerDstVT.changeTypeToInteger(), Val, DAG);
21778 // Safe to use a larger than specified operand since we just unpacked the
21779 // data, hence the upper bits are zero.
21780 Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
21781 DAG.getUNDEF(ContainerDstVT));
21782 return convertFromScalableVector(DAG, VT, Val);
  } else {
21784 EVT CvtVT = ContainerSrcVT.changeVectorElementType(
21785 ContainerDstVT.getVectorElementType());
21786 SDValue Pg = getPredicateForVector(DAG, DL, CvtVT);
21788 Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21789 Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
21790 Val = getSVESafeBitCast(ContainerSrcVT, Val, DAG);
21791 Val = convertFromScalableVector(DAG, SrcVT, Val);
21793 Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
21794 return DAG.getNode(ISD::BITCAST, DL, VT, Val);
  }
}

SDValue
21799 AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
21800 SelectionDAG &DAG) const {
21801 EVT VT = Op.getValueType();
21802 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21804 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
21805 unsigned Opcode = IsSigned ? AArch64ISD::FCVTZS_MERGE_PASSTHRU
21806 : AArch64ISD::FCVTZU_MERGE_PASSTHRU;
  SDLoc DL(Op);
21809 SDValue Val = Op.getOperand(0);
21810 EVT SrcVT = Val.getValueType();
21811 EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21812 EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21814 if (ContainerSrcVT.getVectorElementType().getSizeInBits() <=
21815 ContainerDstVT.getVectorElementType().getSizeInBits()) {
21816 EVT CvtVT = ContainerDstVT.changeVectorElementType(
21817 ContainerSrcVT.getVectorElementType());
21818 SDValue Pg = getPredicateForVector(DAG, DL, VT);
21820 Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
21821 Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Val);
21823 Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21824 Val = getSVESafeBitCast(CvtVT, Val, DAG);
21825 Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
21826 DAG.getUNDEF(ContainerDstVT));
21827 return convertFromScalableVector(DAG, VT, Val);
  } else {
21829 EVT CvtVT = ContainerSrcVT.changeTypeToInteger();
21830 SDValue Pg = getPredicateForVector(DAG, DL, CvtVT);
21832 // Safe to use a larger than specified result since an fp_to_int where the
21833 // result doesn't fit into the destination is undefined.
21834 Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21835 Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
21836 Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
21838 return DAG.getNode(ISD::TRUNCATE, DL, VT, Val);
  }
}
21842 SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
21843 SDValue Op, SelectionDAG &DAG) const {
21844 EVT VT = Op.getValueType();
21845 assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21847 auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
21848 auto ShuffleMask = SVN->getMask();
  SDLoc DL(Op);
21851 SDValue Op1 = Op.getOperand(0);
21852 SDValue Op2 = Op.getOperand(1);
21854 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21855 Op1 = convertToScalableVector(DAG, ContainerVT, Op1);
21856 Op2 = convertToScalableVector(DAG, ContainerVT, Op2);
  bool ReverseEXT = false;
  unsigned Imm;
  if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
      Imm == VT.getVectorNumElements() - 1) {
    if (ReverseEXT)
      std::swap(Op1, Op2);
21865 EVT ScalarTy = VT.getVectorElementType();
21866 if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
21867 ScalarTy = MVT::i32;
21868 SDValue Scalar = DAG.getNode(
21869 ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
21870 DAG.getConstant(VT.getVectorNumElements() - 1, DL, MVT::i64));
21871 Op = DAG.getNode(AArch64ISD::INSR, DL, ContainerVT, Op2, Scalar);
21872 return convertFromScalableVector(DAG, VT, Op);
  }
21875 for (unsigned LaneSize : {64U, 32U, 16U}) {
    if (isREVMask(ShuffleMask, VT, LaneSize)) {
      EVT NewVT =
          getPackedSVEVectorVT(EVT::getIntegerVT(*DAG.getContext(), LaneSize));
      unsigned RevOp;
      unsigned EltSz = VT.getScalarSizeInBits();
      if (EltSz == 8)
        RevOp = AArch64ISD::BSWAP_MERGE_PASSTHRU;
      else if (EltSz == 16)
        RevOp = AArch64ISD::REVH_MERGE_PASSTHRU;
      else
        RevOp = AArch64ISD::REVW_MERGE_PASSTHRU;
21888 Op = DAG.getNode(ISD::BITCAST, DL, NewVT, Op1);
21889 Op = LowerToPredicatedOp(Op, DAG, RevOp);
21890 Op = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Op);
21891 return convertFromScalableVector(DAG, VT, Op);
    }
  }
21895 unsigned WhichResult;
21896 if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
21897 return convertFromScalableVector(
21898 DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op2));
21900 if (isTRNMask(ShuffleMask, VT, WhichResult)) {
21901 unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
21902 return convertFromScalableVector(
21903 DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
  }
21906 if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
21907 return convertFromScalableVector(
21908 DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op1));
21910 if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
21911 unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
21912 return convertFromScalableVector(
21913 DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
  }
21916 // Functions like isZIPMask return true when a ISD::VECTOR_SHUFFLE's mask
21917 // represents the same logical operation as performed by a ZIP instruction. In
21918 // isolation these functions do not mean the ISD::VECTOR_SHUFFLE is exactly
21919 // equivalent to an AArch64 instruction. There's the extra component of
21920 // ISD::VECTOR_SHUFFLE's value type to consider. Prior to SVE these functions
21921 // only operated on 64/128bit vector types that have a direct mapping to a
21922 // target register and so an exact mapping is implied.
21923 // However, when using SVE for fixed length vectors, most legal vector types
21924 // are actually sub-vectors of a larger SVE register. When mapping
21925 // ISD::VECTOR_SHUFFLE to an SVE instruction care must be taken to consider
21926 // how the mask's indices translate. Specifically, when the mapping requires
21927 // an exact meaning for a specific vector index (e.g. Index X is the last
21928 // vector element in the register) then such mappings are often only safe when
21929 // the exact SVE register size is know. The main exception to this is when
21930 // indices are logically relative to the first element of either
21931 // ISD::VECTOR_SHUFFLE operand because these relative indices don't change
21932 // when converting from fixed-length to scalable vector types (i.e. the start
21933 // of a fixed length vector is always the start of a scalable vector).
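  // For example, a reverse shuffle of v4i32 places element 3 first; that is
  // only equivalent to reversing a whole SVE register when the register holds
  // exactly four elements, hence the MinSVESize == MaxSVESize guard below.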
21934 unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
21935 unsigned MaxSVESize = Subtarget->getMaxSVEVectorSizeInBits();
21936 if (MinSVESize == MaxSVESize && MaxSVESize == VT.getSizeInBits()) {
21937 if (ShuffleVectorInst::isReverseMask(ShuffleMask) && Op2.isUndef()) {
21938 Op = DAG.getNode(ISD::VECTOR_REVERSE, DL, ContainerVT, Op1);
21939 return convertFromScalableVector(DAG, VT, Op);
21942 if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
21943 return convertFromScalableVector(
21944 DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op2));
21946 if (isUZPMask(ShuffleMask, VT, WhichResult)) {
21947 unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
21948 return convertFromScalableVector(
21949 DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
    }
21952 if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
21953 return convertFromScalableVector(
21954 DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op1));
21956 if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
21957 unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
21958 return convertFromScalableVector(
21959 DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
    }
  }

  return SDValue();
}
21966 SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
21967 SelectionDAG &DAG) const {
  SDLoc DL(Op);
21969 EVT InVT = Op.getValueType();
21971 assert(VT.isScalableVector() && isTypeLegal(VT) &&
21972 InVT.isScalableVector() && isTypeLegal(InVT) &&
21973 "Only expect to cast between legal scalable vector types!");
21974 assert(VT.getVectorElementType() != MVT::i1 &&
21975 InVT.getVectorElementType() != MVT::i1 &&
21976 "For predicate bitcasts, use getSVEPredicateBitCast");
  if (InVT == VT)
    return Op;
21981 EVT PackedVT = getPackedSVEVectorVT(VT.getVectorElementType());
21982 EVT PackedInVT = getPackedSVEVectorVT(InVT.getVectorElementType());
21984 // Safe bitcasting between unpacked vector types of different element counts
21985 // is currently unsupported because the following is missing the necessary
21986 // work to ensure the result's elements live where they're supposed to within
21987 // an SVE register.
21989 // e.g. nxv2i32 = XX??XX??
21990 // nxv4f16 = X?X?X?X?
21991 assert((VT.getVectorElementCount() == InVT.getVectorElementCount() ||
21992 VT == PackedVT || InVT == PackedInVT) &&
21993 "Unexpected bitcast!");
21995 // Pack input if required.
21996 if (InVT != PackedInVT)
21997 Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedInVT, Op);
21999 Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);
22001 // Unpack result if required.
22002 if (VT != PackedVT)
22003 Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
  return Op;
}
22008 bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
                                                  SDValue N) const {
22010 return ::isAllActivePredicate(DAG, N);
}
22013 EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {
22014 return ::getPromotedVTForPredicate(VT);
}
22017 bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
22018 SDValue Op, const APInt &OriginalDemandedBits,
22019 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
22020 unsigned Depth) const {
22022 unsigned Opc = Op.getOpcode();
  switch (Opc) {
22024 case AArch64ISD::VSHL: {
22025 // Match (VSHL (VLSHR Val X) X)
22026 SDValue ShiftL = Op;
22027 SDValue ShiftR = Op->getOperand(0);
    if (ShiftR->getOpcode() != AArch64ISD::VLSHR)
      return false;

    if (!ShiftL.hasOneUse() || !ShiftR.hasOneUse())
      return false;

    unsigned ShiftLBits = ShiftL->getConstantOperandVal(1);
    unsigned ShiftRBits = ShiftR->getConstantOperandVal(1);

    // Other cases can be handled as well, but this is not
    // implemented.
    if (ShiftRBits != ShiftLBits)
      return false;
22042 unsigned ScalarSize = Op.getScalarValueSizeInBits();
22043 assert(ScalarSize > ShiftLBits && "Invalid shift imm");
22045 APInt ZeroBits = APInt::getLowBitsSet(ScalarSize, ShiftLBits);
22046 APInt UnusedBits = ~OriginalDemandedBits;
22048 if ((ZeroBits & UnusedBits) != ZeroBits)
      return false;
22051 // All bits that are zeroed by (VSHL (VLSHR Val X) X) are not
22052 // used - simplify to just Val.
22053 return TLO.CombineTo(Op, ShiftR->getOperand(0));
  }
  }
22057 return TargetLowering::SimplifyDemandedBitsForTargetNode(
22058 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}
22061 bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
22062 return Op.getOpcode() == AArch64ISD::DUP ||
22063 (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
22064 Op.getOperand(0).getOpcode() == AArch64ISD::DUP) ||
22065 TargetLowering::isTargetCanonicalConstantNode(Op);
}
22068 bool AArch64TargetLowering::isConstantUnsignedBitfieldExtractLegal(
22069 unsigned Opc, LLT Ty1, LLT Ty2) const {
22070 return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
}