//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "MCTargetDesc/X86ShuffleDecode.h"
#include "X86.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>

using namespace llvm;
#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
    "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes) "
        "for innermost loops only. If specified, this option overrides "
        "alignment set by x86-experimental-pref-loop-alignment."),
    cl::Hidden);
static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);
static cl::opt<bool> ExperimentalUnorderedISEL(
    "x86-experimental-unordered-atomic-isel", cl::init(false),
    cl::desc("Use LoadSDNode and StoreSDNode instead of "
             "AtomicSDNode for unordered atomic loads and "
             "stores respectively."),
    cl::Hidden);
/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
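  // addBypassSlowDiv(N, M) lets CodeGenPrepare insert a runtime check that
  // performs an N-bit divide with the cheaper M-bit instruction whenever the
  // operands actually fit in M bits.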
  // Bypass expensive divides and use cheaper ones.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }
  // Setup Windows compiler runtime calls.
  if (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::SDIV_I64, "_alldiv", CallingConv::X86_StdCall },
      { RTLIB::UDIV_I64, "_aulldiv", CallingConv::X86_StdCall },
      { RTLIB::SREM_I64, "_allrem", CallingConv::X86_StdCall },
      { RTLIB::UREM_I64, "_aullrem", CallingConv::X86_StdCall },
      { RTLIB::MUL_I64, "_allmul", CallingConv::X86_StdCall },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }
  if (Subtarget.getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow.
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }
  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size
  // to 32 bits so the AtomicExpandPass will expand it so we don't need
  // cmpxchg8b.
  // FIXME: Should we be limiting the atomic size on other configs? Default is
  // 1024 bits.
  if (!Subtarget.canUseCMPXCHG8B())
    setMaxAtomicSizeInBitsSupported(32);
  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // SETOEQ and SETUNE require checking two conditions.
  for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
    setCondCodeAction(ISD::SETOEQ, VT, Expand);
    setCondCodeAction(ISD::SETUNE, VT, Expand);
  }
  // Integer absolute.
  if (Subtarget.canUseCMOV()) {
    setOperationAction(ISD::ABS, MVT::i16, Custom);
    setOperationAction(ISD::ABS, MVT::i32, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i64, Custom);
  }
  // Signed saturation subtraction.
  setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
  setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
  setOperationAction(ISD::SSUBSAT, MVT::i32, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SSUBSAT, MVT::i64, Custom);
  // Funnel shifts.
  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    // For slow shld targets we only lower for code size.
    LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;

    setOperationAction(ShiftOp, MVT::i8, Custom);
    setOperationAction(ShiftOp, MVT::i16, Custom);
    setOperationAction(ShiftOp, MVT::i32, ShiftDoubleAction);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp, MVT::i64, ShiftDoubleAction);
  }
  if (!Subtarget.useSoftFloat()) {
    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
    // operation.
    setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);

    // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
    // SSE has no i16 to fp conversion, only i32. We promote in the handler
    // to allow f80 to use i16 and f64 to use i16 with sse1 only.
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
    // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);

    // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);

    // Handle FP_TO_UINT by promoting the destination to a larger signed
    // conversion.
    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);

    setOperationAction(ISD::LRINT, MVT::f32, Custom);
    setOperationAction(ISD::LRINT, MVT::f64, Custom);
    setOperationAction(ISD::LLRINT, MVT::f32, Custom);
    setOperationAction(ISD::LLRINT, MVT::f64, Custom);

    if (!Subtarget.is64Bit()) {
      setOperationAction(ISD::LRINT, MVT::i64, Custom);
      setOperationAction(ISD::LLRINT, MVT::i64, Custom);
    }
  }
  if (Subtarget.hasSSE2()) {
    // Custom lowering for saturating float to int conversions.
    // We handle promotion to larger result types manually.
    for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) {
      setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
    }
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
    }
  }
  // Handle address space casts between mixed sized pointers.
  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!Subtarget.hasSSE2()) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
  }
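  // Note: setOperationPromotedToType is Promote plus an explicit choice of
  // the type to promote to, rather than leaving that choice to the type
  // legalizer.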
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);

  if (Subtarget.hasBMI()) {
    // Promote the i16 zero undef variant and force it on up to i32 when tzcnt
    // is enabled.
    setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16, MVT::i32);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Legal);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }
  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  } else {
    for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
      if (VT == MVT::i64 && !Subtarget.is64Bit())
        continue;
      setOperationAction(ISD::CTLZ, VT, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
    }
  }
  for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
                  ISD::STRICT_FP_TO_FP16}) {
    // Special handling for half-precision floating point conversions.
    // If we don't have F16C support, then lower half float conversions
    // into library calls.
    setOperationAction(
        Op, MVT::f32,
        (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
    // There's never any support for operations beyond MVT::f32.
    setOperationAction(Op, MVT::f64, Expand);
    setOperationAction(Op, MVT::f80, Expand);
    setOperationAction(Op, MVT::f128, Expand);
  }
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f80, MVT::f128}) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::bf16, Expand);
    setTruncStoreAction(VT, MVT::f16, Expand);
    setTruncStoreAction(VT, MVT::bf16, Expand);

    setOperationAction(ISD::BF16_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_BF16, VT, Expand);
  }
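  // PARITY is custom-lowered: the input is folded down to a single byte with
  // XORs and the result is then read from the parity flag (PF).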
  setOperationAction(ISD::PARITY, MVT::i8, Custom);
  setOperationAction(ISD::PARITY, MVT::i16, Custom);
  setOperationAction(ISD::PARITY, MVT::i32, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::PARITY, MVT::i64, Custom);
  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
    // popcntw is longer to encode than popcntl and also has a false dependency
    // on the dest that popcntl hasn't had since Cannon Lake.
    setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    else
      setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  }
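  // READCYCLECOUNTER is custom-lowered to RDTSC, with EDX:EAX combined into a
  // single i64 result.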
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  // Darwin ABI issue.
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol, VT, Custom);
    setOperationAction(ISD::BlockAddress, VT, Custom);
  }
  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }
  if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
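  // Only a cross-thread seq_cst fence needs an actual instruction on x86: it
  // is lowered to MFENCE (or a locked stack op when MFENCE is unavailable);
  // weaker fences are no-ops under the x86 memory model.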
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget.is64Bit())
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);

  if (Subtarget.canUseCMPXCHG16B())
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  if (Subtarget.isTargetPS())
    setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
  else
    setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);
  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
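  // Helper to register a common set of scalar FP operation actions for a
  // half-precision type. FCOPYSIGN is always expanded and SELECT always
  // custom-lowered, independent of the requested action.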
  auto setF16Action = [&] (MVT VT, LegalizeAction Action) {
    setOperationAction(ISD::FABS, VT, Action);
    setOperationAction(ISD::FNEG, VT, Action);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FREM, VT, Action);
    setOperationAction(ISD::FMA, VT, Action);
    setOperationAction(ISD::FMINNUM, VT, Action);
    setOperationAction(ISD::FMAXNUM, VT, Action);
    setOperationAction(ISD::FMINIMUM, VT, Action);
    setOperationAction(ISD::FMAXIMUM, VT, Action);
    setOperationAction(ISD::FSIN, VT, Action);
    setOperationAction(ISD::FCOS, VT, Action);
    setOperationAction(ISD::FSINCOS, VT, Action);
    setOperationAction(ISD::FSQRT, VT, Action);
    setOperationAction(ISD::FPOW, VT, Action);
    setOperationAction(ISD::FLOG, VT, Action);
    setOperationAction(ISD::FLOG2, VT, Action);
    setOperationAction(ISD::FLOG10, VT, Action);
    setOperationAction(ISD::FEXP, VT, Action);
    setOperationAction(ISD::FEXP2, VT, Action);
    setOperationAction(ISD::FCEIL, VT, Action);
    setOperationAction(ISD::FFLOOR, VT, Action);
    setOperationAction(ISD::FNEARBYINT, VT, Action);
    setOperationAction(ISD::FRINT, VT, Action);
    setOperationAction(ISD::BR_CC, VT, Action);
    setOperationAction(ISD::SETCC, VT, Action);
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SELECT_CC, VT, Action);
    setOperationAction(ISD::FROUND, VT, Action);
    setOperationAction(ISD::FROUNDEVEN, VT, Action);
    setOperationAction(ISD::FTRUNC, VT, Action);
  };
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    // f16, f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
                                                     : &X86::FR16RegClass);
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // These might be better off as horizontal vector ops.
      setOperationAction(ISD::FADD, VT, Custom);
      setOperationAction(ISD::FSUB, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
    // Half type will be promoted by default.
    setF16Action(MVT::f16, Promote);
    setOperationAction(ISD::FADD, MVT::f16, Promote);
    setOperationAction(ISD::FSUB, MVT::f16, Promote);
    setOperationAction(ISD::FMUL, MVT::f16, Promote);
    setOperationAction(ISD::FDIV, MVT::f16, Promote);
    setOperationAction(ISD::FP_ROUND, MVT::f16, LibCall);
    setOperationAction(ISD::FP_EXTEND, MVT::f32, LibCall);
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);

    setOperationAction(ISD::STRICT_FADD, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSUB, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMUL, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FDIV, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMA, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMINIMUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMAXIMUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FPOW, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG2, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG10, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FEXP, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FEXP2, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FRINT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, LibCall);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, LibCall);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);

    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
  } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
             (UseX87 || Is64Bit)) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    if (UseX87)
      setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    if (UseX87)
      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    if (UseX87) {
      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF, VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
  }
  // Expand FP32 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f32)) {
    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0f)); // xorps
  }
  // Expand FP64 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f64)) {
    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
      addLegalFPImmediate(APFloat(+0.0)); // FLD0
      addLegalFPImmediate(APFloat(+1.0)); // FLD1
      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0)); // xorpd
  }
  // Support fp16 0 immediate.
  if (isTypeLegal(MVT::f16))
    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEhalf()));
  // Handle constrained floating-point operations of scalar.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  // f80 always uses X87.
  if (UseX87) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }
    // Always expand sin/cos functions even though x87 has an instruction.
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
    setOperationAction(ISD::LROUND, MVT::f80, Expand);
    setOperationAction(ISD::LLROUND, MVT::f80, Expand);
    setOperationAction(ISD::LRINT, MVT::f80, Custom);
    setOperationAction(ISD::LLRINT, MVT::f80, Custom);
    // Handle constrained floating-point operations of scalar.
    setOperationAction(ISD::STRICT_FADD, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f80, Legal);
    if (isTypeLegal(MVT::f16)) {
      setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom);
    } else
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);

    // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
    // as Custom.
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
  }
  // f128 uses xmm registers, but most operations require libcalls.
  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                   : &X86::VR128RegClass);

    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps

    setOperationAction(ISD::FADD, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
    setOperationAction(ISD::FSUB, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
    setOperationAction(ISD::FDIV, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
    setOperationAction(ISD::FMUL, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
    setOperationAction(ISD::FMA, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMA, MVT::f128, LibCall);

    setOperationAction(ISD::FABS, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);

    setOperationAction(ISD::FSIN, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSIN, MVT::f128, LibCall);
    setOperationAction(ISD::FCOS, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FCOS, MVT::f128, LibCall);
    setOperationAction(ISD::FSINCOS, MVT::f128, LibCall);
    // No STRICT_FSINCOS
    setOperationAction(ISD::FSQRT, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
    // We need to custom handle any FP_ROUND with an f128 input, but
    // LegalizeDAG uses the result type to know when to run a custom handler.
    // So we have to list all legal floating point result types here.
    if (isTypeLegal(MVT::f32)) {
      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
    }
    if (isTypeLegal(MVT::f64)) {
      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
    }
    if (isTypeLegal(MVT::f80)) {
      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
    }

    setOperationAction(ISD::SETCC, MVT::f128, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  }
  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16,
                   MVT::v4f32, MVT::v8f32, MVT::v16f32,
                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
  }
  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16 ||
          VT.getVectorElementType() == MVT::bf16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);

    setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
    setOperationAction(ISD::STORE, MVT::v2f32, Custom);

    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::UREM, VT, Custom);
    }
    setOperationAction(ISD::MUL, MVT::v2i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i8, Custom);
    setOperationAction(ISD::MUL, MVT::v8i8, Custom);

    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::AVGCEILU, MVT::v16i8, Legal);
    setOperationAction(ISD::AVGCEILU, MVT::v8i16, Legal);

    setOperationAction(ISD::SMULO, MVT::v16i8, Custom);
    setOperationAction(ISD::UMULO, MVT::v16i8, Custom);
    setOperationAction(ISD::UMULO, MVT::v2i32, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
    }
    setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);

    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::ABS, VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
    for (auto VT : { MVT::v8f16, MVT::v2f64, MVT::v2i64 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);

      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
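    // Without AVX512FP16 there are no native v8f16 arithmetic instructions,
    // so the FP operations below are expanded.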
    setF16Action(MVT::v8f16, Expand);
    setOperationAction(ISD::FADD, MVT::v8f16, Expand);
    setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
    setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
    setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i8, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom);
    // Custom legalize these to avoid over promotion or custom promotion.
    for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
    }
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom);

    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f32, Custom);
    // We want to legalize this to an f64 load rather than an i64 load on
    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
    // store.
    setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
    setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
    setOperationAction(ISD::STORE, MVT::v2i32, Custom);
    setOperationAction(ISD::STORE, MVT::v4i16, Custom);
    setOperationAction(ISD::STORE, MVT::v8i8, Custom);

    // Add 32-bit vector stores to help vectorization opportunities.
    setOperationAction(ISD::STORE, MVT::v2i16, Custom);
    setOperationAction(ISD::STORE, MVT::v4i8, Custom);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);

    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    // In the customized shift lowering, the legal v4i32/v2i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      if (VT == MVT::v2i64) continue;
      setOperationAction(ISD::ROTL, VT, Custom);
      setOperationAction(ISD::ROTR, VT, Custom);
      setOperationAction(ISD::FSHL, VT, Custom);
      setOperationAction(ISD::FSHR, VT, Custom);
    }
    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);

    // These might be better off as horizontal vector ops.
    setOperationAction(ISD::ADD, MVT::i16, Custom);
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i16, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::FRINT, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal);
      setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal);
      setOperationAction(ISD::FROUNDEVEN, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy, Legal);

      setOperationAction(ISD::FROUND, RoundedTy, Custom);
    }
    setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
    setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v4i32, Legal);

    setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
    setOperationAction(ISD::SADDSAT, MVT::v2i64, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::v2i64, Custom);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    setOperationAction(ISD::SMULO, MVT::v2i32, Custom);
    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
    }
    if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
      // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
      // do the pre and post work in the vector domain.
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
      // We need to mark SINT_TO_FP as Custom even though we want to expand it
      // so that DAG combine doesn't try to turn it into uint_to_fp.
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
    }
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) {
    setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
  }
1298 if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1299 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1300 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1301 setOperationAction(ISD::ROTL, VT, Custom);
1302 setOperationAction(ISD::ROTR, VT, Custom);
1305 // XOP can efficiently perform BITREVERSE with VPPERM.
1306 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1307 setOperationAction(ISD::BITREVERSE, VT, Custom);
1309 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1310 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1311 setOperationAction(ISD::BITREVERSE, VT, Custom);
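// For illustration: with XOP, "llvm.bitreverse.v16i8" can select to a single
// VPPERM whose per-byte control selects bit-reversed source bytes; the
// scalar i8..i64 cases above are custom-lowered by bouncing through a vector
// register (a sketch of the strategy, not the exact selected sequence).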
1314 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1315 bool HasInt256 = Subtarget.hasInt256();
1317 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
1318 : &X86::VR256RegClass);
1319 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1320 : &X86::VR256RegClass);
1321 addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1322 : &X86::VR256RegClass);
1323 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1324 : &X86::VR256RegClass);
1325 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1326 : &X86::VR256RegClass);
1327 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1328 : &X86::VR256RegClass);
1329 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1330 : &X86::VR256RegClass);
1332 for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1333 setOperationAction(ISD::FFLOOR, VT, Legal);
1334 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1335 setOperationAction(ISD::FCEIL, VT, Legal);
1336 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1337 setOperationAction(ISD::FTRUNC, VT, Legal);
1338 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1339 setOperationAction(ISD::FRINT, VT, Legal);
1340 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1341 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1342 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1343 setOperationAction(ISD::FROUNDEVEN, VT, Legal);
1344 setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1346 setOperationAction(ISD::FROUND, VT, Custom);
1348 setOperationAction(ISD::FNEG, VT, Custom);
1349 setOperationAction(ISD::FABS, VT, Custom);
1350 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1353 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1354 // even though v8i16 is a legal type.
1355 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1356 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1357 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1358 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1359 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Custom);
1360 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Custom);
1361 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i32, Custom);
1362 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32, Custom);
1363 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Custom);
1364 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Custom);
1365 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Expand);
1366 setOperationAction(ISD::FP_ROUND, MVT::v8f16, Expand);
1367 setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Custom);
1368 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Custom);
1370 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal);
1371 setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal);
1372 setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal);
1373 setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal);
1374 setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal);
1375 setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal);
1376 setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal);
1377 setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal);
1378 setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal);
1379 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal);
1380 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal);
1382 if (!Subtarget.hasAVX512())
1383 setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1385 // In the customized shift lowering, the legal v8i32/v4i64 cases
1386 // in AVX2 will be recognized.
1387 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1388 setOperationAction(ISD::SRL, VT, Custom);
1389 setOperationAction(ISD::SHL, VT, Custom);
1390 setOperationAction(ISD::SRA, VT, Custom);
1391 if (VT == MVT::v4i64) continue;
1392 setOperationAction(ISD::ROTL, VT, Custom);
1393 setOperationAction(ISD::ROTR, VT, Custom);
1394 setOperationAction(ISD::FSHL, VT, Custom);
1395 setOperationAction(ISD::FSHR, VT, Custom);
1398 // These types need custom splitting if their input is a 128-bit vector.
1399 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1400 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1401 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1402 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1404 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1405 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1406 setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
1407 setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
1408 setOperationAction(ISD::SELECT, MVT::v16f16, Custom);
1409 setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
1410 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1412 for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1413 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1414 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1415 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1418 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1419 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1420 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1421 setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
1423 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1424 setOperationAction(ISD::SETCC, VT, Custom);
1425 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1426 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1427 setOperationAction(ISD::CTPOP, VT, Custom);
1428 setOperationAction(ISD::CTLZ, VT, Custom);
1430 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1431 // setcc all the way to isel and prefer SETGT in some isel patterns.
1432 setCondCodeAction(ISD::SETLT, VT, Custom);
1433 setCondCodeAction(ISD::SETLE, VT, Custom);
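// Illustrative example of the cond-code rewrite: since there is no PCMPLT,
//   %c = icmp slt <8 x i32> %a, %b
// is lowered by swapping the operands and emitting PCMPGT, i.e.
// setcc(a, b, SETLT) becomes setcc(b, a, SETGT).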
1436 if (Subtarget.hasAnyFMA()) {
1437 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1438 MVT::v2f64, MVT::v4f64 }) {
1439 setOperationAction(ISD::FMA, VT, Legal);
1440 setOperationAction(ISD::STRICT_FMA, VT, Legal);
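// For illustration (assuming an FMA3-capable target): with FMA legal,
//   %r = call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b,
//                                         <8 x float> %c)
// selects to a single vfmadd213ps instead of separate vmulps and vaddps.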
1444 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1445 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1446 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1449 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1450 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1451 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1452 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1454 setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
1455 setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
1456 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1457 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1458 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1459 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1460 setOperationAction(ISD::AVGCEILU, MVT::v16i16, HasInt256 ? Legal : Custom);
1461 setOperationAction(ISD::AVGCEILU, MVT::v32i8, HasInt256 ? Legal : Custom);
1463 setOperationAction(ISD::SMULO, MVT::v32i8, Custom);
1464 setOperationAction(ISD::UMULO, MVT::v32i8, Custom);
1466 setOperationAction(ISD::ABS, MVT::v4i64, Custom);
1467 setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
1468 setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
1469 setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
1470 setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
1472 setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1473 setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1474 setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1475 setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1476 setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1477 setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1478 setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1479 setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1480 setOperationAction(ISD::UADDSAT, MVT::v8i32, Custom);
1481 setOperationAction(ISD::USUBSAT, MVT::v8i32, Custom);
1482 setOperationAction(ISD::UADDSAT, MVT::v4i64, Custom);
1483 setOperationAction(ISD::USUBSAT, MVT::v4i64, Custom);
1485 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1486 setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
1487 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1488 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1489 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1490 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1493 for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1494 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1495 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1496 }
1498 if (HasInt256) {
1499 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1500 // when we have a 256-bit-wide blend with immediate.
1501 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1502 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1504 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1505 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1506 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1507 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1508 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1509 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1510 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1511 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1512 }
1513 }
1515 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1516 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1517 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1518 setOperationAction(ISD::MSTORE, VT, Legal);
1521 // Extract subvector is special because the value type
1522 // (result) is 128-bit but the source is 256-bit wide.
1523 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1524 MVT::v4f32, MVT::v2f64 }) {
1525 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1528 // Custom lower several nodes for 256-bit types.
1529 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1530 MVT::v16f16, MVT::v8f32, MVT::v4f64 }) {
1531 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1532 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1533 setOperationAction(ISD::VSELECT, VT, Custom);
1534 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1535 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1536 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1537 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1538 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1539 setOperationAction(ISD::STORE, VT, Custom);
1541 setF16Action(MVT::v16f16, Expand);
1542 setOperationAction(ISD::FADD, MVT::v16f16, Expand);
1543 setOperationAction(ISD::FSUB, MVT::v16f16, Expand);
1544 setOperationAction(ISD::FMUL, MVT::v16f16, Expand);
1545 setOperationAction(ISD::FDIV, MVT::v16f16, Expand);
1547 if (HasInt256) {
1548 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1550 // Custom legalize 2x32 to get a little better code.
1551 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1552 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1554 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1555 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1556 setOperationAction(ISD::MGATHER, VT, Custom);
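// Illustrative example (AVX2 path): an @llvm.masked.gather of <4 x float>
// can select to vgatherdps; the v2f32/v2i32 cases above are Custom so they
// can be widened to the 4-element form instead of being scalarized.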
1560 if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
1561 Subtarget.hasF16C()) {
1562 for (MVT VT : { MVT::f16, MVT::v2f16, MVT::v4f16, MVT::v8f16 }) {
1563 setOperationAction(ISD::FP_ROUND, VT, Custom);
1564 setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1566 for (MVT VT : { MVT::f32, MVT::v2f32, MVT::v4f32 }) {
1567 setOperationAction(ISD::FP_EXTEND, VT, Custom);
1568 setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
1570 for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1571 setOperationPromotedToType(Opc, MVT::v8f16, MVT::v8f32);
1572 setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1575 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1576 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal);
1579 // This block controls legalization of the mask vector sizes that are
1580 // available with AVX512. 512-bit vectors are in a separate block controlled
1581 // by useAVX512Regs.
1582 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1583 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1584 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1585 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1586 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1587 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1589 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1590 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1591 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1593 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1594 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1595 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1596 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1597 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1598 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1599 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1600 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1601 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1602 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1603 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom);
1604 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom);
1606 // There is no byte sized k-register load or store without AVX512DQ.
1607 if (!Subtarget.hasDQI()) {
1608 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1609 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1610 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1611 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1613 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1614 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1615 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1616 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1619 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1620 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1621 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1622 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1623 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1626 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
1627 setOperationAction(ISD::VSELECT, VT, Expand);
1629 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1630 setOperationAction(ISD::SETCC, VT, Custom);
1631 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1632 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1633 setOperationAction(ISD::SELECT, VT, Custom);
1634 setOperationAction(ISD::TRUNCATE, VT, Custom);
1636 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1637 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1638 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1639 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1640 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1641 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1644 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1645 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1648 // This block controls legalization for 512-bit operations with 32/64 bit
1649 // elements. 512-bits can be disabled based on prefer-vector-width and
1650 // required-vector-width function attributes.
1651 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1652 bool HasBWI = Subtarget.hasBWI();
1654 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1655 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1656 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1657 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1658 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1659 addRegisterClass(MVT::v32f16, &X86::VR512RegClass);
1660 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1662 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1663 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1664 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1665 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1666 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1667 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1668 if (HasBWI)
1669 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1672 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1673 setOperationAction(ISD::FNEG, VT, Custom);
1674 setOperationAction(ISD::FABS, VT, Custom);
1675 setOperationAction(ISD::FMA, VT, Legal);
1676 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1677 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1680 for (MVT VT : { MVT::v16i1, MVT::v16i8, MVT::v16i16 }) {
1681 setOperationPromotedToType(ISD::FP_TO_SINT , VT, MVT::v16i32);
1682 setOperationPromotedToType(ISD::FP_TO_UINT , VT, MVT::v16i32);
1683 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1684 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1686 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Custom);
1687 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Custom);
1688 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Custom);
1689 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Custom);
1690 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Custom);
1691 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Custom);
1692 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Custom);
1693 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Custom);
1694 setOperationAction(ISD::FP_EXTEND, MVT::v8f64, Custom);
1695 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Custom);
1697 setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal);
1698 setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal);
1699 setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal);
1700 setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal);
1701 setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal);
1702 setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal);
1703 setOperationAction(ISD::STRICT_FDIV, MVT::v16f32, Legal);
1704 setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal);
1705 setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal);
1706 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal);
1707 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal);
1709 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1710 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1711 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1712 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1713 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1714 if (HasBWI)
1715 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1717 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1718 // to 512-bit rather than use the AVX2 instructions so that we can use
1719 // k-registers.
1720 if (!Subtarget.hasVLX()) {
1721 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1722 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1723 setOperationAction(ISD::MLOAD, VT, Custom);
1724 setOperationAction(ISD::MSTORE, VT, Custom);
1728 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Legal);
1729 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Legal);
1730 setOperationAction(ISD::TRUNCATE, MVT::v32i8, HasBWI ? Legal : Custom);
1731 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1732 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1733 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1734 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1735 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1736 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1737 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1738 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1739 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1740 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1742 if (HasBWI) {
1743 // Extends from v64i1 masks to 512-bit vectors.
1744 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1745 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1746 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1747 }
1749 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1750 setOperationAction(ISD::FFLOOR, VT, Legal);
1751 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1752 setOperationAction(ISD::FCEIL, VT, Legal);
1753 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1754 setOperationAction(ISD::FTRUNC, VT, Legal);
1755 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1756 setOperationAction(ISD::FRINT, VT, Legal);
1757 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1758 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1759 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1760 setOperationAction(ISD::FROUNDEVEN, VT, Legal);
1761 setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1763 setOperationAction(ISD::FROUND, VT, Custom);
1766 for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1767 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1768 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1771 setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1772 setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1773 setOperationAction(ISD::ADD, MVT::v64i8, HasBWI ? Legal : Custom);
1774 setOperationAction(ISD::SUB, MVT::v64i8, HasBWI ? Legal : Custom);
1776 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1777 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1778 setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1779 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1781 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1782 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1783 setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1784 setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1785 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1786 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1787 setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
1788 setOperationAction(ISD::AVGCEILU, MVT::v64i8, HasBWI ? Legal : Custom);
1790 setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
1791 setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
1793 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1795 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1796 setOperationAction(ISD::SRL, VT, Custom);
1797 setOperationAction(ISD::SHL, VT, Custom);
1798 setOperationAction(ISD::SRA, VT, Custom);
1799 setOperationAction(ISD::ROTL, VT, Custom);
1800 setOperationAction(ISD::ROTR, VT, Custom);
1801 setOperationAction(ISD::SETCC, VT, Custom);
1803 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1804 // setcc all the way to isel and prefer SETGT in some isel patterns.
1805 setCondCodeAction(ISD::SETLT, VT, Custom);
1806 setCondCodeAction(ISD::SETLE, VT, Custom);
1808 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1809 setOperationAction(ISD::SMAX, VT, Legal);
1810 setOperationAction(ISD::UMAX, VT, Legal);
1811 setOperationAction(ISD::SMIN, VT, Legal);
1812 setOperationAction(ISD::UMIN, VT, Legal);
1813 setOperationAction(ISD::ABS, VT, Legal);
1814 setOperationAction(ISD::CTPOP, VT, Custom);
1815 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1816 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1819 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1820 setOperationAction(ISD::ABS, VT, HasBWI ? Legal : Custom);
1821 setOperationAction(ISD::CTPOP, VT, Subtarget.hasBITALG() ? Legal : Custom);
1822 setOperationAction(ISD::CTLZ, VT, Custom);
1823 setOperationAction(ISD::SMAX, VT, HasBWI ? Legal : Custom);
1824 setOperationAction(ISD::UMAX, VT, HasBWI ? Legal : Custom);
1825 setOperationAction(ISD::SMIN, VT, HasBWI ? Legal : Custom);
1826 setOperationAction(ISD::UMIN, VT, HasBWI ? Legal : Custom);
1827 setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1828 setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1829 setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1830 setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1833 setOperationAction(ISD::FSHL, MVT::v64i8, Custom);
1834 setOperationAction(ISD::FSHR, MVT::v64i8, Custom);
1835 setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1836 setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1837 setOperationAction(ISD::FSHL, MVT::v16i32, Custom);
1838 setOperationAction(ISD::FSHR, MVT::v16i32, Custom);
1840 if (Subtarget.hasDQI()) {
1841 for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1842 ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1843 ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1844 setOperationAction(Opc, MVT::v8i64, Custom);
1845 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1848 if (Subtarget.hasCDI()) {
1849 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1850 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1851 setOperationAction(ISD::CTLZ, VT, Legal);
1853 } // Subtarget.hasCDI()
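// Illustrative example: with CDI, "llvm.ctlz.v16i32" selects to a single
// vplzcntd; on non-VLX subtargets the 128/256-bit cases reach the same
// instruction by widening to 512 bits first, as the comment above notes.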
1855 if (Subtarget.hasVPOPCNTDQ()) {
1856 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1857 setOperationAction(ISD::CTPOP, VT, Legal);
1860 // Extract subvector is special because the value type
1861 // (result) is 256-bit but the source is 512-bit wide.
1862 // 128-bit was made Legal under AVX1.
1863 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1864 MVT::v8f32, MVT::v4f64 })
1865 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1867 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1868 MVT::v32f16, MVT::v16f32, MVT::v8f64 }) {
1869 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1870 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1871 setOperationAction(ISD::SELECT, VT, Custom);
1872 setOperationAction(ISD::VSELECT, VT, Custom);
1873 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1874 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1875 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1876 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1877 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1879 setF16Action(MVT::v32f16, Expand);
1880 setOperationAction(ISD::FP_ROUND, MVT::v16f16, Custom);
1881 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Custom);
1882 setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Legal);
1883 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Legal);
1884 for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1885 setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1886 setOperationPromotedToType(Opc, MVT::v32f16, MVT::v32f32);
1889 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1890 setOperationAction(ISD::MLOAD, VT, Legal);
1891 setOperationAction(ISD::MSTORE, VT, Legal);
1892 setOperationAction(ISD::MGATHER, VT, Custom);
1893 setOperationAction(ISD::MSCATTER, VT, Custom);
1896 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1897 setOperationAction(ISD::MLOAD, VT, Legal);
1898 setOperationAction(ISD::MSTORE, VT, Legal);
1901 setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1902 setOperationAction(ISD::STORE, MVT::v64i8, Custom);
1905 if (Subtarget.hasVBMI2()) {
1906 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1907 MVT::v16i16, MVT::v8i32, MVT::v4i64,
1908 MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1909 setOperationAction(ISD::FSHL, VT, Custom);
1910 setOperationAction(ISD::FSHR, VT, Custom);
1913 setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
1914 setOperationAction(ISD::ROTR, MVT::v8i16, Custom);
1915 setOperationAction(ISD::ROTR, MVT::v16i16, Custom);
1916 setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
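// For illustration (assuming AVX512VBMI2): a funnel shift such as
//   %r = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %a, <32 x i16> %b,
//                                          <32 x i16> %amt)
// can be selected to vpshldvw, which is what this Custom lowering targets.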
1920 // This block controls legalization for operations that don't have
1921 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1922 // 128/256-bit types.
1923 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1924 // These operations are handled on non-VLX by artificially widening in
1925 // isel patterns.
1927 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32, Custom);
1928 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Custom);
1929 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom);
1931 if (Subtarget.hasDQI()) {
1932 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1933 // v2f32 UINT_TO_FP is already custom under SSE2.
1934 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1935 isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
1936 "Unexpected operation action!");
1937 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1938 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
1939 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
1940 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
1941 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
1944 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1945 setOperationAction(ISD::SMAX, VT, Legal);
1946 setOperationAction(ISD::UMAX, VT, Legal);
1947 setOperationAction(ISD::SMIN, VT, Legal);
1948 setOperationAction(ISD::UMIN, VT, Legal);
1949 setOperationAction(ISD::ABS, VT, Legal);
1952 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1953 setOperationAction(ISD::ROTL, VT, Custom);
1954 setOperationAction(ISD::ROTR, VT, Custom);
1957 // Custom legalize 2x32 to get a little better code.
1958 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1959 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1961 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1962 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1963 setOperationAction(ISD::MSCATTER, VT, Custom);
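// Illustrative example: an @llvm.masked.scatter of <8 x i32> can become a
// vpscatterdd; without VLX the narrower operands are first widened to
// 512-bit vectors (a sketch of the intent, not the full legalization).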
1965 if (Subtarget.hasDQI()) {
1966 for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1967 ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1968 ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) {
1969 setOperationAction(Opc, MVT::v2i64, Custom);
1970 setOperationAction(Opc, MVT::v4i64, Custom);
1972 setOperationAction(ISD::MUL, MVT::v2i64, Legal);
1973 setOperationAction(ISD::MUL, MVT::v4i64, Legal);
1976 if (Subtarget.hasCDI()) {
1977 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1978 setOperationAction(ISD::CTLZ, VT, Legal);
1980 } // Subtarget.hasCDI()
1982 if (Subtarget.hasVPOPCNTDQ()) {
1983 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1984 setOperationAction(ISD::CTPOP, VT, Legal);
1988 // This block controls legalization of v32i1/v64i1 which are available with
1989 // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1990 // AVX512BW.
1991 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1992 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1993 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1995 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1996 setOperationAction(ISD::VSELECT, VT, Expand);
1997 setOperationAction(ISD::TRUNCATE, VT, Custom);
1998 setOperationAction(ISD::SETCC, VT, Custom);
1999 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2000 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
2001 setOperationAction(ISD::SELECT, VT, Custom);
2002 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2003 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2004 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
2005 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
2008 for (auto VT : { MVT::v16i1, MVT::v32i1 })
2009 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
2011 // Extends from v32i1 masks to 256-bit vectors.
2012 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
2013 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
2014 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
2016 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
2017 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
2018 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
2021 // These operations are handled on non-VLX by artificially widening in
2022 // isel patterns.
2023 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
2025 if (Subtarget.hasBITALG()) {
2026 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
2027 setOperationAction(ISD::CTPOP, VT, Legal);
2031 if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) {
2032 auto setGroup = [&] (MVT VT) {
2033 setOperationAction(ISD::FADD, VT, Legal);
2034 setOperationAction(ISD::STRICT_FADD, VT, Legal);
2035 setOperationAction(ISD::FSUB, VT, Legal);
2036 setOperationAction(ISD::STRICT_FSUB, VT, Legal);
2037 setOperationAction(ISD::FMUL, VT, Legal);
2038 setOperationAction(ISD::STRICT_FMUL, VT, Legal);
2039 setOperationAction(ISD::FDIV, VT, Legal);
2040 setOperationAction(ISD::STRICT_FDIV, VT, Legal);
2041 setOperationAction(ISD::FSQRT, VT, Legal);
2042 setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
2044 setOperationAction(ISD::FFLOOR, VT, Legal);
2045 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
2046 setOperationAction(ISD::FCEIL, VT, Legal);
2047 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
2048 setOperationAction(ISD::FTRUNC, VT, Legal);
2049 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
2050 setOperationAction(ISD::FRINT, VT, Legal);
2051 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
2052 setOperationAction(ISD::FNEARBYINT, VT, Legal);
2053 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
2055 setOperationAction(ISD::LOAD, VT, Legal);
2056 setOperationAction(ISD::STORE, VT, Legal);
2058 setOperationAction(ISD::FMA, VT, Legal);
2059 setOperationAction(ISD::STRICT_FMA, VT, Legal);
2060 setOperationAction(ISD::VSELECT, VT, Legal);
2061 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2062 setOperationAction(ISD::SELECT, VT, Custom);
2064 setOperationAction(ISD::FNEG, VT, Custom);
2065 setOperationAction(ISD::FABS, VT, Custom);
2066 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
2067 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2068 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2069 };
2071 // AVX512_FP16 scalar operations
2072 setGroup(MVT::f16);
2073 setOperationAction(ISD::FREM, MVT::f16, Promote);
2074 setOperationAction(ISD::STRICT_FREM, MVT::f16, Promote);
2075 setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
2076 setOperationAction(ISD::BR_CC, MVT::f16, Expand);
2077 setOperationAction(ISD::SETCC, MVT::f16, Custom);
2078 setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
2079 setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
2080 setOperationAction(ISD::FROUND, MVT::f16, Custom);
2081 setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
2082 setOperationAction(ISD::FROUNDEVEN, MVT::f16, Legal);
2083 setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Legal);
2084 setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
2085 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
2086 setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
2087 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
2089 setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
2090 setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);
2092 if (Subtarget.useAVX512Regs()) {
2093 setGroup(MVT::v32f16);
2094 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32f16, Custom);
2095 setOperationAction(ISD::SINT_TO_FP, MVT::v32i16, Legal);
2096 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v32i16, Legal);
2097 setOperationAction(ISD::UINT_TO_FP, MVT::v32i16, Legal);
2098 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v32i16, Legal);
2099 setOperationAction(ISD::FP_ROUND, MVT::v16f16, Legal);
2100 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Legal);
2101 setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Legal);
2102 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Legal);
2103 setOperationAction(ISD::FP_EXTEND, MVT::v8f64, Legal);
2104 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal);
2105 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32f16, Custom);
2107 setOperationAction(ISD::FP_TO_SINT, MVT::v32i16, Custom);
2108 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v32i16, Custom);
2109 setOperationAction(ISD::FP_TO_UINT, MVT::v32i16, Custom);
2110 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v32i16, Custom);
2111 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v32i8, MVT::v32i16);
2112 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8,
2113 MVT::v32i16);
2114 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v32i8, MVT::v32i16);
2115 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i8,
2116 MVT::v32i16);
2117 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v32i1, MVT::v32i16);
2118 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i1,
2119 MVT::v32i16);
2120 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v32i1, MVT::v32i16);
2121 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i1,
2122 MVT::v32i16);
2124 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f16, Legal);
2125 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32f16, Legal);
2126 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32f16, Custom);
2128 setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Legal);
2129 setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal);
2131 setOperationAction(ISD::STRICT_FSETCC, MVT::v32i1, Custom);
2132 setOperationAction(ISD::STRICT_FSETCCS, MVT::v32i1, Custom);
2135 if (Subtarget.hasVLX()) {
2136 setGroup(MVT::v8f16);
2137 setGroup(MVT::v16f16);
2139 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8f16, Legal);
2140 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16f16, Custom);
2141 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Legal);
2142 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i16, Legal);
2143 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Legal);
2144 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i16, Legal);
2145 setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Legal);
2146 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i16, Legal);
2147 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Legal);
2148 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i16, Legal);
2150 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
2151 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom);
2152 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
2153 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom);
2154 setOperationAction(ISD::FP_ROUND, MVT::v8f16, Legal);
2155 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f16, Legal);
2156 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
2157 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal);
2158 setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
2159 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal);
2161 // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE
2162 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f16, Custom);
2163 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16f16, Custom);
2165 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f16, Legal);
2166 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16f16, Legal);
2167 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f16, Custom);
2169 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Legal);
2170 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Legal);
2171 setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Legal);
2172 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal);
2174 // Need to custom widen these to prevent scalarization.
2175 setOperationAction(ISD::LOAD, MVT::v4f16, Custom);
2176 setOperationAction(ISD::STORE, MVT::v4f16, Custom);
2180 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
2181 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
2182 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
2183 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
2184 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
2185 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
2187 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
2188 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
2189 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
2190 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
2191 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
2193 if (Subtarget.hasBWI()) {
2194 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
2195 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
2198 if (Subtarget.hasFP16()) {
2199 // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64
2200 setOperationAction(ISD::FP_TO_SINT, MVT::v2f16, Custom);
2201 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom);
2202 setOperationAction(ISD::FP_TO_UINT, MVT::v2f16, Custom);
2203 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom);
2204 setOperationAction(ISD::FP_TO_SINT, MVT::v4f16, Custom);
2205 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom);
2206 setOperationAction(ISD::FP_TO_UINT, MVT::v4f16, Custom);
2207 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom);
2208 // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16
2209 setOperationAction(ISD::SINT_TO_FP, MVT::v2f16, Custom);
2210 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom);
2211 setOperationAction(ISD::UINT_TO_FP, MVT::v2f16, Custom);
2212 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom);
2213 setOperationAction(ISD::SINT_TO_FP, MVT::v4f16, Custom);
2214 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom);
2215 setOperationAction(ISD::UINT_TO_FP, MVT::v4f16, Custom);
2216 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom);
2217 // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16
2218 setOperationAction(ISD::FP_ROUND, MVT::v2f16, Custom);
2219 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f16, Custom);
2220 setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
2221 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f16, Custom);
2222 // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32
2223 setOperationAction(ISD::FP_EXTEND, MVT::v2f16, Custom);
2224 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f16, Custom);
2225 setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Custom);
2226 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f16, Custom);
2229 setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
2230 setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
2231 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
2234 if (Subtarget.hasAMXTILE()) {
2235 addRegisterClass(MVT::x86amx, &X86::TILERegClass);
2238 // We want to custom lower some of our intrinsics.
2239 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
2240 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
2241 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
2242 if (!Subtarget.is64Bit()) {
2243 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
2246 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
2247 // handle type legalization for these operations here.
2249 // FIXME: We really should do custom legalization for addition and
2250 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
2251 // than generic legalization for 64-bit multiplication-with-overflow, though.
2252 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
2253 if (VT == MVT::i64 && !Subtarget.is64Bit())
2254 continue;
2255 // Add/Sub/Mul with overflow operations are custom lowered.
2256 setOperationAction(ISD::SADDO, VT, Custom);
2257 setOperationAction(ISD::UADDO, VT, Custom);
2258 setOperationAction(ISD::SSUBO, VT, Custom);
2259 setOperationAction(ISD::USUBO, VT, Custom);
2260 setOperationAction(ISD::SMULO, VT, Custom);
2261 setOperationAction(ISD::UMULO, VT, Custom);
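// Illustrative example of the overflow lowering: for
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// the custom lowering emits an ADD that defines EFLAGS and reads the
// overflow bit with SETO (register assignment is up to regalloc).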
2263 // Support carry in as value rather than glue.
2264 setOperationAction(ISD::ADDCARRY, VT, Custom);
2265 setOperationAction(ISD::SUBCARRY, VT, Custom);
2266 setOperationAction(ISD::SETCCCARRY, VT, Custom);
2267 setOperationAction(ISD::SADDO_CARRY, VT, Custom);
2268 setOperationAction(ISD::SSUBO_CARRY, VT, Custom);
2271 if (!Subtarget.is64Bit()) {
2272 // These libcalls are not available in 32-bit.
2273 setLibcallName(RTLIB::SHL_I128, nullptr);
2274 setLibcallName(RTLIB::SRL_I128, nullptr);
2275 setLibcallName(RTLIB::SRA_I128, nullptr);
2276 setLibcallName(RTLIB::MUL_I128, nullptr);
2277 // The MULO libcall is not part of libgcc, only compiler-rt.
2278 setLibcallName(RTLIB::MULO_I64, nullptr);
2280 // The MULO libcall is not part of libgcc, only compiler-rt.
2281 setLibcallName(RTLIB::MULO_I128, nullptr);
2283 // Combine sin / cos into _sincos_stret if it is available.
2284 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
2285 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
2286 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
2287 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
2290 if (Subtarget.isTargetWin64()) {
2291 setOperationAction(ISD::SDIV, MVT::i128, Custom);
2292 setOperationAction(ISD::UDIV, MVT::i128, Custom);
2293 setOperationAction(ISD::SREM, MVT::i128, Custom);
2294 setOperationAction(ISD::UREM, MVT::i128, Custom);
2295 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
2296 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
2297 setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
2298 setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
2299 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
2300 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
2301 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
2302 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
2305 // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
2306 // is. We should promote the value to 64-bits to solve this.
2307 // This is what the CRT headers do - `fmodf` is an inline header
2308 // function casting to f64 and calling `fmod`.
2309 if (Subtarget.is32Bit() &&
2310 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
2311 for (ISD::NodeType Op :
2312 {ISD::FCEIL, ISD::STRICT_FCEIL,
2313 ISD::FCOS, ISD::STRICT_FCOS,
2314 ISD::FEXP, ISD::STRICT_FEXP,
2315 ISD::FFLOOR, ISD::STRICT_FFLOOR,
2316 ISD::FREM, ISD::STRICT_FREM,
2317 ISD::FLOG, ISD::STRICT_FLOG,
2318 ISD::FLOG10, ISD::STRICT_FLOG10,
2319 ISD::FPOW, ISD::STRICT_FPOW,
2320 ISD::FSIN, ISD::STRICT_FSIN})
2321 if (isOperationExpand(Op, MVT::f32))
2322 setOperationAction(Op, MVT::f32, Promote);
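// Sketch of the effect (mirroring the CRT's inline fmodf described above):
//   float fmodf(float x, float y) {
//     return (float)fmod((double)x, (double)y); // promote f32 -> f64 libcall
//   }
// i.e. each promoted f32 operation is widened to f64, the f64 libcall runs,
// and the result is truncated back to f32.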
2324 // We have target-specific dag combine patterns for the following nodes:
2325 setTargetDAGCombine({ISD::VECTOR_SHUFFLE,
2326 ISD::SCALAR_TO_VECTOR,
2327 ISD::INSERT_VECTOR_ELT,
2328 ISD::EXTRACT_VECTOR_ELT,
2329 ISD::CONCAT_VECTORS,
2330 ISD::INSERT_SUBVECTOR,
2331 ISD::EXTRACT_SUBVECTOR,
2357 ISD::SIGN_EXTEND_INREG,
2358 ISD::ANY_EXTEND_VECTOR_INREG,
2359 ISD::SIGN_EXTEND_VECTOR_INREG,
2360 ISD::ZERO_EXTEND_VECTOR_INREG,
2363 ISD::STRICT_SINT_TO_FP,
2364 ISD::STRICT_UINT_TO_FP,
2372 ISD::STRICT_FP_EXTEND,
2374 ISD::STRICT_FP_ROUND});
2376 computeRegisterProperties(Subtarget.getRegisterInfo());
2378 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2379 MaxStoresPerMemsetOptSize = 8;
2380 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2381 MaxStoresPerMemcpyOptSize = 4;
2382 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2383 MaxStoresPerMemmoveOptSize = 4;
2385 // TODO: These control memcmp expansion in CGP and could be raised higher, but
2386 // that needs to be benchmarked and balanced with the potential use of vector
2387 // load/store types (PR33329, PR33914).
2388 MaxLoadsPerMemcmp = 2;
2389 MaxLoadsPerMemcmpOptSize = 2;
2391 // Default loop alignment, which can be overridden by -align-loops.
2392 setPrefLoopAlignment(Align(16));
2394 // An out-of-order CPU can speculatively execute past a predictable branch,
2395 // but a conditional move could be stalled by an expensive earlier operation.
2396 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2397 EnableExtLdPromotion = true;
2398 setPrefFunctionAlignment(Align(16));
2400 verifyIntrinsicTables();
2402 // Default to having -disable-strictnode-mutation on
2403 IsStrictFPEnabled = true;
2404 }
2406 // This has so far only been implemented for 64-bit MachO.
2407 bool X86TargetLowering::useLoadStackGuardNode() const {
2408 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2411 bool X86TargetLowering::useStackGuardXorFP() const {
2412 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2413 return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2416 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2417 const SDLoc &DL) const {
2418 EVT PtrTy = getPointerTy(DAG.getDataLayout());
2419 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2420 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2421 return SDValue(Node, 0);
2424 TargetLoweringBase::LegalizeTypeAction
2425 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2426 if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2427 !Subtarget.hasBWI())
2428 return TypeSplitVector;
2430 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2431 !Subtarget.hasF16C() && VT.getVectorElementType() == MVT::f16)
2432 return TypeSplitVector;
2434 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2435 VT.getVectorElementType() != MVT::i1)
2436 return TypeWidenVector;
2438 return TargetLoweringBase::getPreferredVectorAction(VT);
2441 static std::pair<MVT, unsigned>
2442 handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
2443 const X86Subtarget &Subtarget) {
2444 // v2i1/v4i1/v8i1/v16i1 all pass in xmm registers unless the calling
2445 // convention is one that uses k registers.
2446 if (NumElts == 2)
2447 return {MVT::v2i64, 1};
2448 if (NumElts == 4)
2449 return {MVT::v4i32, 1};
2450 if (NumElts == 8 && CC != CallingConv::X86_RegCall &&
2451 CC != CallingConv::Intel_OCL_BI)
2452 return {MVT::v8i16, 1};
2453 if (NumElts == 16 && CC != CallingConv::X86_RegCall &&
2454 CC != CallingConv::Intel_OCL_BI)
2455 return {MVT::v16i8, 1};
2456 // v32i1 passes in ymm unless we have BWI and the calling convention is
2457 // regcall.
2458 if (NumElts == 32 && (!Subtarget.hasBWI() || CC != CallingConv::X86_RegCall))
2459 return {MVT::v32i8, 1};
2460 // Split v64i1 vectors if we don't have v64i8 available.
2461 if (NumElts == 64 && Subtarget.hasBWI() && CC != CallingConv::X86_RegCall) {
2462 if (Subtarget.useAVX512Regs())
2463 return {MVT::v64i8, 1};
2464 return {MVT::v32i8, 2};
2467 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2468 if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
2469 NumElts > 64)
2470 return {MVT::i8, NumElts};
2472 return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
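// Worked examples of the mapping above (illustrative): v8i1 under the C
// calling convention yields {v8i16, 1} (one xmm); v64i1 with BWI but only
// 256-bit registers yields {v32i8, 2}; a non-power-of-2 v3i1 falls through
// to {i8, 3} scalars; v16i1 under X86_RegCall returns the sentinel so the
// generic handling applies.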
2475 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2476 CallingConv::ID CC,
2477 EVT VT) const {
2478 if (VT.isVector()) {
2479 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) {
2480 unsigned NumElts = VT.getVectorNumElements();
2482 MVT RegisterVT;
2483 unsigned NumRegisters;
2484 std::tie(RegisterVT, NumRegisters) =
2485 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2486 if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2487 return RegisterVT;
2488 }
2490 if (VT.getVectorElementType() == MVT::f16 && VT.getVectorNumElements() < 8)
2491 return MVT::v8f16;
2492 }
2494 // We will use more GPRs for f64 and f80 on 32 bits when x87 is disabled.
2495 if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
2496 !Subtarget.hasX87())
2497 return MVT::i32;
2499 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2502 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2503 CallingConv::ID CC,
2504 EVT VT) const {
2505 if (VT.isVector()) {
2506 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) {
2507 unsigned NumElts = VT.getVectorNumElements();
2509 MVT RegisterVT;
2510 unsigned NumRegisters;
2511 std::tie(RegisterVT, NumRegisters) =
2512 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2513 if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2514 return NumRegisters;
2515 }
2517 if (VT.getVectorElementType() == MVT::f16 && VT.getVectorNumElements() < 8)
2518 return 1;
2519 }
2521 // We have to split f64 to 2 registers and f80 to 3 registers on 32 bits if
2522 // x87 is disabled.
2523 if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
2524 if (VT == MVT::f64)
2525 return 2;
2526 if (VT == MVT::f80)
2527 return 3;
2528 }
2530 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2533 unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
2534 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2535 unsigned &NumIntermediates, MVT &RegisterVT) const {
2536 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2537 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2538 Subtarget.hasAVX512() &&
2539 (!isPowerOf2_32(VT.getVectorNumElements()) ||
2540 (VT.getVectorNumElements() == 64 && !Subtarget.hasBWI()) ||
2541 VT.getVectorNumElements() > 64)) {
2542 RegisterVT = MVT::i8;
2543 IntermediateVT = MVT::i1;
2544 NumIntermediates = VT.getVectorNumElements();
2545 return NumIntermediates;
2548 // Split v64i1 vectors if we don't have v64i8 available.
2549 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2550 CC != CallingConv::X86_RegCall) {
2551 RegisterVT = MVT::v32i8;
2552 IntermediateVT = MVT::v32i1;
2553 NumIntermediates = 2;
2554 return 2;
2555 }
2557 return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
2558 NumIntermediates, RegisterVT);
2561 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
2562 LLVMContext& Context,
2563 EVT VT) const {
2564 if (!VT.isVector())
2565 return MVT::i8;
2567 if (Subtarget.hasAVX512()) {
2568 // Figure out what this type will be legalized to.
2569 EVT LegalVT = VT;
2570 while (getTypeAction(Context, LegalVT) != TypeLegal)
2571 LegalVT = getTypeToTransformTo(Context, LegalVT);
2573 // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2574 if (LegalVT.getSimpleVT().is512BitVector())
2575 return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
2577 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2578 // If we legalized to less than a 512-bit vector, then we will use a vXi1
2579 // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2580 // vXi16/vXi8.
2581 MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2582 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2583 return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
2587 return VT.changeVectorElementTypeToInteger();
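// Illustrative examples: with AVX512VL, setcc on v8i32 produces v8i1 (a k
// register); on an AVX2-only target the same compare falls through to the
// last line and yields v8i32, the usual all-ones/all-zeros vector mask.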
2590 /// Helper for getByValTypeAlignment to determine
2591 /// the desired ByVal argument alignment.
2592 static void getMaxByValAlign(Type *Ty, Align &MaxAlign) {
2593 if (MaxAlign == 16)
2594 return;
2595 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2596 if (VTy->getPrimitiveSizeInBits().getFixedSize() == 128)
2597 MaxAlign = Align(16);
2598 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2599 Align EltAlign;
2600 getMaxByValAlign(ATy->getElementType(), EltAlign);
2601 if (EltAlign > MaxAlign)
2602 MaxAlign = EltAlign;
2603 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2604 for (auto *EltTy : STy->elements()) {
2605 Align EltAlign;
2606 getMaxByValAlign(EltTy, EltAlign);
2607 if (EltAlign > MaxAlign)
2608 MaxAlign = EltAlign;
2609 if (MaxAlign == 16)
2610 break;
2611 }
2612 }
2613 }
2615 /// Return the desired alignment for ByVal aggregate
2616 /// function arguments in the caller parameter area. For X86, aggregates
2617 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2618 /// are at 4-byte boundaries.
2619 uint64_t X86TargetLowering::getByValTypeAlignment(Type *Ty,
2620 const DataLayout &DL) const {
2621 if (Subtarget.is64Bit()) {
2622 // Max of 8 and alignment of type.
2623 Align TyAlign = DL.getABITypeAlign(Ty);
2624 if (TyAlign > 8)
2625 return TyAlign.value();
2626 return 8;
2627 }
2629 Align Alignment(4);
2630 if (Subtarget.hasSSE1())
2631 getMaxByValAlign(Ty, Alignment);
2632 return Alignment.value();
2633 }
2635 /// It returns EVT::Other if the type should be determined using generic
2636 /// target-independent logic.
2637 /// For vector ops we check that the overall size isn't larger than our
2638 /// preferred vector width.
2639 EVT X86TargetLowering::getOptimalMemOpType(
2640 const MemOp &Op, const AttributeList &FuncAttributes) const {
2641 if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
2642 if (Op.size() >= 16 &&
2643 (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
2644 // FIXME: Check if unaligned 64-byte accesses are slow.
2645 if (Op.size() >= 64 && Subtarget.hasAVX512() &&
2646 (Subtarget.getPreferVectorWidth() >= 512)) {
2647 return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2649 // FIXME: Check if unaligned 32-byte accesses are slow.
2650 if (Op.size() >= 32 && Subtarget.hasAVX() &&
2651 (Subtarget.getPreferVectorWidth() >= 256)) {
2652 // Although this isn't a well-supported type for AVX1, we'll let
2653 // legalization and shuffle lowering produce the optimal codegen. If we
2654 // choose an optimal type with a vector element larger than a byte,
2655 // getMemsetStores() may create an intermediate splat (using an integer
2656 // multiply) before we splat as a vector.
2657 return MVT::v32i8;
2659 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2660 return MVT::v16i8;
2661 // TODO: Can SSE1 handle a byte vector?
2662 // If we have SSE1 registers we should be able to use them.
2663 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2664 (Subtarget.getPreferVectorWidth() >= 128))
2665 return MVT::v4f32;
2666 } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
2667 Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2668 // Do not use f64 to lower memcpy if source is string constant. It's
2669 // better to use i32 to avoid the loads.
2670 // Also, do not use f64 to lower memset unless this is a memset of zeros.
2671 // The gymnastics of splatting a byte value into an XMM register and then
2672 // only using 8-byte stores (because this is a CPU with slow unaligned
2673 // 16-byte accesses) makes that a loser.
2674 return MVT::f64;
2675 }
2677 // This is a compromise. If we reach here, unaligned accesses may be slow on
2678 // this target. However, creating smaller, aligned accesses could be even
2679 // slower and would certainly be a lot more code.
2680 if (Subtarget.is64Bit() && Op.size() >= 8)
2681 return MVT::i64;
2682 return MVT::i32;
2683 }
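// Illustrative note: with the rules above, a 64-byte inline memset lowers to
// a single v64i8 store on an AVX-512BW target that prefers 512-bit vectors,
// while a plain SSE2 target uses v16i8 stores instead.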
2685 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2686 if (VT == MVT::f32)
2687 return Subtarget.hasSSE1();
2688 if (VT == MVT::f64)
2689 return Subtarget.hasSSE2();
2690 return true;
2691 }
2693 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2694 EVT VT, unsigned, Align Alignment, MachineMemOperand::Flags Flags,
2695 bool *Fast) const {
2696 if (Fast) {
2697 switch (VT.getSizeInBits()) {
2698 default:
2699 // 8-byte and under are always assumed to be fast.
2700 *Fast = true;
2701 break;
2702 case 128:
2703 *Fast = !Subtarget.isUnalignedMem16Slow();
2704 break;
2705 case 256:
2706 *Fast = !Subtarget.isUnalignedMem32Slow();
2707 break;
2708 // TODO: What about AVX-512 (512-bit) accesses?
2709 }
2710 }
2711 // NonTemporal vector memory ops must be aligned.
2712 if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2713 // NT loads can only be vector aligned, so if it's less aligned than the
2714 // minimum vector size (which we can split the vector down to), we might as
2715 // well use a regular unaligned vector load.
2716 // We don't have any NT loads pre-SSE41.
2717 if (!!(Flags & MachineMemOperand::MOLoad))
2718 return (Alignment < 16 || !Subtarget.hasSSE41());
2719 return false;
2720 }
2721 // Misaligned accesses of any size are always allowed.
2722 return true;
2723 }
2725 /// Return the entry encoding for a jump table in the
2726 /// current function. The returned value is a member of the
2727 /// MachineJumpTableInfo::JTEntryKind enum.
2728 unsigned X86TargetLowering::getJumpTableEncoding() const {
2729 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2730 // symbol reference.
2731 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2732 return MachineJumpTableInfo::EK_Custom32;
2734 // Otherwise, use the normal jump table encoding heuristics.
2735 return TargetLowering::getJumpTableEncoding();
2738 bool X86TargetLowering::useSoftFloat() const {
2739 return Subtarget.useSoftFloat();
2742 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2743 ArgListTy &Args) const {
2745 // Only relabel X86-32 for C / Stdcall CCs.
2746 if (Subtarget.is64Bit())
2747 return;
2748 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2749 return;
2750 unsigned ParamRegs = 0;
2751 if (auto *M = MF->getFunction().getParent())
2752 ParamRegs = M->getNumberRegisterParameters();
2754 // Mark the first N integer arguments as being passed in registers.
2755 for (auto &Arg : Args) {
2756 Type *T = Arg.Ty;
2757 if (T->isIntOrPtrTy())
2758 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2759 unsigned numRegs = 1;
2760 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2761 numRegs = 2;
2762 if (ParamRegs < numRegs)
2763 return;
2764 ParamRegs -= numRegs;
2765 Arg.IsInReg = true;
2766 }
2767 }
2768 }
2770 const MCExpr *
2771 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2772 const MachineBasicBlock *MBB,
2773 unsigned uid,MCContext &Ctx) const{
2774 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2775 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2776 // entries.
2777 return MCSymbolRefExpr::create(MBB->getSymbol(),
2778 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2779 }
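// The emitted entry then looks like ".long .LBB0_2@GOTOFF" in the assembly
// output (label name illustrative).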
2781 /// Returns relocation base for the given PIC jumptable.
2782 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2783 SelectionDAG &DAG) const {
2784 if (!Subtarget.is64Bit())
2785 // This doesn't have SDLoc associated with it, but is not really the
2786 // same as a Register.
2787 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2788 getPointerTy(DAG.getDataLayout()));
2789 return Table;
2790 }
2792 /// This returns the relocation base for the given PIC jumptable,
2793 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2794 const MCExpr *X86TargetLowering::
2795 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2796 MCContext &Ctx) const {
2797 // X86-64 uses RIP relative addressing based on the jump table label.
2798 if (Subtarget.isPICStyleRIPRel())
2799 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2801 // Otherwise, the reference is relative to the PIC base.
2802 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2805 std::pair<const TargetRegisterClass *, uint8_t>
2806 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2807 MVT VT) const {
2808 const TargetRegisterClass *RRC = nullptr;
2809 uint8_t Cost = 1;
2810 switch (VT.SimpleTy) {
2811 default:
2812 return TargetLowering::findRepresentativeClass(TRI, VT);
2813 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2814 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2815 break;
2816 case MVT::x86mmx:
2817 RRC = &X86::VR64RegClass;
2818 break;
2819 case MVT::f32: case MVT::f64:
2820 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2821 case MVT::v4f32: case MVT::v2f64:
2822 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2823 case MVT::v8f32: case MVT::v4f64:
2824 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2825 case MVT::v16f32: case MVT::v8f64:
2826 RRC = &X86::VR128XRegClass;
2827 break;
2828 }
2829 return std::make_pair(RRC, Cost);
2832 unsigned X86TargetLowering::getAddressSpace() const {
2833 if (Subtarget.is64Bit())
2834 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2835 return 256;
2836 }
2838 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2839 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2840 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2841 }
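// Build an inttoptr constant so that a load through it uses segment-relative
// addressing; address spaces 256 and 257 correspond to %gs and %fs
// respectively (see the X86AS enum).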
2843 static Constant* SegmentOffset(IRBuilderBase &IRB,
2844 int Offset, unsigned AddressSpace) {
2845 return ConstantExpr::getIntToPtr(
2846 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2847 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2850 Value *X86TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
2851 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2852 // tcbhead_t; use it instead of the usual global variable (see
2853 // sysdeps/{i386,x86_64}/nptl/tls.h)
2854 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2855 if (Subtarget.isTargetFuchsia()) {
2856 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2857 return SegmentOffset(IRB, 0x10, getAddressSpace());
2859 unsigned AddressSpace = getAddressSpace();
2860 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
2861 // Some users may customize the base register and offset.
2862 int Offset = M->getStackProtectorGuardOffset();
2863 // If the -stack-protector-guard-offset value is not set, the default is
2864 // %fs:0x28 on x86_64 (%gs:0x28 with the Kernel code model) and %gs:0x14
2865 // on i386.
2866 if (Offset == INT_MAX)
2867 Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
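// e.g. on x86_64 Linux the stack guard load becomes "mov %fs:0x28, %rax";
// on i386 it is "mov %gs:0x14, %eax".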
2869 StringRef GuardReg = M->getStackProtectorGuardReg();
2870 if (GuardReg == "fs")
2871 AddressSpace = X86AS::FS;
2872 else if (GuardReg == "gs")
2873 AddressSpace = X86AS::GS;
2875 // Use a symbol guard if the user specified one.
2876 StringRef GuardSymb = M->getStackProtectorGuardSymbol();
2877 if (!GuardSymb.empty()) {
2878 GlobalVariable *GV = M->getGlobalVariable(GuardSymb);
2879 if (!GV) {
2880 Type *Ty = Subtarget.is64Bit() ? Type::getInt64Ty(M->getContext())
2881 : Type::getInt32Ty(M->getContext());
2882 GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage,
2883 nullptr, GuardSymb, nullptr,
2884 GlobalValue::NotThreadLocal, AddressSpace);
2885 }
2886 return GV;
2887 }
2889 return SegmentOffset(IRB, Offset, AddressSpace);
2892 return TargetLowering::getIRStackGuard(IRB);
2895 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2896 // MSVC CRT provides functionalities for stack protection.
2897 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2898 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2899 // MSVC CRT has a global variable holding security cookie.
2900 M.getOrInsertGlobal("__security_cookie",
2901 Type::getInt8PtrTy(M.getContext()));
2903 // MSVC CRT has a function to validate security cookie.
2904 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2905 "__security_check_cookie", Type::getVoidTy(M.getContext()),
2906 Type::getInt8PtrTy(M.getContext()));
2907 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2908 F->setCallingConv(CallingConv::X86_FastCall);
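// Note: X86_FastCall together with the InReg attribute set below means
// 32-bit targets pass the cookie to __security_check_cookie in ECX rather
// than on the stack.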
2909 F->addParamAttr(0, Attribute::AttrKind::InReg);
2910 }
2911 return;
2912 }
2914 StringRef GuardMode = M.getStackProtectorGuard();
2916 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2917 if ((GuardMode == "tls" || GuardMode.empty()) &&
2918 hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2919 return;
2920 TargetLowering::insertSSPDeclarations(M);
2923 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2924 // MSVC CRT has a global variable holding security cookie.
2925 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2926 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2927 return M.getGlobalVariable("__security_cookie");
2929 return TargetLowering::getSDagStackGuard(M);
2932 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2933 // MSVC CRT has a function to validate security cookie.
2934 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2935 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2936 return M.getFunction("__security_check_cookie");
2938 return TargetLowering::getSSPStackGuardCheck(M);
2942 X86TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
2943 if (Subtarget.getTargetTriple().isOSContiki())
2944 return getDefaultSafeStackPointerLocation(IRB, false);
2946 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2947 // definition of TLS_SLOT_SAFESTACK in
2948 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2949 if (Subtarget.isTargetAndroid()) {
2950 // %fs:0x48, unless we're using a Kernel code model, in which case it's
2951 // %gs:0x48.
2952 int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2953 return SegmentOffset(IRB, Offset, getAddressSpace());
2956 // Fuchsia is similar.
2957 if (Subtarget.isTargetFuchsia()) {
2958 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2959 return SegmentOffset(IRB, 0x18, getAddressSpace());
2962 return TargetLowering::getSafeStackPointerLocation(IRB);
2965 //===----------------------------------------------------------------------===//
2966 // Return Value Calling Convention Implementation
2967 //===----------------------------------------------------------------------===//
2969 bool X86TargetLowering::CanLowerReturn(
2970 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2971 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2972 SmallVector<CCValAssign, 16> RVLocs;
2973 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2974 return CCInfo.CheckReturn(Outs, RetCC_X86);
2975 }
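// R11 is caller-saved and is not used for argument passing by the x86-64
// calling conventions handled here, which is what makes it safe to use as a
// scratch register around call lowering.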
2977 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2978 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2982 /// Lowers mask values (v*i1) to the local register values
2983 /// \returns DAG node after lowering to register type
2984 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2985 const SDLoc &Dl, SelectionDAG &DAG) {
2986 EVT ValVT = ValArg.getValueType();
2988 if (ValVT == MVT::v1i1)
2989 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2990 DAG.getIntPtrConstant(0, Dl));
2992 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2993 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2994 // Two stage lowering might be required
2995 // bitcast: v8i1 -> i8 / v16i1 -> i16
2996 // anyextend: i8 -> i32 / i16 -> i32
2997 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2998 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2999 if (ValLoc == MVT::i32)
3000 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
3001 return ValToCopy;
3002 }
3004 if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
3005 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
3006 // One stage lowering is required
3007 // bitcast: v32i1 -> i32 / v64i1 -> i64
3008 return DAG.getBitcast(ValLoc, ValArg);
3011 return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
3014 /// Breaks v64i1 value into two registers and adds the new node to the DAG
3015 static void Passv64i1ArgInRegs(
3016 const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
3017 SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
3018 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
3019 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
3020 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
3021 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
3022 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
3023 "The value should reside in two registers");
3025 // Before splitting the value we cast it to i64
3026 Arg = DAG.getBitcast(MVT::i64, Arg);
3028 // Splitting the value into two i32 types
3029 SDValue Lo, Hi;
3030 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
3031 DAG.getConstant(0, Dl, MVT::i32));
3032 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
3033 DAG.getConstant(1, Dl, MVT::i32));
3035 // Attach the two i32 types into corresponding registers
3036 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
3037 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
3038 }
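// Note: the low half of the mask ends up in VA's register and the high half
// in NextVA's register, i.e. two consecutive GPRs assigned by the calling
// convention.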
3040 SDValue
3041 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3042 bool isVarArg,
3043 const SmallVectorImpl<ISD::OutputArg> &Outs,
3044 const SmallVectorImpl<SDValue> &OutVals,
3045 const SDLoc &dl, SelectionDAG &DAG) const {
3046 MachineFunction &MF = DAG.getMachineFunction();
3047 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3049 // In some cases we need to disable registers from the default CSR list.
3050 // For example, when they are used for argument passing.
3051 bool ShouldDisableCalleeSavedRegister =
3052 CallConv == CallingConv::X86_RegCall ||
3053 MF.getFunction().hasFnAttribute("no_caller_saved_registers");
3055 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
3056 report_fatal_error("X86 interrupts may not return any value");
3058 SmallVector<CCValAssign, 16> RVLocs;
3059 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
3060 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
3062 SmallVector<std::pair<Register, SDValue>, 4> RetVals;
3063 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
3064 ++I, ++OutsIndex) {
3065 CCValAssign &VA = RVLocs[I];
3066 assert(VA.isRegLoc() && "Can only return in registers!");
3068 // Add the register to the CalleeSaveDisableRegs list.
3069 if (ShouldDisableCalleeSavedRegister)
3070 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
3072 SDValue ValToCopy = OutVals[OutsIndex];
3073 EVT ValVT = ValToCopy.getValueType();
3075 // Promote values to the appropriate types.
3076 if (VA.getLocInfo() == CCValAssign::SExt)
3077 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
3078 else if (VA.getLocInfo() == CCValAssign::ZExt)
3079 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
3080 else if (VA.getLocInfo() == CCValAssign::AExt) {
3081 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
3082 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
3084 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
3086 else if (VA.getLocInfo() == CCValAssign::BCvt)
3087 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
3089 assert(VA.getLocInfo() != CCValAssign::FPExt &&
3090 "Unexpected FP-extend for return value.");
3092 // Report an error if we have attempted to return a value via an XMM
3093 // register and SSE was disabled.
3094 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3095 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3096 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3097 } else if (!Subtarget.hasSSE2() &&
3098 X86::FR64XRegClass.contains(VA.getLocReg()) &&
3099 ValVT == MVT::f64) {
3100 // When returning a double via an XMM register, report an error if SSE2 is
3101 // disabled.
3102 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3103 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3106 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
3107 // the RET instruction and handled by the FP Stackifier.
3108 if (VA.getLocReg() == X86::FP0 ||
3109 VA.getLocReg() == X86::FP1) {
3110 // If this is a copy from an xmm register to ST(0), use an FPExtend to
3111 // change the value to the FP stack register class.
3112 if (isScalarFPTypeInSSEReg(VA.getValVT()))
3113 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
3114 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
3115 // Don't emit a copytoreg.
3116 continue;
3117 }
3119 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
3120 // which is returned in RAX / RDX.
3121 if (Subtarget.is64Bit()) {
3122 if (ValVT == MVT::x86mmx) {
3123 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
3124 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
3125 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
3126 ValToCopy);
3127 // If we don't have SSE2 available, convert to v4f32 so the generated
3128 // register is legal.
3129 if (!Subtarget.hasSSE2())
3130 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
3135 if (VA.needsCustom()) {
3136 assert(VA.getValVT() == MVT::v64i1 &&
3137 "Currently the only custom case is when we split v64i1 to 2 regs");
3139 Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
3140 Subtarget);
3142 // Add the second register to the CalleeSaveDisableRegs list.
3143 if (ShouldDisableCalleeSavedRegister)
3144 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
3145 } else {
3146 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
3147 }
3148 }
3150 SDValue Flag;
3151 SmallVector<SDValue, 6> RetOps;
3152 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
3153 // Operand #1 = Bytes To Pop
3154 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
3155 MVT::i32));
3157 // Copy the result values into the output registers.
3158 for (auto &RetVal : RetVals) {
3159 if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
3160 RetOps.push_back(RetVal.second);
3161 continue; // Don't emit a copytoreg.
3164 Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Flag);
3165 Flag = Chain.getValue(1);
3166 RetOps.push_back(
3167 DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
3168 }
3170 // Swift calling convention does not require we copy the sret argument
3171 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
3173 // All x86 ABIs require that for returning structs by value we copy
3174 // the sret argument into %rax/%eax (depending on ABI) for the return.
3175 // We saved the argument into a virtual register in the entry block,
3176 // so now we copy the value out and into %rax/%eax.
3178 // Checking Function.hasStructRetAttr() here is insufficient because the IR
3179 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
3180 // false, then an sret argument may be implicitly inserted in the SelDAG. In
3181 // either case FuncInfo->setSRetReturnReg() will have been called.
3182 if (Register SRetReg = FuncInfo->getSRetReturnReg()) {
3183 // When we have both sret and another return value, we should use the
3184 // original Chain stored in RetOps[0], instead of the current Chain updated
3185 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
3187 // For the case of sret and another return value, we have
3188 // Chain_0 at the function entry
3189 // Chain_1 = getCopyToReg(Chain_0) in the above loop
3190 // If we use Chain_1 in getCopyFromReg, we will have
3191 // Val = getCopyFromReg(Chain_1)
3192 // Chain_2 = getCopyToReg(Chain_1, Val) from below
3194 // getCopyToReg(Chain_0) will be glued together with
3195 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
3196 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
3197 // Data dependency from Unit B to Unit A due to usage of Val in
3198 // getCopyToReg(Chain_1, Val)
3199 // Chain dependency from Unit A to Unit B
3201 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
3202 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
3203 getPointerTy(MF.getDataLayout()));
3205 Register RetValReg
3206 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
3207 X86::RAX : X86::EAX;
3208 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
3209 Flag = Chain.getValue(1);
3211 // RAX/EAX now acts like a return value.
3212 RetOps.push_back(
3213 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
3215 // Add the returned register to the CalleeSaveDisableRegs list.
3216 if (ShouldDisableCalleeSavedRegister)
3217 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
3220 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
3221 const MCPhysReg *I =
3222 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
3223 if (I) {
3224 for (; *I; ++I) {
3225 if (X86::GR64RegClass.contains(*I))
3226 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
3227 else
3228 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
3229 }
3230 }
3232 RetOps[0] = Chain; // Update chain.
3234 // Add the flag if we have it.
3235 if (Flag.getNode())
3236 RetOps.push_back(Flag);
3238 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
3239 if (CallConv == CallingConv::X86_INTR)
3240 opcode = X86ISD::IRET;
3241 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
3244 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
3245 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
3246 return false;
3248 SDValue TCChain = Chain;
3249 SDNode *Copy = *N->use_begin();
3250 if (Copy->getOpcode() == ISD::CopyToReg) {
3251 // If the copy has a glue operand, we conservatively assume it isn't safe to
3252 // perform a tail call.
3253 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3254 return false;
3255 TCChain = Copy->getOperand(0);
3256 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
3257 return false;
3259 bool HasRet = false;
3260 for (const SDNode *U : Copy->uses()) {
3261 if (U->getOpcode() != X86ISD::RET_FLAG)
3262 return false;
3263 // If we are returning more than one value, we can definitely
3264 // not make a tail call; see PR19530.
3265 if (U->getNumOperands() > 4)
3266 return false;
3267 if (U->getNumOperands() == 4 &&
3268 U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
3269 return false;
3270 HasRet = true;
3271 }
3273 if (!HasRet)
3274 return false;
3276 Chain = TCChain;
3277 return true;
3278 }
3280 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
3281 ISD::NodeType ExtendKind) const {
3282 MVT ReturnMVT = MVT::i32;
3284 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
3285 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
3286 // The ABI does not require i1, i8 or i16 to be extended.
3288 // On Darwin, there is code in the wild relying on Clang's old behaviour of
3289 // always extending i8/i16 return values, so keep doing that for now.
3291 ReturnMVT = MVT::i8;
3294 EVT MinVT = getRegisterType(Context, ReturnMVT);
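// e.g. with ReturnMVT == i8 (non-Darwin), only i1 is widened (to i8) below,
// while i8/i16 are returned unchanged; on Darwin, MinVT is i32, so i8/i16
// still widen to i32.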
3295 return VT.bitsLT(MinVT) ? MinVT : VT;
3298 /// Reads two 32 bit registers and creates a 64 bit mask value.
3299 /// \param VA The current 32 bit value that needs to be assigned.
3300 /// \param NextVA The next 32 bit value that needs to be assigned.
3301 /// \param Root The parent DAG node.
3302 /// \param [in,out] InFlag Represents SDValue in the parent DAG node for
3303 /// glue purposes. In case the DAG is already using a
3304 /// physical register instead of a virtual one, we should
3305 /// glue our new SDValue to the InFlag SDValue.
3306 /// \return a new SDValue of size 64 bits.
3307 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
3308 SDValue &Root, SelectionDAG &DAG,
3309 const SDLoc &Dl, const X86Subtarget &Subtarget,
3310 SDValue *InFlag = nullptr) {
3311 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
3312 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
3313 assert(VA.getValVT() == MVT::v64i1 &&
3314 "Expecting first location of 64 bit width type");
3315 assert(NextVA.getValVT() == VA.getValVT() &&
3316 "The locations should have the same type");
3317 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
3318 "The values should reside in two registers");
3320 SDValue Lo, Hi;
3321 SDValue ArgValueLo, ArgValueHi;
3323 MachineFunction &MF = DAG.getMachineFunction();
3324 const TargetRegisterClass *RC = &X86::GR32RegClass;
3326 // Read a 32 bit value from the registers.
3327 if (nullptr == InFlag) {
3328 // When no physical register is present,
3329 // create an intermediate virtual register.
3330 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
3331 ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
3332 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3333 ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
3335 // When a physical register is available read the value from it and glue
3336 // the reads together.
3337 ArgValueLo =
3338 DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
3339 *InFlag = ArgValueLo.getValue(2);
3340 ArgValueHi =
3341 DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
3342 *InFlag = ArgValueHi.getValue(2);
3343 }
3345 // Convert the i32 type into v32i1 type.
3346 Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
3348 // Convert the i32 type into v32i1 type.
3349 Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
3351 // Concatenate the two values together.
3352 return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
3355 /// The function will lower a register of various sizes (8/16/32/64)
3356 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1)
3357 /// \returns a DAG node containing the operand after lowering to mask type.
3358 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
3359 const EVT &ValLoc, const SDLoc &Dl,
3360 SelectionDAG &DAG) {
3361 SDValue ValReturned = ValArg;
3363 if (ValVT == MVT::v1i1)
3364 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
3366 if (ValVT == MVT::v64i1) {
3367 // On 32-bit targets this case is handled by getv64i1Argument.
3368 assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
3369 // On 64-bit targets there is no need to truncate the value; only bitcast.
3370 } else {
3371 MVT maskLen;
3372 switch (ValVT.getSimpleVT().SimpleTy) {
3373 case MVT::v8i1:
3374 maskLen = MVT::i8;
3375 break;
3376 case MVT::v16i1:
3377 maskLen = MVT::i16;
3378 break;
3379 case MVT::v32i1:
3380 maskLen = MVT::i32;
3381 break;
3382 default:
3383 llvm_unreachable("Expecting a vector of i1 types");
3384 }
3386 ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
3387 }
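// e.g. a v16i1 mask returned in a 32-bit GPR arrives here as i32, is
// truncated to i16 above, and is bitcast to v16i1 below.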
3388 return DAG.getBitcast(ValVT, ValReturned);
3391 /// Lower the result values of a call into the
3392 /// appropriate copies out of appropriate physical registers.
3394 SDValue X86TargetLowering::LowerCallResult(
3395 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
3396 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3397 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3398 uint32_t *RegMask) const {
3400 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3401 // Assign locations to each value returned by this call.
3402 SmallVector<CCValAssign, 16> RVLocs;
3403 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
3404 *DAG.getContext());
3405 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3407 // Copy all of the result registers out of their specified physreg.
3408 for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
3409 ++I, ++InsIndex) {
3410 CCValAssign &VA = RVLocs[I];
3411 EVT CopyVT = VA.getLocVT();
3413 // In some calling conventions we need to remove the used registers
3414 // from the register mask.
3415 if (RegMask) {
3416 for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
3417 SubRegs.isValid(); ++SubRegs)
3418 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
3419 }
3421 // Report an error if there was an attempt to return FP values via XMM
3422 // registers.
3423 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3424 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3425 if (VA.getLocReg() == X86::XMM1)
3426 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3428 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3429 } else if (!Subtarget.hasSSE2() &&
3430 X86::FR64XRegClass.contains(VA.getLocReg()) &&
3431 CopyVT == MVT::f64) {
3432 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3433 if (VA.getLocReg() == X86::XMM1)
3434 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3436 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3439 // If we prefer to use the value in xmm registers, copy it out as f80 and
3440 // use a truncate to move it from fp stack reg to xmm reg.
3441 bool RoundAfterCopy = false;
3442 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
3443 isScalarFPTypeInSSEReg(VA.getValVT())) {
3444 if (!Subtarget.hasX87())
3445 report_fatal_error("X87 register return with X87 disabled");
3447 RoundAfterCopy = (CopyVT != VA.getLocVT());
3448 }
3450 SDValue Val;
3451 if (VA.needsCustom()) {
3452 assert(VA.getValVT() == MVT::v64i1 &&
3453 "Currently the only custom case is when we split v64i1 to 2 regs");
3454 Val =
3455 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
3456 } else {
3457 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
3458 .getValue(1);
3459 Val = Chain.getValue(0);
3460 InFlag = Chain.getValue(2);
3461 }
3463 if (RoundAfterCopy)
3464 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
3465 // This truncation won't change the value.
3466 DAG.getIntPtrConstant(1, dl, /*isTarget=*/true));
3468 if (VA.isExtInLoc()) {
3469 if (VA.getValVT().isVector() &&
3470 VA.getValVT().getScalarType() == MVT::i1 &&
3471 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3472 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3473 // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3474 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
3475 } else
3476 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3477 }
3479 if (VA.getLocInfo() == CCValAssign::BCvt)
3480 Val = DAG.getBitcast(VA.getValVT(), Val);
3482 InVals.push_back(Val);
3483 }
3485 return Chain;
3486 }
3488 //===----------------------------------------------------------------------===//
3489 // C & StdCall & Fast Calling Convention implementation
3490 //===----------------------------------------------------------------------===//
3491 // The StdCall calling convention is standard for many Windows API routines.
3492 // It differs from the C calling convention mainly in that the callee, not
3493 // the caller, cleans up the stack. Symbols are also decorated differently.
3494 // It does not support any vector arguments.
3495 // For info on fast calling convention see Fast Calling Convention (tail call)
3496 // implementation LowerX86_32FastCCCallTo.
3498 /// Determines whether Args, either a set of outgoing arguments to a call, or a
3499 /// set of incoming args of a call, contains an sret pointer that the callee
3500 /// pops.
3501 template <typename T>
3502 static bool hasCalleePopSRet(const SmallVectorImpl<T> &Args,
3503 const X86Subtarget &Subtarget) {
3504 // Not C++20 (yet), so no concepts available.
3505 static_assert(std::is_same<T, ISD::OutputArg>::value ||
3506 std::is_same<T, ISD::InputArg>::value,
3507 "requires ISD::OutputArg or ISD::InputArg");
3509 // Only 32-bit pops the sret. It's a 64-bit world these days, so early-out
3510 // for most compilations.
3511 if (!Subtarget.is32Bit())
3512 return false;
3514 if (Args.empty())
3515 return false;
3517 // Most calls do not have an sret argument, check the arg next.
3518 const ISD::ArgFlagsTy &Flags = Args[0].Flags;
3519 if (!Flags.isSRet() || Flags.isInReg())
3520 return false;
3522 // The MSVC ABI does not pop the sret.
3523 if (Subtarget.getTargetTriple().isOSMSVCRT())
3524 return false;
3526 // MCUs don't pop the sret.
3527 if (Subtarget.isTargetMCU())
3528 return false;
3530 // Otherwise, the callee pops the sret argument.
3531 return true;
3532 }
3534 /// Make a copy of an aggregate at address specified by "Src" to address
3535 /// "Dst" with size and alignment information specified by the specific
3536 /// parameter attribute. The copy will be passed as a byval function parameter.
3537 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
3538 SDValue Chain, ISD::ArgFlagsTy Flags,
3539 SelectionDAG &DAG, const SDLoc &dl) {
3540 SDValue SizeNode = DAG.getIntPtrConstant(Flags.getByValSize(), dl);
3542 return DAG.getMemcpy(
3543 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
3544 /*isVolatile*/ false, /*AlwaysInline=*/true,
3545 /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
3548 /// Return true if the calling convention is one that we can guarantee TCO for.
3549 static bool canGuaranteeTCO(CallingConv::ID CC) {
3550 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3551 CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
3552 CC == CallingConv::HHVM || CC == CallingConv::Tail ||
3553 CC == CallingConv::SwiftTail);
3556 /// Return true if we might ever do TCO for calls with this calling convention.
3557 static bool mayTailCallThisCC(CallingConv::ID CC) {
3558 switch (CC) {
3559 // C calling conventions:
3560 case CallingConv::C:
3561 case CallingConv::Win64:
3562 case CallingConv::X86_64_SysV:
3563 // Callee pop conventions:
3564 case CallingConv::X86_ThisCall:
3565 case CallingConv::X86_StdCall:
3566 case CallingConv::X86_VectorCall:
3567 case CallingConv::X86_FastCall:
3568 // Swift:
3569 case CallingConv::Swift:
3570 return true;
3571 default:
3572 return canGuaranteeTCO(CC);
3573 }
3574 }
3576 /// Return true if the function is being made into a tailcall target by
3577 /// changing its ABI.
3578 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
3579 return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) ||
3580 CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
3583 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3584 if (!CI->isTailCall())
3585 return false;
3587 CallingConv::ID CalleeCC = CI->getCallingConv();
3588 if (!mayTailCallThisCC(CalleeCC))
3589 return false;
3591 return true;
3592 }
3594 SDValue
3595 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3596 const SmallVectorImpl<ISD::InputArg> &Ins,
3597 const SDLoc &dl, SelectionDAG &DAG,
3598 const CCValAssign &VA,
3599 MachineFrameInfo &MFI, unsigned i) const {
3600 // Create the nodes corresponding to a load from this parameter slot.
3601 ISD::ArgFlagsTy Flags = Ins[i].Flags;
3602 bool AlwaysUseMutable = shouldGuaranteeTCO(
3603 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3604 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3606 MVT PtrVT = getPointerTy(DAG.getDataLayout());
3608 // If the value is passed by pointer, we have the address passed instead of
3609 // the value itself. No need to extend if the mask value and location share
3610 // the same bit width.
3611 bool ExtendedInMem =
3612 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3613 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3614 EVT ValVT;
3615 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3616 ValVT = VA.getLocVT();
3617 else
3618 ValVT = VA.getValVT();
3620 // FIXME: For now, all byval parameter objects are marked mutable. This can be
3621 // changed with more analysis.
3622 // In case of tail call optimization, mark all arguments mutable, since
3623 // they could be overwritten by lowering of arguments in case of a tail call.
3624 if (Flags.isByVal()) {
3625 unsigned Bytes = Flags.getByValSize();
3626 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3628 // FIXME: For now, all byval parameter objects are marked as aliasing. This
3629 // can be improved with deeper analysis.
3630 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3631 /*isAliased=*/true);
3632 return DAG.getFrameIndex(FI, PtrVT);
3635 EVT ArgVT = Ins[i].ArgVT;
3637 // If this is a vector that has been split into multiple parts, and the
3638 // scalar size of the parts doesn't match the vector element size, then we can't
3639 // elide the copy. The parts will have padding between them instead of being
3640 // packed like a vector.
3641 bool ScalarizedAndExtendedVector =
3642 ArgVT.isVector() && !VA.getLocVT().isVector() &&
3643 VA.getLocVT().getSizeInBits() != ArgVT.getScalarSizeInBits();
3645 // This is an argument in memory. We might be able to perform copy elision.
3646 // If the argument is passed directly in memory without any extension, then we
3647 // can perform copy elision. Large vector types, for example, may be passed
3648 // indirectly by pointer.
3649 if (Flags.isCopyElisionCandidate() &&
3650 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem &&
3651 !ScalarizedAndExtendedVector) {
3652 SDValue PartAddr;
3653 if (Ins[i].PartOffset == 0) {
3654 // If this is a one-part value or the first part of a multi-part value,
3655 // create a stack object for the entire argument value type and return a
3656 // load from our portion of it. This assumes that if the first part of an
3657 // argument is in memory, the rest will also be in memory.
3658 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3659 /*IsImmutable=*/false);
3660 PartAddr = DAG.getFrameIndex(FI, PtrVT);
3661 return DAG.getLoad(
3662 ValVT, dl, Chain, PartAddr,
3663 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3665 // This is not the first piece of an argument in memory. See if there is
3666 // already a fixed stack object including this offset. If so, assume it
3667 // was created by the PartOffset == 0 branch above and create a load from
3668 // the appropriate offset into it.
3669 int64_t PartBegin = VA.getLocMemOffset();
3670 int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3671 int FI = MFI.getObjectIndexBegin();
3672 for (; MFI.isFixedObjectIndex(FI); ++FI) {
3673 int64_t ObjBegin = MFI.getObjectOffset(FI);
3674 int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3675 if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3676 break;
3677 }
3678 if (MFI.isFixedObjectIndex(FI)) {
3679 SDValue Addr =
3680 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3681 DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3682 return DAG.getLoad(
3683 ValVT, dl, Chain, Addr,
3684 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3685 Ins[i].PartOffset));
3690 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3691 VA.getLocMemOffset(), isImmutable);
3693 // Set SExt or ZExt flag.
3694 if (VA.getLocInfo() == CCValAssign::ZExt) {
3695 MFI.setObjectZExt(FI, true);
3696 } else if (VA.getLocInfo() == CCValAssign::SExt) {
3697 MFI.setObjectSExt(FI, true);
3700 MaybeAlign Alignment;
3701 if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
3702 ValVT != MVT::f80)
3703 Alignment = MaybeAlign(4);
3704 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3705 SDValue Val = DAG.getLoad(
3706 ValVT, dl, Chain, FIN,
3707 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
3709 return ExtendedInMem
3710 ? (VA.getValVT().isVector()
3711 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3712 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3713 : Val;
3714 }
3716 // FIXME: Get this from tablegen.
3717 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3718 const X86Subtarget &Subtarget) {
3719 assert(Subtarget.is64Bit());
3721 if (Subtarget.isCallingConvWin64(CallConv)) {
3722 static const MCPhysReg GPR64ArgRegsWin64[] = {
3723 X86::RCX, X86::RDX, X86::R8, X86::R9
3725 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3728 static const MCPhysReg GPR64ArgRegs64Bit[] = {
3729 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3731 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3734 // FIXME: Get this from tablegen.
3735 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3736 CallingConv::ID CallConv,
3737 const X86Subtarget &Subtarget) {
3738 assert(Subtarget.is64Bit());
3739 if (Subtarget.isCallingConvWin64(CallConv)) {
3740 // The XMM registers which might contain var arg parameters are shadowed
3741 // in their paired GPR. So we only need to save the GPR to their home
3742 // registers.
3743 // TODO: __vectorcall will change this.
3744 return None;
3745 }
3747 bool isSoftFloat = Subtarget.useSoftFloat();
3748 if (isSoftFloat || !Subtarget.hasSSE1())
3749 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
3750 // registers.
3751 return None;
3753 static const MCPhysReg XMMArgRegs64Bit[] = {
3754 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3755 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3757 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3758 }
3760 #ifndef NDEBUG
3761 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3762 return llvm::is_sorted(
3763 ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
3764 return A.getValNo() < B.getValNo();
3765 });
3766 }
3767 #endif
3769 namespace {
3770 /// This is a helper class for lowering variable-argument parameters.
3771 class VarArgsLoweringHelper {
3772 public:
3773 VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
3774 SelectionDAG &DAG, const X86Subtarget &Subtarget,
3775 CallingConv::ID CallConv, CCState &CCInfo)
3776 : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
3777 TheMachineFunction(DAG.getMachineFunction()),
3778 TheFunction(TheMachineFunction.getFunction()),
3779 FrameInfo(TheMachineFunction.getFrameInfo()),
3780 FrameLowering(*Subtarget.getFrameLowering()),
3781 TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
3782 CCInfo(CCInfo) {}
3784 // Lower variable arguments parameters.
3785 void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);
3787 private:
3788 void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);
3790 void forwardMustTailParameters(SDValue &Chain);
3792 bool is64Bit() const { return Subtarget.is64Bit(); }
3793 bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }
3795 X86MachineFunctionInfo *FuncInfo;
3796 const SDLoc &DL;
3797 SelectionDAG &DAG;
3798 const X86Subtarget &Subtarget;
3799 MachineFunction &TheMachineFunction;
3800 const Function &TheFunction;
3801 MachineFrameInfo &FrameInfo;
3802 const TargetFrameLowering &FrameLowering;
3803 const TargetLowering &TargLowering;
3804 CallingConv::ID CallConv;
3805 CCState &CCInfo;
3806 };
3808 } // end anonymous namespace
3809 void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
3810 SDValue &Chain, unsigned StackSize) {
3811 // If the function takes variable number of arguments, make a frame index for
3812 // the start of the first vararg value... for expansion of llvm.va_start. We
3813 // can skip this if there are no va_start calls.
3814 if (is64Bit() || (CallConv != CallingConv::X86_FastCall &&
3815 CallConv != CallingConv::X86_ThisCall)) {
3816 FuncInfo->setVarArgsFrameIndex(
3817 FrameInfo.CreateFixedObject(1, StackSize, true));
3820 // 64-bit calling conventions support varargs and register parameters, so we
3821 // have to do extra work to spill them in the prologue.
3822 if (is64Bit()) {
3823 // Find the first unallocated argument registers.
3824 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3825 ArrayRef<MCPhysReg> ArgXMMs =
3826 get64BitArgumentXMMs(TheMachineFunction, CallConv, Subtarget);
3827 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3828 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3830 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3831 "SSE register cannot be used when SSE is disabled!");
3833 if (isWin64()) {
3834 // Get to the caller-allocated home save location. Add 8 to account
3835 // for the return address.
3836 int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
3837 FuncInfo->setRegSaveFrameIndex(
3838 FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3839 // Fixup to set vararg frame on shadow area (4 x i64).
3841 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3842 } else {
3843 // For X86-64, if there are vararg parameters that are passed via
3844 // registers, then we must store them to their spots on the stack so
3845 // they may be loaded by dereferencing the result of va_next.
3846 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3847 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3848 FuncInfo->setRegSaveFrameIndex(FrameInfo.CreateStackObject(
3849 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, Align(16), false));
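// The resulting save area holds 6 GPR slots (48 bytes) followed by 8 XMM
// slots (128 bytes), matching the gp_offset/fp_offset fields of the SysV
// x86-64 va_list.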
3852 SmallVector<SDValue, 6>
3853 LiveGPRs; // list of SDValue for GPR registers keeping live input value
3854 SmallVector<SDValue, 8> LiveXMMRegs; // list of SDValue for XMM registers
3855 // keeping live input value
3856 SDValue ALVal; // if applicable keeps SDValue for %al register
3858 // Gather all the live in physical registers.
3859 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3860 Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
3861 LiveGPRs.push_back(DAG.getCopyFromReg(Chain, DL, GPR, MVT::i64));
3863 const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
3864 if (!AvailableXmms.empty()) {
3865 Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
3866 ALVal = DAG.getCopyFromReg(Chain, DL, AL, MVT::i8);
3867 for (MCPhysReg Reg : AvailableXmms) {
3868 // FastRegisterAllocator spills virtual registers at basic
3869 // block boundary. That leads to usages of xmm registers
3870 // outside of check for %al. Pass physical registers to
3871 // VASTART_SAVE_XMM_REGS to avoid unnecessary spilling.
3872 TheMachineFunction.getRegInfo().addLiveIn(Reg);
3873 LiveXMMRegs.push_back(DAG.getRegister(Reg, MVT::v4f32));
3877 // Store the integer parameter registers.
3878 SmallVector<SDValue, 8> MemOps;
3879 SDValue RSFIN =
3880 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3881 TargLowering.getPointerTy(DAG.getDataLayout()));
3882 unsigned Offset = FuncInfo->getVarArgsGPOffset();
3883 for (SDValue Val : LiveGPRs) {
3884 SDValue FIN = DAG.getNode(ISD::ADD, DL,
3885 TargLowering.getPointerTy(DAG.getDataLayout()),
3886 RSFIN, DAG.getIntPtrConstant(Offset, DL));
3887 SDValue Store =
3888 DAG.getStore(Val.getValue(1), DL, Val, FIN,
3889 MachinePointerInfo::getFixedStack(
3890 DAG.getMachineFunction(),
3891 FuncInfo->getRegSaveFrameIndex(), Offset));
3892 MemOps.push_back(Store);
3893 Offset += 8;
3894 }
3896 // Now store the XMM (fp + vector) parameter registers.
3897 if (!LiveXMMRegs.empty()) {
3898 SmallVector<SDValue, 12> SaveXMMOps;
3899 SaveXMMOps.push_back(Chain);
3900 SaveXMMOps.push_back(ALVal);
3901 SaveXMMOps.push_back(RSFIN);
3902 SaveXMMOps.push_back(
3903 DAG.getTargetConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32));
3904 llvm::append_range(SaveXMMOps, LiveXMMRegs);
3905 MachineMemOperand *StoreMMO =
3906 DAG.getMachineFunction().getMachineMemOperand(
3907 MachinePointerInfo::getFixedStack(
3908 DAG.getMachineFunction(), FuncInfo->getRegSaveFrameIndex(),
3910 MachineMemOperand::MOStore, 128, Align(16));
3911 MemOps.push_back(DAG.getMemIntrinsicNode(X86ISD::VASTART_SAVE_XMM_REGS,
3912 DL, DAG.getVTList(MVT::Other),
3913 SaveXMMOps, MVT::i8, StoreMMO));
3916 if (!MemOps.empty())
3917 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
3921 void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
3922 // Find the largest legal vector type.
3923 MVT VecVT = MVT::Other;
3924 // FIXME: Only some x86_32 calling conventions support AVX512.
3925 if (Subtarget.useAVX512Regs() &&
3926 (is64Bit() || (CallConv == CallingConv::X86_VectorCall ||
3927 CallConv == CallingConv::Intel_OCL_BI)))
3928 VecVT = MVT::v16f32;
3929 else if (Subtarget.hasAVX())
3930 VecVT = MVT::v8f32;
3931 else if (Subtarget.hasSSE2())
3932 VecVT = MVT::v4f32;
3934 // We forward some GPRs and some vector types.
3935 SmallVector<MVT, 2> RegParmTypes;
3936 MVT IntVT = is64Bit() ? MVT::i64 : MVT::i32;
3937 RegParmTypes.push_back(IntVT);
3938 if (VecVT != MVT::Other)
3939 RegParmTypes.push_back(VecVT);
3941 // Compute the set of forwarded registers. The rest are scratch.
3942 SmallVectorImpl<ForwardedRegister> &Forwards =
3943 FuncInfo->getForwardedMustTailRegParms();
3944 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3946 // Forward AL for SysV x86_64 targets, since it is used for varargs.
3947 if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
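// In the SysV x86-64 ABI, AL carries the number of XMM registers actually
// used for variadic floating-point arguments, so it must be preserved for a
// later musttail vararg call.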
3948 Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
3949 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3952 // Copy all forwards from physical to virtual registers.
3953 for (ForwardedRegister &FR : Forwards) {
3954 // FIXME: Can we use a less constrained schedule?
3955 SDValue RegVal = DAG.getCopyFromReg(Chain, DL, FR.VReg, FR.VT);
3956 FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
3957 TargLowering.getRegClassFor(FR.VT));
3958 Chain = DAG.getCopyToReg(Chain, DL, FR.VReg, RegVal);
3962 void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
3963 unsigned StackSize) {
3964 // Set FrameIndex to the 0xAAAAAAA value to mark the unset state.
3965 // If necessary, it will be set to the correct value later.
3966 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3967 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3969 if (FrameInfo.hasVAStart())
3970 createVarArgAreaAndStoreRegisters(Chain, StackSize);
3972 if (FrameInfo.hasMustTailInVarArgFunc())
3973 forwardMustTailParameters(Chain);
3976 SDValue X86TargetLowering::LowerFormalArguments(
3977 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3978 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3979 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3980 MachineFunction &MF = DAG.getMachineFunction();
3981 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3983 const Function &F = MF.getFunction();
3984 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3985 F.getName() == "main")
3986 FuncInfo->setForceFramePointer(true);
3988 MachineFrameInfo &MFI = MF.getFrameInfo();
3989 bool Is64Bit = Subtarget.is64Bit();
3990 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3992 assert(
3993 !(IsVarArg && canGuaranteeTCO(CallConv)) &&
3994 "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3996 // Assign locations to all of the incoming arguments.
3997 SmallVector<CCValAssign, 16> ArgLocs;
3998 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
4000 // Allocate shadow area for Win64.
4001 if (IsWin64)
4002 CCInfo.AllocateStack(32, Align(8));
4004 CCInfo.AnalyzeArguments(Ins, CC_X86);
4006 // In vectorcall calling convention a second pass is required for the HVA
4008 if (CallingConv::X86_VectorCall == CallConv) {
4009 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
4012 // The next loop assumes that the locations are in the same order of the
4014 assert(isSortedByValueNo(ArgLocs) &&
4015 "Argument Location list must be sorted before lowering");
4018 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
4019 ++I, ++InsIndex) {
4020 assert(InsIndex < Ins.size() && "Invalid Ins index");
4021 CCValAssign &VA = ArgLocs[I];
4022 SDValue ArgValue;
4023 if (VA.isRegLoc()) {
4024 EVT RegVT = VA.getLocVT();
4025 if (VA.needsCustom()) {
4026 assert(
4027 VA.getValVT() == MVT::v64i1 &&
4028 "Currently the only custom case is when we split v64i1 to 2 regs");
4030 // v64i1 values, in regcall calling convention, that are
4031 // compiled to 32 bit arch, are split up into two registers.
4032 ArgValue =
4033 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
4034 } else {
4035 const TargetRegisterClass *RC;
4036 if (RegVT == MVT::i8)
4037 RC = &X86::GR8RegClass;
4038 else if (RegVT == MVT::i16)
4039 RC = &X86::GR16RegClass;
4040 else if (RegVT == MVT::i32)
4041 RC = &X86::GR32RegClass;
4042 else if (Is64Bit && RegVT == MVT::i64)
4043 RC = &X86::GR64RegClass;
4044 else if (RegVT == MVT::f16)
4045 RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
4046 else if (RegVT == MVT::f32)
4047 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
4048 else if (RegVT == MVT::f64)
4049 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
4050 else if (RegVT == MVT::f80)
4051 RC = &X86::RFP80RegClass;
4052 else if (RegVT == MVT::f128)
4053 RC = &X86::VR128RegClass;
4054 else if (RegVT.is512BitVector())
4055 RC = &X86::VR512RegClass;
4056 else if (RegVT.is256BitVector())
4057 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
4058 else if (RegVT.is128BitVector())
4059 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
4060 else if (RegVT == MVT::x86mmx)
4061 RC = &X86::VR64RegClass;
4062 else if (RegVT == MVT::v1i1)
4063 RC = &X86::VK1RegClass;
4064 else if (RegVT == MVT::v8i1)
4065 RC = &X86::VK8RegClass;
4066 else if (RegVT == MVT::v16i1)
4067 RC = &X86::VK16RegClass;
4068 else if (RegVT == MVT::v32i1)
4069 RC = &X86::VK32RegClass;
4070 else if (RegVT == MVT::v64i1)
4071 RC = &X86::VK64RegClass;
4073 llvm_unreachable("Unknown argument type!");
4075 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
4076 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
4079 // If this is an 8 or 16-bit value, it is really passed promoted to 32
4080 // bits. Insert an assert[sz]ext to capture this, then truncate to the
4081 // right size.
4082 if (VA.getLocInfo() == CCValAssign::SExt)
4083 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
4084 DAG.getValueType(VA.getValVT()));
4085 else if (VA.getLocInfo() == CCValAssign::ZExt)
4086 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
4087 DAG.getValueType(VA.getValVT()));
4088 else if (VA.getLocInfo() == CCValAssign::BCvt)
4089 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
4091 if (VA.isExtInLoc()) {
4092 // Handle MMX values passed in XMM regs.
4093 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
4094 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
4095 else if (VA.getValVT().isVector() &&
4096 VA.getValVT().getScalarType() == MVT::i1 &&
4097 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
4098 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
4099 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
4100 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
4101 } else
4102 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4103 }
4104 } else {
4105 assert(VA.isMemLoc());
4106 ArgValue =
4107 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
4108 }
4110 // If value is passed via pointer - do a load.
4111 if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
4112 ArgValue =
4113 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
4115 InVals.push_back(ArgValue);
4118 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
4119 if (Ins[I].Flags.isSwiftAsync()) {
4120 auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
4121 if (Subtarget.is64Bit())
4122 X86FI->setHasSwiftAsyncContext(true);
4123 else {
4124 int FI = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
4125 X86FI->setSwiftAsyncContextFrameIdx(FI);
4126 SDValue St = DAG.getStore(DAG.getEntryNode(), dl, InVals[I],
4127 DAG.getFrameIndex(FI, MVT::i32),
4128 MachinePointerInfo::getFixedStack(MF, FI));
4129 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, St, Chain);
4133 // Swift calling convention does not require we copy the sret argument
4134 // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
4135 if (CallConv == CallingConv::Swift || CallConv == CallingConv::SwiftTail)
4136 continue;
4138 // All x86 ABIs require that for returning structs by value we copy the
4139 // sret argument into %rax/%eax (depending on ABI) for the return. Save
4140 // the argument into a virtual register so that we can access it from the
4142 if (Ins[I].Flags.isSRet()) {
4143 assert(!FuncInfo->getSRetReturnReg() &&
4144 "SRet return has already been set");
4145 MVT PtrTy = getPointerTy(DAG.getDataLayout());
4147 MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
4148 FuncInfo->setSRetReturnReg(Reg);
4149 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
4150 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
4155 unsigned StackSize = CCInfo.getNextStackOffset();
4156 // Align stack specially for tail calls.
4157 if (shouldGuaranteeTCO(CallConv,
4158 MF.getTarget().Options.GuaranteedTailCallOpt))
4159 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
4162 VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
4163 .lowerVarArgsParameters(Chain, StackSize);
4165 // Some CCs need callee pop.
4166 if (X86::isCalleePop(CallConv, Is64Bit, IsVarArg,
4167 MF.getTarget().Options.GuaranteedTailCallOpt)) {
4168 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
4169 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
4170 // X86 interrupts must pop the error code (and the alignment padding) if
4171 // present.
4172 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
4173 } else {
4174 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
4175 // If this is an sret function, the return should pop the hidden pointer.
4176 if (!canGuaranteeTCO(CallConv) && hasCalleePopSRet(Ins, Subtarget))
4177 FuncInfo->setBytesToPopOnReturn(4);
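// E.g. (illustrative): a 32-bit sret function then returns with "ret $4"
// rather than "ret", popping the 4-byte hidden struct-return pointer the
// caller pushed; that is the 4 bytes recorded here.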
4181 // RegSaveFrameIndex is X86-64 only.
4182 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
4185 FuncInfo->setArgumentStackSize(StackSize);
4187 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
4188 EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
4189 if (Personality == EHPersonality::CoreCLR) {
4191 // TODO: Add a mechanism to frame lowering that will allow us to indicate
4192 // that we'd prefer this slot be allocated towards the bottom of the frame
4193 // (i.e. near the stack pointer after allocating the frame). Every
4194 // funclet needs a copy of this slot in its (mostly empty) frame, and the
4195 // offset from the bottom of this and each funclet's frame must be the
4196 // same, so the size of funclets' (mostly empty) frames is dictated by
4197 // how far this slot is from the bottom (since they allocate just enough
4198 // space to accommodate holding this slot at the correct offset).
4199 int PSPSymFI = MFI.CreateStackObject(8, Align(8), /*isSpillSlot=*/false);
4200 EHInfo->PSPSymFrameIdx = PSPSymFI;
4204 if (CallConv == CallingConv::X86_RegCall ||
4205 F.hasFnAttribute("no_caller_saved_registers")) {
4206 MachineRegisterInfo &MRI = MF.getRegInfo();
4207 for (std::pair<Register, Register> Pair : MRI.liveins())
4208 MRI.disableCalleeSavedRegister(Pair.first);
4214 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
4215 SDValue Arg, const SDLoc &dl,
4217 const CCValAssign &VA,
4218 ISD::ArgFlagsTy Flags,
4219 bool isByVal) const {
4220 unsigned LocMemOffset = VA.getLocMemOffset();
4221 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4222 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4225 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
4227 MaybeAlign Alignment;
4228 if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
4229 Arg.getSimpleValueType() != MVT::f80)
4230 Alignment = MaybeAlign(4);
4231 return DAG.getStore(
4232 Chain, dl, Arg, PtrOff,
4233 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
4237 /// Emit a load of the return address if tail call
4238 /// optimization is performed and it is required.
4239 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
4240 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
4241 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
4242 // Adjust the Return address stack slot.
4243 EVT VT = getPointerTy(DAG.getDataLayout());
4244 OutRetAddr = getReturnAddressFrameIndex(DAG);
4246 // Load the "old" Return address.
4247 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
4248 return SDValue(OutRetAddr.getNode(), 1);
4251 /// Emit a store of the return address if tail call
4252 /// optimization is performed and it is required (FPDiff!=0).
4253 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
4254 SDValue Chain, SDValue RetAddrFrIdx,
4255 EVT PtrVT, unsigned SlotSize,
4256 int FPDiff, const SDLoc &dl) {
4257 // Store the return address to the appropriate stack slot.
4258 if (!FPDiff) return Chain;
4259 // Calculate the new stack slot for the return address.
4260 int NewReturnAddrFI =
4261 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
4263 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
4264 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
4265 MachinePointerInfo::getFixedStack(
4266 DAG.getMachineFunction(), NewReturnAddrFI));
4270 /// Returns a vector_shuffle mask for a movs{s|d} or movd
4271 /// operation of the specified width.
4272 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
4274 unsigned NumElems = VT.getVectorNumElements();
4275 SmallVector<int, 8> Mask;
4276 Mask.push_back(NumElems);
4277 for (unsigned i = 1; i != NumElems; ++i)
4278 Mask.push_back(i);
4279 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
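// Worked example: for VT = v4f32 the mask built above is <4, 1, 2, 3>, i.e.
// lane 0 comes from V2 and lanes 1-3 are preserved from V1 - the classic
// movss "replace low element" pattern.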
4283 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
4284 SmallVectorImpl<SDValue> &InVals) const {
4285 SelectionDAG &DAG = CLI.DAG;
4287 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
4288 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
4289 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
4290 SDValue Chain = CLI.Chain;
4291 SDValue Callee = CLI.Callee;
4292 CallingConv::ID CallConv = CLI.CallConv;
4293 bool &isTailCall = CLI.IsTailCall;
4294 bool isVarArg = CLI.IsVarArg;
4295 const auto *CB = CLI.CB;
4297 MachineFunction &MF = DAG.getMachineFunction();
4298 bool Is64Bit = Subtarget.is64Bit();
4299 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
4300 bool IsSibcall = false;
4301 bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
4302 CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
4303 bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
4304 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
4305 bool HasNCSR = (CB && isa<CallInst>(CB) &&
4306 CB->hasFnAttr("no_caller_saved_registers"));
4307 bool HasNoCfCheck = (CB && CB->doesNoCfCheck());
4308 bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
4309 const Module *M = MF.getMMI().getModule();
4310 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
4312 MachineFunction::CallSiteInfo CSInfo;
4313 if (CallConv == CallingConv::X86_INTR)
4314 report_fatal_error("X86 interrupts may not be called directly");
4316 bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
4317 if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
4318 // If we are using a GOT, disable tail calls to external symbols with
4319 // default visibility. Tail calling such a symbol requires using a GOT
4320 // relocation, which forces early binding of the symbol. This breaks code
4321 // that requires lazy function symbol resolution. Using musttail or
4322 // GuaranteedTailCallOpt will override this.
4323 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4324 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
4325 G->getGlobal()->hasDefaultVisibility()))
4326 isTailCall = false;
4327 }
4329 if (isTailCall && !IsMustTail) {
4330 // Check if it's really possible to do a tail call.
4331 isTailCall = IsEligibleForTailCallOptimization(
4332 Callee, CallConv, IsCalleePopSRet, isVarArg, CLI.RetTy, Outs, OutVals,
4333 Ins, DAG);
4335 // Sibcalls are automatically detected tailcalls which do not require
4336 // ABI changes.
4337 if (!IsGuaranteeTCO && isTailCall)
4344 if (IsMustTail && !isTailCall)
4345 report_fatal_error("failed to perform tail call elimination on a call "
4346 "site marked musttail");
4348 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
4349 "Var args not supported with calling convention fastcc, ghc or hipe");
4351 // Analyze operands of the call, assigning locations to each operand.
4352 SmallVector<CCValAssign, 16> ArgLocs;
4353 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
4355 // Allocate shadow area for Win64.
4357 CCInfo.AllocateStack(32, Align(8));
4359 CCInfo.AnalyzeArguments(Outs, CC_X86);
4361 // In vectorcall calling convention a second pass is required for the HVA
4362 // arguments.
4363 if (CallingConv::X86_VectorCall == CallConv) {
4364 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
4367 // Get a count of how many bytes are to be pushed on the stack.
4368 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
4369 if (IsSibcall)
4370 // This is a sibcall. The memory operands are already available in the
4371 // caller's own caller's stack, so no new stack space is needed.
4372 NumBytes = 0;
4373 else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
4374 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
4378 shouldGuaranteeTCO(CallConv,
4379 MF.getTarget().Options.GuaranteedTailCallOpt)) {
4380 // Lower arguments at fp - stackoffset + fpdiff.
4381 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
4383 FPDiff = NumBytesCallerPushed - NumBytes;
4385 // Set the delta of movement of the return-address stack slot.
4386 // But only set it if the delta is greater than the previous delta.
4387 if (FPDiff < X86Info->getTCReturnAddrDelta())
4388 X86Info->setTCReturnAddrDelta(FPDiff);
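// Worked example (illustrative numbers): if the caller pops 8 bytes of its
// own incoming arguments but this callee needs 24 bytes, then
// FPDiff = 8 - 24 = -16, and the return address slot has to move 16 bytes
// further down the stack.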
4391 unsigned NumBytesToPush = NumBytes;
4392 unsigned NumBytesToPop = NumBytes;
4394 // If we have an inalloca argument, all stack space has already been allocated
4395 // for us and is right at the top of the stack. We don't support multiple
4396 // arguments passed in memory when using inalloca.
4397 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
4399 if (!ArgLocs.back().isMemLoc())
4400 report_fatal_error("cannot use inalloca attribute on a register "
4401 "parameter");
4402 if (ArgLocs.back().getLocMemOffset() != 0)
4403 report_fatal_error("any parameter with the inalloca attribute must be "
4404 "the only memory argument");
4405 } else if (CLI.IsPreallocated) {
4406 assert(ArgLocs.back().isMemLoc() &&
4407 "cannot use preallocated attribute on a register "
4409 SmallVector<size_t, 4> PreallocatedOffsets;
4410 for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
4411 if (CLI.CB->paramHasAttr(i, Attribute::Preallocated)) {
4412 PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
4415 auto *MFI = DAG.getMachineFunction().getInfo<X86MachineFunctionInfo>();
4416 size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
4417 MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
4418 MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
4422 if (!IsSibcall && !IsMustTail)
4423 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
4424 NumBytes - NumBytesToPush, dl);
4426 SDValue RetAddrFrIdx;
4427 // Load return address for tail calls.
4428 if (isTailCall && FPDiff)
4429 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
4430 Is64Bit, FPDiff, dl);
4432 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
4433 SmallVector<SDValue, 8> MemOpChains;
4436 // The next loop assumes that the locations are in the same order as the
4437 // out args.
4438 assert(isSortedByValueNo(ArgLocs) &&
4439 "Argument Location list must be sorted before lowering");
4441 // Walk the register/memloc assignments, inserting copies/loads. In the case
4442 // of tail call optimization, arguments are handled later.
4443 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4444 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
4446 assert(OutIndex < Outs.size() && "Invalid Out index");
4447 // Skip inalloca/preallocated arguments, they have already been written.
4448 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
4449 if (Flags.isInAlloca() || Flags.isPreallocated())
4452 CCValAssign &VA = ArgLocs[I];
4453 EVT RegVT = VA.getLocVT();
4454 SDValue Arg = OutVals[OutIndex];
4455 bool isByVal = Flags.isByVal();
4457 // Promote the value if needed.
4458 switch (VA.getLocInfo()) {
4459 default: llvm_unreachable("Unknown loc info!");
4460 case CCValAssign::Full: break;
4461 case CCValAssign::SExt:
4462 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
4464 case CCValAssign::ZExt:
4465 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
4467 case CCValAssign::AExt:
4468 if (Arg.getValueType().isVector() &&
4469 Arg.getValueType().getVectorElementType() == MVT::i1)
4470 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
4471 else if (RegVT.is128BitVector()) {
4472 // Special case: passing MMX values in XMM registers.
4473 Arg = DAG.getBitcast(MVT::i64, Arg);
4474 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
4475 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
4477 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
4479 case CCValAssign::BCvt:
4480 Arg = DAG.getBitcast(RegVT, Arg);
4482 case CCValAssign::Indirect: {
4484 // Memcpy the argument to a temporary stack slot to prevent
4485 // the caller from seeing any modifications the callee may make
4486 // as guaranteed by the `byval` attribute.
4487 int FrameIdx = MF.getFrameInfo().CreateStackObject(
4488 Flags.getByValSize(),
4489 std::max(Align(16), Flags.getNonZeroByValAlign()), false);
4491 DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
4493 CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
4494 // From now on treat this as a regular pointer.
4495 Arg = StackSlot;
4496 isByVal = false;
4497 } else {
4498 // Store the argument.
4499 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
4500 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
4501 Chain = DAG.getStore(
4502 Chain, dl, Arg, SpillSlot,
4503 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4510 if (VA.needsCustom()) {
4511 assert(VA.getValVT() == MVT::v64i1 &&
4512 "Currently the only custom case is when we split v64i1 to 2 regs");
4513 // Split v64i1 value into two registers
4514 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
4515 } else if (VA.isRegLoc()) {
4516 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4517 const TargetOptions &Options = DAG.getTarget().Options;
4518 if (Options.EmitCallSiteInfo)
4519 CSInfo.emplace_back(VA.getLocReg(), I);
4520 if (isVarArg && IsWin64) {
4521 // The Win64 ABI requires an argument XMM reg to be copied to the
4522 // corresponding shadow GP reg if the callee is a varargs function.
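// E.g. (illustrative): a varargs callee receiving a double in XMM1 also
// gets the same bits in RDX, so a callee that walks its register save area
// through the GP registers still sees the value.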
4524 switch (VA.getLocReg()) {
4525 case X86::XMM0: ShadowReg = X86::RCX; break;
4526 case X86::XMM1: ShadowReg = X86::RDX; break;
4527 case X86::XMM2: ShadowReg = X86::R8; break;
4528 case X86::XMM3: ShadowReg = X86::R9; break;
4531 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
4533 } else if (!IsSibcall && (!isTailCall || isByVal)) {
4534 assert(VA.isMemLoc());
4535 if (!StackPtr.getNode())
4536 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4537 getPointerTy(DAG.getDataLayout()));
4538 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4539 dl, DAG, VA, Flags, isByVal));
4543 if (!MemOpChains.empty())
4544 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4546 if (Subtarget.isPICStyleGOT()) {
4547 // ELF / PIC requires GOT in the EBX register before function calls via PLT
4548 // GOT pointer (except regcall).
4550 // Indirect call with RegCall calling convention may use up all the
4551 // general registers, so it is not suitable to reserve the EBX register
4552 // for the GOT address; just let the register allocator handle it.
4553 if (CallConv != CallingConv::X86_RegCall)
4554 RegsToPass.push_back(std::make_pair(
4555 Register(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4556 getPointerTy(DAG.getDataLayout()))));
4558 // If we are tail calling and generating PIC/GOT style code load the
4559 // address of the callee into ECX. The value in ecx is used as target of
4560 // the tail jump. This is done to circumvent the ebx/callee-saved problem
4561 // for tail calls on PIC/GOT architectures. Normally we would just put the
4562 // address of GOT into ebx and then call target@PLT. But for tail calls
4563 // ebx would be restored (since ebx is callee saved) before jumping to the
4564 // callee.
4566 // Note: The actual moving to ECX is done further down.
4567 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4568 if (G && !G->getGlobal()->hasLocalLinkage() &&
4569 G->getGlobal()->hasDefaultVisibility())
4570 Callee = LowerGlobalAddress(Callee, DAG);
4571 else if (isa<ExternalSymbolSDNode>(Callee))
4572 Callee = LowerExternalSymbol(Callee, DAG);
4576 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
4577 (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
4578 // From AMD64 ABI document:
4579 // For calls that may call functions that use varargs or stdargs
4580 // (prototype-less calls or calls to functions containing ellipsis (...) in
4581 // the declaration) %al is used as hidden argument to specify the number
4582 // of SSE registers used. The contents of %al do not need to match exactly
4583 // the number of registers, but must be an upper bound on the number of SSE
4584 // registers used and is in the range 0 - 8 inclusive.
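// E.g. (illustrative): for printf("%f\n", x) the caller emits
//   movb $1, %al
// right before the call, since exactly one XMM register carries an FP
// vararg; that is the constant computed below.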
4586 // Count the number of XMM registers allocated.
4587 static const MCPhysReg XMMArgRegs[] = {
4588 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4589 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4591 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4592 assert((Subtarget.hasSSE1() || !NumXMMRegs)
4593 && "SSE registers cannot be used when SSE is disabled");
4594 RegsToPass.push_back(std::make_pair(Register(X86::AL),
4595 DAG.getConstant(NumXMMRegs, dl,
4599 if (isVarArg && IsMustTail) {
4600 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4601 for (const auto &F : Forwards) {
4602 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4603 RegsToPass.push_back(std::make_pair(F.PReg, Val));
4607 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
4608 // don't need this because the eligibility check rejects calls that require
4609 // shuffling arguments passed in memory.
4610 if (!IsSibcall && isTailCall) {
4611 // Force all the incoming stack arguments to be loaded from the stack
4612 // before any new outgoing arguments are stored to the stack, because the
4613 // outgoing stack slots may alias the incoming argument stack slots, and
4614 // the alias isn't otherwise explicit. This is slightly more conservative
4615 // than necessary, because it means that each store effectively depends
4616 // on every argument instead of just those arguments it would clobber.
4617 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4619 SmallVector<SDValue, 8> MemOpChains2;
4622 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
4624 CCValAssign &VA = ArgLocs[I];
4626 if (VA.isRegLoc()) {
4627 if (VA.needsCustom()) {
4628 assert((CallConv == CallingConv::X86_RegCall) &&
4629 "Expecting custom case only in regcall calling convention");
4630 // This means that we are in special case where one argument was
4631 // passed through two register locations - Skip the next location
4638 assert(VA.isMemLoc());
4639 SDValue Arg = OutVals[OutsIndex];
4640 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4641 // Skip inalloca/preallocated arguments. They don't require any work.
4642 if (Flags.isInAlloca() || Flags.isPreallocated())
4644 // Create frame index.
4645 int32_t Offset = VA.getLocMemOffset()+FPDiff;
4646 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
4647 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4648 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4650 if (Flags.isByVal()) {
4651 // Copy relative to framepointer.
4652 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4653 if (!StackPtr.getNode())
4654 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4655 getPointerTy(DAG.getDataLayout()));
4656 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4659 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
4663 // Store relative to framepointer.
4664 MemOpChains2.push_back(DAG.getStore(
4665 ArgChain, dl, Arg, FIN,
4666 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4670 if (!MemOpChains2.empty())
4671 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4673 // Store the return address to the appropriate stack slot.
4674 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4675 getPointerTy(DAG.getDataLayout()),
4676 RegInfo->getSlotSize(), FPDiff, dl);
4679 // Build a sequence of copy-to-reg nodes chained together with token chain
4680 // and flag operands which copy the outgoing args into registers.
4682 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4683 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4684 RegsToPass[i].second, InFlag);
4685 InFlag = Chain.getValue(1);
4688 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4689 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4690 // In the 64-bit large code model, we have to make all calls
4691 // through a register, since the call instruction's 32-bit
4692 // pc-relative offset may not be large enough to hold the whole
4694 } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4695 Callee->getOpcode() == ISD::ExternalSymbol) {
4696 // Lower direct calls to global addresses and external symbols. Setting
4697 // ForCall to true here has the effect of removing WrapperRIP when possible
4698 // to allow direct calls to be selected without first materializing the
4699 // address into a register.
4700 Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4701 } else if (Subtarget.isTarget64BitILP32() &&
4702 Callee.getValueType() == MVT::i32) {
4703 // Zero-extend the 32-bit Callee address into 64 bits, as the x32 ABI requires.
4704 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4707 // Returns a chain & a flag for retval copy to use.
4708 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4709 SmallVector<SDValue, 8> Ops;
4711 if (!IsSibcall && isTailCall && !IsMustTail) {
4712 Chain = DAG.getCALLSEQ_END(Chain,
4713 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4714 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4715 InFlag = Chain.getValue(1);
4718 Ops.push_back(Chain);
4719 Ops.push_back(Callee);
4722 Ops.push_back(DAG.getTargetConstant(FPDiff, dl, MVT::i32));
4724 // Add argument registers to the end of the list so that they are known live
4726 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4727 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4728 RegsToPass[i].second.getValueType()));
4730 // Add a register mask operand representing the call-preserved registers.
4731 const uint32_t *Mask = [&]() {
4732 auto AdaptedCC = CallConv;
4733 // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists),
4734 // use X86_INTR calling convention because it has the same CSR mask
4735 // (same preserved registers).
4737 AdaptedCC = (CallingConv::ID)CallingConv::X86_INTR;
4738 // If NoCalleeSavedRegisters is requested, then use GHC since it happens
4739 // to use the CSR_NoRegs_RegMask.
4740 if (CB && CB->hasFnAttr("no_callee_saved_registers"))
4741 AdaptedCC = (CallingConv::ID)CallingConv::GHC;
4742 return RegInfo->getCallPreservedMask(MF, AdaptedCC);
4743 }();
4744 assert(Mask && "Missing call preserved mask for calling convention");
4746 // If this is an invoke in a 32-bit function using a funclet-based
4747 // personality, assume the function clobbers all registers. If an exception
4748 // is thrown, the runtime will not restore CSRs.
4749 // FIXME: Model this more precisely so that we can register allocate across
4750 // the normal edge and spill and fill across the exceptional edge.
4751 if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
4752 const Function &CallerFn = MF.getFunction();
4753 EHPersonality Pers =
4754 CallerFn.hasPersonalityFn()
4755 ? classifyEHPersonality(CallerFn.getPersonalityFn())
4756 : EHPersonality::Unknown;
4757 if (isFuncletEHPersonality(Pers))
4758 Mask = RegInfo->getNoPreservedMask();
4761 // Define a new register mask from the existing mask.
4762 uint32_t *RegMask = nullptr;
4764 // In some calling conventions we need to remove the used physical registers
4765 // from the reg mask.
4766 if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4767 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4769 // Allocate a new Reg Mask and copy Mask.
4770 RegMask = MF.allocateRegMask();
4771 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4772 memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4774 // Make sure all sub registers of the argument registers are reset
4775 // in the RegMask.
4776 for (auto const &RegPair : RegsToPass)
4777 for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4778 SubRegs.isValid(); ++SubRegs)
4779 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
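// Worked example: the mask is an array of 32-bit words with one bit per
// register, so clearing register number 53 updates word 53 / 32 == 1 by
// clearing bit 53 % 32 == 21, i.e. RegMask[1] &= ~(1u << 21).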
4781 // Create the RegMask Operand according to our updated mask.
4782 Ops.push_back(DAG.getRegisterMask(RegMask));
4784 // Create the RegMask Operand according to the static mask.
4785 Ops.push_back(DAG.getRegisterMask(Mask));
4788 if (InFlag.getNode())
4789 Ops.push_back(InFlag);
4791 if (isTailCall) {
4792 // We used to do:
4793 //// If this is the first return lowered for this function, add the regs
4794 //// to the liveout set for the function.
4795 // This isn't right, although it's probably harmless on x86; liveouts
4796 // should be computed from returns not tail calls. Consider a void
4797 // function making a tail call to a function returning int.
4798 MF.getFrameInfo().setHasTailCall();
4799 SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4800 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4801 return Ret;
4802 }
4804 if (HasNoCfCheck && IsCFProtectionSupported && IsIndirectCall) {
4805 Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4806 } else if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
4807 // Calls with a "clang.arc.attachedcall" bundle are special. They should be
4808 // expanded to the call, directly followed by a special marker sequence and
4809 // a call to a ObjC library function. Use the CALL_RVMARKER to do that.
4810 assert(!isTailCall &&
4811 "tail calls cannot be marked with clang.arc.attachedcall");
4812 assert(Is64Bit && "clang.arc.attachedcall is only supported in 64-bit mode");
4814 // Add a target global address for the retainRV/claimRV runtime function
4815 // just before the call target.
4816 Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
4817 auto PtrVT = getPointerTy(DAG.getDataLayout());
4818 auto GA = DAG.getTargetGlobalAddress(ARCFn, dl, PtrVT);
4819 Ops.insert(Ops.begin() + 1, GA);
4820 Chain = DAG.getNode(X86ISD::CALL_RVMARKER, dl, NodeTys, Ops);
4822 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4825 InFlag = Chain.getValue(1);
4826 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
4827 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4829 // Save heapallocsite metadata.
4831 if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
4832 DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4834 // Create the CALLSEQ_END node.
4835 unsigned NumBytesForCalleeToPop = 0; // Callee pops nothing.
4836 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4837 DAG.getTarget().Options.GuaranteedTailCallOpt))
4838 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
4839 else if (!canGuaranteeTCO(CallConv) && IsCalleePopSRet)
4840 // If this call passes a struct-return pointer, the callee
4841 // pops that struct pointer.
4842 NumBytesForCalleeToPop = 4;
4844 // Returns a flag for retval copy to use.
4846 Chain = DAG.getCALLSEQ_END(Chain,
4847 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4848 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4851 InFlag = Chain.getValue(1);
4854 // Handle result values, copying them out of physregs into vregs that we
4856 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4860 //===----------------------------------------------------------------------===//
4861 // Fast Calling Convention (tail call) implementation
4862 //===----------------------------------------------------------------------===//
4864 // Like std call, the callee cleans the arguments; the convention differs in
4865 // that ECX is reserved for storing the tail-called function's address. Only
4866 // 2 registers are free for argument passing (inreg). Tail call optimization
4867 // is performed provided:
4868 // * tailcallopt is enabled
4869 // * caller/callee are fastcc
4870 // On X86_64 architecture with GOT-style position independent code only local
4871 // (within module) calls are supported at the moment.
4872 // To keep the stack aligned according to the platform ABI, the function
4873 // GetAlignedArgumentStackSize ensures that the argument delta is always a
4874 // multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld for example.)
4875 // If a tail called function callee has more arguments than the caller the
4876 // caller needs to make sure that there is room to move the RETADDR to. This is
4877 // achieved by reserving an area the size of the argument delta right after the
4878 // original RETADDR, but before the saved framepointer or the spilled registers
4879 // e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
4891 /// Align the stack size, e.g. make it 16n + 12 for a 16-byte alignment
4892 /// requirement (given a 4-byte slot size).
4893 unsigned
4894 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
4895 SelectionDAG &DAG) const {
4896 const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
4897 const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
4898 assert(StackSize % SlotSize == 0 &&
4899 "StackSize must be a multiple of SlotSize");
4900 return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
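// Worked example: with SlotSize == 4 and StackAlignment == 16, StackSize == 8
// yields alignTo(12, 16) - 4 == 12, the "16n + 12" shape described above;
// pushing the 4-byte return address then lands on a 16-byte boundary.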
4903 /// Return true if the given stack call argument is already available in the
4904 /// same relative position in the caller's incoming argument stack.
4905 static
4906 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4907 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4908 const X86InstrInfo *TII, const CCValAssign &VA) {
4909 unsigned Bytes = Arg.getValueSizeInBits() / 8;
4912 // Look through nodes that don't alter the bits of the incoming value.
4913 unsigned Op = Arg.getOpcode();
4914 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4915 Arg = Arg.getOperand(0);
4918 if (Op == ISD::TRUNCATE) {
4919 const SDValue &TruncInput = Arg.getOperand(0);
4920 if (TruncInput.getOpcode() == ISD::AssertZext &&
4921 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4922 Arg.getValueType()) {
4923 Arg = TruncInput.getOperand(0);
4931 if (Arg.getOpcode() == ISD::CopyFromReg) {
4932 Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4933 if (!VR.isVirtual())
4935 MachineInstr *Def = MRI->getVRegDef(VR);
4938 if (!Flags.isByVal()) {
4939 if (!TII->isLoadFromStackSlot(*Def, FI))
4942 unsigned Opcode = Def->getOpcode();
4943 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4944 Opcode == X86::LEA64_32r) &&
4945 Def->getOperand(1).isFI()) {
4946 FI = Def->getOperand(1).getIndex();
4947 Bytes = Flags.getByValSize();
4951 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4952 if (Flags.isByVal())
4953 // ByVal argument is passed in as a pointer but it's now being
4954 // dereferenced. e.g.
4955 // define @foo(%struct.X* %A) {
4956 // tail call @bar(%struct.X* byval %A)
4959 SDValue Ptr = Ld->getBasePtr();
4960 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4963 FI = FINode->getIndex();
4964 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4965 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4966 FI = FINode->getIndex();
4967 Bytes = Flags.getByValSize();
4971 assert(FI != INT_MAX);
4972 if (!MFI.isFixedObjectIndex(FI))
4975 if (Offset != MFI.getObjectOffset(FI))
4978 // If this is not byval, check that the argument stack object is immutable.
4979 // inalloca and argument copy elision can create mutable argument stack
4980 // objects. Byval objects can be mutated, but a byval call intends to pass
4981 // the mutated memory.
4982 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4985 if (VA.getLocVT().getFixedSizeInBits() >
4986 Arg.getValueSizeInBits().getFixedSize()) {
4987 // If the argument location is wider than the argument type, check that any
4988 // extension flags match.
4989 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4990 Flags.isSExt() != MFI.isObjectSExt(FI)) {
4995 return Bytes == MFI.getObjectSize(FI);
4998 /// Check whether the call is eligible for tail call optimization. Targets
4999 /// that want to do tail call optimization should implement this function.
5000 bool X86TargetLowering::IsEligibleForTailCallOptimization(
5001 SDValue Callee, CallingConv::ID CalleeCC, bool IsCalleePopSRet,
5002 bool isVarArg, Type *RetTy, const SmallVectorImpl<ISD::OutputArg> &Outs,
5003 const SmallVectorImpl<SDValue> &OutVals,
5004 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
5005 if (!mayTailCallThisCC(CalleeCC))
5008 // If -tailcallopt is specified, make fastcc functions tail-callable.
5009 MachineFunction &MF = DAG.getMachineFunction();
5010 const Function &CallerF = MF.getFunction();
5012 // If the function return type is x86_fp80 and the callee return type is not,
5013 // then the FP_EXTEND of the call result is not a nop. It's not safe to
5014 // perform a tailcall optimization here.
5015 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
5018 CallingConv::ID CallerCC = CallerF.getCallingConv();
5019 bool CCMatch = CallerCC == CalleeCC;
5020 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
5021 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
5022 bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
5023 CalleeCC == CallingConv::Tail || CalleeCC == CallingConv::SwiftTail;
5025 // Win64 functions have extra shadow space for argument homing. Don't do the
5026 // sibcall if the caller and callee have mismatched expectations for this
5027 // space.
5028 if (IsCalleeWin64 != IsCallerWin64)
5031 if (IsGuaranteeTCO) {
5032 if (canGuaranteeTCO(CalleeCC) && CCMatch)
5037 // Look for obvious safe cases to perform tail call optimization that do not
5038 // require ABI changes. This is what gcc calls sibcall.
5040 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
5041 // emit a special epilogue.
5042 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
5043 if (RegInfo->hasStackRealignment(MF))
5046 // Also avoid sibcall optimization if we're an sret return fn and the callee
5047 // is incompatible. See comment in LowerReturn about why hasStructRetAttr is
5048 // insufficient.
5049 if (MF.getInfo<X86MachineFunctionInfo>()->getSRetReturnReg()) {
5050 // For a compatible tail call the callee must return our sret pointer. So it
5051 // needs to be (a) an sret function itself and (b) we pass our sret as its
5052 // sret. Condition #b is harder to determine.
5054 } else if (IsCalleePopSRet)
5055 // The callee pops an sret, so we cannot tail-call, as our caller doesn't
5056 // pop it.
5057 return false;
5059 // Do not sibcall optimize vararg calls unless all arguments are passed via
5060 // registers.
5061 LLVMContext &C = *DAG.getContext();
5062 if (isVarArg && !Outs.empty()) {
5063 // Optimizing for varargs on Win64 is unlikely to be safe without
5064 // additional testing.
5065 if (IsCalleeWin64 || IsCallerWin64)
5068 SmallVector<CCValAssign, 16> ArgLocs;
5069 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
5071 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
5072 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
5073 if (!ArgLocs[i].isRegLoc())
5077 // If the call result is in ST0 / ST1, it needs to be popped off the x87
5078 // stack. Therefore, if it's not used by the call it is not safe to optimize
5079 // this into a sibcall.
5080 bool Unused = false;
5081 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5088 SmallVector<CCValAssign, 16> RVLocs;
5089 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
5090 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
5091 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5092 CCValAssign &VA = RVLocs[i];
5093 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
5098 // Check that the call results are passed in the same way.
5099 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
5100 RetCC_X86, RetCC_X86))
5102 // The callee has to preserve all registers the caller needs to preserve.
5103 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
5104 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
5106 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
5107 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
5111 unsigned StackArgsSize = 0;
5113 // If the callee takes no arguments then go on to check the results of the
5114 // call.
5115 if (!Outs.empty()) {
5116 // Check if stack adjustment is needed. For now, do not do this if any
5117 // argument is passed on the stack.
5118 SmallVector<CCValAssign, 16> ArgLocs;
5119 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
5121 // Allocate shadow area for Win64
5123 CCInfo.AllocateStack(32, Align(8));
5125 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
5126 StackArgsSize = CCInfo.getNextStackOffset();
5128 if (CCInfo.getNextStackOffset()) {
5129 // Check if the arguments are already laid out in the right way as
5130 // the caller's fixed stack objects.
5131 MachineFrameInfo &MFI = MF.getFrameInfo();
5132 const MachineRegisterInfo *MRI = &MF.getRegInfo();
5133 const X86InstrInfo *TII = Subtarget.getInstrInfo();
5134 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5135 CCValAssign &VA = ArgLocs[i];
5136 SDValue Arg = OutVals[i];
5137 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5138 if (VA.getLocInfo() == CCValAssign::Indirect)
5140 if (!VA.isRegLoc()) {
5141 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
5148 bool PositionIndependent = isPositionIndependent();
5149 // If the tailcall address may be in a register, then make sure it's
5150 // possible to register allocate for it. In 32-bit, the call address can
5151 // only target EAX, EDX, or ECX since the tail call must be scheduled after
5152 // callee-saved registers are restored. These happen to be the same
5153 // registers used to pass 'inreg' arguments so watch out for those.
5154 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
5155 !isa<ExternalSymbolSDNode>(Callee)) ||
5156 PositionIndependent)) {
5157 unsigned NumInRegs = 0;
5158 // In PIC we need an extra register to formulate the address computation
5159 // for the callee.
5160 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
5162 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5163 CCValAssign &VA = ArgLocs[i];
5166 Register Reg = VA.getLocReg();
5169 case X86::EAX: case X86::EDX: case X86::ECX:
5170 if (++NumInRegs == MaxInRegs)
5177 const MachineRegisterInfo &MRI = MF.getRegInfo();
5178 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
5182 bool CalleeWillPop =
5183 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
5184 MF.getTarget().Options.GuaranteedTailCallOpt);
5186 if (unsigned BytesToPop =
5187 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
5188 // If we have bytes to pop, the callee must pop them.
5189 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
5190 if (!CalleePopMatches)
5192 } else if (CalleeWillPop && StackArgsSize > 0) {
5193 // If we don't have bytes to pop, make sure the callee doesn't pop any.
5201 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
5202 const TargetLibraryInfo *libInfo) const {
5203 return X86::createFastISel(funcInfo, libInfo);
5206 //===----------------------------------------------------------------------===//
5207 // Other Lowering Hooks
5208 //===----------------------------------------------------------------------===//
5210 bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
5211 bool AssumeSingleUse) {
5212 if (!AssumeSingleUse && !Op.hasOneUse())
5214 if (!ISD::isNormalLoad(Op.getNode()))
5217 // If this is an unaligned vector, make sure the target supports folding it.
5218 auto *Ld = cast<LoadSDNode>(Op.getNode());
5219 if (!Subtarget.hasAVX() && !Subtarget.hasSSEUnalignedMem() &&
5220 Ld->getValueSizeInBits(0) == 128 && Ld->getAlign() < Align(16))
5223 // TODO: If this is a non-temporal load and the target has an instruction
5224 // for it, it should not be folded. See "useNonTemporalLoad()".
5229 bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
5230 const X86Subtarget &Subtarget,
5231 bool AssumeSingleUse) {
5232 assert(Subtarget.hasAVX() && "Expected AVX for broadcast from memory");
5233 if (!X86::mayFoldLoad(Op, Subtarget, AssumeSingleUse))
5236 // We cannot replace a wide volatile load with a broadcast-from-memory,
5237 // because that would narrow the load, which isn't legal for volatiles.
5238 auto *Ld = cast<LoadSDNode>(Op.getNode());
5239 return !Ld->isVolatile() ||
5240 Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
5243 bool X86::mayFoldIntoStore(SDValue Op) {
5244 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
5247 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
5248 if (Op.hasOneUse()) {
5249 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
5250 return (ISD::ZERO_EXTEND == Opcode);
5255 static bool isTargetShuffle(unsigned Opcode) {
5257 default: return false;
5258 case X86ISD::BLENDI:
5259 case X86ISD::PSHUFB:
5260 case X86ISD::PSHUFD:
5261 case X86ISD::PSHUFHW:
5262 case X86ISD::PSHUFLW:
5264 case X86ISD::INSERTPS:
5265 case X86ISD::EXTRQI:
5266 case X86ISD::INSERTQI:
5267 case X86ISD::VALIGN:
5268 case X86ISD::PALIGNR:
5269 case X86ISD::VSHLDQ:
5270 case X86ISD::VSRLDQ:
5271 case X86ISD::MOVLHPS:
5272 case X86ISD::MOVHLPS:
5273 case X86ISD::MOVSHDUP:
5274 case X86ISD::MOVSLDUP:
5275 case X86ISD::MOVDDUP:
5279 case X86ISD::UNPCKL:
5280 case X86ISD::UNPCKH:
5281 case X86ISD::VBROADCAST:
5282 case X86ISD::VPERMILPI:
5283 case X86ISD::VPERMILPV:
5284 case X86ISD::VPERM2X128:
5285 case X86ISD::SHUF128:
5286 case X86ISD::VPERMIL2:
5287 case X86ISD::VPERMI:
5288 case X86ISD::VPPERM:
5289 case X86ISD::VPERMV:
5290 case X86ISD::VPERMV3:
5291 case X86ISD::VZEXT_MOVL:
5296 static bool isTargetShuffleVariableMask(unsigned Opcode) {
5298 default: return false;
5300 case X86ISD::PSHUFB:
5301 case X86ISD::VPERMILPV:
5302 case X86ISD::VPERMIL2:
5303 case X86ISD::VPPERM:
5304 case X86ISD::VPERMV:
5305 case X86ISD::VPERMV3:
5307 // 'Faux' Target Shuffles.
5315 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
5316 MachineFunction &MF = DAG.getMachineFunction();
5317 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
5318 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
5319 int ReturnAddrIndex = FuncInfo->getRAIndex();
5321 if (ReturnAddrIndex == 0) {
5322 // Set up a frame object for the return address.
5323 unsigned SlotSize = RegInfo->getSlotSize();
5324 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
5327 FuncInfo->setRAIndex(ReturnAddrIndex);
5330 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
5333 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
5334 bool hasSymbolicDisplacement) {
5335 // Offset should fit into a 32-bit immediate field.
5336 if (!isInt<32>(Offset))
5339 // If we don't have a symbolic displacement - we don't have any extra
5340 // restrictions.
5341 if (!hasSymbolicDisplacement)
5344 // FIXME: Some tweaks might be needed for medium code model.
5345 if (M != CodeModel::Small && M != CodeModel::Kernel)
5348 // For the small code model we assume that the last object is 16MB before the
5349 // end of the 31-bit boundary. We may also accept pretty large negative
5350 // constants, knowing that all objects are in the positive half of the address space.
5351 if (M == CodeModel::Small && Offset < 16*1024*1024)
5354 // For the kernel code model we know that all objects reside in the negative
5355 // half of the 32-bit address space. We may not accept negative offsets, since
5356 // they may be just off, but we may accept pretty large positive ones.
5357 if (M == CodeModel::Kernel && Offset >= 0)
5363 /// Determines whether the callee is required to pop its own arguments.
5364 /// Callee pop is necessary to support tail calls.
5365 bool X86::isCalleePop(CallingConv::ID CallingConv,
5366 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
5367 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
5368 // can guarantee TCO.
5369 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
5372 switch (CallingConv) {
5375 case CallingConv::X86_StdCall:
5376 case CallingConv::X86_FastCall:
5377 case CallingConv::X86_ThisCall:
5378 case CallingConv::X86_VectorCall:
5383 /// Return true if the condition is a signed comparison operation.
5384 static bool isX86CCSigned(unsigned X86CC) {
5387 llvm_unreachable("Invalid integer condition!");
5403 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
5404 switch (SetCCOpcode) {
5405 default: llvm_unreachable("Invalid integer condition!");
5406 case ISD::SETEQ: return X86::COND_E;
5407 case ISD::SETGT: return X86::COND_G;
5408 case ISD::SETGE: return X86::COND_GE;
5409 case ISD::SETLT: return X86::COND_L;
5410 case ISD::SETLE: return X86::COND_LE;
5411 case ISD::SETNE: return X86::COND_NE;
5412 case ISD::SETULT: return X86::COND_B;
5413 case ISD::SETUGT: return X86::COND_A;
5414 case ISD::SETULE: return X86::COND_BE;
5415 case ISD::SETUGE: return X86::COND_AE;
5419 /// Do a one-to-one translation of an ISD::CondCode to the X86-specific
5420 /// condition code, returning the condition code and the LHS/RHS of the
5421 /// comparison to make.
5422 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
5423 bool isFP, SDValue &LHS, SDValue &RHS,
5424 SelectionDAG &DAG) {
5426 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5427 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
5428 // X > -1 -> X == 0, jump !sign.
5429 RHS = DAG.getConstant(0, DL, RHS.getValueType());
5430 return X86::COND_NS;
5432 if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
5433 // X < 0 -> X == 0, jump on sign.
5434 return X86::COND_S;
5435 }
5436 if (SetCCOpcode == ISD::SETGE && RHSC->isZero()) {
5437 // X >= 0 -> X == 0, jump on !sign.
5438 return X86::COND_NS;
5440 if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
5441 // X < 1 -> X <= 0
5442 RHS = DAG.getConstant(0, DL, RHS.getValueType());
5443 return X86::COND_LE;
5447 return TranslateIntegerX86CC(SetCCOpcode);
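// E.g. (illustrative): the RHS tweaks above let "x > -1" lower to
//   test %eax, %eax ; jns ...
// instead of "cmp $-1, %eax ; jg ...", dropping the immediate operand.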
5450 // First determine if it is required or is profitable to flip the operands.
5452 // If LHS is a foldable load, but RHS is not, flip the condition.
5453 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
5454 !ISD::isNON_EXTLoad(RHS.getNode())) {
5455 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
5456 std::swap(LHS, RHS);
5459 switch (SetCCOpcode) {
5465 std::swap(LHS, RHS);
5469 // On a floating point condition, the flags are set as follows:
5470 // ZF | PF | CF | meaning
5471 // 0 | 0 | 0 | X > Y
5472 // 0 | 0 | 1 | X < Y
5473 // 1 | 0 | 0 | X == Y
5474 // 1 | 1 | 1 | unordered
5475 switch (SetCCOpcode) {
5476 default: llvm_unreachable("Condcode should be pre-legalized away");
5478 case ISD::SETEQ: return X86::COND_E;
5479 case ISD::SETOLT: // flipped
5481 case ISD::SETGT: return X86::COND_A;
5482 case ISD::SETOLE: // flipped
5484 case ISD::SETGE: return X86::COND_AE;
5485 case ISD::SETUGT: // flipped
5487 case ISD::SETLT: return X86::COND_B;
5488 case ISD::SETUGE: // flipped
5490 case ISD::SETLE: return X86::COND_BE;
5492 case ISD::SETNE: return X86::COND_NE;
5493 case ISD::SETUO: return X86::COND_P;
5494 case ISD::SETO: return X86::COND_NP;
5496 case ISD::SETUNE: return X86::COND_INVALID;
5500 /// Is there a floating point cmov for the specific X86 condition code?
5501 /// Current x86 isa includes the following FP cmov instructions:
5502 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
5503 static bool hasFPCMov(unsigned X86CC) {
5519 static bool useVPTERNLOG(const X86Subtarget &Subtarget, MVT VT) {
5520 return Subtarget.hasVLX() || Subtarget.canExtendTo512DQ() ||
5521 VT.is512BitVector();
5524 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
5526 MachineFunction &MF,
5527 unsigned Intrinsic) const {
5528 Info.flags = MachineMemOperand::MONone;
5531 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
5533 switch (Intrinsic) {
5534 case Intrinsic::x86_aesenc128kl:
5535 case Intrinsic::x86_aesdec128kl:
5536 Info.opc = ISD::INTRINSIC_W_CHAIN;
5537 Info.ptrVal = I.getArgOperand(1);
5538 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
5539 Info.align = Align(1);
5540 Info.flags |= MachineMemOperand::MOLoad;
5542 case Intrinsic::x86_aesenc256kl:
5543 case Intrinsic::x86_aesdec256kl:
5544 Info.opc = ISD::INTRINSIC_W_CHAIN;
5545 Info.ptrVal = I.getArgOperand(1);
5546 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
5547 Info.align = Align(1);
5548 Info.flags |= MachineMemOperand::MOLoad;
5550 case Intrinsic::x86_aesencwide128kl:
5551 case Intrinsic::x86_aesdecwide128kl:
5552 Info.opc = ISD::INTRINSIC_W_CHAIN;
5553 Info.ptrVal = I.getArgOperand(0);
5554 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
5555 Info.align = Align(1);
5556 Info.flags |= MachineMemOperand::MOLoad;
5558 case Intrinsic::x86_aesencwide256kl:
5559 case Intrinsic::x86_aesdecwide256kl:
5560 Info.opc = ISD::INTRINSIC_W_CHAIN;
5561 Info.ptrVal = I.getArgOperand(0);
5562 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
5563 Info.align = Align(1);
5564 Info.flags |= MachineMemOperand::MOLoad;
5566 case Intrinsic::x86_atomic_bts:
5567 case Intrinsic::x86_atomic_btc:
5568 case Intrinsic::x86_atomic_btr: {
5569 Info.opc = ISD::INTRINSIC_W_CHAIN;
5570 Info.ptrVal = I.getArgOperand(0);
5571 unsigned Size = I.getType()->getScalarSizeInBits();
5572 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5573 Info.align = Align(Size);
5574 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5575 MachineMemOperand::MOVolatile;
5582 switch (IntrData->Type) {
5583 case TRUNCATE_TO_MEM_VI8:
5584 case TRUNCATE_TO_MEM_VI16:
5585 case TRUNCATE_TO_MEM_VI32: {
5586 Info.opc = ISD::INTRINSIC_VOID;
5587 Info.ptrVal = I.getArgOperand(0);
5588 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
5589 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
5590 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
5591 ScalarVT = MVT::i8;
5592 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
5593 ScalarVT = MVT::i16;
5594 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
5595 ScalarVT = MVT::i32;
5597 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
5598 Info.align = Align(1);
5599 Info.flags |= MachineMemOperand::MOStore;
5604 Info.opc = ISD::INTRINSIC_W_CHAIN;
5605 Info.ptrVal = nullptr;
5606 MVT DataVT = MVT::getVT(I.getType());
5607 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5608 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5609 IndexVT.getVectorNumElements());
5610 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5611 Info.align = Align(1);
5612 Info.flags |= MachineMemOperand::MOLoad;
5616 Info.opc = ISD::INTRINSIC_VOID;
5617 Info.ptrVal = nullptr;
5618 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
5619 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5620 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5621 IndexVT.getVectorNumElements());
5622 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5623 Info.align = Align(1);
5624 Info.flags |= MachineMemOperand::MOStore;
5634 /// Returns true if the target can instruction select the
5635 /// specified FP immediate natively. If false, the legalizer will
5636 /// materialize the FP immediate as a load from a constant pool.
5637 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5638 bool ForCodeSize) const {
5639 for (const APFloat &FPImm : LegalFPImmediates)
5640 if (Imm.bitwiseIsEqual(FPImm))
5645 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5646 ISD::LoadExtType ExtTy,
5648 assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5650 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
5651 // relocation target a movq or addq instruction: don't let the load shrink.
5652 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
5653 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5654 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5655 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5657 // If this is (1) an AVX vector load with (2) multiple uses and (3) all of
5658 // those uses are extracted directly into a store, then the extract + store
5659 // can be store-folded. Therefore, it's probably not worth splitting the load.
5660 EVT VT = Load->getValueType(0);
5661 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5662 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5663 // Skip uses of the chain value. Result 0 of the node is the load value.
5664 if (UI.getUse().getResNo() != 0)
5667 // If this use is not an extract + store, it's probably worth splitting.
5668 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5669 UI->use_begin()->getOpcode() != ISD::STORE)
5672 // All non-chain uses are extract + store.
5679 /// Returns true if it is beneficial to convert a load of a constant
5680 /// to just the constant itself.
5681 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
5683 assert(Ty->isIntegerTy());
5685 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5686 if (BitSize == 0 || BitSize > 64)
5691 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5692 // If we are using XMM registers in the ABI and the condition of the select is
5693 // a floating-point compare and we have blendv or conditional move, then it is
5694 // cheaper to select instead of doing a cross-register move and creating a
5695 // load that depends on the compare result.
5696 bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5697 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5700 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5701 // TODO: It might be a win to ease or lift this restriction, but the generic
5702 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5703 if (VT.isVector() && Subtarget.hasAVX512())
5709 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
5711 // TODO: We handle scalars using custom code, but generic combining could make
5712 // that unnecessary.
5714 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
5717 // Find the type this will be legalized to. Otherwise we might prematurely
5718 // convert this to shl+add/sub and then still have to type legalize those ops.
5719 // Another choice would be to defer the decision for illegal types until
5720 // after type legalization. But constant splat vectors of i64 can't make it
5721 // through type legalization on 32-bit targets so we would need to special
5722 // case them.
5723 while (getTypeAction(Context, VT) != TypeLegal)
5724 VT = getTypeToTransformTo(Context, VT);
5726 // If vector multiply is legal, assume that's faster than shl + add/sub.
5727 // Multiply is a complex op with higher latency and lower throughput in
5728 // most implementations; sub-vXi32 vector multiplies are always fast,
5729 // vXi32 must not have a slow PMULLD implementation, and anything larger
5730 // (vXi64) is always going to be slow.
5731 unsigned EltSizeInBits = VT.getScalarSizeInBits();
5732 if (isOperationLegal(ISD::MUL, VT) && EltSizeInBits <= 32 &&
5733 (EltSizeInBits != 32 || !Subtarget.isPMULLDSlow()))
5736 // shl+add, shl+sub, shl+add+neg
5737 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
5738 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
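// Worked examples: MulC == 5 decomposes as (x << 2) + x via the (MulC - 1)
// test, and MulC == 7 as (x << 3) - x via (MulC + 1); both are typically
// cheaper than the multiply when the checks above fall through to here.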
5741 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5742 unsigned Index) const {
5743 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
5746 // Mask vectors support all subregister combinations and operations that
5747 // extract half of a vector.
5748 if (ResVT.getVectorElementType() == MVT::i1)
5749 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
5750 (Index == ResVT.getVectorNumElements()));
5752 return (Index % ResVT.getVectorNumElements()) == 0;
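// E.g. extracting v2i64 from v4i64 is considered cheap at Index 2 (an
// aligned half, 2 % 2 == 0) but not at Index 1, which does not start at a
// natural subvector boundary.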
5755 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
5756 unsigned Opc = VecOp.getOpcode();
5758 // Assume target opcodes can't be scalarized.
5759 // TODO - do we have any exceptions?
5760 if (Opc >= ISD::BUILTIN_OP_END)
5763 // If the vector op is not supported, try to convert to scalar.
5764 EVT VecVT = VecOp.getValueType();
5765 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
5768 // If the vector op is supported, but the scalar op is not, the transform may
5769 // not be worthwhile.
5770 EVT ScalarVT = VecVT.getScalarType();
5771 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
5774 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
5776 // TODO: Allow vectors?
5779 return VT.isSimple() || !isOperationExpand(Opcode, VT);
5782 bool X86TargetLowering::isCheapToSpeculateCttz() const {
5783 // Speculate cttz only if we can directly use TZCNT.
5784 return Subtarget.hasBMI();
5787 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
5788 // Speculate ctlz only if we can directly use LZCNT.
5789 return Subtarget.hasLZCNT();
5792 bool X86TargetLowering::hasBitPreservingFPLogic(EVT VT) const {
5793 return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
5796 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
5797 // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
5798 // expensive than a straight movsd. On the other hand, it's important to
5799 // shrink long double fp constant since fldt is very slow.
5800 return !Subtarget.hasSSE2() || VT == MVT::f80;
5803 bool X86TargetLowering::isScalarFPTypeInSSEReg(EVT VT) const {
5804 return (VT == MVT::f64 && Subtarget.hasSSE2()) ||
5805 (VT == MVT::f32 && Subtarget.hasSSE1()) || VT == MVT::f16;
5808 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5809 const SelectionDAG &DAG,
5810 const MachineMemOperand &MMO) const {
  if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
      BitcastVT.getVectorElementType() == MVT::i1)
    return false;

  if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
    return false;

  // If both types are legal vectors, it's always ok to convert them.
  if (LoadVT.isVector() && BitcastVT.isVector() &&
      isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
    return true;
5823 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5826 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5827 const MachineFunction &MF) const {
  // Do not merge up to a float/vector value size (e.g. 128 bits) if the
  // no-implicit-float attribute is set.
  bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);

  if (NoFloat) {
    unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
    return (MemVT.getSizeInBits() <= MaxIntSize);
  }
  // Make sure we don't merge greater than our preferred vector
  // width.
  if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
    return false;
  return true;
}
5844 bool X86TargetLowering::isCtlzFast() const {
5845 return Subtarget.hasFastLZCNT();
5848 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const {
  return true;
}
5853 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
  EVT VT = Y.getValueType();

  if (VT.isVector())
    return false;

  if (!Subtarget.hasBMI())
    return false;

  // There are only 32-bit and 64-bit forms for 'andn'.
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  return !isa<ConstantSDNode>(Y);
}
5869 bool X86TargetLowering::hasAndNot(SDValue Y) const {
  EVT VT = Y.getValueType();

  if (!VT.isVector())
    return hasAndNotCompare(Y);

  // Vector.

  if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
    return false;

  if (VT == MVT::v4i32)
    return true;

  return Subtarget.hasSSE2();
}
5886 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
5887 return X.getValueType().isScalarInteger(); // 'bt'
5890 bool X86TargetLowering::
5891 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5892 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5893 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5894 SelectionDAG &DAG) const {
  // Does baseline recommend not to perform the fold by default?
  if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
          X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
    return false;
  // For scalars this transform is always beneficial.
  if (X.getValueType().isScalarInteger())
    return true;
  // If all the shift amounts are identical, then transform is beneficial even
  // with rudimentary SSE2 shifts.
  if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
    return true;
  // If we have AVX2 with its powerful shift operations, then it's also good.
  if (Subtarget.hasAVX2())
    return true;
  // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
  return NewShiftOpcode == ISD::SHL;
}
5913 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5914 const SDNode *N, CombineLevel Level) const {
5915 assert(((N->getOpcode() == ISD::SHL &&
5916 N->getOperand(0).getOpcode() == ISD::SRL) ||
5917 (N->getOpcode() == ISD::SRL &&
5918 N->getOperand(0).getOpcode() == ISD::SHL)) &&
5919 "Expected shift-shift mask");
5920 // TODO: Should we always create i64 masks? Or only folded immediates?
5921 EVT VT = N->getValueType(0);
5922 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5923 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5924 // Only fold if the shift values are equal - so it folds to AND.
5925 // TODO - we should fold if either is a non-uniform vector but we don't do
5926 // the fold for non-splats yet.
5927 return N->getOperand(1) == N->getOperand(0).getOperand(1);
5929 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
5932 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5933 EVT VT = Y.getValueType();
  // For vectors, we don't have a preference, but we probably want a mask.
  if (VT.isVector())
    return false;

  // 64-bit shifts on 32-bit targets produce really bad bloated code.
  if (VT == MVT::i64 && !Subtarget.is64Bit())
    return false;

  return true;
}
bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
                                          SDNode *N) const {
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      !Subtarget.isOSWindows())
    return false;
  return true;
}
5954 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5955 // Any legal vector type can be splatted more efficiently than
5956 // loading/spilling from memory.
5957 return isTypeLegal(VT);
5960 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
  MVT VT = MVT::getIntegerVT(NumBits);
  if (isTypeLegal(VT))
    return VT;

  // PMOVMSKB can handle this.
  if (NumBits == 128 && isTypeLegal(MVT::v16i8))
    return MVT::v16i8;

  // VPMOVMSKB can handle this.
  if (NumBits == 256 && isTypeLegal(MVT::v32i8))
    return MVT::v32i8;
5973 // TODO: Allow 64-bit type for 32-bit target.
5974 // TODO: 512-bit types should be allowed, but make sure that those
5975 // cases are handled in combineVectorSizedSetCCEquality().
5977 return MVT::INVALID_SIMPLE_VALUE_TYPE;
5980 /// Val is the undef sentinel value or equal to the specified value.
5981 static bool isUndefOrEqual(int Val, int CmpVal) {
5982 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5985 /// Return true if every element in Mask is the undef sentinel value or equal to
/// the specified value.
5987 static bool isUndefOrEqual(ArrayRef<int> Mask, int CmpVal) {
5988 return llvm::all_of(Mask, [CmpVal](int M) {
    return (M == SM_SentinelUndef) || (M == CmpVal);
  });
}
5993 /// Val is either the undef or zero sentinel value.
5994 static bool isUndefOrZero(int Val) {
5995 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5998 /// Return true if every element in Mask, beginning from position Pos and ending
5999 /// in Pos+Size is the undef sentinel value.
6000 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
6001 return llvm::all_of(Mask.slice(Pos, Size),
6002 [](int M) { return M == SM_SentinelUndef; });
6005 /// Return true if the mask creates a vector whose lower half is undefined.
6006 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
6007 unsigned NumElts = Mask.size();
6008 return isUndefInRange(Mask, 0, NumElts / 2);
6011 /// Return true if the mask creates a vector whose upper half is undefined.
6012 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
6013 unsigned NumElts = Mask.size();
6014 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
/// Return true if Val falls within the specified half-open range [Low, Hi).
static bool isInRange(int Val, int Low, int Hi) {
  return (Val >= Low && Val < Hi);
}
/// Return true if the value of any element in Mask falls within the specified
/// half-open range [Low, Hi).
6024 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
6025 return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
6028 /// Return true if the value of any element in Mask is the zero sentinel value.
6029 static bool isAnyZero(ArrayRef<int> Mask) {
6030 return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
6033 /// Return true if the value of any element in Mask is the zero or undef
6034 /// sentinel values.
6035 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
6036 return llvm::any_of(Mask, [](int M) {
    return M == SM_SentinelZero || M == SM_SentinelUndef;
  });
}
/// Return true if Val is undef or if its value falls within the
/// specified half-open range [Low, Hi).
6043 static bool isUndefOrInRange(int Val, int Low, int Hi) {
6044 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
6047 /// Return true if every element in Mask is undef or if its value
/// falls within the specified half-open range [Low, Hi).
6049 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
6050 return llvm::all_of(
6051 Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
6054 /// Return true if Val is undef, zero or if its value falls within the
/// specified half-open range [Low, Hi).
6056 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
6057 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
6060 /// Return true if every element in Mask is undef, zero or if its value
/// falls within the specified half-open range [Low, Hi).
6062 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
6063 return llvm::all_of(
6064 Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
6067 /// Return true if every element in Mask, beginning
6068 /// from position Pos and ending in Pos + Size, falls within the specified
6069 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
6070 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
6071 unsigned Size, int Low, int Step = 1) {
  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
    if (!isUndefOrEqual(Mask[i], Low))
      return false;
  return true;
}
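// For example (illustrative): Mask = <4, -1, 6, 7> with Pos = 0, Size = 4,
// Low = 4, Step = 1 matches the sequence 4, 5, 6, 7 (the -1 lane is undef).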
6078 /// Return true if every element in Mask, beginning
6079 /// from position Pos and ending in Pos+Size, falls within the specified
/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or
/// is zero.
static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
                                             unsigned Size, int Low,
                                             int Step = 1) {
  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
    if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
      return false;
  return true;
}
6090 /// Return true if every element in Mask, beginning
6091 /// from position Pos and ending in Pos+Size is undef or is zero.
6092 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
                                 unsigned Size) {
  return llvm::all_of(Mask.slice(Pos, Size), isUndefOrZero);
}
6097 /// Helper function to test whether a shuffle mask could be
6098 /// simplified by widening the elements being shuffled.
6100 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
6101 /// leaves it in an unspecified state.
6103 /// NOTE: This must handle normal vector shuffle masks and *target* vector
6104 /// shuffle masks. The latter have the special property of a '-2' representing
/// a zeroed lane of a vector.
6106 static bool canWidenShuffleElements(ArrayRef<int> Mask,
6107 SmallVectorImpl<int> &WidenedMask) {
6108 WidenedMask.assign(Mask.size() / 2, 0);
6109 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
    int M0 = Mask[i];
    int M1 = Mask[i + 1];

    // If both elements are undef, it's trivial.
    if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
      WidenedMask[i / 2] = SM_SentinelUndef;
      continue;
    }

    // Check for an undef mask and a mask value properly aligned to fit with
    // a pair of values. If we find such a case, use the non-undef mask's value.
    if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
      WidenedMask[i / 2] = M1 / 2;
      continue;
    }
    if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
      WidenedMask[i / 2] = M0 / 2;
      continue;
    }

    // When zeroing, we need to spread the zeroing across both lanes to widen.
    if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
      if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
          (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
        WidenedMask[i / 2] = SM_SentinelZero;
        continue;
      }
      return false;
    }

    // Finally check if the two mask values are adjacent and aligned with
    // a pair.
    if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
      WidenedMask[i / 2] = M0 / 2;
      continue;
    }

    // Otherwise we can't safely widen the elements used in this shuffle.
    return false;
  }
  assert(WidenedMask.size() == Mask.size() / 2 &&
         "Incorrect size of mask after widening the elements!");

  return true;
}
6156 static bool canWidenShuffleElements(ArrayRef<int> Mask,
                                    const APInt &Zeroable,
                                    bool V2IsZero,
                                    SmallVectorImpl<int> &WidenedMask) {
  // Create an alternative mask with info about zeroable elements.
  // Here we do not set undef elements as zeroable.
  SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
  if (V2IsZero) {
    assert(!Zeroable.isZero() && "V2's non-undef elements are used?!");
    for (int i = 0, Size = Mask.size(); i != Size; ++i)
      if (Mask[i] != SM_SentinelUndef && Zeroable[i])
        ZeroableMask[i] = SM_SentinelZero;
  }
  return canWidenShuffleElements(ZeroableMask, WidenedMask);
}
6172 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
6173 SmallVector<int, 32> WidenedMask;
6174 return canWidenShuffleElements(Mask, WidenedMask);
// Attempt to narrow/widen shuffle mask until it matches the target number of
// elements.
6179 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
6180 SmallVectorImpl<int> &ScaledMask) {
6181 unsigned NumSrcElts = Mask.size();
6182 assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
6183 "Illegal shuffle scale factor");
6185 // Narrowing is guaranteed to work.
  if (NumDstElts >= NumSrcElts) {
    int Scale = NumDstElts / NumSrcElts;
    llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
    return true;
  }

  // We have to repeat the widening until we reach the target size, but we can
  // split out the first widening as it sets up ScaledMask for us.
  if (canWidenShuffleElements(Mask, ScaledMask)) {
    while (ScaledMask.size() > NumDstElts) {
      SmallVector<int, 16> WidenedMask;
      if (!canWidenShuffleElements(ScaledMask, WidenedMask))
        return false;
      ScaledMask = std::move(WidenedMask);
    }
    return true;
  }

  return false;
}
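// For example (illustrative): scaling the v4i32 mask <0, 1, 6, 7> down to 2
// elements widens it to <0, 3>; scaling it up to 8 elements narrows it to
// <0, 1, 2, 3, 12, 13, 14, 15>.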
6207 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
6208 bool X86::isZeroNode(SDValue Elt) {
6209 return isNullConstant(Elt) || isNullFPConstant(Elt);
6212 // Build a vector of constants.
6213 // Use an UNDEF node if MaskElt == -1.
// Split 64-bit constants in 32-bit mode.
6215 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
6216 const SDLoc &dl, bool IsMask = false) {
  SmallVector<SDValue, 32> Ops;
  bool Split = false;
6221 MVT ConstVecVT = VT;
6222 unsigned NumElts = VT.getVectorNumElements();
6223 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
6224 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
    ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
    Split = true;
  }
6229 MVT EltVT = ConstVecVT.getVectorElementType();
6230 for (unsigned i = 0; i < NumElts; ++i) {
6231 bool IsUndef = Values[i] < 0 && IsMask;
6232 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
6233 DAG.getConstant(Values[i], dl, EltVT);
6234 Ops.push_back(OpNode);
    if (Split)
      Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
                    DAG.getConstant(0, dl, EltVT));
  }
6239 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
  if (ConstVecVT != VT)
    ConstsNode = DAG.getBitcast(VT, ConstsNode);
  return ConstsNode;
}
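// For example (illustrative): on a 32-bit target (no legal i64), the v2i64
// mask constant <5, -1> is built as the v4i32 vector <5, 0, undef, undef>
// and then bitcast back to v2i64.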
6245 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
6246 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6247 assert(Bits.size() == Undefs.getBitWidth() &&
6248 "Unequal constant and undef arrays");
  SmallVector<SDValue, 32> Ops;
  bool Split = false;
6252 MVT ConstVecVT = VT;
6253 unsigned NumElts = VT.getVectorNumElements();
6254 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
6255 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
    ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
    Split = true;
  }
6260 MVT EltVT = ConstVecVT.getVectorElementType();
  for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
    if (Undefs[i]) {
      Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
      continue;
    }
    const APInt &V = Bits[i];
    assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
    if (Split) {
      Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
      Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
    } else if (EltVT == MVT::f32) {
      APFloat FV(APFloat::IEEEsingle(), V);
      Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
    } else if (EltVT == MVT::f64) {
      APFloat FV(APFloat::IEEEdouble(), V);
      Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
    } else {
      Ops.push_back(DAG.getConstant(V, dl, EltVT));
    }
  }
6282 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
6283 return DAG.getBitcast(VT, ConstsNode);
6286 /// Returns a vector of specified type with all zero elements.
6287 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
6288 SelectionDAG &DAG, const SDLoc &dl) {
6289 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
6290 VT.getVectorElementType() == MVT::i1) &&
6291 "Unexpected vector type");
6293 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
6294 // type. This ensures they get CSE'd. But if the integer type is not
6295 // available, use a floating-point +0.0 instead.
  SDValue Vec;
  if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
6298 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
6299 } else if (VT.isFloatingPoint()) {
6300 Vec = DAG.getConstantFP(+0.0, dl, VT);
6301 } else if (VT.getVectorElementType() == MVT::i1) {
6302 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
6303 "Unexpected vector type");
6304 Vec = DAG.getConstant(0, dl, VT);
  } else {
    unsigned Num32BitElts = VT.getSizeInBits() / 32;
    Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
  }
  return DAG.getBitcast(VT, Vec);
}
// Helper to determine whether LHS and RHS are both subvectors extracted from
// the same source vector. If commuting is allowed, they don't have to be in
// (Lo/Hi) order.
6314 static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {
6315 if (LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6316 RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6317 LHS.getValueType() != RHS.getValueType() ||
      LHS.getOperand(0) != RHS.getOperand(0))
    return SDValue();

  SDValue Src = LHS.getOperand(0);
  if (Src.getValueSizeInBits() != (LHS.getValueSizeInBits() * 2))
    return SDValue();
6325 unsigned NumElts = LHS.getValueType().getVectorNumElements();
6326 if ((LHS.getConstantOperandAPInt(1) == 0 &&
6327 RHS.getConstantOperandAPInt(1) == NumElts) ||
6328 (AllowCommute && RHS.getConstantOperandAPInt(1) == 0 &&
       LHS.getConstantOperandAPInt(1) == NumElts))
    return Src;

  return SDValue();
}
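// For example (illustrative): with a v8i32 source Src,
//   LHS = extract_subvector(Src, 0) and RHS = extract_subvector(Src, 4)
// (both v4i32) returns Src; with AllowCommute the swapped order matches too.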
6335 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
6336 const SDLoc &dl, unsigned vectorWidth) {
6337 EVT VT = Vec.getValueType();
6338 EVT ElVT = VT.getVectorElementType();
6339 unsigned Factor = VT.getSizeInBits() / vectorWidth;
6340 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
6341 VT.getVectorNumElements() / Factor);
6343 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
6344 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
6345 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
6347 // This is the index of the first element of the vectorWidth-bit chunk
6348 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
6349 IdxVal &= ~(ElemsPerChunk - 1);
6351 // If the input is a buildvector just emit a smaller one.
6352 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
6353 return DAG.getBuildVector(ResultVT, dl,
6354 Vec->ops().slice(IdxVal, ElemsPerChunk));
6356 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
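// For example (illustrative): extractSubVector of a v8i32 Vec with IdxVal = 5
// and vectorWidth = 128 rounds the index down to 4 and returns the upper
// v4i32 half of the 256-bit vector.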
6360 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
6361 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
6362 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
6363 /// instructions or a simple subregister reference. Idx is an index in the
6364 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
6365 /// lowering EXTRACT_VECTOR_ELT operations easier.
6366 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
6367 SelectionDAG &DAG, const SDLoc &dl) {
6368 assert((Vec.getValueType().is256BitVector() ||
6369 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
6370 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
6373 /// Generate a DAG to grab 256-bits from a 512-bit vector.
6374 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
6375 SelectionDAG &DAG, const SDLoc &dl) {
6376 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
6377 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
6380 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
6381 SelectionDAG &DAG, const SDLoc &dl,
6382 unsigned vectorWidth) {
6383 assert((vectorWidth == 128 || vectorWidth == 256) &&
6384 "Unsupported vector width");
  // Inserting an UNDEF subvector changes nothing; just return Result.
  if (Vec.isUndef())
    return Result;
6388 EVT VT = Vec.getValueType();
6389 EVT ElVT = VT.getVectorElementType();
6390 EVT ResultVT = Result.getValueType();
6392 // Insert the relevant vectorWidth bits.
6393 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
6394 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
6396 // This is the index of the first element of the vectorWidth-bit chunk
6397 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
6398 IdxVal &= ~(ElemsPerChunk - 1);
6400 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
6401 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
6404 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
6405 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
6406 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
6407 /// simple superregister reference. Idx is an index in the 128 bits
6408 /// we want. It need not be aligned to a 128-bit boundary. That makes
6409 /// lowering INSERT_VECTOR_ELT operations easier.
6410 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
6411 SelectionDAG &DAG, const SDLoc &dl) {
6412 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
6413 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
6416 /// Widen a vector to a larger size with the same scalar type, with the new
6417 /// elements either zero or undef.
6418 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
                              const SDLoc &dl) {
6421 assert(Vec.getValueSizeInBits().getFixedSize() < VT.getFixedSizeInBits() &&
6422 Vec.getValueType().getScalarType() == VT.getScalarType() &&
6423 "Unsupported vector widening type");
  SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
                                : DAG.getUNDEF(VT);
6426 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
6427 DAG.getIntPtrConstant(0, dl));
6430 /// Widen a vector to a larger size with the same scalar type, with the new
6431 /// elements either zero or undef.
6432 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
6433 const X86Subtarget &Subtarget, SelectionDAG &DAG,
6434 const SDLoc &dl, unsigned WideSizeInBits) {
6435 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
6436 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
6437 "Unsupported vector widening type");
6438 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
6439 MVT SVT = Vec.getSimpleValueType().getScalarType();
6440 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
6441 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
6444 // Helper function to collect subvector ops that are concatenated together,
6445 // either by ISD::CONCAT_VECTORS or a ISD::INSERT_SUBVECTOR series.
6446 // The subvectors in Ops are guaranteed to be the same type.
6447 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
6448 SelectionDAG &DAG) {
6449 assert(Ops.empty() && "Expected an empty ops vector");
6451 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
    Ops.append(N->op_begin(), N->op_end());
    return true;
  }
6456 if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
6457 SDValue Src = N->getOperand(0);
6458 SDValue Sub = N->getOperand(1);
6459 const APInt &Idx = N->getConstantOperandAPInt(2);
6460 EVT VT = Src.getValueType();
6461 EVT SubVT = Sub.getValueType();
6463 // TODO - Handle more general insert_subvector chains.
6464 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
      // insert_subvector(undef, x, lo)
      if (Idx == 0 && Src.isUndef()) {
        Ops.push_back(Sub);
        Ops.push_back(DAG.getUNDEF(SubVT));
        return true;
      }
      if (Idx == (VT.getVectorNumElements() / 2)) {
        // insert_subvector(insert_subvector(undef, x, lo), y, hi)
        if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
            Src.getOperand(1).getValueType() == SubVT &&
            isNullConstant(Src.getOperand(2))) {
          Ops.push_back(Src.getOperand(1));
          Ops.push_back(Sub);
          return true;
        }
        // insert_subvector(x, extract_subvector(x, lo), hi)
        if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
            Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
          Ops.append(2, Sub);
          return true;
        }
        // insert_subvector(undef, x, hi)
        if (Src.isUndef()) {
          Ops.push_back(DAG.getUNDEF(SubVT));
          Ops.push_back(Sub);
          return true;
        }
      }
    }
  }

  return false;
}
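// For example (illustrative): the v8i32 node
//   insert_subvector(insert_subvector(undef, X, 0), Y, 4)
// collects as Ops = {X, Y} (both v4i32).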
static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
                                               const SDLoc &dl) {
6501 EVT VT = Op.getValueType();
6502 unsigned NumElems = VT.getVectorNumElements();
6503 unsigned SizeInBits = VT.getSizeInBits();
6504 assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
6505 "Can't split odd sized vector");
6507 // If this is a splat value (with no-undefs) then use the lower subvector,
6508 // which should be a free extraction.
6509 SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
6510 if (DAG.isSplatValue(Op, /*AllowUndefs*/ false))
6511 return std::make_pair(Lo, Lo);
6513 SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
6514 return std::make_pair(Lo, Hi);
6517 /// Break an operation into 2 half sized ops and then concatenate the results.
6518 static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
6519 unsigned NumOps = Op.getNumOperands();
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
6523 // Extract the LHS Lo/Hi vectors
6524 SmallVector<SDValue> LoOps(NumOps, SDValue());
6525 SmallVector<SDValue> HiOps(NumOps, SDValue());
6526 for (unsigned I = 0; I != NumOps; ++I) {
6527 SDValue SrcOp = Op.getOperand(I);
6528 if (!SrcOp.getValueType().isVector()) {
      LoOps[I] = HiOps[I] = SrcOp;
      continue;
    }
6532 std::tie(LoOps[I], HiOps[I]) = splitVector(SrcOp, DAG, dl);
  }

  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
6537 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
6538 DAG.getNode(Op.getOpcode(), dl, LoVT, LoOps),
                     DAG.getNode(Op.getOpcode(), dl, HiVT, HiOps));
}
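// For example (illustrative): a v32i16 add on an AVX2 target can be split
// into two v16i16 adds whose results are concatenated back together; any
// scalar operands are passed to both halves unchanged.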
6542 /// Break an unary integer operation into 2 half sized ops and then
6543 /// concatenate the result back.
6544 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
  // Make sure we only try to split 256/512-bit types to avoid creating
  // narrow vectors.
  EVT VT = Op.getValueType();
6549 assert((Op.getOperand(0).getValueType().is256BitVector() ||
6550 Op.getOperand(0).getValueType().is512BitVector()) &&
6551 (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
6552 assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
             VT.getVectorNumElements() &&
         "Unexpected VTs!");
  return splitVectorOp(Op, DAG);
}
6558 /// Break a binary integer operation into 2 half sized ops and then
6559 /// concatenate the result back.
6560 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
6561 // Assert that all the types match.
6562 EVT VT = Op.getValueType();
6564 assert(Op.getOperand(0).getValueType() == VT &&
6565 Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
6566 assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
  return splitVectorOp(Op, DAG);
}
6570 // Helper for splitting operands of an operation to legal target size and
6571 // apply a function on each part.
6572 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
6573 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
6574 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
6575 // The argument Builder is a function that will be applied on each split part:
6576 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
6577 template <typename F>
6578 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
6579 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
6580 F Builder, bool CheckBWI = true) {
6581 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
6582 unsigned NumSubs = 1;
6583 if ((CheckBWI && Subtarget.useBWIRegs()) ||
6584 (!CheckBWI && Subtarget.useAVX512Regs())) {
6585 if (VT.getSizeInBits() > 512) {
6586 NumSubs = VT.getSizeInBits() / 512;
      assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
    }
6589 } else if (Subtarget.hasAVX2()) {
6590 if (VT.getSizeInBits() > 256) {
6591 NumSubs = VT.getSizeInBits() / 256;
6592 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
    }
  } else {
    if (VT.getSizeInBits() > 128) {
      NumSubs = VT.getSizeInBits() / 128;
      assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
    }
  }

  if (NumSubs == 1)
    return Builder(DAG, DL, Ops);
6604 SmallVector<SDValue, 4> Subs;
6605 for (unsigned i = 0; i != NumSubs; ++i) {
6606 SmallVector<SDValue, 2> SubOps;
6607 for (SDValue Op : Ops) {
6608 EVT OpVT = Op.getValueType();
6609 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
6610 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
6611 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
6613 Subs.push_back(Builder(DAG, DL, SubOps));
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
}
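// Illustrative usage (a sketch; the variable names are assumptions):
//   auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
//                        ArrayRef<SDValue> Ops) {
//     return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
//   };
//   SDValue Avg = SplitOpsAndApply(DAG, Subtarget, DL, VT, {Op0, Op1},
//                                  AVGBuilder);
// On an AVX2-only target with VT = v64i8 this emits two v32i8 X86ISD::AVG
// nodes and concatenates the halves.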
// Helper function that extends a non-512-bit vector op to 512-bits on non-VLX
// targets.
6620 static SDValue getAVX512Node(unsigned Opcode, const SDLoc &DL, MVT VT,
6621 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
6622 const X86Subtarget &Subtarget) {
6623 assert(Subtarget.hasAVX512() && "AVX512 target expected");
6624 MVT SVT = VT.getScalarType();
  // If we have a 32/64-bit splatted constant, splat it to DstTy to
6627 // encourage a foldable broadcast'd operand.
6628 auto MakeBroadcastOp = [&](SDValue Op, MVT OpVT, MVT DstVT) {
6629 unsigned OpEltSizeInBits = OpVT.getScalarSizeInBits();
6630 // AVX512 broadcasts 32/64-bit operands.
6631 // TODO: Support float once getAVX512Node is used by fp-ops.
    if (!OpVT.isInteger() || OpEltSizeInBits < 32 ||
        !DAG.getTargetLoweringInfo().isTypeLegal(SVT))
      return SDValue();
    // If we're not widening, don't bother if we're not bitcasting.
    if (OpVT == DstVT && Op.getOpcode() != ISD::BITCAST)
      return SDValue();
    if (auto *BV = dyn_cast<BuildVectorSDNode>(peekThroughBitcasts(Op))) {
      APInt SplatValue, SplatUndef;
      unsigned SplatBitSize;
      bool HasAnyUndefs;
      if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                              HasAnyUndefs, OpEltSizeInBits) &&
          !HasAnyUndefs && SplatValue.getBitWidth() == OpEltSizeInBits)
        return DAG.getConstant(SplatValue, DL, DstVT);
    }
    return SDValue();
  };
  bool Widen = !(Subtarget.hasVLX() || VT.is512BitVector());

  MVT DstVT = VT;
  if (Widen)
    DstVT = MVT::getVectorVT(SVT, 512 / SVT.getSizeInBits());
6656 // Canonicalize src operands.
6657 SmallVector<SDValue> SrcOps(Ops.begin(), Ops.end());
  for (SDValue &Op : SrcOps) {
    MVT OpVT = Op.getSimpleValueType();
    // Just pass through scalar operands.
    if (!OpVT.isVector())
      continue;
    assert(OpVT == VT && "Vector type mismatch");

    if (SDValue BroadcastOp = MakeBroadcastOp(Op, OpVT, DstVT)) {
      Op = BroadcastOp;
      continue;
    }

    // Just widen the subvector by inserting into an undef wide vector.
    if (Widen)
      Op = widenSubVector(Op, false, Subtarget, DAG, DL, 512);
  }

  SDValue Res = DAG.getNode(Opcode, DL, DstVT, SrcOps);

  // Perform the 512-bit op then extract the bottom subvector.
  if (Widen)
    Res = extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
  return Res;
}
6683 /// Insert i1-subvector to i1-vector.
6684 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
6685 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
6689 SDValue SubVec = Op.getOperand(1);
6690 SDValue Idx = Op.getOperand(2);
6691 unsigned IdxVal = Op.getConstantOperandVal(2);
6693 // Inserting undef is a nop. We can just return the original vector.
  if (SubVec.isUndef())
    return Vec;

  if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
    return Op;
6700 MVT OpVT = Op.getSimpleValueType();
6701 unsigned NumElems = OpVT.getVectorNumElements();
6702 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
6704 // Extend to natively supported kshift.
6705 MVT WideOpVT = OpVT;
6706 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
6707 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
  // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
  // if necessary.
6711 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
6712 // May need to promote to a legal type.
    Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                     DAG.getConstant(0, dl, WideOpVT),
                     SubVec, ZeroIdx);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }
6719 MVT SubVecVT = SubVec.getSimpleValueType();
6720 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
6721 assert(IdxVal + SubVecNumElems <= NumElems &&
6722 IdxVal % SubVecVT.getSizeInBits() == 0 &&
6723 "Unexpected index value in INSERT_SUBVECTOR");
6725 SDValue Undef = DAG.getUNDEF(WideOpVT);
  if (IdxVal == 0) {
    // Zero lower bits of the Vec
    SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
                      ZeroIdx);
    Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
    Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
    // Merge them together, SubVec should be zero extended.
    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                         DAG.getConstant(0, dl, WideOpVT),
                         SubVec, ZeroIdx);
    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }
6742 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6743 Undef, SubVec, ZeroIdx);
6745 if (Vec.isUndef()) {
6746 assert(IdxVal != 0 && "Unexpected index");
6747 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                         DAG.getTargetConstant(IdxVal, dl, MVT::i8));
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
  }
6752 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
6753 assert(IdxVal != 0 && "Unexpected index");
6754 // If upper elements of Vec are known undef, then just shift into place.
6755 if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
6756 [](SDValue V) { return V.isUndef(); })) {
      SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                           DAG.getTargetConstant(IdxVal, dl, MVT::i8));
    } else {
      NumElems = WideOpVT.getVectorNumElements();
      unsigned ShiftLeft = NumElems - SubVecNumElems;
      unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
      SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                           DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
      if (ShiftRight != 0)
        SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
                             DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
    }
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
  }
6772 // Simple case when we put subvector in the upper part
6773 if (IdxVal + SubVecNumElems == NumElems) {
6774 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6775 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6776 if (SubVecNumElems * 2 == NumElems) {
6777 // Special case, use legal zero extending insert_subvector. This allows
6778 // isel to optimize when bits are known zero.
      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                        DAG.getConstant(0, dl, WideOpVT),
                        Vec, ZeroIdx);
    } else {
      // Otherwise use explicit shifts to zero the bits.
      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                        Undef, Vec, ZeroIdx);
      NumElems = WideOpVT.getVectorNumElements();
      SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
      Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
      Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
    }
    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }
6796 // Inserting into the middle is more complicated.
6798 NumElems = WideOpVT.getVectorNumElements();
6800 // Widen the vector if needed.
6801 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
6803 unsigned ShiftLeft = NumElems - SubVecNumElems;
6804 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
  // Do an optimization for the most frequently used types.
6807 if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
6808 APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
6809 Mask0.flipAllBits();
6810 SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
6811 SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
6812 Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
6813 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6814 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6815 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6816 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);

    // Reduce to original width if needed.
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }
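  // For example (illustrative): inserting a v2i1 subvector at index 2 of a
  // v8i1 vector builds the mask 0b11110011 to clear bits [2,4) of Vec, shifts
  // SubVec left by 6 then right by 4 so it lands in bits [2,4), and ORs the
  // two results together.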
6823 // Clear the upper bits of the subvector and move it to its insert position.
6824 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6825 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6826 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6827 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6829 // Isolate the bits below the insertion point.
6830 unsigned LowShift = NumElems - IdxVal;
6831 SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
6832 DAG.getTargetConstant(LowShift, dl, MVT::i8));
6833 Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
6834 DAG.getTargetConstant(LowShift, dl, MVT::i8));
6836 // Isolate the bits after the last inserted bit.
6837 unsigned HighShift = IdxVal + SubVecNumElems;
6838 SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
6839 DAG.getTargetConstant(HighShift, dl, MVT::i8));
6840 High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
6841 DAG.getTargetConstant(HighShift, dl, MVT::i8));
6843 // Now OR all 3 pieces together.
6844 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
6845 SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
6847 // Reduce to original width if needed.
6848 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6851 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
                                const SDLoc &dl) {
  assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
6854 EVT SubVT = V1.getValueType();
6855 EVT SubSVT = SubVT.getScalarType();
6856 unsigned SubNumElts = SubVT.getVectorNumElements();
6857 unsigned SubVectorWidth = SubVT.getSizeInBits();
6858 EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
6859 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
6860 return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
6863 /// Returns a vector of specified type with all bits set.
6864 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
6865 /// Then bitcast to their original type, ensuring they get CSE'd.
6866 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6867 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6868 "Expected a 128/256/512-bit vector type");
6870 APInt Ones = APInt::getAllOnes(32);
6871 unsigned NumElts = VT.getSizeInBits() / 32;
6872 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
6873 return DAG.getBitcast(VT, Vec);
6876 // Convert *_EXTEND_VECTOR_INREG to *_EXTEND opcode.
6877 static unsigned getOpcode_EXTEND(unsigned Opcode) {
  switch (Opcode) {
  case ISD::ANY_EXTEND:
6880 case ISD::ANY_EXTEND_VECTOR_INREG:
6881 return ISD::ANY_EXTEND;
6882 case ISD::ZERO_EXTEND:
6883 case ISD::ZERO_EXTEND_VECTOR_INREG:
6884 return ISD::ZERO_EXTEND;
6885 case ISD::SIGN_EXTEND:
6886 case ISD::SIGN_EXTEND_VECTOR_INREG:
6887 return ISD::SIGN_EXTEND;
  }
  llvm_unreachable("Unknown opcode");
}
6892 // Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
6893 static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
  switch (Opcode) {
  case ISD::ANY_EXTEND:
6896 case ISD::ANY_EXTEND_VECTOR_INREG:
6897 return ISD::ANY_EXTEND_VECTOR_INREG;
6898 case ISD::ZERO_EXTEND:
6899 case ISD::ZERO_EXTEND_VECTOR_INREG:
6900 return ISD::ZERO_EXTEND_VECTOR_INREG;
6901 case ISD::SIGN_EXTEND:
6902 case ISD::SIGN_EXTEND_VECTOR_INREG:
6903 return ISD::SIGN_EXTEND_VECTOR_INREG;
  }
  llvm_unreachable("Unknown opcode");
}
6908 static SDValue getEXTEND_VECTOR_INREG(unsigned Opcode, const SDLoc &DL, EVT VT,
6909 SDValue In, SelectionDAG &DAG) {
6910 EVT InVT = In.getValueType();
6911 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
6912 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
6913 ISD::ZERO_EXTEND == Opcode) &&
6914 "Unknown extension opcode");
6916 // For 256-bit vectors, we only need the lower (128-bit) input half.
6917 // For 512-bit vectors, we only need the lower input half or quarter.
6918 if (InVT.getSizeInBits() > 128) {
6919 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
6920 "Expected VTs to be the same size!");
6921 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
6922 In = extractSubVector(In, 0, DAG, DL,
6923 std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
6924 InVT = In.getValueType();
6927 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
6928 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
  return DAG.getNode(Opcode, DL, VT, In);
}
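// For example (illustrative): extending v32i8 -> v8i32 (both 256-bit) only
// needs the low 8 source bytes, so the low 128-bit half is extracted and a
// *_EXTEND_VECTOR_INREG node is emitted on it.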
6933 // Match (xor X, -1) -> X.
6934 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
6935 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
6936 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
6937 V = peekThroughBitcasts(V);
6938 if (V.getOpcode() == ISD::XOR &&
6939 ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
6940 return V.getOperand(0);
6941 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6942 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
6943 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
6944 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
6945 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
                         Not, V.getOperand(1));
    }
  }
  SmallVector<SDValue, 2> CatOps;
6950 if (collectConcatOps(V.getNode(), CatOps, DAG)) {
6951 for (SDValue &CatOp : CatOps) {
6952 SDValue NotCat = IsNOT(CatOp, DAG);
6953 if (!NotCat) return SDValue();
6954 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
  }
  return SDValue();
}
6961 void llvm::createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask,
6962 bool Lo, bool Unary) {
6963 assert(VT.getScalarType().isSimple() && (VT.getSizeInBits() % 128) == 0 &&
6964 "Illegal vector type to unpack");
6965 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6966 int NumElts = VT.getVectorNumElements();
6967 int NumEltsInLane = 128 / VT.getScalarSizeInBits();
6968 for (int i = 0; i < NumElts; ++i) {
6969 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
6970 int Pos = (i % NumEltsInLane) / 2 + LaneStart;
6971 Pos += (Unary ? 0 : NumElts * (i % 2));
6972 Pos += (Lo ? 0 : NumEltsInLane / 2);
    Mask.push_back(Pos);
  }
}
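// For example (illustrative): for v8i16 with Lo = true and Unary = false this
// produces <0, 8, 1, 9, 2, 10, 3, 11>, the punpcklwd interleave pattern.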
6977 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
6978 /// imposed by AVX and specific to the unary pattern. Example:
6979 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
6980 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
6981 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
                                   bool Lo) {
  assert(Mask.empty() && "Expected an empty shuffle mask vector");
6984 int NumElts = VT.getVectorNumElements();
6985 for (int i = 0; i < NumElts; ++i) {
    int Pos = i / 2;
    Pos += (Lo ? 0 : NumElts / 2);
    Mask.push_back(Pos);
  }
}
6992 // Attempt to constant fold, else just create a VECTOR_SHUFFLE.
6993 static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
6994 SDValue V1, SDValue V2, ArrayRef<int> Mask) {
6995 if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
6996 (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
6997 SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
6998 for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
      int M = Mask[I];
      if (M < 0)
        continue;
      SDValue V = (M < NumElts) ? V1 : V2;
      if (V.isUndef())
        continue;
      Ops[I] = V.getOperand(M % NumElts);
    }
    return DAG.getBuildVector(VT, dl, Ops);
  }
7010 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
7013 /// Returns a vector_shuffle node for an unpackl operation.
7014 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
7015 SDValue V1, SDValue V2) {
7016 SmallVector<int, 8> Mask;
7017 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
7018 return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
7021 /// Returns a vector_shuffle node for an unpackh operation.
7022 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
7023 SDValue V1, SDValue V2) {
7024 SmallVector<int, 8> Mask;
7025 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
7026 return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
7029 /// Returns a node that packs the LHS + RHS nodes together at half width.
7030 /// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
7031 /// TODO: Add subvector splitting if/when we have a need for it.
7032 static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
7033 const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
7034 bool PackHiHalf = false) {
7035 MVT OpVT = LHS.getSimpleValueType();
7036 unsigned EltSizeInBits = VT.getScalarSizeInBits();
7037 bool UsePackUS = Subtarget.hasSSE41() || EltSizeInBits == 8;
7038 assert(OpVT == RHS.getSimpleValueType() &&
7039 VT.getSizeInBits() == OpVT.getSizeInBits() &&
7040 (EltSizeInBits * 2) == OpVT.getScalarSizeInBits() &&
7041 "Unexpected PACK operand types");
7042 assert((EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) &&
7043 "Unexpected PACK result type");
7045 // Rely on vector shuffles for vXi64 -> vXi32 packing.
7046 if (EltSizeInBits == 32) {
7047 SmallVector<int> PackMask;
7048 int Offset = PackHiHalf ? 1 : 0;
7049 int NumElts = VT.getVectorNumElements();
7050 for (int I = 0; I != NumElts; I += 4) {
7051 PackMask.push_back(I + Offset);
7052 PackMask.push_back(I + Offset + 2);
7053 PackMask.push_back(I + Offset + NumElts);
7054 PackMask.push_back(I + Offset + NumElts + 2);
7056 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, LHS),
7057 DAG.getBitcast(VT, RHS), PackMask);
7060 // See if we already have sufficient leading bits for PACKSS/PACKUS.
  if (!PackHiHalf) {
    if (UsePackUS &&
        DAG.computeKnownBits(LHS).countMaxActiveBits() <= EltSizeInBits &&
        DAG.computeKnownBits(RHS).countMaxActiveBits() <= EltSizeInBits)
      return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);

    if (DAG.ComputeMaxSignificantBits(LHS) <= EltSizeInBits &&
        DAG.ComputeMaxSignificantBits(RHS) <= EltSizeInBits)
      return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
  }
7072 // Fallback to sign/zero extending the requested half and pack.
7073 SDValue Amt = DAG.getTargetConstant(EltSizeInBits, dl, MVT::i8);
  if (UsePackUS) {
    if (PackHiHalf) {
      LHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, LHS, Amt);
      RHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, RHS, Amt);
    } else {
      SDValue Mask = DAG.getConstant((1ULL << EltSizeInBits) - 1, dl, OpVT);
      LHS = DAG.getNode(ISD::AND, dl, OpVT, LHS, Mask);
      RHS = DAG.getNode(ISD::AND, dl, OpVT, RHS, Mask);
    }
    return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
  }
  if (!PackHiHalf) {
    LHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, LHS, Amt);
    RHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, RHS, Amt);
  }
  LHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, LHS, Amt);
  RHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, RHS, Amt);
  return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
}
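// For example (illustrative): getPack of two v8i16 values into a v16i8
// result uses PACKUSWB directly when the known bits already fit in 8 bits;
// otherwise the requested halves are sign/zero-extended first, as above.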
/// Return a vector_shuffle of the specified vector and a zero or undef vector.
7096 /// This produces a shuffle where the low element of V2 is swizzled into the
7097 /// zero/undef vector, landing at element Idx.
7098 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx, bool IsZero,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  MVT VT = V2.getSimpleValueType();
  SDValue V1 = IsZero ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2))
                      : DAG.getUNDEF(VT);
7106 int NumElems = VT.getVectorNumElements();
7107 SmallVector<int, 16> MaskVec(NumElems);
7108 for (int i = 0; i != NumElems; ++i)
7109 // If this is the insertion idx, put the low elt of V2 here.
7110 MaskVec[i] = (i == Idx) ? NumElems : i;
7111 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
7114 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
7115 if (Ptr.getOpcode() == X86ISD::Wrapper ||
7116 Ptr.getOpcode() == X86ISD::WrapperRIP)
7117 Ptr = Ptr.getOperand(0);
  auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
  if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
    return nullptr;
7123 return CNode->getConstVal();
7126 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
  if (!Load || !ISD::isNormalLoad(Load))
    return nullptr;
7129 return getTargetConstantFromBasePtr(Load->getBasePtr());
7132 static const Constant *getTargetConstantFromNode(SDValue Op) {
7133 Op = peekThroughBitcasts(Op);
7134 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
const Constant *
X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
7139 assert(LD && "Unexpected null LoadSDNode");
7140 return getTargetConstantFromNode(LD);
7143 // Extract raw constant bits from constant pools.
7144 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
                                          APInt &UndefElts,
                                          SmallVectorImpl<APInt> &EltBits,
7147 bool AllowWholeUndefs = true,
7148 bool AllowPartialUndefs = true) {
7149 assert(EltBits.empty() && "Expected an empty EltBits vector");
7151 Op = peekThroughBitcasts(Op);
7153 EVT VT = Op.getValueType();
7154 unsigned SizeInBits = VT.getSizeInBits();
7155 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
7156 unsigned NumElts = SizeInBits / EltSizeInBits;
7158 // Bitcast a source array of element bits to the target size.
7159 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
7160 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
7161 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
7162 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
7163 "Constant bit sizes don't match");
7165 // Don't split if we don't allow undef bits.
7166 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
    if (UndefSrcElts.getBoolValue() && !AllowUndefs)
      return false;
7170 // If we're already the right size, don't bother bitcasting.
7171 if (NumSrcElts == NumElts) {
7172 UndefElts = UndefSrcElts;
      EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
      return true;
    }
7177 // Extract all the undef/constant element data and pack into single bitsets.
7178 APInt UndefBits(SizeInBits, 0);
7179 APInt MaskBits(SizeInBits, 0);
7181 for (unsigned i = 0; i != NumSrcElts; ++i) {
7182 unsigned BitOffset = i * SrcEltSizeInBits;
7183 if (UndefSrcElts[i])
7184 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
      MaskBits.insertBits(SrcEltBits[i], BitOffset);
    }
7188 // Split the undef/constant single bitset data into the target elements.
7189 UndefElts = APInt(NumElts, 0);
7190 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
7192 for (unsigned i = 0; i != NumElts; ++i) {
7193 unsigned BitOffset = i * EltSizeInBits;
7194 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
7196 // Only treat an element as UNDEF if all bits are UNDEF.
7197 if (UndefEltBits.isAllOnes()) {
        if (!AllowWholeUndefs)
          return false;
        UndefElts.setBit(i);
        continue;
      }
      // If only some bits are UNDEF then treat them as zero (or bail if not
      // supported).
      if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
        return false;
      EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
    }
    return true;
  };
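  // For example (illustrative): querying a v4i32 constant with
  // EltSizeInBits = 64 repacks the four 32-bit elements into two 64-bit
  // values; a repacked element is only marked undef if all of its source
  // bits were undef.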
7214 // Collect constant bits and insert into mask/undef bit masks.
7215 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
7216 unsigned UndefBitIndex) {
    if (!Cst)
      return false;
    if (isa<UndefValue>(Cst)) {
      Undefs.setBit(UndefBitIndex);
      return true;
    }
7223 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
      Mask = CInt->getValue();
      return true;
    }
7227 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
      Mask = CFP->getValueAPF().bitcastToAPInt();
      return true;
    }
    return false;
  };
  // Handle UNDEFs.
  if (Op.isUndef()) {
    APInt UndefSrcElts = APInt::getAllOnes(NumElts);
    SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
7241 // Extract scalar constant bits.
7242 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
7243 APInt UndefSrcElts = APInt::getZero(1);
7244 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
7245 return CastBitData(UndefSrcElts, SrcEltBits);
7247 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
7248 APInt UndefSrcElts = APInt::getZero(1);
7249 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
7250 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
7254 // Extract constant bits from build vector.
7255 if (auto *BV = dyn_cast<BuildVectorSDNode>(Op)) {
    BitVector Undefs;
    SmallVector<APInt> SrcEltBits;
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    if (BV->getConstantRawBits(true, SrcEltSizeInBits, SrcEltBits, Undefs)) {
      APInt UndefSrcElts = APInt::getZero(SrcEltBits.size());
      for (unsigned I = 0, E = SrcEltBits.size(); I != E; ++I)
        if (Undefs[I])
          UndefSrcElts.setBit(I);
      return CastBitData(UndefSrcElts, SrcEltBits);
    }
  }
7268 // Extract constant bits from constant pool vector.
7269 if (auto *Cst = getTargetConstantFromNode(Op)) {
7270 Type *CstTy = Cst->getType();
7271 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
    if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
      return false;
7275 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
7276 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7278 APInt UndefSrcElts(NumSrcElts, 0);
7279 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0; i != NumSrcElts; ++i)
      if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
                               UndefSrcElts, i))
        return false;

    return CastBitData(UndefSrcElts, SrcEltBits);
  }
7288 // Extract constant bits from a broadcasted constant pool scalar.
7289 if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
7290 EltSizeInBits <= VT.getScalarSizeInBits()) {
7291 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
    if (MemIntr->getMemoryVT().getScalarSizeInBits() !=
        VT.getScalarSizeInBits())
      return false;
7295 SDValue Ptr = MemIntr->getBasePtr();
7296 if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
7297 unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
7298 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7300 APInt UndefSrcElts(NumSrcElts, 0);
7301 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
7302 if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
7303 if (UndefSrcElts[0])
7304 UndefSrcElts.setBits(0, NumSrcElts);
7305 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
        return CastBitData(UndefSrcElts, SrcEltBits);
      }
    }
  }
7311 // Extract constant bits from a subvector broadcast.
7312 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
7313 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
7314 SDValue Ptr = MemIntr->getBasePtr();
7315 // The source constant may be larger than the subvector broadcast,
7316 // ensure we extract the correct subvector constants.
7317 if (const Constant *Cst = getTargetConstantFromBasePtr(Ptr)) {
7318 Type *CstTy = Cst->getType();
7319 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
7320 unsigned SubVecSizeInBits = MemIntr->getMemoryVT().getStoreSizeInBits();
7321 if (!CstTy->isVectorTy() || (CstSizeInBits % SubVecSizeInBits) != 0 ||
          (SizeInBits % SubVecSizeInBits) != 0)
        return false;
7324 unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
7325 unsigned NumSubElts = SubVecSizeInBits / CstEltSizeInBits;
7326 unsigned NumSubVecs = SizeInBits / SubVecSizeInBits;
7327 APInt UndefSubElts(NumSubElts, 0);
7328 SmallVector<APInt, 64> SubEltBits(NumSubElts * NumSubVecs,
7329 APInt(CstEltSizeInBits, 0));
7330 for (unsigned i = 0; i != NumSubElts; ++i) {
        if (!CollectConstantBits(Cst->getAggregateElement(i), SubEltBits[i],
                                 UndefSubElts, i))
          return false;
        for (unsigned j = 1; j != NumSubVecs; ++j)
          SubEltBits[i + (j * NumSubElts)] = SubEltBits[i];
      }
      UndefSubElts = APInt::getSplat(NumSubVecs * UndefSubElts.getBitWidth(),
                                     UndefSubElts);
      return CastBitData(UndefSubElts, SubEltBits);
    }
  }
7343 // Extract a rematerialized scalar constant insertion.
7344 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
7345 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
7346 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
7347 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7348 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7350 APInt UndefSrcElts(NumSrcElts, 0);
7351 SmallVector<APInt, 64> SrcEltBits;
7352 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
7353 SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
7354 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
7355 return CastBitData(UndefSrcElts, SrcEltBits);
7358 // Insert constant bits from base and subvector sources.
7359 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
7360 // If we bitcast to larger elements we might lose track of undefs - don't
7361 // allow any to be safe.
7362 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7363 bool AllowUndefs = EltSizeInBits >= SrcEltSizeInBits;
7365 APInt UndefSrcElts, UndefSubElts;
7366 SmallVector<APInt, 32> EltSrcBits, EltSubBits;
7367 if (getTargetConstantBitsFromNode(Op.getOperand(1), SrcEltSizeInBits,
7368 UndefSubElts, EltSubBits,
7369 AllowWholeUndefs && AllowUndefs,
7370 AllowPartialUndefs && AllowUndefs) &&
7371 getTargetConstantBitsFromNode(Op.getOperand(0), SrcEltSizeInBits,
7372 UndefSrcElts, EltSrcBits,
7373 AllowWholeUndefs && AllowUndefs,
7374 AllowPartialUndefs && AllowUndefs)) {
7375 unsigned BaseIdx = Op.getConstantOperandVal(2);
7376 UndefSrcElts.insertBits(UndefSubElts, BaseIdx);
7377 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
7378 EltSrcBits[BaseIdx + i] = EltSubBits[i];
7379 return CastBitData(UndefSrcElts, EltSrcBits);
7383 // Extract constant bits from a subvector's source.
7384 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
7385 // TODO - support extract_subvector through bitcasts.
7386 if (EltSizeInBits != VT.getScalarSizeInBits())
7387 return false;
7389 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
7390 UndefElts, EltBits, AllowWholeUndefs,
7391 AllowPartialUndefs)) {
7392 EVT SrcVT = Op.getOperand(0).getValueType();
7393 unsigned NumSrcElts = SrcVT.getVectorNumElements();
7394 unsigned NumSubElts = VT.getVectorNumElements();
7395 unsigned BaseIdx = Op.getConstantOperandVal(1);
7396 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
7397 if ((BaseIdx + NumSubElts) != NumSrcElts)
7398 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
7399 if (BaseIdx != 0)
7400 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
7401 return true;
7405 // Extract constant bits from shuffle node sources.
7406 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
7407 // TODO - support shuffle through bitcasts.
7408 if (EltSizeInBits != VT.getScalarSizeInBits())
7409 return false;
7411 ArrayRef<int> Mask = SVN->getMask();
7412 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
7413 llvm::any_of(Mask, [](int M) { return M < 0; }))
7414 return false;
7416 APInt UndefElts0, UndefElts1;
7417 SmallVector<APInt, 32> EltBits0, EltBits1;
7418 if (isAnyInRange(Mask, 0, NumElts) &&
7419 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
7420 UndefElts0, EltBits0, AllowWholeUndefs,
7421 AllowPartialUndefs))
7422 return false;
7423 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
7424 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
7425 UndefElts1, EltBits1, AllowWholeUndefs,
7426 AllowPartialUndefs))
7427 return false;
7429 UndefElts = APInt::getZero(NumElts);
7430 for (int i = 0; i != (int)NumElts; ++i) {
7431 int M = Mask[i];
7432 if (M < 0) {
7433 UndefElts.setBit(i);
7434 EltBits.push_back(APInt::getZero(EltSizeInBits));
7435 } else if (M < (int)NumElts) {
7436 if (UndefElts0[M])
7437 UndefElts.setBit(i);
7438 EltBits.push_back(EltBits0[M]);
7439 } else {
7440 if (UndefElts1[M - NumElts])
7441 UndefElts.setBit(i);
7442 EltBits.push_back(EltBits1[M - NumElts]);
7443 }
7444 }
7445 return true;
7453 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
7454 APInt UndefElts;
7455 SmallVector<APInt, 16> EltBits;
7456 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
7457 UndefElts, EltBits, true,
7458 AllowPartialUndefs)) {
7459 int SplatIndex = -1;
7460 for (int i = 0, e = EltBits.size(); i != e; ++i) {
7461 if (UndefElts[i])
7462 continue;
7463 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
7464 SplatIndex = -1;
7465 break;
7466 }
7467 SplatIndex = i;
7468 }
7469 if (0 <= SplatIndex) {
7470 SplatVal = EltBits[SplatIndex];
7471 return true;
7472 }
7480 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
7481 unsigned MaskEltSizeInBits,
7482 SmallVectorImpl<uint64_t> &RawMask,
7483 APInt &UndefElts) {
7484 // Extract the raw target constant bits.
7485 SmallVector<APInt, 64> EltBits;
7486 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
7487 EltBits, /* AllowWholeUndefs */ true,
7488 /* AllowPartialUndefs */ false))
7489 return false;
7491 // Insert the extracted elements into the mask.
7492 for (const APInt &Elt : EltBits)
7493 RawMask.push_back(Elt.getZExtValue());
7495 return true;
7498 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
7499 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
7500 /// Note: This ignores saturation, so inputs must be checked first.
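/// e.g. (illustrative) a single-stage binary pack to v16i8 yields the mask
/// {0,2,4,...,14,16,18,...,30} - the low byte of each word of both inputs.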
7501 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
7502 bool Unary, unsigned NumStages = 1) {
7503 assert(Mask.empty() && "Expected an empty shuffle mask vector");
7504 unsigned NumElts = VT.getVectorNumElements();
7505 unsigned NumLanes = VT.getSizeInBits() / 128;
7506 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
7507 unsigned Offset = Unary ? 0 : NumElts;
7508 unsigned Repetitions = 1u << (NumStages - 1);
7509 unsigned Increment = 1u << NumStages;
7510 assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
7512 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
7513 for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
7514 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
7515 Mask.push_back(Elt + (Lane * NumEltsPerLane));
7516 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
7517 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
7522 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
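// e.g. (illustrative) for a v16i8 PACK, result elts 0-7 map to LHS elts 0-7
// and result elts 8-15 map to RHS elts 0-7, so demanded result elt 11 demands
// RHS elt 3; wider types repeat this mapping per 128-bit lane.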
7523 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
7524 APInt &DemandedLHS, APInt &DemandedRHS) {
7525 int NumLanes = VT.getSizeInBits() / 128;
7526 int NumElts = DemandedElts.getBitWidth();
7527 int NumInnerElts = NumElts / 2;
7528 int NumEltsPerLane = NumElts / NumLanes;
7529 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
7531 DemandedLHS = APInt::getZero(NumInnerElts);
7532 DemandedRHS = APInt::getZero(NumInnerElts);
7534 // Map DemandedElts to the packed operands.
7535 for (int Lane = 0; Lane != NumLanes; ++Lane) {
7536 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
7537 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
7538 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
7539 if (DemandedElts[OuterIdx])
7540 DemandedLHS.setBit(InnerIdx);
7541 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
7542 DemandedRHS.setBit(InnerIdx);
7547 // Split the demanded elts of a HADD/HSUB node between its operands.
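// e.g. (illustrative) for v8i32 HADD/HSUB, each result elt consumes a pair:
// demanded result elt 1 demands LHS elts {2,3} and demanded result elt 2
// demands RHS elts {0,1}, repeated per 128-bit lane.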
7548 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
7549 APInt &DemandedLHS, APInt &DemandedRHS) {
7550 int NumLanes = VT.getSizeInBits() / 128;
7551 int NumElts = DemandedElts.getBitWidth();
7552 int NumEltsPerLane = NumElts / NumLanes;
7553 int HalfEltsPerLane = NumEltsPerLane / 2;
7555 DemandedLHS = APInt::getZero(NumElts);
7556 DemandedRHS = APInt::getZero(NumElts);
7558 // Map DemandedElts to the horizontal operands.
7559 for (int Idx = 0; Idx != NumElts; ++Idx) {
7560 if (!DemandedElts[Idx])
7561 continue;
7562 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
7563 int LocalIdx = Idx % NumEltsPerLane;
7564 if (LocalIdx < HalfEltsPerLane) {
7565 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
7566 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
7568 LocalIdx -= HalfEltsPerLane;
7569 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
7570 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
7575 /// Calculates the shuffle mask corresponding to the target-specific opcode.
7576 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
7577 /// operands in \p Ops, and returns true.
7578 /// Sets \p IsUnary to true if only one source is used. Note that this will set
7579 /// IsUnary for shuffles which use a single input multiple times, and in those
7580 /// cases it will adjust the mask to only have indices within that single input.
7581 /// It is an error to call this with non-empty Mask/Ops vectors.
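/// e.g. (illustrative) a v4i32 BLENDI with immediate 0b0101 decodes to
/// Mask = {4,1,6,3}, i.e. result elts 0 and 2 come from the second input.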
7582 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
7583 SmallVectorImpl<SDValue> &Ops,
7584 SmallVectorImpl<int> &Mask, bool &IsUnary) {
7585 unsigned NumElems = VT.getVectorNumElements();
7586 unsigned MaskEltSize = VT.getScalarSizeInBits();
7587 SmallVector<uint64_t, 32> RawMask;
7588 APInt RawUndefs;
7589 uint64_t ImmN;
7591 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
7592 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
7594 IsUnary = false;
7595 bool IsFakeUnary = false;
7596 switch (N->getOpcode()) {
7597 case X86ISD::BLENDI:
7598 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7599 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7600 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7601 DecodeBLENDMask(NumElems, ImmN, Mask);
7602 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7603 break;
7604 case X86ISD::SHUFP:
7605 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7606 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7607 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7608 DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
7609 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7611 case X86ISD::INSERTPS:
7612 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7613 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7614 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7615 DecodeINSERTPSMask(ImmN, Mask);
7616 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7618 case X86ISD::EXTRQI:
7619 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7620 if (isa<ConstantSDNode>(N->getOperand(1)) &&
7621 isa<ConstantSDNode>(N->getOperand(2))) {
7622 int BitLen = N->getConstantOperandVal(1);
7623 int BitIdx = N->getConstantOperandVal(2);
7624 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
7625 IsUnary = true;
7626 }
7627 break;
7628 case X86ISD::INSERTQI:
7629 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7630 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7631 if (isa<ConstantSDNode>(N->getOperand(2)) &&
7632 isa<ConstantSDNode>(N->getOperand(3))) {
7633 int BitLen = N->getConstantOperandVal(2);
7634 int BitIdx = N->getConstantOperandVal(3);
7635 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
7636 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7639 case X86ISD::UNPCKH:
7640 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7641 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7642 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
7643 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7645 case X86ISD::UNPCKL:
7646 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7647 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7648 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
7649 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7651 case X86ISD::MOVHLPS:
7652 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7653 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7654 DecodeMOVHLPSMask(NumElems, Mask);
7655 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7657 case X86ISD::MOVLHPS:
7658 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7659 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7660 DecodeMOVLHPSMask(NumElems, Mask);
7661 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7663 case X86ISD::VALIGN:
7664 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
7665 "Only 32-bit and 64-bit elements are supported!");
7666 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7667 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7668 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7669 DecodeVALIGNMask(NumElems, ImmN, Mask);
7670 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7671 Ops.push_back(N->getOperand(1));
7672 Ops.push_back(N->getOperand(0));
7674 case X86ISD::PALIGNR:
7675 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7676 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7677 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7678 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7679 DecodePALIGNRMask(NumElems, ImmN, Mask);
7680 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7681 Ops.push_back(N->getOperand(1));
7682 Ops.push_back(N->getOperand(0));
7684 case X86ISD::VSHLDQ:
7685 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7686 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7687 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7688 DecodePSLLDQMask(NumElems, ImmN, Mask);
7689 IsUnary = true;
7690 break;
7691 case X86ISD::VSRLDQ:
7692 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7693 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7694 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7695 DecodePSRLDQMask(NumElems, ImmN, Mask);
7696 IsUnary = true;
7697 break;
7698 case X86ISD::PSHUFD:
7699 case X86ISD::VPERMILPI:
7700 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7701 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7702 DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
7703 IsUnary = true;
7704 break;
7705 case X86ISD::PSHUFHW:
7706 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7707 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7708 DecodePSHUFHWMask(NumElems, ImmN, Mask);
7709 IsUnary = true;
7710 break;
7711 case X86ISD::PSHUFLW:
7712 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7713 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7714 DecodePSHUFLWMask(NumElems, ImmN, Mask);
7715 IsUnary = true;
7716 break;
7717 case X86ISD::VZEXT_MOVL:
7718 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7719 DecodeZeroMoveLowMask(NumElems, Mask);
7720 IsUnary = true;
7721 break;
7722 case X86ISD::VBROADCAST:
7723 // We only decode broadcasts of same-sized vectors, peeking through to
7724 // extracted subvectors is likely to cause hasOneUse issues with
7725 // SimplifyDemandedBits etc.
7726 if (N->getOperand(0).getValueType() == VT) {
7727 DecodeVectorBroadcast(NumElems, Mask);
7728 IsUnary = true;
7729 break;
7730 }
7731 return false;
7732 case X86ISD::VPERMILPV: {
7733 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7734 IsUnary = true;
7735 SDValue MaskNode = N->getOperand(1);
7736 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7737 RawUndefs)) {
7738 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
7739 break;
7740 }
7741 return false;
7743 case X86ISD::PSHUFB: {
7744 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7745 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7746 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7747 IsUnary = true;
7748 SDValue MaskNode = N->getOperand(1);
7749 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
7750 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
7751 break;
7752 }
7753 return false;
7754 }
7755 case X86ISD::VPERMI:
7756 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7757 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7758 DecodeVPERMMask(NumElems, ImmN, Mask);
7759 IsUnary = true;
7760 break;
7761 case X86ISD::MOVSD:
7762 case X86ISD::MOVSS:
7764 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7765 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7766 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
7768 case X86ISD::VPERM2X128:
7769 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7770 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7771 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7772 DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
7773 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7775 case X86ISD::SHUF128:
7776 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7777 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7778 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7779 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
7780 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7782 case X86ISD::MOVSLDUP:
7783 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7784 DecodeMOVSLDUPMask(NumElems, Mask);
7785 IsUnary = true;
7786 break;
7787 case X86ISD::MOVSHDUP:
7788 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7789 DecodeMOVSHDUPMask(NumElems, Mask);
7790 IsUnary = true;
7791 break;
7792 case X86ISD::MOVDDUP:
7793 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7794 DecodeMOVDDUPMask(NumElems, Mask);
7795 IsUnary = true;
7796 break;
7797 case X86ISD::VPERMIL2: {
7798 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7799 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7800 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7801 SDValue MaskNode = N->getOperand(2);
7802 SDValue CtrlNode = N->getOperand(3);
7803 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
7804 unsigned CtrlImm = CtrlOp->getZExtValue();
7805 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7806 RawUndefs)) {
7807 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
7808 Mask);
7809 break;
7810 }
7811 }
7812 return false;
7814 case X86ISD::VPPERM: {
7815 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7816 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7817 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7818 SDValue MaskNode = N->getOperand(2);
7819 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
7820 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
7821 break;
7822 }
7823 return false;
7824 }
7825 case X86ISD::VPERMV: {
7826 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7827 IsUnary = true;
7828 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
7829 Ops.push_back(N->getOperand(1));
7830 SDValue MaskNode = N->getOperand(0);
7831 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7832 RawUndefs)) {
7833 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
7834 break;
7835 }
7836 return false;
7838 case X86ISD::VPERMV3: {
7839 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7840 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
7841 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
7842 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
7843 Ops.push_back(N->getOperand(0));
7844 Ops.push_back(N->getOperand(2));
7845 SDValue MaskNode = N->getOperand(1);
7846 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7847 RawUndefs)) {
7848 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
7849 break;
7850 }
7851 return false;
7853 default: llvm_unreachable("unknown target shuffle node");
7856 // Empty mask indicates the decode failed.
7857 if (Mask.empty())
7858 return false;
7860 // Check if we're getting a shuffle mask with zero'd elements.
7861 if (!AllowSentinelZero && isAnyZero(Mask))
7862 return false;
7864 // If we have a fake unary shuffle, the shuffle mask is spread across two
7865 // inputs that are actually the same node. Re-map the mask to always point
7866 // into the first input.
7867 if (IsFakeUnary)
7868 for (int &M : Mask)
7869 if (M >= (int)Mask.size())
7870 M -= Mask.size();
7872 // If we didn't already add operands in the opcode-specific code, default to
7873 // adding 1 or 2 operands starting at 0.
7874 if (Ops.empty()) {
7875 Ops.push_back(N->getOperand(0));
7876 if (!IsUnary || IsFakeUnary)
7877 Ops.push_back(N->getOperand(1));
7878 }
7880 return true;
7883 // Wrapper for getTargetShuffleMask with IsUnary.
7884 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
7885 SmallVectorImpl<SDValue> &Ops,
7886 SmallVectorImpl<int> &Mask) {
7887 bool IsUnary;
7888 return getTargetShuffleMask(N, VT, AllowSentinelZero, Ops, Mask, IsUnary);
7891 /// Compute whether each element of a shuffle is zeroable.
7893 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7894 /// Either it is an undef element in the shuffle mask, the element of the input
7895 /// referenced is undef, or the element of the input referenced is known to be
7896 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7897 /// as many lanes with this technique as possible to simplify the remaining
7898 /// shuffle.
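/// e.g. (illustrative) with Mask = {0,1,4,5} and V2 an all-zeros build vector,
/// result elements 2 and 3 are known zero.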
7899 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
7900 SDValue V1, SDValue V2,
7901 APInt &KnownUndef, APInt &KnownZero) {
7902 int Size = Mask.size();
7903 KnownUndef = KnownZero = APInt::getZero(Size);
7905 V1 = peekThroughBitcasts(V1);
7906 V2 = peekThroughBitcasts(V2);
7908 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7909 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7911 int VectorSizeInBits = V1.getValueSizeInBits();
7912 int ScalarSizeInBits = VectorSizeInBits / Size;
7913 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
7915 for (int i = 0; i < Size; ++i) {
7916 int M = Mask[i];
7917 // Handle the easy cases.
7918 if (M < 0) {
7919 KnownUndef.setBit(i);
7920 continue;
7921 }
7922 if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7923 KnownZero.setBit(i);
7924 continue;
7925 }
7927 // Determine shuffle input and normalize the mask.
7928 SDValue V = M < Size ? V1 : V2;
7929 M %= Size;
7931 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
7932 if (V.getOpcode() != ISD::BUILD_VECTOR)
7933 continue;
7935 // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
7936 // the (larger) source element must be UNDEF/ZERO.
7937 if ((Size % V.getNumOperands()) == 0) {
7938 int Scale = Size / V->getNumOperands();
7939 SDValue Op = V.getOperand(M / Scale);
7940 if (Op.isUndef())
7941 KnownUndef.setBit(i);
7942 if (X86::isZeroNode(Op))
7943 KnownZero.setBit(i);
7944 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
7945 APInt Val = Cst->getAPIntValue();
7946 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
7947 if (Val == 0)
7948 KnownZero.setBit(i);
7949 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
7950 APInt Val = Cst->getValueAPF().bitcastToAPInt();
7951 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
7952 if (Val == 0)
7953 KnownZero.setBit(i);
7958 // If the BUILD_VECTOR has more elements, then all the (smaller) source
7959 // elements must be UNDEF or ZERO.
7960 if ((V.getNumOperands() % Size) == 0) {
7961 int Scale = V->getNumOperands() / Size;
7962 bool AllUndef = true;
7963 bool AllZero = true;
7964 for (int j = 0; j < Scale; ++j) {
7965 SDValue Op = V.getOperand((M * Scale) + j);
7966 AllUndef &= Op.isUndef();
7967 AllZero &= X86::isZeroNode(Op);
7968 }
7969 if (AllUndef)
7970 KnownUndef.setBit(i);
7971 else if (AllZero)
7972 KnownZero.setBit(i);
7978 /// Decode a target shuffle mask and inputs and see if any values are
7979 /// known to be undef or zero from their inputs.
7980 /// Returns true if the target shuffle mask was decoded.
7981 /// FIXME: Merge this with computeZeroableShuffleElements?
7982 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
7983 SmallVectorImpl<SDValue> &Ops,
7984 APInt &KnownUndef, APInt &KnownZero) {
7985 bool IsUnary;
7986 if (!isTargetShuffle(N.getOpcode()))
7987 return false;
7989 MVT VT = N.getSimpleValueType();
7990 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
7991 return false;
7993 int Size = Mask.size();
7994 SDValue V1 = Ops[0];
7995 SDValue V2 = IsUnary ? V1 : Ops[1];
7996 KnownUndef = KnownZero = APInt::getZero(Size);
7998 V1 = peekThroughBitcasts(V1);
7999 V2 = peekThroughBitcasts(V2);
8001 assert((VT.getSizeInBits() % Size) == 0 &&
8002 "Illegal split of shuffle value type");
8003 unsigned EltSizeInBits = VT.getSizeInBits() / Size;
8005 // Extract known constant input data.
8006 APInt UndefSrcElts[2];
8007 SmallVector<APInt, 32> SrcEltBits[2];
8008 bool IsSrcConstant[2] = {
8009 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
8010 SrcEltBits[0], true, false),
8011 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
8012 SrcEltBits[1], true, false)};
8014 for (int i = 0; i < Size; ++i) {
8015 int M = Mask[i];
8017 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
8018 if (M < 0) {
8019 assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
8020 if (SM_SentinelUndef == M)
8021 KnownUndef.setBit(i);
8022 if (SM_SentinelZero == M)
8023 KnownZero.setBit(i);
8024 continue;
8025 }
8027 // Determine shuffle input and normalize the mask.
8028 unsigned SrcIdx = M / Size;
8029 SDValue V = M < Size ? V1 : V2;
8030 M %= Size;
8032 // We are referencing an UNDEF input.
8033 if (V.isUndef()) {
8034 KnownUndef.setBit(i);
8035 continue;
8036 }
8038 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
8039 // TODO: We currently only set UNDEF for integer types - floats use the same
8040 // registers as vectors and many of the scalar folded loads rely on the
8041 // SCALAR_TO_VECTOR pattern.
8042 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
8043 (Size % V.getValueType().getVectorNumElements()) == 0) {
8044 int Scale = Size / V.getValueType().getVectorNumElements();
8045 int Idx = M / Scale;
8046 if (Idx != 0 && !VT.isFloatingPoint())
8047 KnownUndef.setBit(i);
8048 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
8049 KnownZero.setBit(i);
8050 continue;
8051 }
8053 // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF
8054 // vectors.
8055 if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
8056 SDValue Vec = V.getOperand(0);
8057 int NumVecElts = Vec.getValueType().getVectorNumElements();
8058 if (Vec.isUndef() && Size == NumVecElts) {
8059 int Idx = V.getConstantOperandVal(2);
8060 int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
8061 if (M < Idx || (Idx + NumSubElts) <= M)
8062 KnownUndef.setBit(i);
8063 }
8064 continue;
8065 }
8067 // Attempt to extract from the source's constant bits.
8068 if (IsSrcConstant[SrcIdx]) {
8069 if (UndefSrcElts[SrcIdx][M])
8070 KnownUndef.setBit(i);
8071 else if (SrcEltBits[SrcIdx][M] == 0)
8072 KnownZero.setBit(i);
8076 assert(VT.getVectorNumElements() == (unsigned)Size &&
8077 "Different mask size from vector size!");
8081 // Replace target shuffle mask elements with known undef/zero sentinels.
8082 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
8083 const APInt &KnownUndef,
8084 const APInt &KnownZero,
8085 bool ResolveKnownZeros = true) {
8086 unsigned NumElts = Mask.size();
8087 assert(KnownUndef.getBitWidth() == NumElts &&
8088 KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
8090 for (unsigned i = 0; i != NumElts; ++i) {
8091 if (KnownUndef[i])
8092 Mask[i] = SM_SentinelUndef;
8093 else if (ResolveKnownZeros && KnownZero[i])
8094 Mask[i] = SM_SentinelZero;
8098 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
8099 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
8100 APInt &KnownUndef,
8101 APInt &KnownZero) {
8102 unsigned NumElts = Mask.size();
8103 KnownUndef = KnownZero = APInt::getZero(NumElts);
8105 for (unsigned i = 0; i != NumElts; ++i) {
8106 int M = Mask[i];
8107 if (SM_SentinelUndef == M)
8108 KnownUndef.setBit(i);
8109 if (SM_SentinelZero == M)
8110 KnownZero.setBit(i);
8114 // Attempt to create a shuffle mask from a VSELECT/BLENDV condition mask.
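// e.g. (illustrative) a v4i32 condition of <-1,0,-1,0> yields Mask =
// {0,5,2,7}: elts 0/2 from the first operand, elts 1/3 from the second.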
8115 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
8116 SDValue Cond, bool IsBLENDV = false) {
8117 EVT CondVT = Cond.getValueType();
8118 unsigned EltSizeInBits = CondVT.getScalarSizeInBits();
8119 unsigned NumElts = CondVT.getVectorNumElements();
8121 APInt UndefElts;
8122 SmallVector<APInt, 32> EltBits;
8123 if (!getTargetConstantBitsFromNode(Cond, EltSizeInBits, UndefElts, EltBits,
8124 true, false))
8125 return false;
8127 Mask.resize(NumElts, SM_SentinelUndef);
8129 for (int i = 0; i != (int)NumElts; ++i) {
8130 Mask[i] = i;
8131 // Arbitrarily choose from the 2nd operand if the select condition element
8132 // is undef.
8133 // TODO: Can we do better by matching patterns such as even/odd?
8134 if (UndefElts[i] || (!IsBLENDV && EltBits[i].isZero()) ||
8135 (IsBLENDV && EltBits[i].isNonNegative()))
8136 Mask[i] = i + NumElts;
8137 }
8139 return true;
8142 // Forward declaration (for getFauxShuffleMask recursive check).
8143 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8144 SmallVectorImpl<SDValue> &Inputs,
8145 SmallVectorImpl<int> &Mask,
8146 const SelectionDAG &DAG, unsigned Depth,
8147 bool ResolveKnownElts);
8149 // Attempt to decode ops that could be represented as a shuffle mask.
8150 // The decoded shuffle mask may contain a different number of elements to the
8151 // destination value type.
8152 // TODO: Merge into getTargetShuffleInputs()
8153 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
8154 SmallVectorImpl<int> &Mask,
8155 SmallVectorImpl<SDValue> &Ops,
8156 const SelectionDAG &DAG, unsigned Depth,
8157 bool ResolveKnownElts) {
8158 if (Depth >= SelectionDAG::MaxRecursionDepth)
8159 return false; // Limit search depth.
8161 MVT VT = N.getSimpleValueType();
8162 unsigned NumElts = VT.getVectorNumElements();
8163 unsigned NumSizeInBits = VT.getSizeInBits();
8164 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
8165 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
8166 return false;
8167 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
8168 unsigned NumSizeInBytes = NumSizeInBits / 8;
8169 unsigned NumBytesPerElt = NumBitsPerElt / 8;
8171 unsigned Opcode = N.getOpcode();
8172 switch (Opcode) {
8173 case ISD::VECTOR_SHUFFLE: {
8174 // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
8175 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
8176 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
8177 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
8178 Ops.push_back(N.getOperand(0));
8179 Ops.push_back(N.getOperand(1));
8180 return true;
8181 }
8182 return false;
8183 }
8184 case ISD::AND:
8185 case X86ISD::ANDNP: {
8186 // Attempt to decode as a per-byte mask.
8187 APInt UndefElts;
8188 SmallVector<APInt, 32> EltBits;
8189 SDValue N0 = N.getOperand(0);
8190 SDValue N1 = N.getOperand(1);
8191 bool IsAndN = (X86ISD::ANDNP == Opcode);
8192 uint64_t ZeroMask = IsAndN ? 255 : 0;
8193 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
8194 return false;
8195 // We can't assume an undef src element gives an undef dst - the other src
8196 // might be zero.
8197 if (!UndefElts.isZero())
8198 return false;
8199 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
8200 const APInt &ByteBits = EltBits[i];
8201 if (ByteBits != 0 && ByteBits != 255)
8202 return false;
8203 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
8205 Ops.push_back(IsAndN ? N1 : N0);
8206 return true;
8207 }
8208 case ISD::OR: {
8209 // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
8210 // is a valid shuffle index.
8211 SDValue N0 = peekThroughBitcasts(N.getOperand(0));
8212 SDValue N1 = peekThroughBitcasts(N.getOperand(1));
8213 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
8214 return false;
8216 SmallVector<int, 64> SrcMask0, SrcMask1;
8217 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
8218 APInt Demand0 = APInt::getAllOnes(N0.getValueType().getVectorNumElements());
8219 APInt Demand1 = APInt::getAllOnes(N1.getValueType().getVectorNumElements());
8220 if (!getTargetShuffleInputs(N0, Demand0, SrcInputs0, SrcMask0, DAG,
8221 Depth + 1, true) ||
8222 !getTargetShuffleInputs(N1, Demand1, SrcInputs1, SrcMask1, DAG,
8223 Depth + 1, true))
8224 return false;
8226 size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
8227 SmallVector<int, 64> Mask0, Mask1;
8228 narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
8229 narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
8230 for (int i = 0; i != (int)MaskSize; ++i) {
8231 // NOTE: Don't handle SM_SentinelUndef, as we can end up in infinite
8232 // loops converting between OR and BLEND shuffles due to
8233 // canWidenShuffleElements merging away undef elements, meaning we
8234 // fail to recognise the OR as the undef element isn't known zero.
8235 if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
8236 Mask.push_back(SM_SentinelZero);
8237 else if (Mask1[i] == SM_SentinelZero)
8238 Mask.push_back(i);
8239 else if (Mask0[i] == SM_SentinelZero)
8240 Mask.push_back(i + MaskSize);
8241 else
8242 return false;
8243 }
8244 Ops.push_back(N0);
8245 Ops.push_back(N1);
8246 return true;
8247 }
8248 case ISD::INSERT_SUBVECTOR: {
8249 SDValue Src = N.getOperand(0);
8250 SDValue Sub = N.getOperand(1);
8251 EVT SubVT = Sub.getValueType();
8252 unsigned NumSubElts = SubVT.getVectorNumElements();
8253 if (!N->isOnlyUserOf(Sub.getNode()))
8254 return false;
8255 uint64_t InsertIdx = N.getConstantOperandVal(2);
8256 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
8257 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
8258 Sub.getOperand(0).getValueType() == VT) {
8259 uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
8260 for (int i = 0; i != (int)NumElts; ++i)
8261 Mask.push_back(i);
8262 for (int i = 0; i != (int)NumSubElts; ++i)
8263 Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
8264 Ops.push_back(Src);
8265 Ops.push_back(Sub.getOperand(0));
8266 return true;
8267 }
8268 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
8269 SmallVector<int, 64> SubMask;
8270 SmallVector<SDValue, 2> SubInputs;
8271 SDValue SubSrc = peekThroughOneUseBitcasts(Sub);
8272 EVT SubSrcVT = SubSrc.getValueType();
8273 if (!SubSrcVT.isVector())
8274 return false;
8276 APInt SubDemand = APInt::getAllOnes(SubSrcVT.getVectorNumElements());
8277 if (!getTargetShuffleInputs(SubSrc, SubDemand, SubInputs, SubMask, DAG,
8278 Depth + 1, ResolveKnownElts))
8279 return false;
8281 // Subvector shuffle inputs must not be larger than the subvector.
8282 if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
8283 return SubVT.getFixedSizeInBits() <
8284 SubInput.getValueSizeInBits().getFixedSize();
8285 }))
8286 return false;
8288 if (SubMask.size() != NumSubElts) {
8289 assert(((SubMask.size() % NumSubElts) == 0 ||
8290 (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
8291 if ((NumSubElts % SubMask.size()) == 0) {
8292 int Scale = NumSubElts / SubMask.size();
8293 SmallVector<int,64> ScaledSubMask;
8294 narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
8295 SubMask = ScaledSubMask;
8296 } else {
8297 int Scale = SubMask.size() / NumSubElts;
8298 NumSubElts = SubMask.size();
8299 NumElts *= Scale;
8300 InsertIdx *= Scale;
8301 }
8302 }
8303 Ops.push_back(Src);
8304 Ops.append(SubInputs.begin(), SubInputs.end());
8305 if (ISD::isBuildVectorAllZeros(Src.getNode()))
8306 Mask.append(NumElts, SM_SentinelZero);
8307 else
8308 for (int i = 0; i != (int)NumElts; ++i)
8309 Mask.push_back(i);
8310 for (int i = 0; i != (int)NumSubElts; ++i) {
8311 int M = SubMask[i];
8312 if (0 <= M) {
8313 int InputIdx = M / NumSubElts;
8314 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
8315 }
8316 Mask[i + InsertIdx] = M;
8317 }
8318 return true;
8319 }
8320 case X86ISD::PINSRB:
8321 case X86ISD::PINSRW:
8322 case ISD::SCALAR_TO_VECTOR:
8323 case ISD::INSERT_VECTOR_ELT: {
8324 // Match against an insert_vector_elt/scalar_to_vector of an extract from a
8325 // vector, for matching src/dst vector types.
8326 SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
8328 unsigned DstIdx = 0;
8329 if (Opcode != ISD::SCALAR_TO_VECTOR) {
8330 // Check we have an in-range constant insertion index.
8331 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
8332 N.getConstantOperandAPInt(2).uge(NumElts))
8333 return false;
8334 DstIdx = N.getConstantOperandVal(2);
8336 // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
8337 if (X86::isZeroNode(Scl)) {
8338 Ops.push_back(N.getOperand(0));
8339 for (unsigned i = 0; i != NumElts; ++i)
8340 Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
8341 return true;
8342 }
8345 // Peek through trunc/aext/zext.
8346 // TODO: aext shouldn't require SM_SentinelZero padding.
8347 // TODO: handle shift of scalars.
8348 unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
8349 while (Scl.getOpcode() == ISD::TRUNCATE ||
8350 Scl.getOpcode() == ISD::ANY_EXTEND ||
8351 Scl.getOpcode() == ISD::ZERO_EXTEND) {
8352 Scl = Scl.getOperand(0);
8353 MinBitsPerElt =
8354 std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
8355 }
8356 if ((MinBitsPerElt % 8) != 0)
8357 return false;
8359 // Attempt to find the source vector the scalar was extracted from.
8360 SDValue SrcExtract;
8361 if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
8362 Scl.getOpcode() == X86ISD::PEXTRW ||
8363 Scl.getOpcode() == X86ISD::PEXTRB) &&
8364 Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
8365 SrcExtract = Scl;
8366 }
8367 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
8368 return false;
8370 SDValue SrcVec = SrcExtract.getOperand(0);
8371 EVT SrcVT = SrcVec.getValueType();
8372 if (!SrcVT.getScalarType().isByteSized())
8373 return false;
8374 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
8375 unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
8376 unsigned DstByte = DstIdx * NumBytesPerElt;
8377 MinBitsPerElt =
8378 std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
8380 // Create 'identity' byte level shuffle mask and then add inserted bytes.
8381 if (Opcode == ISD::SCALAR_TO_VECTOR) {
8382 Ops.push_back(SrcVec);
8383 Mask.append(NumSizeInBytes, SM_SentinelUndef);
8384 } else {
8385 Ops.push_back(SrcVec);
8386 Ops.push_back(N.getOperand(0));
8387 for (int i = 0; i != (int)NumSizeInBytes; ++i)
8388 Mask.push_back(NumSizeInBytes + i);
8391 unsigned MinBytesPerElts = MinBitsPerElt / 8;
8392 MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
8393 for (unsigned i = 0; i != MinBytesPerElts; ++i)
8394 Mask[DstByte + i] = SrcByte + i;
8395 for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
8396 Mask[DstByte + i] = SM_SentinelZero;
8397 return true;
8398 }
8399 case X86ISD::PACKSS:
8400 case X86ISD::PACKUS: {
8401 SDValue N0 = N.getOperand(0);
8402 SDValue N1 = N.getOperand(1);
8403 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
8404 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
8405 "Unexpected input value type");
8407 APInt EltsLHS, EltsRHS;
8408 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
8410 // If we know input saturation won't happen (or we don't care for particular
8411 // lanes), we can treat this as a truncation shuffle.
8412 bool Offset0 = false, Offset1 = false;
8413 if (Opcode == X86ISD::PACKSS) {
8414 if ((!(N0.isUndef() || EltsLHS.isZero()) &&
8415 DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
8416 (!(N1.isUndef() || EltsRHS.isZero()) &&
8417 DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
8418 return false;
8419 // We can't easily fold ASHR into a shuffle, but if it was feeding a
8420 // PACKSS then it was likely being used for sign-extension for a
8421 // truncation, so just peek through and adjust the mask accordingly.
8422 if (N0.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N0.getNode()) &&
8423 N0.getConstantOperandAPInt(1) == NumBitsPerElt) {
8424 Offset0 = true;
8425 N0 = N0.getOperand(0);
8426 }
8427 if (N1.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N1.getNode()) &&
8428 N1.getConstantOperandAPInt(1) == NumBitsPerElt) {
8429 Offset1 = true;
8430 N1 = N1.getOperand(0);
8431 }
8432 } else {
8433 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
8434 if ((!(N0.isUndef() || EltsLHS.isZero()) &&
8435 !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
8436 (!(N1.isUndef() || EltsRHS.isZero()) &&
8437 !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1))
8438 return false;
8439 }
8441 bool IsUnary = (N0 == N1);
8443 Ops.push_back(N0);
8444 if (!IsUnary)
8445 Ops.push_back(N1);
8447 createPackShuffleMask(VT, Mask, IsUnary);
8449 if (Offset0 || Offset1) {
8450 for (int &M : Mask)
8451 if ((Offset0 && isInRange(M, 0, NumElts)) ||
8452 (Offset1 && isInRange(M, NumElts, 2 * NumElts)))
8453 M += NumElts;
8454 }
8456 return true;
8457 }
8458 case X86ISD::BLENDV: {
8459 SDValue Cond = N.getOperand(0);
8460 if (createShuffleMaskFromVSELECT(Mask, Cond, Opcode == X86ISD::BLENDV)) {
8461 Ops.push_back(N.getOperand(1));
8462 Ops.push_back(N.getOperand(2));
8463 return true;
8464 }
8465 return false;
8466 }
8467 case X86ISD::VTRUNC: {
8468 SDValue Src = N.getOperand(0);
8469 EVT SrcVT = Src.getValueType();
8470 // Truncated source must be a simple vector.
8471 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8472 (SrcVT.getScalarSizeInBits() % 8) != 0)
8473 return false;
8474 unsigned NumSrcElts = SrcVT.getVectorNumElements();
8475 unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
8476 unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
8477 assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
8478 for (unsigned i = 0; i != NumSrcElts; ++i)
8479 Mask.push_back(i * Scale);
8480 Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
8481 Ops.push_back(Src);
8482 return true;
8483 }
8484 case X86ISD::VSHLI:
8485 case X86ISD::VSRLI: {
8486 uint64_t ShiftVal = N.getConstantOperandVal(1);
8487 // Out of range bit shifts are guaranteed to be zero.
8488 if (NumBitsPerElt <= ShiftVal) {
8489 Mask.append(NumElts, SM_SentinelZero);
8493 // We can only decode 'whole byte' bit shifts as shuffles.
8494 if ((ShiftVal % 8) != 0)
8495 break;
8497 uint64_t ByteShift = ShiftVal / 8;
8498 Ops.push_back(N.getOperand(0));
8500 // Clear mask to all zeros and insert the shifted byte indices.
8501 Mask.append(NumSizeInBytes, SM_SentinelZero);
8503 if (X86ISD::VSHLI == Opcode) {
8504 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
8505 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
8506 Mask[i + j] = i + j - ByteShift;
8508 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
8509 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
8510 Mask[i + j - ByteShift] = i + j;
8511 }
8512 return true;
8513 }
8514 case X86ISD::VROTLI:
8515 case X86ISD::VROTRI: {
8516 // We can only decode 'whole byte' bit rotates as shuffles.
8517 uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
8518 if ((RotateVal % 8) != 0)
8519 return false;
8520 Ops.push_back(N.getOperand(0));
8521 int Offset = RotateVal / 8;
8522 Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
8523 for (int i = 0; i != (int)NumElts; ++i) {
8524 int BaseIdx = i * NumBytesPerElt;
8525 for (int j = 0; j != (int)NumBytesPerElt; ++j) {
8526 Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
8531 case X86ISD::VBROADCAST: {
8532 SDValue Src = N.getOperand(0);
8533 if (!Src.getSimpleValueType().isVector()) {
8534 if (Src.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8535 !isNullConstant(Src.getOperand(1)) ||
8536 Src.getOperand(0).getValueType().getScalarType() !=
8537 VT.getScalarType())
8538 return false;
8539 Src = Src.getOperand(0);
8540 }
8541 Ops.push_back(Src);
8542 Mask.append(NumElts, 0);
8543 return true;
8544 }
8545 case ISD::ZERO_EXTEND:
8546 case ISD::ANY_EXTEND:
8547 case ISD::ZERO_EXTEND_VECTOR_INREG:
8548 case ISD::ANY_EXTEND_VECTOR_INREG: {
8549 SDValue Src = N.getOperand(0);
8550 EVT SrcVT = Src.getValueType();
8552 // Extended source must be a simple vector.
8553 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8554 (SrcVT.getScalarSizeInBits() % 8) != 0)
8555 return false;
8557 bool IsAnyExtend =
8558 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
8559 DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
8560 IsAnyExtend, Mask);
8561 Ops.push_back(Src);
8562 return true;
8563 }
8564 }
8566 return false;
8567 }
8569 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
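/// e.g. (illustrative) Inputs = {A, A} with Mask = {0,1,6,7} resolves to
/// Inputs = {A} with Mask = {0,1,2,3}.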
8570 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
8571 SmallVectorImpl<int> &Mask) {
8572 int MaskWidth = Mask.size();
8573 SmallVector<SDValue, 16> UsedInputs;
8574 for (int i = 0, e = Inputs.size(); i < e; ++i) {
8575 int lo = UsedInputs.size() * MaskWidth;
8576 int hi = lo + MaskWidth;
8578 // Strip UNDEF input usage.
8579 if (Inputs[i].isUndef())
8580 for (int &M : Mask)
8581 if ((lo <= M) && (M < hi))
8582 M = SM_SentinelUndef;
8584 // Check for unused inputs.
8585 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
8586 for (int &M : Mask)
8587 if (lo <= M)
8588 M -= MaskWidth;
8589 continue;
8590 }
8592 // Check for repeated inputs.
8593 bool IsRepeat = false;
8594 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
8595 if (UsedInputs[j] != Inputs[i])
8596 continue;
8599 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
8606 UsedInputs.push_back(Inputs[i]);
8608 Inputs = UsedInputs;
8611 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
8612 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
8613 /// Returns true if the target shuffle mask was decoded.
8614 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8615 SmallVectorImpl<SDValue> &Inputs,
8616 SmallVectorImpl<int> &Mask,
8617 APInt &KnownUndef, APInt &KnownZero,
8618 const SelectionDAG &DAG, unsigned Depth,
8619 bool ResolveKnownElts) {
8620 if (Depth >= SelectionDAG::MaxRecursionDepth)
8621 return false; // Limit search depth.
8623 EVT VT = Op.getValueType();
8624 if (!VT.isSimple() || !VT.isVector())
8625 return false;
8627 if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
8628 if (ResolveKnownElts)
8629 resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
8630 return true;
8631 }
8632 if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
8633 ResolveKnownElts)) {
8634 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
8635 return true;
8636 }
8638 return false;
8639 }
8640 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8641 SmallVectorImpl<SDValue> &Inputs,
8642 SmallVectorImpl<int> &Mask,
8643 const SelectionDAG &DAG, unsigned Depth,
8644 bool ResolveKnownElts) {
8645 APInt KnownUndef, KnownZero;
8646 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
8647 KnownZero, DAG, Depth, ResolveKnownElts);
8650 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
8651 SmallVectorImpl<int> &Mask,
8652 const SelectionDAG &DAG, unsigned Depth = 0,
8653 bool ResolveKnownElts = true) {
8654 EVT VT = Op.getValueType();
8655 if (!VT.isSimple() || !VT.isVector())
8656 return false;
8658 unsigned NumElts = Op.getValueType().getVectorNumElements();
8659 APInt DemandedElts = APInt::getAllOnes(NumElts);
8660 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, DAG, Depth,
8661 ResolveKnownElts);
8662 }
8664 // Attempt to create a scalar/subvector broadcast from the base MemSDNode.
8665 static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
8666 EVT MemVT, MemSDNode *Mem, unsigned Offset,
8667 SelectionDAG &DAG) {
8668 assert((Opcode == X86ISD::VBROADCAST_LOAD ||
8669 Opcode == X86ISD::SUBV_BROADCAST_LOAD) &&
8670 "Unknown broadcast load type");
8672 // Ensure this is a simple (non-atomic, non-volatile), temporal read memop.
8673 if (!Mem || !Mem->readMem() || !Mem->isSimple() || Mem->isNonTemporal())
8674 return SDValue();
8676 SDValue Ptr =
8677 DAG.getMemBasePlusOffset(Mem->getBasePtr(), TypeSize::Fixed(Offset), DL);
8678 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8679 SDValue Ops[] = {Mem->getChain(), Ptr};
8680 SDValue BcstLd = DAG.getMemIntrinsicNode(
8681 Opcode, DL, Tys, Ops, MemVT,
8682 DAG.getMachineFunction().getMachineMemOperand(
8683 Mem->getMemOperand(), Offset, MemVT.getStoreSize()));
8684 DAG.makeEquivalentMemoryOrdering(SDValue(Mem, 1), BcstLd.getValue(1));
8685 return BcstLd;
8686 }
8688 /// Returns the scalar element that will make up the i'th
8689 /// element of the result of the vector shuffle.
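/// e.g. (illustrative) for (vector_shuffle<4,1,6,3> A, B), Index 2 recurses
/// into B to resolve its element 2.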
8690 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
8691 SelectionDAG &DAG, unsigned Depth) {
8692 if (Depth >= SelectionDAG::MaxRecursionDepth)
8693 return SDValue(); // Limit search depth.
8695 EVT VT = Op.getValueType();
8696 unsigned Opcode = Op.getOpcode();
8697 unsigned NumElems = VT.getVectorNumElements();
8699 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
8700 if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
8701 int Elt = SV->getMaskElt(Index);
8703 if (Elt < 0)
8704 return DAG.getUNDEF(VT.getVectorElementType());
8706 SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
8707 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
8710 // Recurse into target specific vector shuffles to find scalars.
8711 if (isTargetShuffle(Opcode)) {
8712 MVT ShufVT = VT.getSimpleVT();
8713 MVT ShufSVT = ShufVT.getVectorElementType();
8714 int NumElems = (int)ShufVT.getVectorNumElements();
8715 SmallVector<int, 16> ShuffleMask;
8716 SmallVector<SDValue, 16> ShuffleOps;
8717 if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
8718 ShuffleMask))
8719 return SDValue();
8721 int Elt = ShuffleMask[Index];
8722 if (Elt == SM_SentinelZero)
8723 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
8724 : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
8725 if (Elt == SM_SentinelUndef)
8726 return DAG.getUNDEF(ShufSVT);
8728 assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
8729 SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
8730 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
8733 // Recurse into insert_subvector base/sub vector to find scalars.
8734 if (Opcode == ISD::INSERT_SUBVECTOR) {
8735 SDValue Vec = Op.getOperand(0);
8736 SDValue Sub = Op.getOperand(1);
8737 uint64_t SubIdx = Op.getConstantOperandVal(2);
8738 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
8740 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
8741 return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
8742 return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
8745 // Recurse into concat_vectors sub vector to find scalars.
8746 if (Opcode == ISD::CONCAT_VECTORS) {
8747 EVT SubVT = Op.getOperand(0).getValueType();
8748 unsigned NumSubElts = SubVT.getVectorNumElements();
8749 uint64_t SubIdx = Index / NumSubElts;
8750 uint64_t SubElt = Index % NumSubElts;
8751 return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
8754 // Recurse into extract_subvector src vector to find scalars.
8755 if (Opcode == ISD::EXTRACT_SUBVECTOR) {
8756 SDValue Src = Op.getOperand(0);
8757 uint64_t SrcIdx = Op.getConstantOperandVal(1);
8758 return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
8761 // We only peek through bitcasts of the same vector width.
8762 if (Opcode == ISD::BITCAST) {
8763 SDValue Src = Op.getOperand(0);
8764 EVT SrcVT = Src.getValueType();
8765 if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
8766 return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
8767 return SDValue();
8768 }
8770 // Actual nodes that may contain scalar elements
8772 // For insert_vector_elt - either return the index matching scalar or recurse
8773 // into the base vector.
8774 if (Opcode == ISD::INSERT_VECTOR_ELT &&
8775 isa<ConstantSDNode>(Op.getOperand(2))) {
8776 if (Op.getConstantOperandAPInt(2) == Index)
8777 return Op.getOperand(1);
8778 return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
8781 if (Opcode == ISD::SCALAR_TO_VECTOR)
8782 return (Index == 0) ? Op.getOperand(0)
8783 : DAG.getUNDEF(VT.getVectorElementType());
8785 if (Opcode == ISD::BUILD_VECTOR)
8786 return Op.getOperand(Index);
8791 // Use PINSRB/PINSRW/PINSRD to create a build vector.
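// e.g. (illustrative) (v8i16 build_vector x, 0, y, 0, 0, 0, 0, 0) becomes a
// zero vector with x inserted at index 0 and y inserted at index 2.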
8792 static SDValue LowerBuildVectorAsInsert(SDValue Op, const APInt &NonZeroMask,
8793 unsigned NumNonZero, unsigned NumZero,
8794 SelectionDAG &DAG,
8795 const X86Subtarget &Subtarget) {
8796 MVT VT = Op.getSimpleValueType();
8797 unsigned NumElts = VT.getVectorNumElements();
8798 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
8799 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
8800 "Illegal vector insertion");
8806 for (unsigned i = 0; i < NumElts; ++i) {
8807 bool IsNonZero = NonZeroMask[i];
8811 // If the build vector contains zeros or our first insertion is not the
8812 // first index, then insert into a zero vector to break any register
8813 // dependency; else use SCALAR_TO_VECTOR.
8814 if (First) {
8815 First = false;
8816 if (NumZero || 0 != i)
8817 V = getZeroVector(VT, Subtarget, DAG, dl);
8818 else {
8819 assert(0 == i && "Expected insertion into zero-index");
8820 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8821 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
8822 V = DAG.getBitcast(VT, V);
8823 continue;
8824 }
8825 }
8826 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
8827 DAG.getIntPtrConstant(i, dl));
8828 }
8830 return V;
8831 }
8833 /// Custom lower build_vector of v16i8.
8834 static SDValue LowerBuildVectorv16i8(SDValue Op, const APInt &NonZeroMask,
8835 unsigned NumNonZero, unsigned NumZero,
8836 SelectionDAG &DAG,
8837 const X86Subtarget &Subtarget) {
8838 if (NumNonZero > 8 && !Subtarget.hasSSE41())
8839 return SDValue();
8841 // SSE4.1 - use PINSRB to insert each byte directly.
8842 if (Subtarget.hasSSE41())
8843 return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
8844 Subtarget);
8846 SDLoc dl(Op);
8847 SDValue V;
8848 bool First = true;
8849 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
8850 for (unsigned i = 0; i < 16; i += 2) {
8851 bool ThisIsNonZero = NonZeroMask[i];
8852 bool NextIsNonZero = NonZeroMask[i + 1];
8853 if (!ThisIsNonZero && !NextIsNonZero)
8856 // FIXME: Investigate combining the first 4 bytes as an i32 instead.
8857 SDValue Elt;
8858 if (ThisIsNonZero) {
8859 if (NumZero || NextIsNonZero)
8860 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8861 else
8862 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8863 }
8865 if (NextIsNonZero) {
8866 SDValue NextElt = Op.getOperand(i + 1);
8867 if (i == 0 && NumZero)
8868 NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
8869 else
8870 NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
8871 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
8872 DAG.getConstant(8, dl, MVT::i8));
8874 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
8879 // If our first insertion is not the first index or zeros are needed, then
8880 // insert into zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
8881 // elements undefined).
8882 if (First) {
8883 if (i != 0 || NumZero)
8884 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
8885 else {
8886 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
8887 V = DAG.getBitcast(MVT::v8i16, V);
8888 }
8889 First = false;
8890 }
8891 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
8892 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
8893 DAG.getIntPtrConstant(i / 2, dl));
8896 return DAG.getBitcast(MVT::v16i8, V);
8899 /// Custom lower build_vector of v8i16.
8900 static SDValue LowerBuildVectorv8i16(SDValue Op, const APInt &NonZeroMask,
8901 unsigned NumNonZero, unsigned NumZero,
8903 const X86Subtarget &Subtarget) {
8904 if (NumNonZero > 4 && !Subtarget.hasSSE41())
8905 return SDValue();
8907 // Use PINSRW to insert each element directly.
8908 return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
8909 Subtarget);
8910 }
8912 /// Custom lower build_vector of v4i32 or v4f32.
8913 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
8914 const X86Subtarget &Subtarget) {
8915 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
8916 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
8917 // Because we're creating a less complicated build vector here, we may enable
8918 // further folding of the MOVDDUP via shuffle transforms.
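// e.g. (illustrative) (v4f32 build_vector a, b, a, b) becomes
// (v4f32 bitcast (v2f64 MOVDDUP (v2f64 bitcast (build_vector a, b, u, u)))).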
8919 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
8920 Op.getOperand(0) == Op.getOperand(2) &&
8921 Op.getOperand(1) == Op.getOperand(3) &&
8922 Op.getOperand(0) != Op.getOperand(1)) {
8923 SDLoc DL(Op);
8924 MVT VT = Op.getSimpleValueType();
8925 MVT EltVT = VT.getVectorElementType();
8926 // Create a new build vector with the first 2 elements followed by undef
8927 // padding, bitcast to v2f64, duplicate, and bitcast back.
8928 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
8929 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
8930 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
8931 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
8932 return DAG.getBitcast(VT, Dup);
8935 // Find all zeroable elements.
8936 std::bitset<4> Zeroable, Undefs;
8937 for (int i = 0; i < 4; ++i) {
8938 SDValue Elt = Op.getOperand(i);
8939 Undefs[i] = Elt.isUndef();
8940 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
8942 assert(Zeroable.size() - Zeroable.count() > 1 &&
8943 "We expect at least two non-zero elements!");
8945 // We only know how to deal with build_vector nodes where elements are either
8946 // zeroable or extract_vector_elt with constant index.
8947 SDValue FirstNonZero;
8948 unsigned FirstNonZeroIdx;
8949 for (unsigned i = 0; i < 4; ++i) {
8950 if (Zeroable[i])
8951 continue;
8952 SDValue Elt = Op.getOperand(i);
8953 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8954 !isa<ConstantSDNode>(Elt.getOperand(1)))
8955 return SDValue();
8956 // Make sure that this node is extracting from a 128-bit vector.
8957 MVT VT = Elt.getOperand(0).getSimpleValueType();
8958 if (!VT.is128BitVector())
8959 return SDValue();
8960 if (!FirstNonZero.getNode()) {
8961 FirstNonZero = Elt;
8962 FirstNonZeroIdx = i;
8966 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
8967 SDValue V1 = FirstNonZero.getOperand(0);
8968 MVT VT = V1.getSimpleValueType();
8970 // See if this build_vector can be lowered as a blend with zero.
8971 SDValue Elt;
8972 unsigned EltMaskIdx, EltIdx;
8973 int Mask[4];
8974 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
8975 if (Zeroable[EltIdx]) {
8976 // The zero vector will be on the right hand side.
8977 Mask[EltIdx] = EltIdx + 4;
8978 continue;
8979 }
8981 Elt = Op->getOperand(EltIdx);
8982 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
8983 EltMaskIdx = Elt.getConstantOperandVal(1);
8984 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
8985 break;
8986 Mask[EltIdx] = EltIdx;
8987 }
8989 if (EltIdx == 4) {
8990 // Let the shuffle legalizer deal with blend operations.
8991 SDValue VZeroOrUndef = (Zeroable == Undefs)
8992 ? DAG.getUNDEF(VT)
8993 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
8994 if (V1.getSimpleValueType() != VT)
8995 V1 = DAG.getBitcast(VT, V1);
8996 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
8999 // See if we can lower this build_vector to an INSERTPS.
9000 if (!Subtarget.hasSSE41())
9001 return SDValue();
9003 SDValue V2 = Elt.getOperand(0);
9004 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
9005 V1 = SDValue();
9007 bool CanFold = true;
9008 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
9009 if (Zeroable[i])
9010 continue;
9012 SDValue Current = Op->getOperand(i);
9013 SDValue SrcVector = Current->getOperand(0);
9014 if (!V1.getNode())
9015 V1 = SrcVector;
9016 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
9017 }
9019 if (!CanFold)
9020 return SDValue();
9022 assert(V1.getNode() && "Expected at least two non-zero elements!");
9023 if (V1.getSimpleValueType() != MVT::v4f32)
9024 V1 = DAG.getBitcast(MVT::v4f32, V1);
9025 if (V2.getSimpleValueType() != MVT::v4f32)
9026 V2 = DAG.getBitcast(MVT::v4f32, V2);
9028 // Ok, we can emit an INSERTPS instruction.
9029 unsigned ZMask = Zeroable.to_ulong();
9031 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
9032 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
9033 SDLoc DL(Op);
9034 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
9035 DAG.getIntPtrConstant(InsertPSMask, DL, true));
9036 return DAG.getBitcast(VT, Result);
9039 /// Return a vector logical shift node.
9040 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
9041 SelectionDAG &DAG, const TargetLowering &TLI,
9042 const SDLoc &dl) {
9043 assert(VT.is128BitVector() && "Unknown type for VShift");
9044 MVT ShVT = MVT::v16i8;
9045 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
9046 SrcOp = DAG.getBitcast(ShVT, SrcOp);
9047 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
9048 SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
9049 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
9052 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
9053 SelectionDAG &DAG) {
9055 // Check if the scalar load can be widened into a vector load, and if the
9056 // address is "base + cst", see if the cst can be "absorbed" into the
9057 // shuffle mask.
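// For example (a sketch): an i32 load from "FI + 8" with a 16-byte
// requirement can be widened to a v4i32 load from "FI" once the stack
// object is 16-byte aligned, with the splat then selecting element
// (8 - 0) / 4 == 2.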
9058 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
9059 SDValue Ptr = LD->getBasePtr();
9060 if (!ISD::isNormalLoad(LD) || !LD->isSimple())
9062 EVT PVT = LD->getValueType(0);
9063 if (PVT != MVT::i32 && PVT != MVT::f32)
9068 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
9069 FI = FINode->getIndex();
9071 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
9072 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
9073 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
9074 Offset = Ptr.getConstantOperandVal(1);
9075 Ptr = Ptr.getOperand(0);
9080 // FIXME: 256-bit vector instructions don't require strict alignment;
9081 // improve this code to support it better.
9082 Align RequiredAlign(VT.getSizeInBits() / 8);
9083 SDValue Chain = LD->getChain();
9084 // Make sure the stack object alignment is at least 16 or 32.
9085 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9086 MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
9087 if (!InferredAlign || *InferredAlign < RequiredAlign) {
9088 if (MFI.isFixedObjectIndex(FI)) {
9089 // Can't change the alignment. FIXME: It's possible to compute
9090 // the exact stack offset and reference FI + adjust offset instead.
9091 // If someone *really* cares about this. That's the way to implement it.
9094 MFI.setObjectAlignment(FI, RequiredAlign);
9098 // (Offset % 16 or 32) must be a multiple of 4. The address is then
9099 // Ptr + (Offset & ~15).
9102 if ((Offset % RequiredAlign.value()) & 3)
9104 int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
9107 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
9108 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
9111 int EltNo = (Offset - StartOffset) >> 2;
9112 unsigned NumElems = VT.getVectorNumElements();
9114 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
9115 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
9116 LD->getPointerInfo().getWithOffset(StartOffset));
9118 SmallVector<int, 8> Mask(NumElems, EltNo);
9120 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
9126 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
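// e.g. (illustrative) an element of the form (srl (i64 load %p), 32) resolves
// to that load with ByteOffset == 4, i.e. the high half of the loaded value.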
9127 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
9128 if (ISD::isNON_EXTLoad(Elt.getNode())) {
9129 auto *BaseLd = cast<LoadSDNode>(Elt);
9130 if (!BaseLd->isSimple())
9137 switch (Elt.getOpcode()) {
9140 case ISD::SCALAR_TO_VECTOR:
9141 return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
9143 if (auto *AmtC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
9144 uint64_t Amt = AmtC->getZExtValue();
9145 if ((Amt % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
9146 ByteOffset += Amt / 8;
9151 case ISD::EXTRACT_VECTOR_ELT:
9152 if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
9153 SDValue Src = Elt.getOperand(0);
9154 unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
9155 unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
9156 if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
9157 findEltLoadSrc(Src, Ld, ByteOffset)) {
9158 uint64_t Idx = IdxC->getZExtValue();
9159 ByteOffset += Idx * (SrcSizeInBits / 8);
9169 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
9170 /// elements can be replaced by a single large load which has the same value as
9171 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
9173 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
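/// Similarly (a sketch): <load i32 *a, load i32 *a+4, load i32 *a+8,
/// load i32 *a+12> -> (v4i32 load *a), provided the loads are simple and
/// consecutive.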
9174 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
9175 const SDLoc &DL, SelectionDAG &DAG,
9176 const X86Subtarget &Subtarget,
9177 bool IsAfterLegalize) {
9178 if ((VT.getScalarSizeInBits() % 8) != 0)
9181 unsigned NumElems = Elts.size();
9183 int LastLoadedElt = -1;
9184 APInt LoadMask = APInt::getZero(NumElems);
9185 APInt ZeroMask = APInt::getZero(NumElems);
9186 APInt UndefMask = APInt::getZero(NumElems);
9188 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
9189 SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
9191 // For each element in the initializer, see if we've found a load, zero or an
9192 // undef.
9193 for (unsigned i = 0; i < NumElems; ++i) {
9194 SDValue Elt = peekThroughBitcasts(Elts[i]);
9197 if (Elt.isUndef()) {
9198 UndefMask.setBit(i);
9201 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
9206 // Each loaded element must be the correct fractional portion of the
9207 // requested vector load.
9208 unsigned EltSizeInBits = Elt.getValueSizeInBits();
9209 if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
9212 if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
9214 unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
9215 if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
9221 assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
9222 LoadMask.countPopulation()) == NumElems &&
9223 "Incomplete element masks");
9225 // Handle Special Cases - all undef or undef/zero.
9226 if (UndefMask.countPopulation() == NumElems)
9227 return DAG.getUNDEF(VT);
9228 if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
9229 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
9230 : DAG.getConstantFP(0.0, DL, VT);
9232 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9233 int FirstLoadedElt = LoadMask.countTrailingZeros();
9234 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
9235 EVT EltBaseVT = EltBase.getValueType();
9236 assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
9237 "Register/Memory size mismatch");
9238 LoadSDNode *LDBase = Loads[FirstLoadedElt];
9239 assert(LDBase && "Did not find base load for merging consecutive loads");
9240 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
9241 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
9242 int NumLoadedElts = (1 + LastLoadedElt - FirstLoadedElt);
9243 int LoadSizeInBits = NumLoadedElts * BaseSizeInBits;
9244 assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
9246 // TODO: Support offsetting the base load.
9247 if (ByteOffsets[FirstLoadedElt] != 0)
9250 // Check to see if the element's load is consecutive to the base load
9251 // or offset from a previous (already checked) load.
9252 auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
9253 LoadSDNode *Ld = Loads[EltIdx];
9254 int64_t ByteOffset = ByteOffsets[EltIdx];
9255 if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
9256 int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
9257 return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
9258 Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
9260 return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
9261 EltIdx - FirstLoadedElt);
9264 // Consecutive loads can contain UNDEFs but not ZERO elements.
9265 // Consecutive loads with UNDEF and ZERO elements require an
9266 // additional shuffle stage to clear the ZERO elements.
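// Sketch: <ld a, undef, ld a+8, ld a+12> can use the wide load directly,
// whereas <ld a, zero, ld a+8, ld a+12> needs the wide load plus a
// blend-with-zero shuffle to clear element 1.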
9267 bool IsConsecutiveLoad = true;
9268 bool IsConsecutiveLoadWithZeros = true;
9269 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
9271 if (!CheckConsecutiveLoad(LDBase, i)) {
9272 IsConsecutiveLoad = false;
9273 IsConsecutiveLoadWithZeros = false;
9276 } else if (ZeroMask[i]) {
9277 IsConsecutiveLoad = false;
9281 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
9282 auto MMOFlags = LDBase->getMemOperand()->getFlags();
9283 assert(LDBase->isSimple() &&
9284 "Cannot merge volatile or atomic loads.");
9286 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
9287 LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
9289 for (auto *LD : Loads)
9291 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
9295 // Check if the base load is entirely dereferenceable.
9296 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
9297 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
9299 // LOAD - all consecutive load/undefs (must start/end with a load or be
9300 // entirely dereferenceable). If we have found an entire vector of loads and
9301 // undefs, then return a large load of the entire vector width starting at the
9302 // base pointer. If the vector contains zeros, then attempt to shuffle those
9303 // elements.
9304 if (FirstLoadedElt == 0 &&
9305 (NumLoadedElts == (int)NumElems || IsDereferenceable) &&
9306 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
9307 if (IsAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
9310 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
9311 // will lower to regular temporal loads and use the cache.
9312 if (LDBase->isNonTemporal() && LDBase->getAlign() >= Align(32) &&
9313 VT.is256BitVector() && !Subtarget.hasInt256())
9317 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
9320 return CreateLoad(VT, LDBase);
9322 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
9323 // vector and a zero vector to clear out the zero elements.
9324 if (!IsAfterLegalize && VT.isVector()) {
9325 unsigned NumMaskElts = VT.getVectorNumElements();
9326 if ((NumMaskElts % NumElems) == 0) {
9327 unsigned Scale = NumMaskElts / NumElems;
9328 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
9329 for (unsigned i = 0; i < NumElems; ++i) {
9332 int Offset = ZeroMask[i] ? NumMaskElts : 0;
9333 for (unsigned j = 0; j != Scale; ++j)
9334 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
9336 SDValue V = CreateLoad(VT, LDBase);
9337 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
9338 : DAG.getConstantFP(0.0, DL, VT);
9339 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
9344 // If the upper half of a ymm/zmm load is undef, then just load the lower half.
9345 if (VT.is256BitVector() || VT.is512BitVector()) {
9346 unsigned HalfNumElems = NumElems / 2;
9347 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnes()) {
9349 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
9351 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
9352 DAG, Subtarget, IsAfterLegalize);
9354 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
9355 HalfLD, DAG.getIntPtrConstant(0, DL));
9359 // VZEXT_LOAD - consecutive 16 (FP16)/32/64-bit load/undefs followed by zeros/undefs.
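// Sketch: <ld f32 *a, ld f32 *a+4, zero, zero> can become a single 64-bit
// VZEXT_LOAD from *a (bitcast back to v4f32), implicitly zeroing the upper
// lanes.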
9360 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
9361 ((LoadSizeInBits == 16 && Subtarget.hasFP16()) || LoadSizeInBits == 32 ||
9362 LoadSizeInBits == 64) &&
9363 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
9364 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
9365 : MVT::getIntegerVT(LoadSizeInBits);
9366 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
9367 // Allow v4f32 on SSE1-only targets.
9368 // FIXME: Add more isel patterns so we can just use VT directly.
9369 if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
9371 if (TLI.isTypeLegal(VecVT)) {
9372 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
9373 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
9374 SDValue ResNode = DAG.getMemIntrinsicNode(
9375 X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
9376 LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
9377 for (auto *LD : Loads)
9379 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
9380 return DAG.getBitcast(VT, ResNode);
9384 // BROADCAST - match the smallest possible repetition pattern, load that
9385 // scalar/subvector element and then broadcast to the entire vector.
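// Sketch: a v4i32 <ld a, ld a+4, ld a, ld a+4> repeats every two elements,
// so the repetition can be loaded as a single i64 scalar and broadcast to
// both 64-bit halves.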
9386 if (ZeroMask.isZero() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
9387 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
9388 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
9389 unsigned RepeatSize = SubElems * BaseSizeInBits;
9390 unsigned ScalarSize = std::min(RepeatSize, 64u);
9391 if (!Subtarget.hasAVX2() && ScalarSize < 32)
9394 // Don't attempt a 1:N subvector broadcast - it should be caught by
9395 // combineConcatVectorOps, else it will cause infinite loops.
9396 if (RepeatSize > ScalarSize && SubElems == 1)
9400 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
9401 for (unsigned i = 0; i != NumElems && Match; ++i) {
9404 SDValue Elt = peekThroughBitcasts(Elts[i]);
9405 if (RepeatedLoads[i % SubElems].isUndef())
9406 RepeatedLoads[i % SubElems] = Elt;
9408 Match &= (RepeatedLoads[i % SubElems] == Elt);
9411 // We must have loads at both ends of the repetition.
9412 Match &= !RepeatedLoads.front().isUndef();
9413 Match &= !RepeatedLoads.back().isUndef();
9418 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
9419 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
9420 : EVT::getFloatingPointVT(ScalarSize);
9421 if (RepeatSize > ScalarSize)
9422 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
9423 RepeatSize / ScalarSize);
9425 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
9426 VT.getSizeInBits() / ScalarSize);
9427 if (TLI.isTypeLegal(BroadcastVT)) {
9428 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
9429 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, IsAfterLegalize)) {
9430 SDValue Broadcast = RepeatLoad;
9431 if (RepeatSize > ScalarSize) {
9432 while (Broadcast.getValueSizeInBits() < VT.getSizeInBits())
9433 Broadcast = concatSubVectors(Broadcast, Broadcast, DAG, DL);
9435 if (!Subtarget.hasAVX2() &&
9436 !X86::mayFoldLoadIntoBroadcastFromMem(
9437 RepeatLoad, RepeatVT.getScalarType().getSimpleVT(),
9439 /*AssumeSingleUse=*/true))
9442 DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, RepeatLoad);
9444 return DAG.getBitcast(VT, Broadcast);
9453 // Combine vector ops (shuffles etc.) that are equal to build_vector load1,
9454 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
9455 // are consecutive, non-overlapping, and in the right order.
9456 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
9458 const X86Subtarget &Subtarget,
9459 bool IsAfterLegalize) {
9460 SmallVector<SDValue, 64> Elts;
9461 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9462 if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
9463 Elts.push_back(Elt);
9468 assert(Elts.size() == VT.getVectorNumElements());
9469 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
9473 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
9474 unsigned SplatBitSize, LLVMContext &C) {
9475 unsigned ScalarSize = VT.getScalarSizeInBits();
9476 unsigned NumElm = SplatBitSize / ScalarSize;
9478 SmallVector<Constant *, 32> ConstantVec;
9479 for (unsigned i = 0; i < NumElm; i++) {
9480 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
9482 if (VT.isFloatingPoint()) {
9483 if (ScalarSize == 16) {
9484 Const = ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
9485 } else if (ScalarSize == 32) {
9486 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
9488 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
9489 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
9492 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
9493 ConstantVec.push_back(Const);
9495 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
9498 static bool isFoldableUseOfShuffle(SDNode *N) {
9499 for (auto *U : N->uses()) {
9500 unsigned Opc = U->getOpcode();
9501 // VPERMV/VPERMV3 shuffles can never fold their index operands.
9502 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
9504 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
9506 if (isTargetShuffle(Opc))
9508 if (Opc == ISD::BITCAST) // Ignore bitcasts
9509 return isFoldableUseOfShuffle(U);
9510 if (N->hasOneUse()) {
9511 // TODO: There may be some general way to know if an SDNode can
9512 // be folded. We currently only know whether an MI is foldable.
9513 if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
9521 /// Attempt to use the vbroadcast instruction to generate a splat value
9522 /// from a splat BUILD_VECTOR which uses:
9523 /// a. A single scalar load, or a constant.
9524 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
9526 /// The VBROADCAST node is returned when a pattern is found,
9527 /// or SDValue() otherwise.
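/// For example (a sketch), a v8f32 build_vector splatting one loaded float
/// may become a single vbroadcastss from memory (X86ISD::VBROADCAST_LOAD)
/// instead of a scalar load plus shuffle.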
9528 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
9529 const X86Subtarget &Subtarget,
9530 SelectionDAG &DAG) {
9531 // VBROADCAST requires AVX.
9532 // TODO: Splats could be generated for non-AVX CPUs using SSE
9533 // instructions, but there's less potential gain for only 128-bit vectors.
9534 if (!Subtarget.hasAVX())
9537 MVT VT = BVOp->getSimpleValueType(0);
9538 unsigned NumElts = VT.getVectorNumElements();
9541 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
9542 "Unsupported vector type for broadcast.");
9544 // See if the build vector is a repeating sequence of scalars (inc. splat).
9546 BitVector UndefElements;
9547 SmallVector<SDValue, 16> Sequence;
9548 if (BVOp->getRepeatedSequence(Sequence, &UndefElements)) {
9549 assert((NumElts % Sequence.size()) == 0 && "Sequence doesn't fit.");
9550 if (Sequence.size() == 1)
9554 // Attempt to use VBROADCASTM
9555 // From this pattern:
9556 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
9557 // b. t1 = (build_vector t0 t0)
9559 // Create (VBROADCASTM v2i1 X)
9560 if (!Sequence.empty() && Subtarget.hasCDI()) {
9561 // If not a splat, are the upper sequence values zeroable?
9562 unsigned SeqLen = Sequence.size();
9563 bool UpperZeroOrUndef =
9565 llvm::all_of(makeArrayRef(Sequence).drop_front(), [](SDValue V) {
9566 return !V || V.isUndef() || isNullConstant(V);
9568 SDValue Op0 = Sequence[0];
9569 if (UpperZeroOrUndef && ((Op0.getOpcode() == ISD::BITCAST) ||
9570 (Op0.getOpcode() == ISD::ZERO_EXTEND &&
9571 Op0.getOperand(0).getOpcode() == ISD::BITCAST))) {
9572 SDValue BOperand = Op0.getOpcode() == ISD::BITCAST
9574 : Op0.getOperand(0).getOperand(0);
9575 MVT MaskVT = BOperand.getSimpleValueType();
9576 MVT EltType = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
9577 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
9578 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
9579 MVT BcstVT = MVT::getVectorVT(EltType, NumElts / SeqLen);
9580 if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
9581 unsigned Scale = 512 / VT.getSizeInBits();
9582 BcstVT = MVT::getVectorVT(EltType, Scale * (NumElts / SeqLen));
9584 SDValue Bcst = DAG.getNode(X86ISD::VBROADCASTM, dl, BcstVT, BOperand);
9585 if (BcstVT.getSizeInBits() != VT.getSizeInBits())
9586 Bcst = extractSubVector(Bcst, 0, DAG, dl, VT.getSizeInBits());
9587 return DAG.getBitcast(VT, Bcst);
9592 unsigned NumUndefElts = UndefElements.count();
9593 if (!Ld || (NumElts - NumUndefElts) <= 1) {
9594 APInt SplatValue, Undef;
9595 unsigned SplatBitSize;
9597 // Check if this is a repeated constant pattern suitable for broadcasting.
9598 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
9599 SplatBitSize > VT.getScalarSizeInBits() &&
9600 SplatBitSize < VT.getSizeInBits()) {
9601 // Avoid replacing with broadcast when it's a use of a shuffle
9602 // instruction to preserve the present custom lowering of shuffles.
9603 if (isFoldableUseOfShuffle(BVOp))
9605 // Replace BUILD_VECTOR with a broadcast of the repeated constants.
9606 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9607 LLVMContext *Ctx = DAG.getContext();
9608 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
9609 if (Subtarget.hasAVX()) {
9610 if (SplatBitSize == 32 || SplatBitSize == 64 ||
9611 (SplatBitSize < 32 && Subtarget.hasAVX2())) {
9612 // Splatted value can fit in one INTEGER constant in the constant pool.
9613 // Load the constant and broadcast it.
9614 MVT CVT = MVT::getIntegerVT(SplatBitSize);
9615 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
9616 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
9617 SDValue CP = DAG.getConstantPool(C, PVT);
9618 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
9620 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
9622 DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
9623 SDValue Ops[] = {DAG.getEntryNode(), CP};
9624 MachinePointerInfo MPI =
9625 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9626 SDValue Brdcst = DAG.getMemIntrinsicNode(
9627 X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT, MPI, Alignment,
9628 MachineMemOperand::MOLoad);
9629 return DAG.getBitcast(VT, Brdcst);
9631 if (SplatBitSize > 64) {
9632 // Load the vector of constants and broadcast it.
9633 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
9635 SDValue VCP = DAG.getConstantPool(VecC, PVT);
9636 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
9637 MVT VVT = MVT::getVectorVT(VT.getScalarType(), NumElm);
9638 Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
9639 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9640 SDValue Ops[] = {DAG.getEntryNode(), VCP};
9641 MachinePointerInfo MPI =
9642 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9643 return DAG.getMemIntrinsicNode(
9644 X86ISD::SUBV_BROADCAST_LOAD, dl, Tys, Ops, VVT, MPI, Alignment,
9645 MachineMemOperand::MOLoad);
9650 // If we are moving a scalar into a vector (Ld must be set and all elements
9651 // but 1 are undef) and that operation is not obviously supported by
9652 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
9653 // That's better than general shuffling and may eliminate a load to GPR and
9654 // move from scalar to vector register.
9655 if (!Ld || NumElts - NumUndefElts != 1)
9657 unsigned ScalarSize = Ld.getValueSizeInBits();
9658 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
9662 bool ConstSplatVal =
9663 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
9664 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
9666 // TODO: Handle broadcasts of non-constant sequences.
9668 // Make sure that all of the users of a non-constant load are from the
9669 // BUILD_VECTOR node.
9670 // FIXME: Is the use count needed for non-constant, non-load case?
9671 if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
9674 unsigned ScalarSize = Ld.getValueSizeInBits();
9675 bool IsGE256 = (VT.getSizeInBits() >= 256);
9677 // When optimizing for size, generate up to 5 extra bytes for a broadcast
9678 // instruction to save 8 or more bytes of constant pool data.
9679 // TODO: If multiple splats are generated to load the same constant,
9680 // it may be detrimental to overall size. There needs to be a way to detect
9681 // that condition to know if this is truly a size win.
9682 bool OptForSize = DAG.shouldOptForSize();
9684 // Handle broadcasting a single constant scalar from the constant pool
9686 // On Sandybridge (no AVX2), it is still better to load a constant vector
9687 // from the constant pool and not to broadcast it from a scalar.
9688 // But override that restriction when optimizing for size.
9689 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
9690 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
9691 EVT CVT = Ld.getValueType();
9692 assert(!CVT.isVector() && "Must not broadcast a vector type");
9694 // Splat f16, f32, i32, v4f64, v4i64 in all cases with AVX2.
9695 // For size optimization, also splat v2f64 and v2i64, and for size opt
9696 // with AVX2, also splat i8 and i16.
9697 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
9698 if (ScalarSize == 32 ||
9699 (ScalarSize == 64 && (IsGE256 || Subtarget.hasVLX())) ||
9701 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
9702 const Constant *C = nullptr;
9703 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
9704 C = CI->getConstantIntValue();
9705 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
9706 C = CF->getConstantFPValue();
9708 assert(C && "Invalid constant type");
9710 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9712 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
9713 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
9715 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9716 SDValue Ops[] = {DAG.getEntryNode(), CP};
9717 MachinePointerInfo MPI =
9718 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9719 return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
9720 MPI, Alignment, MachineMemOperand::MOLoad);
9724 // Handle AVX2 in-register broadcasts.
9725 if (!IsLoad && Subtarget.hasInt256() &&
9726 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
9727 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
9729 // The scalar source must be a normal load.
9733 // Make sure the non-chain result is only used by this build vector.
9734 if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
9737 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
9738 (Subtarget.hasVLX() && ScalarSize == 64)) {
9739 auto *LN = cast<LoadSDNode>(Ld);
9740 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9741 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
9743 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
9744 LN->getMemoryVT(), LN->getMemOperand());
9745 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
9749 // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
9750 // match double, since there is no vbroadcastsd xmm instruction.
9751 if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
9752 (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
9753 auto *LN = cast<LoadSDNode>(Ld);
9754 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9755 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
9757 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
9758 LN->getMemoryVT(), LN->getMemOperand());
9759 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
9763 if (ScalarSize == 16 && Subtarget.hasFP16() && IsGE256)
9764 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
9766 // Unsupported broadcast.
9770 /// For an EXTRACT_VECTOR_ELT with a constant index, return the real
9771 /// underlying vector and index.
9773 /// Modifies \p ExtractedFromVec to the real vector and returns the real
9774 /// index.
9775 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
9777 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
9778 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
9781 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
9782 // lowered this:
9783 // (extract_vector_elt (v8f32 %1), Constant<6>)
9784 // to:
9785 // (extract_vector_elt (vector_shuffle<2,u,u,u>
9786 //                     (extract_subvector (v8f32 %0), Constant<4>),
9787 //                     undef)
9788 //                    Constant<2>)
9789 // In this case the vector is the extract_subvector expression and the index
9790 // is 2, as specified by the shuffle.
9791 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
9792 SDValue ShuffleVec = SVOp->getOperand(0);
9793 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
9794 assert(ShuffleVecVT.getVectorElementType() ==
9795 ExtractedFromVec.getSimpleValueType().getVectorElementType());
9797 int ShuffleIdx = SVOp->getMaskElt(Idx);
9798 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
9799 ExtractedFromVec = ShuffleVec;
9805 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
9806 MVT VT = Op.getSimpleValueType();
9808 // Skip if insert_vec_elt is not supported.
9809 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9810 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
9814 unsigned NumElems = Op.getNumOperands();
9818 SmallVector<unsigned, 4> InsertIndices;
9819 SmallVector<int, 8> Mask(NumElems, -1);
9821 for (unsigned i = 0; i != NumElems; ++i) {
9822 unsigned Opc = Op.getOperand(i).getOpcode();
9824 if (Opc == ISD::UNDEF)
9827 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
9828 // Quit if more than 1 element needs inserting.
9829 if (InsertIndices.size() > 1)
9832 InsertIndices.push_back(i);
9836 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
9837 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
9839 // Quit if non-constant index.
9840 if (!isa<ConstantSDNode>(ExtIdx))
9842 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
9844 // Quit if extracted from vector of different type.
9845 if (ExtractedFromVec.getValueType() != VT)
9848 if (!VecIn1.getNode())
9849 VecIn1 = ExtractedFromVec;
9850 else if (VecIn1 != ExtractedFromVec) {
9851 if (!VecIn2.getNode())
9852 VecIn2 = ExtractedFromVec;
9853 else if (VecIn2 != ExtractedFromVec)
9854 // Quit if there are more than 2 vectors to shuffle.
9858 if (ExtractedFromVec == VecIn1)
9860 else if (ExtractedFromVec == VecIn2)
9861 Mask[i] = Idx + NumElems;
9864 if (!VecIn1.getNode())
9867 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
9868 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
9870 for (unsigned Idx : InsertIndices)
9871 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
9872 DAG.getIntPtrConstant(Idx, DL));
9877 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
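// Sketch: an all-constant v16i1 build_vector packs its bits into an i16
// immediate, e.g. <1,0,1,1,0,...> -> i16 0b...1101 (element 0 in the LSB),
// which is then bitcast into the mask-register domain.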
9878 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
9879 const X86Subtarget &Subtarget) {
9881 MVT VT = Op.getSimpleValueType();
9882 assert((VT.getVectorElementType() == MVT::i1) &&
9883 "Unexpected type in LowerBUILD_VECTORvXi1!");
9886 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
9887 ISD::isBuildVectorAllOnes(Op.getNode()))
9890 uint64_t Immediate = 0;
9891 SmallVector<unsigned, 16> NonConstIdx;
9892 bool IsSplat = true;
9893 bool HasConstElts = false;
9895 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
9896 SDValue In = Op.getOperand(idx);
9899 if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
9900 Immediate |= (InC->getZExtValue() & 0x1) << idx;
9901 HasConstElts = true;
9903 NonConstIdx.push_back(idx);
9907 else if (In != Op.getOperand(SplatIdx))
9911 // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
9913 // The build_vector allows the scalar element to be larger than the vector
9914 // element type. We need to mask it to use as a condition unless we know
9915 // the upper bits are zero.
9916 // FIXME: Use computeKnownBits instead of checking specific opcode?
9917 SDValue Cond = Op.getOperand(SplatIdx);
9918 assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
9919 if (Cond.getOpcode() != ISD::SETCC)
9920 Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
9921 DAG.getConstant(1, dl, MVT::i8));
9923 // Perform the select in the scalar domain so we can use cmov.
9924 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
9925 SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
9926 DAG.getAllOnesConstant(dl, MVT::i32),
9927 DAG.getConstant(0, dl, MVT::i32));
9928 Select = DAG.getBitcast(MVT::v32i1, Select);
9929 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
9931 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
9932 SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
9933 DAG.getAllOnesConstant(dl, ImmVT),
9934 DAG.getConstant(0, dl, ImmVT));
9935 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
9936 Select = DAG.getBitcast(VecVT, Select);
9937 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
9938 DAG.getIntPtrConstant(0, dl));
9942 // insert elements one by one
9945 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
9946 SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
9947 SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
9948 ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
9949 ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
9950 DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
9952 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
9953 SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
9954 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
9955 DstVec = DAG.getBitcast(VecVT, Imm);
9956 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
9957 DAG.getIntPtrConstant(0, dl));
9960 DstVec = DAG.getUNDEF(VT);
9962 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
9963 unsigned InsertIdx = NonConstIdx[i];
9964 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
9965 Op.getOperand(InsertIdx),
9966 DAG.getIntPtrConstant(InsertIdx, dl));
9971 LLVM_ATTRIBUTE_UNUSED static bool isHorizOp(unsigned Opcode) {
9973 case X86ISD::PACKSS:
9974 case X86ISD::PACKUS:
9984 /// This is a helper function of LowerToHorizontalOp().
9985 /// This function checks that the input build_vector \p N implements a
9986 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
9987 /// may not match the layout of an x86 256-bit horizontal instruction.
9988 /// In other words, if this returns true, then some extraction/insertion will
9989 /// be required to produce a valid horizontal instruction.
9991 /// Parameter \p Opcode defines the kind of horizontal operation to match.
9992 /// For example, if \p Opcode is equal to ISD::ADD, then this function
9993 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
9994 /// is equal to ISD::SUB, then this function checks if this is a horizontal
9995 /// arithmetic sub.
9997 /// This function only analyzes elements of \p N whose indices are
9998 /// in range [BaseIdx, LastIdx).
10000 /// TODO: This function was originally used to match both real and fake partial
10001 /// horizontal operations, but the index-matching logic is incorrect for that.
10002 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
10003 /// code because it is only used for partial h-op matching now?
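/// Illustrative match (a sketch): with \p Opcode == ISD::ADD, BaseIdx == 0
/// and LastIdx == 4, elements of the form
///   (add (extract_vector_elt A, 0), (extract_vector_elt A, 1))
///   (add (extract_vector_elt A, 2), (extract_vector_elt A, 3)) ...
/// are accepted, with the source vector(s) returned in V0/V1.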
10004 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
10006 unsigned BaseIdx, unsigned LastIdx,
10007 SDValue &V0, SDValue &V1) {
10008 EVT VT = N->getValueType(0);
10009 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
10010 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
10011 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
10012 "Invalid Vector in input!");
10014 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
10015 bool CanFold = true;
10016 unsigned ExpectedVExtractIdx = BaseIdx;
10017 unsigned NumElts = LastIdx - BaseIdx;
10018 V0 = DAG.getUNDEF(VT);
10019 V1 = DAG.getUNDEF(VT);
10021 // Check if N implements a horizontal binop.
10022 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
10023 SDValue Op = N->getOperand(i + BaseIdx);
10026 if (Op->isUndef()) {
10027 // Update the expected vector extract index.
10028 if (i * 2 == NumElts)
10029 ExpectedVExtractIdx = BaseIdx;
10030 ExpectedVExtractIdx += 2;
10034 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
10039 SDValue Op0 = Op.getOperand(0);
10040 SDValue Op1 = Op.getOperand(1);
10042 // Try to match the following pattern:
10043 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
10044 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
10045 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
10046 Op0.getOperand(0) == Op1.getOperand(0) &&
10047 isa<ConstantSDNode>(Op0.getOperand(1)) &&
10048 isa<ConstantSDNode>(Op1.getOperand(1)));
10052 unsigned I0 = Op0.getConstantOperandVal(1);
10053 unsigned I1 = Op1.getConstantOperandVal(1);
10055 if (i * 2 < NumElts) {
10056 if (V0.isUndef()) {
10057 V0 = Op0.getOperand(0);
10058 if (V0.getValueType() != VT)
10062 if (V1.isUndef()) {
10063 V1 = Op0.getOperand(0);
10064 if (V1.getValueType() != VT)
10067 if (i * 2 == NumElts)
10068 ExpectedVExtractIdx = BaseIdx;
10071 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
10072 if (I0 == ExpectedVExtractIdx)
10073 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
10074 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
10075 // Try to match the following dag sequence:
10076 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
10077 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
10081 ExpectedVExtractIdx += 2;
10087 /// Emit a sequence of two 128-bit horizontal add/sub followed by
10088 /// a concat_vector.
10090 /// This is a helper function of LowerToHorizontalOp().
10091 /// This function expects two 256-bit vectors called V0 and V1.
10092 /// At first, each vector is split into two separate 128-bit vectors.
10093 /// Then, the resulting 128-bit vectors are used to implement two
10094 /// horizontal binary operations.
10096 /// The kind of horizontal binary operation is defined by \p X86Opcode.
10098 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
10099 /// the two new horizontal binops.
10100 /// When Mode is set, the first horizontal binop dag node would take as input
10101 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
10102 /// horizontal binop dag node would take as input the lower 128-bit of V1
10103 /// and the upper 128-bit of V1.
10105 /// HADD V0_LO, V0_HI
10106 /// HADD V1_LO, V1_HI
10108 /// Otherwise, the first horizontal binop dag node takes as input the lower
10109 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
10110 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
10112 /// HADD V0_LO, V1_LO
10113 /// HADD V0_HI, V1_HI
10115 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
10116 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
10117 /// the upper 128-bits of the result.
10118 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
10119 const SDLoc &DL, SelectionDAG &DAG,
10120 unsigned X86Opcode, bool Mode,
10121 bool isUndefLO, bool isUndefHI) {
10122 MVT VT = V0.getSimpleValueType();
10123 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
10124 "Invalid nodes in input!");
10126 unsigned NumElts = VT.getVectorNumElements();
10127 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
10128 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
10129 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
10130 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
10131 MVT NewVT = V0_LO.getSimpleValueType();
10133 SDValue LO = DAG.getUNDEF(NewVT);
10134 SDValue HI = DAG.getUNDEF(NewVT);
10137 // Don't emit a horizontal binop if the result is expected to be UNDEF.
10138 if (!isUndefLO && !V0->isUndef())
10139 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
10140 if (!isUndefHI && !V1->isUndef())
10141 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
10143 // Don't emit a horizontal binop if the result is expected to be UNDEF.
10144 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
10145 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
10147 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
10148 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
10151 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
10154 /// Returns true iff \p BV builds a vector with the result equivalent to
10155 /// the result of an ADDSUB/SUBADD operation.
10156 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
10157 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
10158 /// \p Opnd0 and \p Opnd1.
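/// Illustrative v4f32 ADDSUB match (a sketch), with a/b the extracted elements:
///   <(fsub a0, b0), (fadd a1, b1), (fsub a2, b2), (fadd a3, b3)>
/// i.e. subtract in even lanes and add in odd lanes; \p IsSubAdd is then false.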
10159 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
10160 const X86Subtarget &Subtarget, SelectionDAG &DAG,
10161 SDValue &Opnd0, SDValue &Opnd1,
10162 unsigned &NumExtracts,
10165 MVT VT = BV->getSimpleValueType(0);
10166 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
10169 unsigned NumElts = VT.getVectorNumElements();
10170 SDValue InVec0 = DAG.getUNDEF(VT);
10171 SDValue InVec1 = DAG.getUNDEF(VT);
10175 // Odd-numbered elements in the input build vector are obtained from
10176 // adding/subtracting two integer/float elements.
10177 // Even-numbered elements in the input build vector are obtained from
10178 // subtracting/adding two integer/float elements.
10179 unsigned Opc[2] = {0, 0};
10180 for (unsigned i = 0, e = NumElts; i != e; ++i) {
10181 SDValue Op = BV->getOperand(i);
10183 // Skip 'undef' values.
10184 unsigned Opcode = Op.getOpcode();
10185 if (Opcode == ISD::UNDEF)
10188 // Early exit if we found an unexpected opcode.
10189 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
10192 SDValue Op0 = Op.getOperand(0);
10193 SDValue Op1 = Op.getOperand(1);
10195 // Try to match the following pattern:
10196 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
10197 // Early exit if we cannot match that sequence.
10198 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10199 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10200 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
10201 Op0.getOperand(1) != Op1.getOperand(1))
10204 unsigned I0 = Op0.getConstantOperandVal(1);
10208 // We found a valid add/sub node; make sure it's the same opcode as previous
10209 // elements for this parity.
10210 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
10212 Opc[i % 2] = Opcode;
10214 // Update InVec0 and InVec1.
10215 if (InVec0.isUndef()) {
10216 InVec0 = Op0.getOperand(0);
10217 if (InVec0.getSimpleValueType() != VT)
10220 if (InVec1.isUndef()) {
10221 InVec1 = Op1.getOperand(0);
10222 if (InVec1.getSimpleValueType() != VT)
10226 // Make sure that the operands of each add/sub node always
10227 // come from the same pair of vectors.
10228 if (InVec0 != Op0.getOperand(0)) {
10229 if (Opcode == ISD::FSUB)
10232 // FADD is commutable. Try to commute the operands
10233 // and then test again.
10234 std::swap(Op0, Op1);
10235 if (InVec0 != Op0.getOperand(0))
10239 if (InVec1 != Op1.getOperand(0))
10242 // Increment the number of extractions done.
10246 // Ensure we have found an opcode for both parities and that they are
10247 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
10248 // inputs are undef.
10249 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
10250 InVec0.isUndef() || InVec1.isUndef())
10253 IsSubAdd = Opc[0] == ISD::FADD;
10260 /// Returns true if it is possible to fold MUL and an idiom that has already been
10261 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
10262 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
10263 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
10265 /// Prior to calling this function it should be known that there is some
10266 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
10267 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
10268 /// before replacement of such SDNode with ADDSUB operation. Thus the number
10269 /// of \p Opnd0 uses is expected to be equal to 2.
10270 /// For example, this function may be called for the following IR:
10271 /// %AB = fmul fast <2 x double> %A, %B
10272 /// %Sub = fsub fast <2 x double> %AB, %C
10273 /// %Add = fadd fast <2 x double> %AB, %C
10274 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
10275 /// <2 x i32> <i32 0, i32 3>
10276 /// There is a def for %Addsub here, which potentially can be replaced by
10277 /// X86ISD::ADDSUB operation:
10278 /// %Addsub = X86ISD::ADDSUB %AB, %C
10279 /// and such ADDSUB can further be replaced with FMADDSUB:
10280 /// %Addsub = FMADDSUB %A, %B, %C.
10282 /// The main reason why this method is called before the replacement of the
10283 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
10284 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
10285 /// FMADDSUB is.
10286 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
10288 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
10289 unsigned ExpectedUses) {
10290 if (Opnd0.getOpcode() != ISD::FMUL ||
10291 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
10294 // FIXME: These checks must match the similar ones in
10295 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
10296 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
10297 // or MUL + ADDSUB to FMADDSUB.
10298 const TargetOptions &Options = DAG.getTarget().Options;
10300 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
10305 Opnd1 = Opnd0.getOperand(1);
10306 Opnd0 = Opnd0.getOperand(0);
10311 /// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' or
10312 /// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
10313 /// X86ISD::FMSUBADD node accordingly.
10314 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
10315 const X86Subtarget &Subtarget,
10316 SelectionDAG &DAG) {
10317 SDValue Opnd0, Opnd1;
10318 unsigned NumExtracts;
10320 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
10324 MVT VT = BV->getSimpleValueType(0);
10327 // Try to generate X86ISD::FMADDSUB node here.
10329 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
10330 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
10331 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
10334 // We only support ADDSUB.
10338 // There are no known X86 targets with 512-bit ADDSUB instructions!
10339 // Convert to blend(fsub,fadd).
10340 if (VT.is512BitVector()) {
10341 SmallVector<int> Mask;
10342 for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
10344 Mask.push_back(I + E + 1);
10346 SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
10347 SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
10348 return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
10351 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
10354 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
10355 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
10356 // Initialize outputs to known values.
10357 MVT VT = BV->getSimpleValueType(0);
10358 HOpcode = ISD::DELETED_NODE;
10359 V0 = DAG.getUNDEF(VT);
10360 V1 = DAG.getUNDEF(VT);
10362 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
10363 // half of the result is calculated independently from the 128-bit halves of
10364 // the inputs, so that makes the index-checking logic below more complicated.
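// Sketch of the lane layout for (v8f32 HADD A, B):
//   <a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7>
// i.e. each 64-bit quarter of a 128-bit half draws from a single source.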
10365 unsigned NumElts = VT.getVectorNumElements();
10366 unsigned GenericOpcode = ISD::DELETED_NODE;
10367 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
10368 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
10369 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
10370 for (unsigned i = 0; i != Num128BitChunks; ++i) {
10371 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
10372 // Ignore undef elements.
10373 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
10377 // If there's an opcode mismatch, we're done.
10378 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
10381 // Initialize horizontal opcode.
10382 if (HOpcode == ISD::DELETED_NODE) {
10383 GenericOpcode = Op.getOpcode();
10384 switch (GenericOpcode) {
10385 case ISD::ADD: HOpcode = X86ISD::HADD; break;
10386 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
10387 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
10388 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
10389 default: return false;
10393 SDValue Op0 = Op.getOperand(0);
10394 SDValue Op1 = Op.getOperand(1);
10395 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10396 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10397 Op0.getOperand(0) != Op1.getOperand(0) ||
10398 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
10399 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
10402 // The source vector is chosen based on which 64-bit half of the
10403 // destination vector is being calculated.
10404 if (j < NumEltsIn64Bits) {
10406 V0 = Op0.getOperand(0);
10409 V1 = Op0.getOperand(0);
10412 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
10413 if (SourceVec != Op0.getOperand(0))
10416 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
10417 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
10418 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
10419 unsigned ExpectedIndex = i * NumEltsIn128Bits +
10420 (j % NumEltsIn64Bits) * 2;
10421 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
10424 // If this is not a commutative op, this does not match.
10425 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
10428 // Addition is commutative, so try swapping the extract indexes.
10429 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
10430 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
10433 // Extract indexes do not match the horizontal requirement.
10437 // We matched. Opcode and operands are returned by reference as arguments.
10441 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
10442 SelectionDAG &DAG, unsigned HOpcode,
10443 SDValue V0, SDValue V1) {
10444 // If either input vector is not the same size as the build vector,
10445 // extract/insert the low bits to the correct size.
10446 // This is free (examples: zmm --> xmm, xmm --> ymm).
10447 MVT VT = BV->getSimpleValueType(0);
10448 unsigned Width = VT.getSizeInBits();
10449 if (V0.getValueSizeInBits() > Width)
10450 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
10451 else if (V0.getValueSizeInBits() < Width)
10452 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
10454 if (V1.getValueSizeInBits() > Width)
10455 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
10456 else if (V1.getValueSizeInBits() < Width)
10457 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
10459 unsigned NumElts = VT.getVectorNumElements();
10460 APInt DemandedElts = APInt::getAllOnes(NumElts);
10461 for (unsigned i = 0; i != NumElts; ++i)
10462 if (BV->getOperand(i).isUndef())
10463 DemandedElts.clearBit(i);
10465 // If we don't need the upper xmm, then perform as an xmm hop.
10466 unsigned HalfNumElts = NumElts / 2;
10467 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
10468 MVT HalfVT = VT.getHalfNumVectorElementsVT();
10469 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
10470 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
10471 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
10472 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
10475 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
10478 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
10479 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
10480 const X86Subtarget &Subtarget,
10481 SelectionDAG &DAG) {
10482 // We need at least 2 non-undef elements to make this worthwhile by default.
10483 unsigned NumNonUndefs =
10484 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
10485 if (NumNonUndefs < 2)
10488 // There are 4 sets of horizontal math operations distinguished by type:
10489 // int/FP at 128-bit/256-bit. Each type was introduced with a different
10490 // subtarget feature. Try to match those "native" patterns first.
10491 MVT VT = BV->getSimpleValueType(0);
10492 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
10493 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
10494 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
10495 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
10498 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
10499 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
10502 // Try harder to match 256-bit ops by using extract/concat.
10503 if (!Subtarget.hasAVX() || !VT.is256BitVector())
10506 // Count the number of UNDEF operands in the input build_vector.
10507 unsigned NumElts = VT.getVectorNumElements();
10508 unsigned Half = NumElts / 2;
10509 unsigned NumUndefsLO = 0;
10510 unsigned NumUndefsHI = 0;
10511 for (unsigned i = 0, e = Half; i != e; ++i)
10512 if (BV->getOperand(i)->isUndef())
10515 for (unsigned i = Half, e = NumElts; i != e; ++i)
10516 if (BV->getOperand(i)->isUndef())
10520 SDValue InVec0, InVec1;
10521 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
10522 SDValue InVec2, InVec3;
10523 unsigned X86Opcode;
10524 bool CanFold = true;
10526 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
10527 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
10529 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
10530 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
10531 X86Opcode = X86ISD::HADD;
10532 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
10534 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
10536 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
10537 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
10538 X86Opcode = X86ISD::HSUB;
10543 // Do not try to expand this build_vector into a pair of horizontal
10544 // add/sub if we can emit a pair of scalar add/sub.
10545 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
10548 // Convert this build_vector into a pair of horizontal binops followed by
10549 // a concat vector. We must adjust the outputs from the partial horizontal
10550 // matching calls above to account for undefined vector halves.
10551 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
10552 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
10553 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
10554 bool isUndefLO = NumUndefsLO == Half;
10555 bool isUndefHI = NumUndefsHI == Half;
10556 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
10561 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
10562 VT == MVT::v16i16) {
10563 unsigned X86Opcode;
10564 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
10565 X86Opcode = X86ISD::HADD;
10566 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
10568 X86Opcode = X86ISD::HSUB;
10569 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
10571 X86Opcode = X86ISD::FHADD;
10572 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
10574 X86Opcode = X86ISD::FHSUB;
10578 // Don't try to expand this build_vector into a pair of horizontal add/sub
10579 // if we can simply emit a pair of scalar add/sub.
10580 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
10583 // Convert this build_vector into two horizontal add/sub followed by
10584 // a concat vector.
10585 bool isUndefLO = NumUndefsLO == Half;
10586 bool isUndefHI = NumUndefsHI == Half;
10587 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
10588 isUndefLO, isUndefHI);
10594 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
10595 SelectionDAG &DAG);
10597 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
10598 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
10599 /// just apply the bit to the vectors.
10600 /// NOTE: It's not in our interest to start making a general purpose vectorizer
10601 /// from this, but enough scalar bit operations are created from the later
10602 /// legalization + scalarization stages to need basic support.
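/// Illustrative rewrite (a sketch):
///   (build_vector (and a, 1), (and b, 3), (and c, 7), (and d, 15))
///     -> (and (build_vector a, b, c, d), (build_vector 1, 3, 7, 15))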
10603 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
10604 const X86Subtarget &Subtarget,
10605 SelectionDAG &DAG) {
10607 MVT VT = Op->getSimpleValueType(0);
10608 unsigned NumElems = VT.getVectorNumElements();
10609 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10611 // Check that all elements have the same opcode.
10612 // TODO: Should we allow UNDEFS and if so how many?
10613 unsigned Opcode = Op->getOperand(0).getOpcode();
10614 for (unsigned i = 1; i < NumElems; ++i)
10615 if (Opcode != Op->getOperand(i).getOpcode())
10618 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
10619 bool IsShift = false;
10631 // Don't do this if the buildvector is a splat - we'd replace one
10632 // constant with an entire vector.
10633 if (Op->getSplatValue())
10635 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
10640 SmallVector<SDValue, 4> LHSElts, RHSElts;
10641 for (SDValue Elt : Op->ops()) {
10642 SDValue LHS = Elt.getOperand(0);
10643 SDValue RHS = Elt.getOperand(1);
10645 // We expect the canonicalized RHS operand to be the constant.
10646 if (!isa<ConstantSDNode>(RHS))
10649 // Extend shift amounts.
10650 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
10653 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
10656 LHSElts.push_back(LHS);
10657 RHSElts.push_back(RHS);
10660 // Limit to shifts by uniform immediates.
10661 // TODO: Only accept vXi8/vXi64 special cases?
10662 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
10663 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
10666 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
10667 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
10668 SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
10673 // Immediately lower the shift to ensure the constant build vector doesn't
10674 // get converted to a constant pool before the shift is lowered.
10675 return LowerShift(Res, Subtarget, DAG);
10678 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
10679 /// functionality to do this, so it's all zeros, all ones, or some derivation
10680 /// that is cheap to calculate.
10681 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
10682 const X86Subtarget &Subtarget) {
10684 MVT VT = Op.getSimpleValueType();
10686 // Vectors containing all zeros can be matched by pxor and xorps.
10687 if (ISD::isBuildVectorAllZeros(Op.getNode()))
10690 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
10691 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
10692 // vpcmpeqd on 256-bit vectors.
10693 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
10694 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
10697 return getOnesVector(VT, DAG, DL);
10703 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
10704 /// from a vector of source values and a vector of extraction indices.
10705 /// The vectors might be manipulated to match the type of the permute op.
10706 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
10707 SDLoc &DL, SelectionDAG &DAG,
10708 const X86Subtarget &Subtarget) {
10709 MVT ShuffleVT = VT;
10710 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
10711 unsigned NumElts = VT.getVectorNumElements();
10712 unsigned SizeInBits = VT.getSizeInBits();
10714 // Adjust IndicesVec to match VT size.
10715 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
10716 "Illegal variable permute mask size");
10717 if (IndicesVec.getValueType().getVectorNumElements() > NumElts) {
10718 // Narrow/widen the indices vector to the correct size.
10719 if (IndicesVec.getValueSizeInBits() > SizeInBits)
10720 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
10721 NumElts * VT.getScalarSizeInBits());
10722 else if (IndicesVec.getValueSizeInBits() < SizeInBits)
10723 IndicesVec = widenSubVector(IndicesVec, false, Subtarget, DAG,
10724 SDLoc(IndicesVec), SizeInBits);
10725 // Zero-extend the index elements within the vector.
10726 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
10727 IndicesVec = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(IndicesVec),
10728 IndicesVT, IndicesVec);
10730 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
10732 // Handle a SrcVec whose type doesn't match VT.
10733 if (SrcVec.getValueSizeInBits() != SizeInBits) {
10734 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
10735 // Handle larger SrcVec by treating it as a larger permute.
10736 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
10737 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
10738 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
10739 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
10740 Subtarget, DAG, SDLoc(IndicesVec));
10741 SDValue NewSrcVec =
10742 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
10744 return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
10746 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
10747 // Widen smaller SrcVec to match VT.
10748 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
10753 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
10754 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
10755 EVT SrcVT = Idx.getValueType();
10756 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
10757 uint64_t IndexScale = 0;
10758 uint64_t IndexOffset = 0;
10760 // If we're scaling a smaller permute op, then we need to repeat the
10761 // indices, scaling and offsetting them as well.
10762 // e.g. v4i32 -> v16i8 (Scale = 4)
10763 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
10764 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
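// Concretely, for the v4i32 -> v16i8 case above (NumDstBits == 8), the
// splats work out to IndexScale == 0x04040404 and IndexOffset == 0x03020100
// per 32-bit element.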
10765 for (uint64_t i = 0; i != Scale; ++i) {
10766 IndexScale |= Scale << (i * NumDstBits);
10767 IndexOffset |= i << (i * NumDstBits);
10770 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
10771 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
10772 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
10773 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
10777 unsigned Opcode = 0;
10778 switch (VT.SimpleTy) {
10782 if (Subtarget.hasSSSE3())
10783 Opcode = X86ISD::PSHUFB;
10786 if (Subtarget.hasVLX() && Subtarget.hasBWI())
10787 Opcode = X86ISD::VPERMV;
10788 else if (Subtarget.hasSSSE3()) {
10789 Opcode = X86ISD::PSHUFB;
10790 ShuffleVT = MVT::v16i8;
10795 if (Subtarget.hasAVX()) {
10796 Opcode = X86ISD::VPERMILPV;
10797 ShuffleVT = MVT::v4f32;
10798 } else if (Subtarget.hasSSSE3()) {
10799 Opcode = X86ISD::PSHUFB;
10800 ShuffleVT = MVT::v16i8;
10805 if (Subtarget.hasAVX()) {
10806 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
10807 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
10808 Opcode = X86ISD::VPERMILPV;
10809 ShuffleVT = MVT::v2f64;
10810 } else if (Subtarget.hasSSE41()) {
10811 // SSE41 can compare v2i64 - select between indices 0 and 1.
10812 return DAG.getSelectCC(DL, IndicesVec,
10814 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
10815 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
10816 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
10817 ISD::CondCode::SETEQ);
10821 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
10822 Opcode = X86ISD::VPERMV;
10823 else if (Subtarget.hasXOP()) {
10824 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
10825 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
10826 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
10827 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
10828 return DAG.getNode(
10829 ISD::CONCAT_VECTORS, DL, VT,
10830 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
10831 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
10832 } else if (Subtarget.hasAVX()) {
10833 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
10834 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
10835 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
10836 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
10837 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
10838 ArrayRef<SDValue> Ops) {
10839 // Permute Lo and Hi and then select based on index range.
10840 // This works because PSHUFB uses bits[3:0] to permute elements and we
10841 // don't care about bit[7], as it's just an index vector.
10842 SDValue Idx = Ops[2];
10843 EVT VT = Idx.getValueType();
10844 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
10845 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
10846 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
10847 ISD::CondCode::SETGT);
10849 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
10850 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops, PSHUFBBuilder);
10855 if (Subtarget.hasVLX() && Subtarget.hasBWI())
10856 Opcode = X86ISD::VPERMV;
10857 else if (Subtarget.hasAVX()) {
10858 // Scale to v32i8 and perform as v32i8.
10859 IndicesVec = ScaleIndices(IndicesVec, 2);
10860 return DAG.getBitcast(
10861 VT, createVariablePermute(
10862 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
10863 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
10868 if (Subtarget.hasAVX2())
10869 Opcode = X86ISD::VPERMV;
10870 else if (Subtarget.hasAVX()) {
10871 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
10872 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
10873 {0, 1, 2, 3, 0, 1, 2, 3});
10874 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
10875 {4, 5, 6, 7, 4, 5, 6, 7});
10876 if (Subtarget.hasXOP())
10877 return DAG.getBitcast(
10878 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
10879 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
10880 // Permute Lo and Hi and then select based on index range.
10881 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
10882 SDValue Res = DAG.getSelectCC(
10883 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
10884 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
10885 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
10886 ISD::CondCode::SETGT);
10887 return DAG.getBitcast(VT, Res);
10892 if (Subtarget.hasAVX512()) {
10893 if (!Subtarget.hasVLX()) {
10894 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
10895 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
10897 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
10898 DAG, SDLoc(IndicesVec));
10899 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL, DAG, Subtarget);
10901 return extract256BitVector(Res, 0, DAG, DL);
10903 Opcode = X86ISD::VPERMV;
10904 } else if (Subtarget.hasAVX()) {
10905 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
10907 SDValue LoLo = DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
10909 SDValue HiHi = DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
10910 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
10911 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
10912 if (Subtarget.hasXOP())
10913 return DAG.getBitcast(
10914 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
10915 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
10916 // Permute Lo and Hi and then select based on index range.
10917 // This works as VPERMILPD only uses index bit[1] to permute elements.
10918 SDValue Res = DAG.getSelectCC(
10919 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
10920 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
10921 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
10922 ISD::CondCode::SETGT);
10923 return DAG.getBitcast(VT, Res);
10927 if (Subtarget.hasVBMI())
10928 Opcode = X86ISD::VPERMV;
10931 if (Subtarget.hasBWI())
10932 Opcode = X86ISD::VPERMV;
10938 if (Subtarget.hasAVX512())
10939 Opcode = X86ISD::VPERMV;
10945 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
10946 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
10947 "Illegal variable permute shuffle type");
10949 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
10951 IndicesVec = ScaleIndices(IndicesVec, Scale);
10953 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
10954 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
10956 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
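// Note that X86ISD::VPERMV takes the index vector as its first operand,
// whereas the PSHUFB/VPERMILPV-style opcodes take the source vector first.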
10957 SDValue Res = Opcode == X86ISD::VPERMV
10958 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
10959 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
10960 return DAG.getBitcast(VT, Res);
10963 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
10964 // reasoned to be a permutation of a vector by indices in a non-constant vector.
10965 // (build_vector (extract_elt V, (extract_elt I, 0)),
10966 // (extract_elt V, (extract_elt I, 1)),
10971 // TODO: Handle undefs
10972 // TODO: Utilize pshufb and zero mask blending to support more efficient
10973 // construction of vectors with constant-0 elements.
10975 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
10976 const X86Subtarget &Subtarget) {
10977 SDValue SrcVec, IndicesVec;
10978 // Check for a match of the permute source vector and permute index elements.
10979 // This is done by checking that the i-th build_vector operand is of the form:
10980 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
10981 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
10982 SDValue Op = V.getOperand(Idx);
10983 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
10986 // If this is the first extract encountered in V, set the source vector,
10987 // otherwise verify the extract is from the previously defined source vector.
10990 SrcVec = Op.getOperand(0);
10991 else if (SrcVec != Op.getOperand(0))
10993 SDValue ExtractedIndex = Op->getOperand(1);
10994 // Peek through extends.
10995 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
10996 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
10997 ExtractedIndex = ExtractedIndex.getOperand(0);
10998 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11001 // If this is the first extract from the index vector candidate, set the
11002 // indices vector, otherwise verify the extract is from the previously
11003 // defined indices vector.
11005 IndicesVec = ExtractedIndex.getOperand(0);
11006 else if (IndicesVec != ExtractedIndex.getOperand(0))
11009 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
11010 if (!PermIdx || PermIdx->getAPIntValue() != Idx)
11015 MVT VT = V.getSimpleValueType();
11016 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
11020 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
11023 MVT VT = Op.getSimpleValueType();
11024 MVT EltVT = VT.getVectorElementType();
11025 unsigned NumElems = Op.getNumOperands();
11027 // Generate vectors for predicate vectors.
11028 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
11029 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
11031 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
11032 return VectorConstant;
11034 unsigned EVTBits = EltVT.getSizeInBits();
11035 APInt UndefMask = APInt::getZero(NumElems);
11036 APInt ZeroMask = APInt::getZero(NumElems);
11037 APInt NonZeroMask = APInt::getZero(NumElems);
11038 bool IsAllConstants = true;
11039 SmallSet<SDValue, 8> Values;
11040 unsigned NumConstants = NumElems;
11041 for (unsigned i = 0; i < NumElems; ++i) {
11042 SDValue Elt = Op.getOperand(i);
11043 if (Elt.isUndef()) {
11044 UndefMask.setBit(i);
11047 Values.insert(Elt);
11048 if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
11049 IsAllConstants = false;
11052 if (X86::isZeroNode(Elt)) {
11053 ZeroMask.setBit(i);
11055 NonZeroMask.setBit(i);
11059 // All undef vector. Return an UNDEF. All zero vectors were handled above.
11060 if (NonZeroMask == 0) {
11061 assert(UndefMask.isAllOnes() && "Fully undef mask expected");
11062 return DAG.getUNDEF(VT);
11065 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
11067 // If the upper elts of a ymm/zmm are undef/zero then we might be better off
11068 // lowering to a smaller build vector and padding with undef/zero.
11069 if ((VT.is256BitVector() || VT.is512BitVector()) &&
11070 !isFoldableUseOfShuffle(BV)) {
11071 unsigned UpperElems = NumElems / 2;
11072 APInt UndefOrZeroMask = UndefMask | ZeroMask;
11073 unsigned NumUpperUndefsOrZeros = UndefOrZeroMask.countLeadingOnes();
11074 if (NumUpperUndefsOrZeros >= UpperElems) {
11075 if (VT.is512BitVector() &&
11076 NumUpperUndefsOrZeros >= (NumElems - (NumElems / 4)))
11077 UpperElems = NumElems - (NumElems / 4);
11078 bool UndefUpper = UndefMask.countLeadingOnes() >= UpperElems;
11079 MVT LowerVT = MVT::getVectorVT(EltVT, NumElems - UpperElems);
11081 SDValue NewBV = DAG.getBuildVector(LowerVT, dl, Op->ops().drop_back(UpperElems));
11082 return widenSubVector(VT, NewBV, !UndefUpper, Subtarget, DAG, dl);
11086 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
11088 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
11089 return HorizontalOp;
11090 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
11092 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
11095 unsigned NumZero = ZeroMask.countPopulation();
11096 unsigned NumNonZero = NonZeroMask.countPopulation();
11098 // If we are inserting one variable into a vector of non-zero constants, try
11099 // to avoid loading each constant element as a scalar. Load the constants as a
11100 // vector and then insert the variable scalar element. If insertion is not
11101 // supported, fall back to a shuffle to get the scalar blended with the
11102 // constants. Insertion into a zero vector is handled as a special-case
11103 // somewhere below here.
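// For example, <4 x i32> <C0, C1, X, C3> becomes a constant-pool load of
// <C0, C1, undef, C3> followed by inserting the variable X at index 2.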
11104 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
11105 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
11106 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
11107 // Create an all-constant vector. The variable element in the old
11108 // build vector is replaced by undef in the constant vector. Save the
11109 // variable scalar element and its index for use in the insertelement.
11110 LLVMContext &Context = *DAG.getContext();
11111 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
11112 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
11115 for (unsigned i = 0; i != NumElems; ++i) {
11116 SDValue Elt = Op.getOperand(i);
11117 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
11118 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
11119 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
11120 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
11121 else if (!Elt.isUndef()) {
11122 assert(!VarElt.getNode() && !InsIndex.getNode() &&
11123 "Expected one variable element in this vector");
11125 InsIndex = DAG.getVectorIdxConstant(i, dl);
11128 Constant *CV = ConstantVector::get(ConstVecOps);
11129 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
11131 // The constants we just created may not be legal (eg, floating point). We
11132 // must lower the vector right here because we can not guarantee that we'll
11133 // legalize it before loading it. This is also why we could not just create
11134 // a new build vector here. If the build vector contains illegal constants,
11135 // it could get split back up into a series of insert elements.
11136 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
11137 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
11138 MachineFunction &MF = DAG.getMachineFunction();
11139 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
11140 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
11141 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
11142 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
11143 if (InsertC < NumEltsInLow128Bits)
11144 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
11146 // There's no good way to insert into the high elements of a >128-bit
11147 // vector, so use shuffles to avoid an extract/insert sequence.
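// For example, with v8i32 and InsertC == 5 the mask built below is
// <0, 1, 2, 3, 4, 8, 6, 7>: lane 5 reads element 0 of S2V (the variable
// element), which is index NumElts in the concatenated (Ld, S2V) pair.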
11148 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
11149 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
11150 SmallVector<int, 8> ShuffleMask;
11151 unsigned NumElts = VT.getVectorNumElements();
11152 for (unsigned i = 0; i != NumElts; ++i)
11153 ShuffleMask.push_back(i == InsertC ? NumElts : i);
11154 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
11155 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
11158 // Special case for a single non-zero, non-undef element.
11159 if (NumNonZero == 1) {
11160 unsigned Idx = NonZeroMask.countTrailingZeros();
11161 SDValue Item = Op.getOperand(Idx);
11163 // If we have a constant or non-constant insertion into the low element of
11164 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
11165 // the rest of the elements. This will be matched as movd/movq/movss/movsd
11166 // depending on what the source datatype is.
11169 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11171 if (EltVT == MVT::i32 || EltVT == MVT::f16 || EltVT == MVT::f32 ||
11172 EltVT == MVT::f64 || (EltVT == MVT::i64 && Subtarget.is64Bit()) ||
11173 (EltVT == MVT::i16 && Subtarget.hasFP16())) {
11174 assert((VT.is128BitVector() || VT.is256BitVector() ||
11175 VT.is512BitVector()) &&
11176 "Expected an SSE value type!");
11177 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11178 // Turn it into a MOVL (i.e. movsh, movss, movsd, movw or movd) to a zero vector.
11180 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
11183 // We can't directly insert an i8 or i16 into a vector, so zero extend
11184 // it to i32 first.
11185 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
11186 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
11187 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
11188 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
11189 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
11190 return DAG.getBitcast(VT, Item);
11194 // Is it a vector logical left shift?
11195 if (NumElems == 2 && Idx == 1 &&
11196 X86::isZeroNode(Op.getOperand(0)) &&
11197 !X86::isZeroNode(Op.getOperand(1))) {
11198 unsigned NumBits = VT.getSizeInBits();
11199 return getVShift(true, VT,
11200 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11201 VT, Op.getOperand(1)),
11202 NumBits/2, DAG, *this, dl);
11205 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
11208 // Otherwise, if this is a vector with i32 or f32 elements, and the element
11209 // is a non-constant being inserted into an element other than the low one,
11210 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
11211 // movd/movss) to move this into the low element, then shuffle it into place.
11213 if (EVTBits == 32) {
11214 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11215 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
11219 // Splat is obviously ok. Let legalizer expand it to a shuffle.
11220 if (Values.size() == 1) {
11221 if (EVTBits == 32) {
11222 // Instead of a shuffle like this:
11223 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
11224 // Check if it's possible to issue this instead.
11225 // shuffle (vload ptr), undef, <1, 1, 1, 1>
11226 unsigned Idx = NonZeroMask.countTrailingZeros();
11227 SDValue Item = Op.getOperand(Idx);
11228 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
11229 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
11234 // A vector full of immediates; various special cases are already
11235 // handled, so this is best done with a single constant-pool load.
11236 if (IsAllConstants)
11239 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
11242 // See if we can use a vector load to get all of the elements.
11244 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
11246 if (SDValue LD = EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
11250 // If this is a splat of pairs of 32-bit elements, we can use a narrower
11251 // build_vector and broadcast it.
11252 // TODO: We could probably generalize this more.
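// For example, <8 x i32> <a, b, a, b, a, b, a, b> becomes a VBROADCAST of
// the v4i32 build_vector <a, b, u, u> bitcast to v2i64.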
11253 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
11254 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
11255 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
11256 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
11257 // Make sure all the even/odd operands match.
11258 for (unsigned i = 2; i != NumElems; ++i)
11259 if (Ops[i % 2] != Op.getOperand(i))
11263 if (CanSplat(Op, NumElems, Ops)) {
11264 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
11265 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
11266 // Create a new build vector and cast to v2i64/v2f64.
11267 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
11268 DAG.getBuildVector(NarrowVT, dl, Ops));
11269 // Broadcast from v2i64/v2f64 and cast to final VT.
11270 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems / 2);
11271 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT, NewBV));
11276 // For AVX-length vectors, build the individual 128-bit pieces and use
11277 // shuffles to put them in place.
11278 if (VT.getSizeInBits() > 128) {
11279 MVT HVT = MVT::getVectorVT(EltVT, NumElems / 2);
11281 // Build both the lower and upper subvector.
11283 SDValue Lower = DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
11284 SDValue Upper = DAG.getBuildVector(
11285 HVT, dl, Op->ops().slice(NumElems / 2, NumElems / 2));
11287 // Recreate the wider vector with the lower and upper part.
11288 return concatSubVectors(Lower, Upper, DAG, dl);
11291 // Let legalizer expand 2-wide build_vectors.
11292 if (EVTBits == 64) {
11293 if (NumNonZero == 1) {
11294 // One half is zero or undef.
11295 unsigned Idx = NonZeroMask.countTrailingZeros();
11296 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
11297 Op.getOperand(Idx));
11298 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
11303 // If element VT is < 32 bits, convert it to inserts into a zero vector.
11304 if (EVTBits == 8 && NumElems == 16)
11305 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeroMask, NumNonZero, NumZero,
11309 if (EltVT == MVT::i16 && NumElems == 8)
11310 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeroMask, NumNonZero, NumZero,
11314 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
11315 if (EVTBits == 32 && NumElems == 4)
11316 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
11319 // If element VT is == 32 bits, turn it into a number of shuffles.
11320 if (NumElems == 4 && NumZero > 0) {
11321 SmallVector<SDValue, 8> Ops(NumElems);
11322 for (unsigned i = 0; i < 4; ++i) {
11323 bool isZero = !NonZeroMask[i];
11325 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
11327 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
11330 for (unsigned i = 0; i < 2; ++i) {
11331 switch (NonZeroMask.extractBitsAsZExtValue(2, i * 2)) {
11332 default: llvm_unreachable("Unexpected NonZero count");
11334 Ops[i] = Ops[i*2]; // Must be a zero vector.
11337 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
11340 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
11343 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
11348 bool Reverse1 = NonZeroMask.extractBitsAsZExtValue(2, 0) == 2;
11349 bool Reverse2 = NonZeroMask.extractBitsAsZExtValue(2, 2) == 2;
11353 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
11354 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
11356 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
11359 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
11361 // Check for a build vector from mostly shuffle plus few inserting.
11362 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
11365 // For SSE 4.1, use insertps to put the high elements into the low element.
11366 if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
11368 if (!Op.getOperand(0).isUndef())
11369 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
11371 Result = DAG.getUNDEF(VT);
11373 for (unsigned i = 1; i < NumElems; ++i) {
11374 if (Op.getOperand(i).isUndef()) continue;
11375 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
11376 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
11381 // Otherwise, expand into a number of unpckl*, start by extending each of
11382 // our (non-undef) elements to the full vector width with the element in the
11383 // bottom slot of the vector (which generates no code for SSE).
11384 SmallVector<SDValue, 8> Ops(NumElems);
11385 for (unsigned i = 0; i < NumElems; ++i) {
11386 if (!Op.getOperand(i).isUndef())
11387 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
11389 Ops[i] = DAG.getUNDEF(VT);
11392 // Next, we iteratively mix elements, e.g. for v4f32:
11393 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
11394 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
11395 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
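// With NumElems == 4 the masks generated below are <0, 4, u, u> for
// Scale == 1 (unpcklps) and <0, 1, 4, 5> for Scale == 2 (unpcklpd).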
11396 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
11397 // Generate scaled UNPCKL shuffle mask.
11398 SmallVector<int, 16> Mask;
11399 for (unsigned i = 0; i != Scale; ++i) Mask.push_back(i);
11401 for (unsigned i = 0; i != Scale; ++i)
11402 Mask.push_back(NumElems+i);
11403 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
11405 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
11406 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
11411 // 256-bit AVX can use the vinsertf128 instruction
11412 // to create 256-bit vectors from two other 128-bit ones.
11413 // TODO: Detect subvector broadcast here instead of DAG combine?
11414 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
11415 const X86Subtarget &Subtarget) {
11417 MVT ResVT = Op.getSimpleValueType();
11419 assert((ResVT.is256BitVector() ||
11420 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
11422 unsigned NumOperands = Op.getNumOperands();
11423 unsigned NumZero = 0;
11424 unsigned NumNonZero = 0;
11425 unsigned NonZeros = 0;
11426 for (unsigned i = 0; i != NumOperands; ++i) {
11427 SDValue SubVec = Op.getOperand(i);
11428 if (SubVec.isUndef())
11430 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
11433 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
11434 NonZeros |= 1 << i;
11439 // If we have more than 2 non-zeros, build each half separately.
11440 if (NumNonZero > 2) {
11441 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
11442 ArrayRef<SDUse> Ops = Op->ops();
11443 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11444 Ops.slice(0, NumOperands/2));
11445 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11446 Ops.slice(NumOperands/2));
11447 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
11450 // Otherwise, build it up through insert_subvectors.
11451 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
11452 : DAG.getUNDEF(ResVT);
11454 MVT SubVT = Op.getOperand(0).getSimpleValueType();
11455 unsigned NumSubElems = SubVT.getVectorNumElements();
11456 for (unsigned i = 0; i != NumOperands; ++i) {
11457 if ((NonZeros & (1 << i)) == 0)
11460 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
11462 DAG.getIntPtrConstant(i * NumSubElems, dl));
11468 // Handles a type promotion (by concatenating i1 zeros) of the result of a
11469 // node that already zeros all upper bits of a k-register.
11471 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
11472 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
11473 const X86Subtarget &Subtarget,
11474 SelectionDAG & DAG) {
11476 MVT ResVT = Op.getSimpleValueType();
11477 unsigned NumOperands = Op.getNumOperands();
11479 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
11480 "Unexpected number of operands in CONCAT_VECTORS");
11482 uint64_t Zeros = 0;
11483 uint64_t NonZeros = 0;
11484 for (unsigned i = 0; i != NumOperands; ++i) {
11485 SDValue SubVec = Op.getOperand(i);
11486 if (SubVec.isUndef())
11488 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
11489 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
11490 Zeros |= (uint64_t)1 << i;
11492 NonZeros |= (uint64_t)1 << i;
11495 unsigned NumElems = ResVT.getVectorNumElements();
11497 // If we are inserting a non-zero vector and there are zeros in the LSBs and
11498 // undefs in the MSBs, we need to emit a KSHIFTL. The generic lowering to
11499 // insert_subvector will give us two kshifts.
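// For instance, (concat v2i1:zero, X, undef, undef) takes this path: X is
// widened into ShiftVT and a single KSHIFTL by Idx * SubVecNumElts == 2
// produces the v8i1 result.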
11500 if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
11501 Log2_64(NonZeros) != NumOperands - 1) {
11502 MVT ShiftVT = ResVT;
11503 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
11504 ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
11505 unsigned Idx = Log2_64(NonZeros);
11506 SDValue SubVec = Op.getOperand(Idx);
11507 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
11508 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
11509 DAG.getUNDEF(ShiftVT), SubVec,
11510 DAG.getIntPtrConstant(0, dl));
11511 Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
11512 DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
11513 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
11514 DAG.getIntPtrConstant(0, dl));
11517 // If there are zero or one non-zeros we can handle this very simply.
11518 if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
11519 SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
11522 unsigned Idx = Log2_64(NonZeros);
11523 SDValue SubVec = Op.getOperand(Idx);
11524 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
11525 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
11526 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
11529 if (NumOperands > 2) {
11530 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
11531 ArrayRef<SDUse> Ops = Op->ops();
11532 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11533 Ops.slice(0, NumOperands/2));
11534 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11535 Ops.slice(NumOperands/2));
11536 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
11539 assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");
11541 if (ResVT.getVectorNumElements() >= 16)
11542 return Op; // The operation is legal with KUNPCK
11544 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
11545 DAG.getUNDEF(ResVT), Op.getOperand(0),
11546 DAG.getIntPtrConstant(0, dl));
11547 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
11548 DAG.getIntPtrConstant(NumElems/2, dl));
11551 static SDValue LowerCONCAT_VECTORS(SDValue Op,
11552 const X86Subtarget &Subtarget,
11553 SelectionDAG &DAG) {
11554 MVT VT = Op.getSimpleValueType();
11555 if (VT.getVectorElementType() == MVT::i1)
11556 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
11558 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
11559 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
11560 Op.getNumOperands() == 4)));
11562 // AVX can use the vinsertf128 instruction to create 256-bit vectors
11563 // from two other 128-bit ones.
11565 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
11566 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
11569 //===----------------------------------------------------------------------===//
11570 // Vector shuffle lowering
11572 // This is an experimental code path for lowering vector shuffles on x86. It is
11573 // designed to handle arbitrary vector shuffles and blends, gracefully
11574 // degrading performance as necessary. It works hard to recognize idiomatic
11575 // shuffles and lower them to optimal instruction patterns without leaving
11576 // a framework that allows reasonably efficient handling of all vector shuffle operations.
11578 //===----------------------------------------------------------------------===//
11580 /// Tiny helper function to identify a no-op mask.
11582 /// This is a somewhat boring predicate function. It checks whether the mask
11583 /// array input, which is assumed to be a single-input shuffle mask of the kind
11584 /// used by the X86 shuffle instructions (not a fully general
11585 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
11586 /// in-place shuffle are 'no-op's.
11587 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
11588 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11589 assert(Mask[i] >= -1 && "Out of bound mask element!");
11590 if (Mask[i] >= 0 && Mask[i] != i)
11596 /// Test whether there are elements crossing LaneSizeInBits lanes in this shuffle mask.
11599 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
11600 /// and we routinely test for these.
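// For example, the v4f64 mask <0, 2, 1, 3> crosses 128-bit lanes (element 1
// reads from the upper lane), while <1, 0, 3, 2> stays in-lane.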
11601 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
11602 unsigned ScalarSizeInBits,
11603 ArrayRef<int> Mask) {
11604 assert(LaneSizeInBits && ScalarSizeInBits &&
11605 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
11606 "Illegal shuffle lane size");
11607 int LaneSize = LaneSizeInBits / ScalarSizeInBits;
11608 int Size = Mask.size();
11609 for (int i = 0; i < Size; ++i)
11610 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
11615 /// Test whether there are elements crossing 128-bit lanes in this shuffle mask.
11617 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
11618 return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
11621 /// Test whether elements in each LaneSizeInBits lane in this shuffle mask come
11622 /// from multiple lanes - this is different from isLaneCrossingShuffleMask to
11623 /// better support 'repeated mask + lane permute' style shuffles.
11624 static bool isMultiLaneShuffleMask(unsigned LaneSizeInBits,
11625 unsigned ScalarSizeInBits,
11626 ArrayRef<int> Mask) {
11627 assert(LaneSizeInBits && ScalarSizeInBits &&
11628 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
11629 "Illegal shuffle lane size");
11630 int NumElts = Mask.size();
11631 int NumEltsPerLane = LaneSizeInBits / ScalarSizeInBits;
11632 int NumLanes = NumElts / NumEltsPerLane;
11633 if (NumLanes > 1) {
11634 for (int i = 0; i != NumLanes; ++i) {
11636 for (int j = 0; j != NumEltsPerLane; ++j) {
11637 int M = Mask[(i * NumEltsPerLane) + j];
11640 int Lane = (M % NumElts) / NumEltsPerLane;
11641 if (SrcLane >= 0 && SrcLane != Lane)
11650 /// Test whether a shuffle mask is equivalent within each sub-lane.
11652 /// This checks a shuffle mask to see if it is performing the same
11653 /// lane-relative shuffle in each sub-lane. This trivially implies
11654 /// that it is also not lane-crossing. It may however involve a blend from the
11655 /// same lane of a second vector.
11657 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
11658 /// non-trivial to compute in the face of undef lanes. The representation is
11659 /// suitable for use with existing 128-bit shuffles as entries from the second
11660 /// vector have been remapped to [LaneSize, 2*LaneSize).
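// For example, the two-input v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats
// per 128-bit lane and yields RepeatedMask == {0, 5, 2, 7} after the second
// vector's indices are remapped.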
11661 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
11662 ArrayRef<int> Mask,
11663 SmallVectorImpl<int> &RepeatedMask) {
11664 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
11665 RepeatedMask.assign(LaneSize, -1);
11666 int Size = Mask.size();
11667 for (int i = 0; i < Size; ++i) {
11668 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
11671 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
11672 // This entry crosses lanes, so there is no way to model this shuffle.
11675 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
11676 // Adjust second vector indices to start at LaneSize instead of Size.
11677 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
11678 : Mask[i] % LaneSize + LaneSize;
11679 if (RepeatedMask[i % LaneSize] < 0)
11680 // This is the first non-undef entry in this slot of a 128-bit lane.
11681 RepeatedMask[i % LaneSize] = LocalM;
11682 else if (RepeatedMask[i % LaneSize] != LocalM)
11683 // Found a mismatch with the repeated mask.
11689 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
11691 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
11692 SmallVectorImpl<int> &RepeatedMask) {
11693 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
11697 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
11698 SmallVector<int, 32> RepeatedMask;
11699 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
11702 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
11704 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
11705 SmallVectorImpl<int> &RepeatedMask) {
11706 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
11709 /// Test whether a target shuffle mask is equivalent within each sub-lane.
11710 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
11711 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits,
11712 unsigned EltSizeInBits,
11713 ArrayRef<int> Mask,
11714 SmallVectorImpl<int> &RepeatedMask) {
11715 int LaneSize = LaneSizeInBits / EltSizeInBits;
11716 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
11717 int Size = Mask.size();
11718 for (int i = 0; i < Size; ++i) {
11719 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
11720 if (Mask[i] == SM_SentinelUndef)
11722 if (Mask[i] == SM_SentinelZero) {
11723 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
11725 RepeatedMask[i % LaneSize] = SM_SentinelZero;
11728 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
11729 // This entry crosses lanes, so there is no way to model this shuffle.
11732 // Handle the in-lane shuffles by detecting if and when they repeat. Adjust
11733 // later vector indices to start at multiples of LaneSize instead of Size.
11734 int LaneM = Mask[i] / Size;
11735 int LocalM = (Mask[i] % LaneSize) + (LaneM * LaneSize);
11736 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
11737 // This is the first non-undef entry in this slot of a 128-bit lane.
11738 RepeatedMask[i % LaneSize] = LocalM;
11739 else if (RepeatedMask[i % LaneSize] != LocalM)
11740 // Found a mismatch with the repeated mask.
11746 /// Test whether a target shuffle mask is equivalent within each sub-lane.
11747 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
11748 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
11749 ArrayRef<int> Mask,
11750 SmallVectorImpl<int> &RepeatedMask) {
11751 return isRepeatedTargetShuffleMask(LaneSizeInBits, VT.getScalarSizeInBits(),
11752 Mask, RepeatedMask);
11755 /// Checks whether the vector elements referenced by two shuffle masks are equivalent.
11757 static bool IsElementEquivalent(int MaskSize, SDValue Op, SDValue ExpectedOp,
11758 int Idx, int ExpectedIdx) {
11759 assert(0 <= Idx && Idx < MaskSize && 0 <= ExpectedIdx &&
11760 ExpectedIdx < MaskSize && "Out of range element index");
11761 if (!Op || !ExpectedOp || Op.getOpcode() != ExpectedOp.getOpcode())
11764 switch (Op.getOpcode()) {
11765 case ISD::BUILD_VECTOR:
11766 // If the values are build vectors, we can look through them to find
11767 // equivalent inputs that make the shuffles equivalent.
11768 // TODO: Handle MaskSize != Op.getNumOperands()?
11769 if (MaskSize == (int)Op.getNumOperands() &&
11770 MaskSize == (int)ExpectedOp.getNumOperands())
11771 return Op.getOperand(Idx) == ExpectedOp.getOperand(ExpectedIdx);
11773 case X86ISD::VBROADCAST:
11774 case X86ISD::VBROADCAST_LOAD:
11775 // TODO: Handle MaskSize != Op.getValueType().getVectorNumElements()?
11776 return (Op == ExpectedOp &&
11777 (int)Op.getValueType().getVectorNumElements() == MaskSize);
11780 case X86ISD::FHADD:
11781 case X86ISD::FHSUB:
11782 case X86ISD::PACKSS:
11783 case X86ISD::PACKUS:
11784 // HOP(X,X) can refer to the elt from the lower/upper half of a lane.
11785 // TODO: Handle MaskSize != NumElts?
11786 // TODO: Handle HOP(X,Y) vs HOP(Y,X) equivalence cases.
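// For example, v8i16 HADD(X,X) repeats the four pairwise sums of X in both
// halves of the lane, so indices i and i + 4 name equal elements.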
11787 if (Op == ExpectedOp && Op.getOperand(0) == Op.getOperand(1)) {
11788 MVT VT = Op.getSimpleValueType();
11789 int NumElts = VT.getVectorNumElements();
11790 if (MaskSize == NumElts) {
11791 int NumLanes = VT.getSizeInBits() / 128;
11792 int NumEltsPerLane = NumElts / NumLanes;
11793 int NumHalfEltsPerLane = NumEltsPerLane / 2;
11795 (Idx / NumEltsPerLane) == (ExpectedIdx / NumEltsPerLane);
11797 (Idx % NumHalfEltsPerLane) == (ExpectedIdx % NumHalfEltsPerLane);
11798 return SameLane && SameElt;
11807 /// Checks whether a shuffle mask is equivalent to an explicit list of arguments.
11810 /// This is a fast way to test a shuffle mask against a fixed pattern:
11812 ///   if (isShuffleEquivalent(Mask, {3, 2, 1, 0})) { ... }
11814 /// It returns true if the mask is exactly as wide as the argument list, and
11815 /// each element of the mask is either -1 (signifying undef) or the value given
11816 /// in the argument.
11817 static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
11818 SDValue V1 = SDValue(),
11819 SDValue V2 = SDValue()) {
11820 int Size = Mask.size();
11821 if (Size != (int)ExpectedMask.size())
11824 for (int i = 0; i < Size; ++i) {
11825 assert(Mask[i] >= -1 && "Out of bound mask element!");
11826 int MaskIdx = Mask[i];
11827 int ExpectedIdx = ExpectedMask[i];
11828 if (0 <= MaskIdx && MaskIdx != ExpectedIdx) {
11829 SDValue MaskV = MaskIdx < Size ? V1 : V2;
11830 SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
11831 MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
11832 ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
11833 if (!IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
11840 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
11842 /// The masks must be exactly the same width.
11844 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
11845 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
11847 /// SM_SentinelZero is accepted as a valid negative index but must match in
11848 /// both, or via a known bits test.
11849 static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
11850 ArrayRef<int> ExpectedMask,
11851 const SelectionDAG &DAG,
11852 SDValue V1 = SDValue(),
11853 SDValue V2 = SDValue()) {
11854 int Size = Mask.size();
11855 if (Size != (int)ExpectedMask.size())
11857 assert(llvm::all_of(ExpectedMask,
11858 [Size](int M) { return isInRange(M, 0, 2 * Size); }) &&
11859 "Illegal target shuffle mask");
11861 // Check for out-of-range target shuffle mask indices.
11862 if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
11865 // Don't use V1/V2 if they're not the same size as the shuffle mask type.
11866 if (V1 && V1.getValueSizeInBits() != VT.getSizeInBits())
11868 if (V2 && V2.getValueSizeInBits() != VT.getSizeInBits())
11871 APInt ZeroV1 = APInt::getNullValue(Size);
11872 APInt ZeroV2 = APInt::getNullValue(Size);
11874 for (int i = 0; i < Size; ++i) {
11875 int MaskIdx = Mask[i];
11876 int ExpectedIdx = ExpectedMask[i];
11877 if (MaskIdx == SM_SentinelUndef || MaskIdx == ExpectedIdx)
11879 if (MaskIdx == SM_SentinelZero) {
11880 // If we need this expected index to be a zero element, then update the
11881 // relevant zero mask and perform the known-bits checks at the end to
11882 // minimize repeated computations.
11883 SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
11885 Size == (int)ExpectedV.getValueType().getVectorNumElements()) {
11886 int BitIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
11887 APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
11888 ZeroMask.setBit(BitIdx);
11892 if (MaskIdx >= 0) {
11893 SDValue MaskV = MaskIdx < Size ? V1 : V2;
11894 SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
11895 MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
11896 ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
11897 if (IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
11902 return (ZeroV1.isNullValue() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
11903 (ZeroV2.isNullValue() || DAG.MaskedVectorIsZero(V2, ZeroV2));
11906 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd instructions.
11908 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
11909 const SelectionDAG &DAG) {
11910 if (VT != MVT::v8i32 && VT != MVT::v8f32)
11913 SmallVector<int, 8> Unpcklwd;
11914 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
11915 /* Unary = */ false);
11916 SmallVector<int, 8> Unpckhwd;
11917 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
11918 /* Unary = */ false);
11919 bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd, DAG) ||
11920 isTargetShuffleEquivalent(VT, Mask, Unpckhwd, DAG));
11921 return IsUnpackwdMask;
11924 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
11925 const SelectionDAG &DAG) {
11926 // Create 128-bit vector type based on mask size.
11927 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
11928 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
11930 // We can't assume a canonical shuffle mask, so try the commuted version too.
11931 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
11932 ShuffleVectorSDNode::commuteMask(CommutedMask);
11934 // Match any of unary/binary or low/high.
11935 for (unsigned i = 0; i != 4; ++i) {
11936 SmallVector<int, 16> UnpackMask;
11937 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
11938 if (isTargetShuffleEquivalent(VT, Mask, UnpackMask, DAG) ||
11939 isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask, DAG))
11945 /// Return true if a shuffle mask chooses elements identically in its top and
11946 /// bottom halves. For example, any splat mask has the same top and bottom
11947 /// halves. If an element is undefined in only one half of the mask, the halves
11948 /// are not considered identical.
11949 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
11950 assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
11951 unsigned HalfSize = Mask.size() / 2;
11952 for (unsigned i = 0; i != HalfSize; ++i) {
11953 if (Mask[i] != Mask[i + HalfSize])
11959 /// Get a 4-lane 8-bit shuffle immediate for a mask.
11961 /// This helper function produces an 8-bit shuffle immediate corresponding to
11962 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
11963 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions.
11966 /// NB: We rely heavily on "undef" masks preserving the input lane.
11967 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
11968 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
11969 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
11970 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
11971 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
11972 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
11974 // If the mask only uses one non-undef element, then fully 'splat' it to
11975 // improve later broadcast matching.
11976 int FirstIndex = find_if(Mask, [](int M) { return M >= 0; }) - Mask.begin();
11977 assert(0 <= FirstIndex && FirstIndex < 4 && "All undef shuffle mask");
11979 int FirstElt = Mask[FirstIndex];
11980 if (all_of(Mask, [FirstElt](int M) { return M < 0 || M == FirstElt; }))
11981 return (FirstElt << 6) | (FirstElt << 4) | (FirstElt << 2) | FirstElt;
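// As a worked example, Mask <1, 0, 3, 2> packs to
// (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0) == 0xB1, the familiar
// "swap adjacent elements" PSHUFD immediate.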
11984 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
11985 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
11986 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
11987 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
11991 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
11992 SelectionDAG &DAG) {
11993 return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
11996 // The shuffle result is as follows:
11997 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements are in ascending order.
11998 // Each element of Zeroable corresponds to a particular element of Mask, as
11999 // described in the computeZeroableShuffleElements function.
12001 // The function looks for a sub-mask whose nonzero elements are in
12002 // increasing order. If such a sub-mask exists, the function returns true.
12003 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
12004 ArrayRef<int> Mask, const EVT &VectorType,
12005 bool &IsZeroSideLeft) {
12006 int NextElement = -1;
12007 // Check if the Mask's nonzero elements are in increasing order.
12008 for (int i = 0, e = Mask.size(); i < e; i++) {
12009 // Check that the mask's zero elements are built from only zeros.
12010 assert(Mask[i] >= -1 && "Out of bound mask element!");
12015 // Find the lowest non-zero element.
12016 if (NextElement < 0) {
12017 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
12018 IsZeroSideLeft = NextElement != 0;
12020 // Exit if the mask's non-zero elements are not in increasing order.
12021 if (NextElement != Mask[i])
12028 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
12029 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
12030 ArrayRef<int> Mask, SDValue V1,
12031 SDValue V2, const APInt &Zeroable,
12032 const X86Subtarget &Subtarget,
12033 SelectionDAG &DAG) {
12034 int Size = Mask.size();
12035 int LaneSize = 128 / VT.getScalarSizeInBits();
12036 const int NumBytes = VT.getSizeInBits() / 8;
12037 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
12039 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
12040 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
12041 (Subtarget.hasBWI() && VT.is512BitVector()));
12043 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
12044 // Sign bit set in i8 mask means zero element.
12045 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
12048 for (int i = 0; i < NumBytes; ++i) {
12049 int M = Mask[i / NumEltBytes];
12051 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
12054 if (Zeroable[i / NumEltBytes]) {
12055 PSHUFBMask[i] = ZeroMask;
12059 // We can only use a single input of V1 or V2.
12060 SDValue SrcV = (M >= Size ? V2 : V1);
12061 if (V && V != SrcV)
12066 // PSHUFB can't cross lanes; ensure this doesn't happen.
12067 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
12071 M = M * NumEltBytes + (i % NumEltBytes);
12072 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
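// (The two lines above scale an element index to per-byte indices, e.g.
// with NumEltBytes == 4, element index 2 maps to bytes {8, 9, 10, 11}.)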
12074 assert(V && "Failed to find a source input");
12076 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
12077 return DAG.getBitcast(
12078 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
12079 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
12082 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
12083 const X86Subtarget &Subtarget, SelectionDAG &DAG,
12086 // X86 has a dedicated shuffle that can be lowered to VEXPAND.
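// For instance, a v4i64 shuffle with mask <Z, 0, Z, 1> (Z == zeroable) expands
// V1's elements {0, 1} into lanes {1, 3}; the write-mask computed below from
// ~Zeroable is 0b1010.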
12087 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
12088 const APInt &Zeroable,
12089 ArrayRef<int> Mask, SDValue &V1,
12090 SDValue &V2, SelectionDAG &DAG,
12091 const X86Subtarget &Subtarget) {
12092 bool IsLeftZeroSide = true;
12093 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
12096 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
12098 MVT IntegerType = MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
12099 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
12100 unsigned NumElts = VT.getVectorNumElements();
12101 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
12102 "Unexpected number of vector elements");
12103 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
12104 Subtarget, DAG, DL);
12105 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
12106 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
12107 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
12110 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
12111 unsigned &UnpackOpcode, bool IsUnary,
12112 ArrayRef<int> TargetMask, const SDLoc &DL,
12114 const X86Subtarget &Subtarget) {
12115 int NumElts = VT.getVectorNumElements();
12117 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
12118 for (int i = 0; i != NumElts; i += 2) {
12119 int M1 = TargetMask[i + 0];
12120 int M2 = TargetMask[i + 1];
12121 Undef1 &= (SM_SentinelUndef == M1);
12122 Undef2 &= (SM_SentinelUndef == M2);
12123 Zero1 &= isUndefOrZero(M1);
12124 Zero2 &= isUndefOrZero(M2);
12126 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
12127 "Zeroable shuffle detected");
12129 // Attempt to match the target mask against the unpack lo/hi mask patterns.
12130 SmallVector<int, 64> Unpckl, Unpckh;
12131 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
12132 if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG, V1,
12133 (IsUnary ? V1 : V2))) {
12134 UnpackOpcode = X86ISD::UNPCKL;
12135 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
12136 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
12140 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
12141 if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG, V1,
12142 (IsUnary ? V1 : V2))) {
12143 UnpackOpcode = X86ISD::UNPCKH;
12144 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
12145 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
12149 // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
12150 if (IsUnary && (Zero1 || Zero2)) {
12151 // Don't bother if we can blend instead.
12152 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
12153 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
12156 bool MatchLo = true, MatchHi = true;
12157 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
12158 int M = TargetMask[i];
12160 // Ignore if the input is known to be zero or the index is undef.
12161 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
12162 (M == SM_SentinelUndef))
12165 MatchLo &= (M == Unpckl[i]);
12166 MatchHi &= (M == Unpckh[i]);
12169 if (MatchLo || MatchHi) {
12170 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
12171 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
12172 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
12177 // If a binary shuffle, commute and try again.
12179 ShuffleVectorSDNode::commuteMask(Unpckl);
12180 if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG)) {
12181 UnpackOpcode = X86ISD::UNPCKL;
12186 ShuffleVectorSDNode::commuteMask(Unpckh);
12187 if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG)) {
12188 UnpackOpcode = X86ISD::UNPCKH;
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
                                     ArrayRef<int> Mask, SDValue V1, SDValue V2,
                                     SelectionDAG &DAG) {
  SmallVector<int, 8> Unpckl;
  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
  if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);

  SmallVector<int, 8> Unpckh;
  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
  if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);

  // Commute and try again.
  ShuffleVectorSDNode::commuteMask(Unpckl);
  if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);

  ShuffleVectorSDNode::commuteMask(Unpckh);
  if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);

  return SDValue();
}

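// Illustrative note (not from the upstream source): for v4i32 the binary
// unpack masks are Unpckl = <0,4,1,5> and Unpckh = <2,6,3,7>, i.e. UNPCKLDQ
// interleaves the low halves of V1 and V2 and UNPCKHDQ the high halves.
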
/// Check if the mask can be mapped to a preliminary shuffle (vperm 64-bit)
/// followed by unpack 256-bit.
static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
                                        ArrayRef<int> Mask, SDValue V1,
                                        SDValue V2, SelectionDAG &DAG) {
  SmallVector<int, 32> Unpckl, Unpckh;
  createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
  createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);

  unsigned UnpackOpcode;
  if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
    UnpackOpcode = X86ISD::UNPCKL;
  else if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
    UnpackOpcode = X86ISD::UNPCKH;
  else
    return SDValue();

  // This is a "natural" unpack operation (rather than the 128-bit sectored
  // operation implemented by AVX). We need to rearrange 64-bit chunks of the
  // input in order to use the x86 instruction.
  V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
                            DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
  V1 = DAG.getBitcast(VT, V1);
  return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
}

// Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
// source into the lower elements and zeroing the upper elements.
static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
                                 ArrayRef<int> Mask, const APInt &Zeroable,
                                 const X86Subtarget &Subtarget) {
  if (!VT.is512BitVector() && !Subtarget.hasVLX())
    return false;

  unsigned NumElts = Mask.size();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  unsigned MaxScale = 64 / EltSizeInBits;

  for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
    unsigned SrcEltBits = EltSizeInBits * Scale;
    if (SrcEltBits < 32 && !Subtarget.hasBWI())
      continue;
    unsigned NumSrcElts = NumElts / Scale;
    if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
      continue;
    unsigned UpperElts = NumElts - NumSrcElts;
    if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
      continue;
    SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
    SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
    DstVT = MVT::getIntegerVT(EltSizeInBits);
    if ((NumSrcElts * EltSizeInBits) >= 128) {
      // ISD::TRUNCATE
      DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
    } else {
      // X86ISD::VTRUNC
      DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
    }
    return true;
  }

  return false;
}

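// Illustrative note (not from the upstream source): assuming AVX512BW+VLX,
// a v16i8 shuffle with mask <0,2,4,6,8,10,12,14,zz,..,zz> matches at
// Scale == 2 and reports SrcVT = v8i16, DstVT = v16i8 - the 64-bit result
// is narrower than 128 bits, so the X86ISD::VTRUNC form is selected.
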
// Helper to create TRUNCATE/VTRUNC nodes, optionally with zero/undef upper
// element padding to the final DstVT.
static SDValue getAVX512TruncNode(const SDLoc &DL, MVT DstVT, SDValue Src,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG, bool ZeroUppers) {
  MVT SrcVT = Src.getSimpleValueType();
  MVT DstSVT = DstVT.getScalarType();
  unsigned NumDstElts = DstVT.getVectorNumElements();
  unsigned NumSrcElts = SrcVT.getVectorNumElements();
  unsigned DstEltSizeInBits = DstVT.getScalarSizeInBits();

  if (!DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
    return SDValue();

  // Perform a direct ISD::TRUNCATE if possible.
  if (NumSrcElts == NumDstElts)
    return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Src);

  if (NumSrcElts > NumDstElts) {
    MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
    return extractSubVector(Trunc, 0, DAG, DL, DstVT.getSizeInBits());
  }

  if ((NumSrcElts * DstEltSizeInBits) >= 128) {
    MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
    return widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
                          DstVT.getSizeInBits());
  }

  // Non-VLX targets must truncate from a 512-bit type, so we need to
  // widen, truncate and then possibly extract the original subvector.
  if (!Subtarget.hasVLX() && !SrcVT.is512BitVector()) {
    SDValue NewSrc = widenSubVector(Src, ZeroUppers, Subtarget, DAG, DL, 512);
    return getAVX512TruncNode(DL, DstVT, NewSrc, Subtarget, DAG, ZeroUppers);
  }

  // Fallback to a X86ISD::VTRUNC, padding if necessary.
  MVT TruncVT = MVT::getVectorVT(DstSVT, 128 / DstEltSizeInBits);
  SDValue Trunc = DAG.getNode(X86ISD::VTRUNC, DL, TruncVT, Src);
  if (DstVT != TruncVT)
    Trunc = widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
                           DstVT.getSizeInBits());
  return Trunc;
}

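// Illustrative walk-through (not from the upstream source): requesting a
// v16i8 result from a v16i16 source without VLX widens the source to v32i16
// (512 bits, assuming BWI makes that type legal), then the recursive call
// performs ISD::TRUNCATE to v32i8 and extracts the low 128-bit subvector.
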
// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
//
// An example is the following:
//
// t0: ch = EntryToken
//           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
//         t25: v4i32 = truncate t2
//       t41: v8i16 = bitcast t25
//       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
//       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
//     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
//   t18: v2i64 = bitcast t51
//
// One can just use a single vpmovdw instruction; without avx512vl we need to
// use the zmm variant and extract the lower subvector, padding with zeroes.
// TODO: Merge with lowerShuffleAsVTRUNC.
static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  assert((VT == MVT::v16i8 || VT == MVT::v8i16) && "Unexpected VTRUNC type");
  if (!Subtarget.hasAVX512())
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  unsigned MaxScale = 64 / EltSizeInBits;
  for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
    unsigned NumSrcElts = NumElts / Scale;
    unsigned UpperElts = NumElts - NumSrcElts;
    if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
        !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
      continue;

    SDValue Src = V1;
    if (!Src.hasOneUse())
      return SDValue();

    Src = peekThroughOneUseBitcasts(Src);
    if (Src.getOpcode() != ISD::TRUNCATE ||
        Src.getScalarValueSizeInBits() != (EltSizeInBits * Scale))
      return SDValue();
    Src = Src.getOperand(0);

    // VPMOVWB is only available with avx512bw.
    MVT SrcVT = Src.getSimpleValueType();
    if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
        !Subtarget.hasBWI())
      return SDValue();

    bool UndefUppers = isUndefInRange(Mask, NumSrcElts, UpperElts);
    return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
  }

  return SDValue();
}

// Attempt to match binary shuffle patterns as a truncate.
static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    const APInt &Zeroable,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unexpected VTRUNC type");
  if (!Subtarget.hasAVX512())
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  unsigned MaxScale = 64 / EltSizeInBits;
  for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
    // TODO: Support non-BWI VPMOVWB truncations?
    unsigned SrcEltBits = EltSizeInBits * Scale;
    if (SrcEltBits < 32 && !Subtarget.hasBWI())
      continue;

    // Match shuffle <0,Scale,2*Scale,..,undef_or_zero,undef_or_zero,...>
    // Bail if the V2 elements are undef.
    unsigned NumHalfSrcElts = NumElts / Scale;
    unsigned NumSrcElts = 2 * NumHalfSrcElts;
    if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
        isUndefInRange(Mask, NumHalfSrcElts, NumHalfSrcElts))
      continue;

    // The elements beyond the truncation must be undef/zero.
    unsigned UpperElts = NumElts - NumSrcElts;
    if (UpperElts > 0 &&
        !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
      continue;
    bool UndefUppers =
        UpperElts > 0 && isUndefInRange(Mask, NumSrcElts, UpperElts);

    // As we're using both sources we need to concat them together
    // and truncate from the double-sized src.
    MVT ConcatVT = MVT::getVectorVT(VT.getScalarType(), NumElts * 2);
    SDValue Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, V1, V2);

    MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
    MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
    Src = DAG.getBitcast(SrcVT, Src);
    return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
  }

  return SDValue();
}

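// Illustrative note (not from the upstream source): a two-input v16i8
// shuffle with mask <0,2,4,...,30> concatenates V1 and V2 into v32i8,
// bitcasts the concat to v16i16, and then truncates straight back to
// v16i8 - the interleaved even bytes of both sources become one truncation.
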
/// Check whether a compaction lowering can be done by dropping even/odd
/// elements and compute how many times even/odd elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
///  (even)
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
///
///  (odd)
///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
///
/// \returns N above, or the number of times even/odd elements must be dropped
/// if there is such a number. Otherwise returns zero.
static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
                                      bool IsSingleInput) {
  // The modulus for the shuffle vector entries is based on whether this is
  // a single input or not.
  int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
  assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
         "We should only be called with masks with a power-of-2 size!");

  uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
  int Offset = MatchEven ? 0 : 1;

  // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
  // and 2^3 simultaneously. This is because we may have ambiguity with
  // partially undef inputs.
  bool ViableForN[3] = {true, true, true};

  for (int i = 0, e = Mask.size(); i < e; ++i) {
    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
    // want.
    if (Mask[i] < 0)
      continue;

    bool IsAnyViable = false;
    for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
      if (ViableForN[j]) {
        uint64_t N = j + 1;

        // The shuffle mask must be equal to (i * 2^N) % M.
        if ((uint64_t)(Mask[i] - Offset) == (((uint64_t)i << N) & ModMask))
          IsAnyViable = true;
        else
          ViableForN[j] = false;
      }
    // Early exit if we exhaust the possible powers of two.
    if (!IsAnyViable)
      break;
  }

  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
    if (ViableForN[j])
      return j + 1;

  // Return 0 as there is no viable power of two.
  return 0;
}

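// Worked example (illustrative, not from the upstream source): for the
// single-input v16i8 mask <0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14> the
// modulus M is 16, and every defined entry satisfies
// Mask[i] == (i << 1) & 15 (e.g. i = 5 -> 10, i = 8 -> 0), so the routine
// returns N = 1: one round of dropping odd elements realizes the shuffle.
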
// X86 has dedicated pack instructions that can handle specific truncation
// operations: PACKSS and PACKUS.
// Checks for compaction shuffle masks if MaxStages > 1.
// TODO: Add support for matching multiple PACKSS/PACKUS stages.
static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
                                 unsigned &PackOpcode, ArrayRef<int> TargetMask,
                                 const SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget,
                                 unsigned MaxStages = 1) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned BitSize = VT.getScalarSizeInBits();
  assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
         "Illegal maximum compaction");

  auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
    unsigned NumSrcBits = PackVT.getScalarSizeInBits();
    unsigned NumPackedBits = NumSrcBits - BitSize;
    N1 = peekThroughBitcasts(N1);
    N2 = peekThroughBitcasts(N2);
    unsigned NumBits1 = N1.getScalarValueSizeInBits();
    unsigned NumBits2 = N2.getScalarValueSizeInBits();
    bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
    bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
    if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
        (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
      return false;
    if (Subtarget.hasSSE41() || BitSize == 8) {
      APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
      if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
          (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
        V1 = N1;
        V2 = N2;
        SrcVT = PackVT;
        PackOpcode = X86ISD::PACKUS;
        return true;
      }
    }
    bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
    bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
    if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
         DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
        (N2.isUndef() || IsZero2 || IsAllOnes2 ||
         DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
      V1 = N1;
      V2 = N2;
      SrcVT = PackVT;
      PackOpcode = X86ISD::PACKSS;
      return true;
    }
    return false;
  };

  // Attempt to match against wider and wider compaction patterns.
  for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
    MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
    MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);

    // Try binary shuffle.
    SmallVector<int, 32> BinaryMask;
    createPackShuffleMask(VT, BinaryMask, false, NumStages);
    if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, DAG, V1, V2))
      if (MatchPACK(V1, V2, PackVT))
        return true;

    // Try unary shuffle.
    SmallVector<int, 32> UnaryMask;
    createPackShuffleMask(VT, UnaryMask, true, NumStages);
    if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, DAG, V1))
      if (MatchPACK(V1, V1, PackVT))
        return true;
  }

  return false;
}

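// Illustrative note (not from the upstream source): a v16i8 target mask of
// <0,2,4,...,30> is the single-stage binary pack pattern over two v8i16
// inputs; it matches PACKUS when both inputs have their upper 8 bits known
// zero, or PACKSS when both have more than 8 sign bits, with SrcVT = v8i16.
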
static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
                                    SDValue V1, SDValue V2, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT PackVT;
  unsigned PackOpcode;
  unsigned SizeBits = VT.getSizeInBits();
  unsigned EltBits = VT.getScalarSizeInBits();
  unsigned MaxStages = Log2_32(64 / EltBits);
  if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
                            Subtarget, MaxStages))
    return SDValue();

  unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
  unsigned NumStages = Log2_32(CurrentEltBits / EltBits);

  // Don't lower multi-stage packs on AVX512, truncation is better.
  if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
    return SDValue();

  // Pack to the largest type possible:
  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
  unsigned MaxPackBits = 16;
  if (CurrentEltBits > 16 &&
      (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
    MaxPackBits = 32;

  // Repeatedly pack down to the target size.
  SDValue Res;
  for (unsigned i = 0; i != NumStages; ++i) {
    unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
    unsigned NumSrcElts = SizeBits / SrcEltBits;
    MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
    MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
    MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
    MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
    Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
                      DAG.getBitcast(SrcVT, V2));
    V1 = V2 = Res;
    CurrentEltBits /= 2;
  }
  assert(Res && Res.getValueType() == VT &&
         "Failed to lower compaction shuffle");
  return Res;
}

/// Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT MaskVT = VT;
  MVT EltVT = VT.getVectorElementType();
  SDValue Zero, AllOnes;
  // Use f64 if i64 isn't legal.
  if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
    EltVT = MVT::f64;
    MaskVT = MVT::getVectorVT(EltVT, Mask.size());
  }

  MVT LogicVT = VT;
  if (EltVT == MVT::f32 || EltVT == MVT::f64) {
    Zero = DAG.getConstantFP(0.0, DL, EltVT);
    APFloat AllOnesValue =
        APFloat::getAllOnesValue(SelectionDAG::EVTToAPFloatSemantics(EltVT));
    AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
    LogicVT =
        MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
  } else {
    Zero = DAG.getConstant(0, DL, EltVT);
    AllOnes = DAG.getAllOnesConstant(DL, EltVT);
  }

  SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
  SDValue V;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Zeroable[i])
      continue;
    if (Mask[i] % Size != i)
      return SDValue(); // Not a blend.
    if (!V)
      V = Mask[i] < Size ? V1 : V2;
    else if (V != (Mask[i] < Size ? V1 : V2))
      return SDValue(); // Can only let one input through the mask.

    VMaskOps[i] = AllOnes;
  }
  if (!V)
    return SDValue(); // No non-zeroable elements!

  SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
  VMask = DAG.getBitcast(LogicVT, VMask);
  V = DAG.getBitcast(LogicVT, V);
  SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
  return DAG.getBitcast(VT, And);
}

/// Try to emit a blend instruction for a shuffle using bit math.
///
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      SelectionDAG &DAG) {
  assert(VT.isInteger() && "Only supports integer vector types!");
  MVT EltVT = VT.getVectorElementType();
  SDValue Zero = DAG.getConstant(0, DL, EltVT);
  SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
  SmallVector<SDValue, 16> MaskOps;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
      return SDValue(); // Shuffled input!
    MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
  }

  SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
  V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
  V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
  return DAG.getNode(ISD::OR, DL, VT, V1, V2);
}

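// Illustrative note (not from the upstream source): for v4i32 with mask
// <0,5,2,7>, V1Mask becomes <-1,0,-1,0>, so the result is
// (V1 & V1Mask) | (V2 & ~V1Mask) - elements 0 and 2 from V1, elements 1 and
// 3 from V2 - with the ANDNP supplying the inverted mask.
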
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG);

static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
                                MutableArrayRef<int> Mask,
                                const APInt &Zeroable, bool &ForceV1Zero,
                                bool &ForceV2Zero, uint64_t &BlendMask) {
  bool V1IsZeroOrUndef =
      V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsZeroOrUndef =
      V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());

  BlendMask = 0;
  ForceV1Zero = false, ForceV2Zero = false;
  assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");

  // Attempt to generate the binary blend mask. If an input is zero then
  // we can use any lane.
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;
    if (M == i ||
        (0 <= M && M < Size && IsElementEquivalent(Size, V1, V1, M, i))) {
      Mask[i] = i;
      continue;
    }
    if (M == (i + Size) ||
        (Size <= M && IsElementEquivalent(Size, V2, V2, M - Size, i))) {
      BlendMask |= 1ull << i;
      Mask[i] = i + Size;
      continue;
    }
    if (Zeroable[i]) {
      if (V1IsZeroOrUndef) {
        ForceV1Zero = true;
        Mask[i] = i;
        continue;
      }
      if (V2IsZeroOrUndef) {
        ForceV2Zero = true;
        BlendMask |= 1ull << i;
        Mask[i] = i + Size;
        continue;
      }
    }
    return false;
  }
  return true;
}

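// Illustrative note (not from the upstream source): for v4i32 with mask
// <0,5,2,7>, lanes 1 and 3 come from V2, so BlendMask becomes 0b1010;
// zeroable lanes backed by an all-zeros/undef input are folded into the
// blend by setting ForceV1Zero/ForceV2Zero instead of failing the match.
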
static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
                                            int Scale) {
  uint64_t ScaledMask = 0;
  for (int i = 0; i != Size; ++i)
    if (BlendMask & (1ull << i))
      ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
  return ScaledMask;
}

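// Illustrative note (not from the upstream source): scaling BlendMask 0b0101
// from 4 elements by Scale == 2 yields 0b00110011 - each selected wide lane
// expands into two selected narrow lanes.
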
/// Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Original,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  uint64_t BlendMask = 0;
  bool ForceV1Zero = false, ForceV2Zero = false;
  SmallVector<int, 64> Mask(Original.begin(), Original.end());
  if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
                           BlendMask))
    return SDValue();

  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
  if (ForceV1Zero)
    V1 = getZeroVector(VT, Subtarget, DAG, DL);
  if (ForceV2Zero)
    V2 = getZeroVector(VT, Subtarget, DAG, DL);

  unsigned NumElts = VT.getVectorNumElements();

  switch (VT.SimpleTy) {
  case MVT::v4i64:
  case MVT::v8i32:
    assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
    LLVM_FALLTHROUGH;
  case MVT::v4f64:
  case MVT::v8f32:
    assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
    LLVM_FALLTHROUGH;
  case MVT::v2f64:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
    assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
                       DAG.getTargetConstant(BlendMask, DL, MVT::i8));
  case MVT::v16i16: {
    assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
      BlendMask = 0;
      for (int i = 0; i < 8; ++i)
        if (RepeatedMask[i] >= 8)
          BlendMask |= 1ull << i;
      return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                         DAG.getTargetConstant(BlendMask, DL, MVT::i8));
    }
    // Use PBLENDW for lower/upper lanes and then blend lanes.
    // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
    // merge to VSELECT where useful.
    uint64_t LoMask = BlendMask & 0xFF;
    uint64_t HiMask = (BlendMask >> 8) & 0xFF;
    if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
      SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                               DAG.getTargetConstant(LoMask, DL, MVT::i8));
      SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                               DAG.getTargetConstant(HiMask, DL, MVT::i8));
      return DAG.getVectorShuffle(
          MVT::v16i16, DL, Lo, Hi,
          {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
    }
    LLVM_FALLTHROUGH;
  }
  case MVT::v32i8:
    assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
    LLVM_FALLTHROUGH;
  case MVT::v16i8: {
    assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");

    // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
    if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                               Subtarget, DAG))
      return Masked;

    if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
      MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
      SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
      return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
    }

    // If we have VPTERNLOG, we can use that as a bit blend.
    if (Subtarget.hasVLX())
      if (SDValue BitBlend =
              lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
        return BitBlend;

    // Scale the blend by the number of bytes per element.
    int Scale = VT.getScalarSizeInBits() / 8;

    // This form of blend is always done on bytes. Compute the byte vector
    // type.
    MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);

    // x86 allows load folding with blendvb from the 2nd source operand. But
    // we are still using LLVM select here (see comment below), so that's V1.
    // If V2 can be load-folded and V1 cannot be load-folded, then commute to
    // allow that load-folding possibility.
    if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
      ShuffleVectorSDNode::commuteMask(Mask);
      std::swap(V1, V2);
    }

    // Compute the VSELECT mask. Note that VSELECT is really confusing in the
    // mix of LLVM's code generator and the x86 backend. We tell the code
    // generator that boolean values in the elements of an x86 vector register
    // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
    // mapping a select to operand #1, and 'false' mapping to operand #2. The
    // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
    // of the element (the remaining are ignored) and 0 in that high bit would
    // mean operand #1 while 1 in the high bit would mean operand #2. So while
    // the LLVM model for boolean values in vector elements gets the relevant
    // bit set, it is set backwards and over constrained relative to x86's
    // actual model.
    SmallVector<SDValue, 32> VSELECTMask;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      for (int j = 0; j < Scale; ++j)
        VSELECTMask.push_back(
            Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
                        : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
                                          MVT::i8));

    V1 = DAG.getBitcast(BlendVT, V1);
    V2 = DAG.getBitcast(BlendVT, V2);
    return DAG.getBitcast(
        VT,
        DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
                      V1, V2));
  }
  case MVT::v16f32:
  case MVT::v8f64:
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8: {
    // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
    bool OptForSize = DAG.shouldOptForSize();
    if (!OptForSize) {
      if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                                 Subtarget, DAG))
        return Masked;
    }

    // Otherwise load an immediate into a GPR, cast to k-register, and use a
    // masked move.
    MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
    SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
    return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
  }
  default:
    llvm_unreachable("Not a supported integer vector type!");
  }
}

/// Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             SelectionDAG &DAG,
                                             bool ImmBlends = false) {
  // We build up the blend mask while checking whether a blend is a viable way
  // to reduce the shuffle.
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  SmallVector<int, 32> PermuteMask(Mask.size(), -1);

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");

    if (BlendMask[Mask[i] % Size] < 0)
      BlendMask[Mask[i] % Size] = Mask[i];
    else if (BlendMask[Mask[i] % Size] != Mask[i])
      return SDValue(); // Can't blend in the needed input!

    PermuteMask[i] = Mask[i] % Size;
  }

  // If only immediate blends, then bail if the blend mask can't be widened to
  // i16.
  unsigned EltSize = VT.getScalarSizeInBits();
  if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
    return SDValue();

  SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
  return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
}

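// Worked example (illustrative, not from the upstream source): for v4i32
// with mask <2,4,1,7>, the blend mask becomes <4,1,2,7> (each slot choosing
// V1 or V2 at the same index) and the permute mask <2,0,1,3>, so the shuffle
// decomposes into one blend followed by one single-input permute.
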
/// Try to lower as an unpack of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can unpack elements from two inputs and
/// then reduce the shuffle to a single-input (wider) permutation.
static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = NumElts / NumLanes;
  int NumHalfLaneElts = NumLaneElts / 2;

  bool MatchLo = true, MatchHi = true;
  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};

  // Determine UNPCKL/UNPCKH type and operand order.
  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
    for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
      int M = Mask[Lane + Elt];
      if (M < 0)
        continue;

      SDValue &Op = Ops[Elt & 1];
      if (M < NumElts && (Op.isUndef() || Op == V1))
        Op = V1;
      else if (NumElts <= M && (Op.isUndef() || Op == V2))
        Op = V2;
      else
        return SDValue();

      int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
      MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
                 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
      MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
                 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
      if (!MatchLo && !MatchHi)
        return SDValue();
    }
  }
  assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");

  // Now check that each pair of elts come from the same unpack pair
  // and set the permute mask based on each pair.
  // TODO - Investigate cases where we permute individual elements.
  SmallVector<int, 32> PermuteMask(NumElts, -1);
  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
    for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
      int M0 = Mask[Lane + Elt + 0];
      int M1 = Mask[Lane + Elt + 1];
      if (0 <= M0 && 0 <= M1 &&
          (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
        return SDValue();
      if (0 <= M0)
        PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
      if (0 <= M1)
        PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
    }
  }

  unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
  SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
  return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
}

/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
/// permuting the elements of the result in place.
static SDValue lowerShuffleAsByteRotateAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
      (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
      (VT.is512BitVector() && !Subtarget.hasBWI()))
    return SDValue();

  // We don't currently support lane crossing permutes.
  if (is128BitLaneCrossingShuffleMask(VT, Mask))
    return SDValue();

  int Scale = VT.getScalarSizeInBits() / 8;
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = VT.getVectorNumElements();
  int NumEltsPerLane = NumElts / NumLanes;

  // Determine range of mask elts.
  bool Blend1 = true;
  bool Blend2 = true;
  std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
  std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
  for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
    for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
      int M = Mask[Lane + Elt];
      if (M < 0)
        continue;
      if (M < NumElts) {
        Blend1 &= (M == (Lane + Elt));
        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
        M = M % NumEltsPerLane;
        Range1.first = std::min(Range1.first, M);
        Range1.second = std::max(Range1.second, M);
      } else {
        M -= NumElts;
        Blend2 &= (M == (Lane + Elt));
        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
        M = M % NumEltsPerLane;
        Range2.first = std::min(Range2.first, M);
        Range2.second = std::max(Range2.second, M);
      }
    }
  }

  // Bail if we don't need both elements.
  // TODO - it might be worth doing this for unary shuffles if the permute
  // can be widened.
  if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
      !(0 <= Range2.first && Range2.second < NumEltsPerLane))
    return SDValue();

  if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
    return SDValue();

  // Rotate the 2 ops so we can access both ranges, then permute the result.
  auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
    SDValue Rotate = DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
                        DAG.getBitcast(ByteVT, Lo),
                        DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
    SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
    for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
      for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
        int M = Mask[Lane + Elt];
        if (M < 0)
          continue;
        if (M < NumElts)
          PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
        else
          PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
      }
    }
    return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
  };

  // Check if the ranges are small enough to rotate from either direction.
  if (Range2.second < Range1.first)
    return RotateAndPermute(V1, V2, Range1.first, 0);
  if (Range1.second < Range2.first)
    return RotateAndPermute(V2, V1, Range2.first, NumElts);
  return SDValue();
}

static bool isBroadcastShuffleMask(ArrayRef<int> Mask) {
  return isUndefOrEqual(Mask, 0);
}

static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) {
  return isNoopShuffleMask(Mask) || isBroadcastShuffleMask(Mask);
}

/// Generic routine to decompose a shuffle and blend into independent
/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends. For vXi8/vXi16 shuffles we may use unpack instead of blend.
static SDValue lowerShuffleAsDecomposedShuffleMerge(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumEltsPerLane = NumElts / NumLanes;

  // Shuffle the input elements into the desired positions in V1 and V2 and
  // unpack/blend them together.
  bool IsAlternating = true;
  SmallVector<int, 32> V1Mask(NumElts, -1);
  SmallVector<int, 32> V2Mask(NumElts, -1);
  SmallVector<int, 32> FinalMask(NumElts, -1);
  for (int i = 0; i < NumElts; ++i) {
    int M = Mask[i];
    if (M >= 0 && M < NumElts) {
      V1Mask[i] = M;
      FinalMask[i] = i;
      IsAlternating &= (i & 1) == 0;
    } else if (M >= NumElts) {
      V2Mask[i] = M - NumElts;
      FinalMask[i] = i + NumElts;
      IsAlternating &= (i & 1) == 1;
    }
  }

  // If we effectively only demand the 0'th element of \p Input, and not only
  // as 0'th element, then broadcast said input,
  // and change \p InputMask to be a no-op (identity) mask.
  auto canonicalizeBroadcastableInput = [DL, VT, &Subtarget,
                                         &DAG](SDValue &Input,
                                               MutableArrayRef<int> InputMask) {
    unsigned EltSizeInBits = Input.getScalarValueSizeInBits();
    if (!Subtarget.hasAVX2() && (!Subtarget.hasAVX() || EltSizeInBits < 32 ||
                                 !X86::mayFoldLoad(Input, Subtarget)))
      return;
    if (isNoopShuffleMask(InputMask))
      return;
    assert(isBroadcastShuffleMask(InputMask) &&
           "Expected to demand only the 0'th element.");
    Input = DAG.getNode(X86ISD::VBROADCAST, DL, VT, Input);
    for (auto I : enumerate(InputMask)) {
      int &InputMaskElt = I.value();
      if (InputMaskElt >= 0)
        InputMaskElt = I.index();
    }
  };

  // Currently, we may need to produce one shuffle per input, and blend results.
  // It is possible that the shuffle for one of the inputs is already a no-op.
  // See if we can simplify non-no-op shuffles into broadcasts,
  // which we consider to be strictly better than an arbitrary shuffle.
  if (isNoopOrBroadcastShuffleMask(V1Mask) &&
      isNoopOrBroadcastShuffleMask(V2Mask)) {
    canonicalizeBroadcastableInput(V1, V1Mask);
    canonicalizeBroadcastableInput(V2, V2Mask);
  }

  // Try to lower with the simpler initial blend/unpack/rotate strategies unless
  // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
  // the shuffle may be able to fold with a load or other benefit. However, when
  // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
  // pre-shuffle first is a better strategy.
  if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
    // Only prefer immediate blends to unpack/rotate.
    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
                                                          DAG, true))
      return BlendPerm;
    if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
                                                           DAG))
      return UnpackPerm;
    if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
            DL, VT, V1, V2, Mask, Subtarget, DAG))
      return RotatePerm;
    // Unpack/rotate failed - try again with variable blends.
    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
                                                          DAG))
      return BlendPerm;
  }

  // If the final mask is an alternating blend of vXi8/vXi16, convert to an
  // UNPCKL(SHUFFLE, SHUFFLE) pattern.
  // TODO: It doesn't have to be alternating - but each lane mustn't have more
  // than half the elements coming from each source.
  if (IsAlternating && VT.getScalarSizeInBits() < 32) {
    V1Mask.assign(NumElts, -1);
    V2Mask.assign(NumElts, -1);
    FinalMask.assign(NumElts, -1);
    for (int i = 0; i != NumElts; i += NumEltsPerLane)
      for (int j = 0; j != NumEltsPerLane; ++j) {
        int M = Mask[i + j];
        if (M >= 0 && M < NumElts) {
          V1Mask[i + (j / 2)] = M;
          FinalMask[i + j] = i + (j / 2);
        } else if (M >= NumElts) {
          V2Mask[i + (j / 2)] = M - NumElts;
          FinalMask[i + j] = i + (j / 2) + NumElts;
        }
      }
  }

  V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
  V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
  return DAG.getVectorShuffle(VT, DL, V1, V2, FinalMask);
}

/// Try to lower a vector shuffle as a bit rotation.
///
/// Look for a repeated rotation pattern in each sub group.
/// Returns a ISD::ROTL element rotation amount or -1 if failed.
static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
  int NumElts = Mask.size();
  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");

  int RotateAmt = -1;
  for (int i = 0; i != NumElts; i += NumSubElts) {
    for (int j = 0; j != NumSubElts; ++j) {
      int M = Mask[i + j];
      if (M < 0)
        continue;
      if (!isInRange(M, i, i + NumSubElts))
        return -1;
      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
      if (0 <= RotateAmt && Offset != RotateAmt)
        return -1;
      RotateAmt = Offset;
    }
  }
  return RotateAmt;
}

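// Worked example (illustrative, not from the upstream source): the v16i8
// mask <1,2,3,0, 5,6,7,4, 9,10,11,8, 13,14,15,12> with NumSubElts == 4
// yields Offset == 3 for every defined element, so the caller can realize it
// as a v4i32 VROTLI by 3 * 8 == 24 bits.
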
static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
                                   const X86Subtarget &Subtarget,
                                   ArrayRef<int> Mask) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
  assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");

  // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
  int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
  int MaxSubElts = 64 / EltSizeInBits;
  for (int NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int RotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (RotateAmt < 0)
      continue;

    int NumElts = Mask.size();
    MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
    RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
    return RotateAmt * EltSizeInBits;
  }

  return -1;
}

/// Lower shuffle using X86ISD::VROTLI rotations.
static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
                                       ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  // Only XOP + AVX512 targets have bit rotation instructions.
  // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
  bool IsLegal =
      (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
  if (!IsLegal && Subtarget.hasSSE3())
    return SDValue();

  MVT RotateVT;
  int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
                                          Subtarget, Mask);
  if (RotateAmt < 0)
    return SDValue();

  // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
  // expanded to OR(SRL,SHL), will be more efficient, but if they can
  // widen to vXi16 or more then the existing lowering should be better.
  if (!IsLegal) {
    if ((RotateAmt % 16) == 0)
      return SDValue();
    // TODO: Use getTargetVShiftByConstNode.
    unsigned ShlAmt = RotateAmt;
    unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
    V1 = DAG.getBitcast(RotateVT, V1);
    SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
                              DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
    SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
                              DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
    SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
    return DAG.getBitcast(VT, Rot);
  }

  SDValue Rot =
      DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
                  DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
  return DAG.getBitcast(VT, Rot);
}

/// Try to match a vector shuffle as an element rotation.
///
/// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
                                       ArrayRef<int> Mask) {
  int NumElts = Mask.size();

  // We need to detect various ways of spelling a rotation:
  //   [11, 12, 13, 14, 15,  0,  1,  2]
  //   [-1, 12, 13, 14, -1, -1,  1, -1]
  //   [-1, -1, -1, -1, -1, -1,  1,  2]
  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
  //   [-1,  4,  5,  6, -1, -1,  9, -1]
  //   [-1,  4,  5,  6, -1, -1, -1, -1]
  int Rotation = 0;
  SDValue Lo, Hi;
  for (int i = 0; i < NumElts; ++i) {
    int M = Mask[i];
    assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
           "Unexpected mask index.");
    if (M < 0)
      continue;

    // Determine where a rotated vector would have started.
    int StartIdx = i - (M % NumElts);
    if (StartIdx == 0)
      // The identity rotation isn't interesting, stop.
      return -1;

    // If we found the tail of a vector the rotation must be the missing
    // front. If we found the head of a vector, it must be how much of the
    // head.
    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;

    if (Rotation == 0)
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
      // The rotations don't match, so we can't match this mask.
      return -1;

    // Compute which value this mask is pointing at.
    SDValue MaskV = M < NumElts ? V1 : V2;

    // Compute which of the two target values this index should be assigned
    // to. This reflects whether the high elements are remaining or the low
    // elements are remaining.
    SDValue &TargetV = StartIdx < 0 ? Hi : Lo;

    // Either set up this value if we've not encountered it before, or check
    // that it remains consistent.
    if (!TargetV)
      TargetV = MaskV;
    else if (TargetV != MaskV)
      // This may be a rotation, but it pulls from the inputs in some
      // unsupported interleaving.
      return -1;
  }

  // Check that we successfully analyzed the mask, and normalize the results.
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  if (!Lo)
    Lo = Hi;
  else if (!Hi)
    Hi = Lo;

  V1 = Lo;
  V2 = Hi;

  return Rotation;
}

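// Worked example (illustrative, not from the upstream source): for v8i16
// with mask <11,12,13,14,15,0,1,2> every defined element agrees on a
// rotation of 3; the tail of V2 feeds the low result positions and the head
// of V1 the high ones, i.e. the result is the low 8 elements of the
// 16-element concatenation shifted right by 3 elements.
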
/// Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
                                    ArrayRef<int> Mask) {
  // Don't accept any shuffles with zero elements.
  if (isAnyZero(Mask))
    return -1;

  // PALIGNR works on 128-bit lanes.
  SmallVector<int, 16> RepeatedMask;
  if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
    return -1;

  int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
  if (Rotation <= 0)
    return -1;

  // PALIGNR rotates bytes, so we need to scale the
  // rotation based on how many bytes are in the vector lane.
  int NumElts = RepeatedMask.size();
  int Scale = 16 / NumElts;
  return Rotation * Scale;
}

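// Illustrative note (not from the upstream source): for the v8i16 mask
// <11,12,13,14,15,0,1,2> the element rotation of 3 is scaled by 16/8 == 2
// bytes per element, giving a PALIGNR immediate of 6.
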
static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
                                        SDValue V2, ArrayRef<int> Mask,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");

  SDValue Lo = V1, Hi = V2;
  int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
  if (ByteRotation <= 0)
    return SDValue();

  // Cast the inputs to i8 vector of correct length to match PALIGNR or
  // PSLLDQ/PSRLDQ.
  MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
  Lo = DAG.getBitcast(ByteVT, Lo);
  Hi = DAG.getBitcast(ByteVT, Hi);

  // SSSE3 targets can use the palignr instruction.
  if (Subtarget.hasSSSE3()) {
    assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
           "512-bit PALIGNR requires BWI instructions");
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
                        DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
  }

  assert(VT.is128BitVector() &&
         "Rotate-based lowering only supports 128-bit lowering!");
  assert(Mask.size() <= 16 &&
         "Can shuffle at most 16 bytes in a 128-bit vector!");
  assert(ByteVT == MVT::v16i8 &&
         "SSE2 rotate lowering only needed for v16i8!");

  // Default SSE2 implementation
  int LoByteShift = 16 - ByteRotation;
  int HiByteShift = ByteRotation;

  SDValue LoShift =
      DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
                  DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
  SDValue HiShift =
      DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
                  DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
  return DAG.getBitcast(VT,
                        DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
}

/// Try to lower a vector shuffle as a dword/qword rotation.
///
/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
/// rotation of the concatenation of two vectors; this routine will
/// try to generically lower a vector shuffle through such a pattern.
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
         "Only 32-bit and 64-bit elements are supported!");

  // 128/256-bit vectors are only supported with VLX.
  assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
         && "VLX required for 128/256-bit vectors");

  SDValue Lo = V1, Hi = V2;
  int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
  if (Rotation <= 0)
    return SDValue();

  return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
                     DAG.getTargetConstant(Rotation, DL, MVT::i8));
}

/// Try to lower a vector shuffle as a byte shift sequence.
static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
                                           SDValue V2, ArrayRef<int> Mask,
                                           const APInt &Zeroable,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
  assert(VT.is128BitVector() && "Only 128-bit vectors supported");

  // We need a shuffle that has zeros at one/both ends and a sequential
  // shuffle from one source within.
  unsigned ZeroLo = Zeroable.countTrailingOnes();
  unsigned ZeroHi = Zeroable.countLeadingOnes();
  if (!ZeroLo && !ZeroHi)
    return SDValue();

  unsigned NumElts = Mask.size();
  unsigned Len = NumElts - (ZeroLo + ZeroHi);
  if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
    return SDValue();

  unsigned Scale = VT.getScalarSizeInBits() / 8;
  ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
  if (!isUndefOrInRange(StubMask, 0, NumElts) &&
      !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
    return SDValue();

  SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
  Res = DAG.getBitcast(MVT::v16i8, Res);

  // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
  // inner sequential set of elements, possibly offset:
  // 01234567 --> zzzzzz01 --> 1zzzzzzz
  // 01234567 --> 4567zzzz --> zzzzz456
  // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
  if (ZeroLo == 0) {
    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
  } else if (ZeroHi == 0) {
    unsigned Shift = Mask[ZeroLo] % NumElts;
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
  } else if (!Subtarget.hasSSSE3()) {
    // If we don't have PSHUFB then it's worth avoiding an AND constant mask
    // by performing 3 byte shifts. Shuffle combining can kick in above that.
    // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Shift += Mask[ZeroLo] % NumElts;
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
  } else
    return SDValue();

  return DAG.getBitcast(VT, Res);
}

/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word lane.
///
/// PSLL : (little-endian) left bit shift.
/// [ zz,  0, zz,  2 ]
/// [ -1,  4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [  1, zz,  3, zz]
/// [ -1, -1,  7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz,  0,  1,  2,  3,  4,  5,  6]
/// [ zz, zz, -1, -1,  2,  3,  4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1,  1]
/// PSRLDQ : (little-endian) right byte shift
/// [  5,  6,  7, zz, zz, zz, zz, zz]
/// [ -1,  5,  6,  7, zz, zz, zz, zz]
/// [  1,  2, -1, -1, -1, -1, zz, zz]
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
                               unsigned ScalarSizeInBits, ArrayRef<int> Mask,
                               int MaskOffset, const APInt &Zeroable,
                               const X86Subtarget &Subtarget) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
          return false;

    return true;
  };

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;
      if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
        return -1;
    }

    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
                  : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);

    // Normalize the scale for byte shifts to still produce an i64 element
    // type.
    Scale = ByteShift ? Scale / 2 : Scale;

    // We need to round trip through the appropriate type for the shift.
    MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
    ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
                        : MVT::getVectorVT(ShiftSVT, Size / Scale);
    return (int)ShiftAmt;
  };

  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
  // keep doubling the size of the integer elements up to that. We can
  // then shift the elements of the integer vector by whole multiples of
  // their width within the elements of the larger integer vector. Test each
  // multiple to see if we can find a match with the moved element indices
  // and that the shifted in elements are all zeroable.
  unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
          if (0 < ShiftAmt)
            return ShiftAmt;
        }

  // no match
  return -1;
}

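// Worked example (illustrative, not from the upstream source): the v4i32
// mask <zz,0,zz,2> (lanes 0 and 2 zeroable) matches at Scale == 2,
// Shift == 1, Left == true; ShiftEltBits is 64, so Opcode becomes
// X86ISD::VSHLI with ShiftVT = v2i64 and a 32-bit amount - a single PSLLQ.
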
static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Mask,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  MVT ShiftVT;
  SDValue V = V1;
  unsigned Opcode;

  // Try to match shuffle against V1 shift.
  int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                     Mask, 0, Zeroable, Subtarget);

  // If V1 failed, try to match shuffle against V2 shift.
  if (ShiftAmt < 0) {
    ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                   Mask, Size, Zeroable, Subtarget);
    V = V2;
  }

  if (ShiftAmt < 0)
    return SDValue();

  assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
         "Illegal integer vector type");
  V = DAG.getBitcast(ShiftVT, V);
  V = DAG.getNode(Opcode, DL, ShiftVT, V,
                  DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
  return DAG.getBitcast(VT, V);
}

// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
// Remainder of lower half result is zero and upper half is all undef.
static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
                                ArrayRef<int> Mask, uint64_t &BitLen,
                                uint64_t &BitIdx, const APInt &Zeroable) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
  assert(!Zeroable.isAllOnes() && "Fully zeroable shuffle mask");

  // Upper half must be undefined.
  if (!isUndefUpperHalf(Mask))
    return false;

  // Determine the extraction length from the part of the
  // lower half that isn't zeroable.
  int Len = HalfSize;
  for (; Len > 0; --Len)
    if (!Zeroable[Len - 1])
      break;
  assert(Len > 0 && "Zeroable shuffle mask");

  // Attempt to match first Len sequential elements from the lower half.
  SDValue Src;
  int Idx = -1;
  for (int i = 0; i != Len; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;
    SDValue &V = (M < Size ? V1 : V2);
    M = M % Size;

    // The extracted elements must start at a valid index and all mask
    // elements must be in the lower half.
    if (i > M || M >= HalfSize)
      return false;

    if (Idx < 0 || (Src == V && Idx == (M - i))) {
      Src = V;
      Idx = M - i;
      continue;
    }
    return false;
  }

  if (!Src || Idx < 0)
    return false;

  assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
  BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
  BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
  V1 = Src;
  return true;
}

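// For example, a v8i16 shuffle with mask {2, 3, Z, Z, U, U, U, U} (upper
// half undef, Z zeroable) matches with Len = 2 and Idx = 2, giving
// BitLen = 32 and BitIdx = 32: EXTRQ copies 32 bits starting at bit 32
// into the low half and zeros the rest of the low 64 bits.
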
// INSERTQ: Extract lowest Len elements from lower half of second source and
// insert over first source, starting at Idx.
// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
                                  ArrayRef<int> Mask, uint64_t &BitLen,
                                  uint64_t &BitIdx) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  // Upper half must be undefined.
  if (!isUndefUpperHalf(Mask))
    return false;

  for (int Idx = 0; Idx != HalfSize; ++Idx) {
    SDValue Base;

    // Attempt to match first source from mask before insertion point.
    if (isUndefInRange(Mask, 0, Idx)) {
      /* EMPTY */
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
      Base = V1;
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
      Base = V2;
    } else {
      continue;
    }

    // Extend the extraction length looking to match both the insertion of
    // the second source and the remaining elements of the first.
    for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
      SDValue Insert;
      int Len = Hi - Idx;

      // Match insertion.
      if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
        Insert = V1;
      } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
        Insert = V2;
      } else {
        continue;
      }

      // Match the remaining elements of the lower half.
      if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
        /* EMPTY */
      } else if ((!Base || (Base == V1)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
        Base = V1;
      } else if ((!Base || (Base == V2)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
                                            Size + Hi)) {
        Base = V2;
      } else {
        continue;
      }

      BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
      BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
      V1 = Base;
      V2 = Insert;
      return true;
    }
  }

  return false;
}

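// For example, a v8i16 shuffle with mask {0, 8, 9, 3, U, U, U, U} matches
// with Idx = 1 and Len = 2 (Base = V1, Insert = V2), so BitLen = 32 and
// BitIdx = 16: INSERTQ writes the low 32 bits of V2 over V1 starting at
// bit 16, producing {V1[0], V2[0], V2[1], V1[3], ...}.
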
/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable, SelectionDAG &DAG) {
  uint64_t BitLen, BitIdx;
  if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
    return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));

  if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
    return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
                       V2 ? V2 : DAG.getUNDEF(VT),
                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));

  return SDValue();
}

/// Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and
/// can start from an offset element index in the input; to avoid excess
/// shuffling, the offset must either be in the bottom lane or at the start
/// of a higher lane. All extended elements must come from the same lane.
static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
    ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(Scale > 1 && "Need a scale to extend.");
  int EltBits = VT.getScalarSizeInBits();
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = 128 / EltBits;
  int OffsetLane = Offset / NumEltsPerLane;
  assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
         "Only 8, 16, and 32 bit elements can be extended.");
  assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
  assert(0 <= Offset && "Extension offset must be positive.");
  assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
         "Extension offset must be in the first lane or start an upper lane.");

  // Check that an index is in same lane as the base offset.
  auto SafeOffset = [&](int Idx) {
    return OffsetLane == (Idx / NumEltsPerLane);
  };

  // Shift along an input so that the offset base moves to the first element.
  auto ShuffleOffset = [&](SDValue V) {
    if (!Offset)
      return V;

    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = 0; i * Scale < NumElements; ++i) {
      int SrcIdx = i + Offset;
      ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
    }
    return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
  };

  // Found a valid a/zext mask! Try various lowering strategies based on the
  // input type and available ISA extensions.
  if (Subtarget.hasSSE41()) {
    // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
    // PUNPCK will catch this in a later shuffle match.
    if (Offset && Scale == 2 && VT.is128BitVector())
      return SDValue();
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
                                 NumElements / Scale);
    InputV = ShuffleOffset(InputV);
    InputV = getEXTEND_VECTOR_INREG(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND,
                                    DL, ExtVT, InputV, DAG);
    return DAG.getBitcast(VT, InputV);
  }

  assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");

  // For any extends we can cheat for larger element sizes and use shuffle
  // instructions that can fold with a load and/or copy.
  if (AnyExt && EltBits == 32) {
    int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
                         -1};
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                        DAG.getBitcast(MVT::v4i32, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
  }
  if (AnyExt && EltBits == 16 && Scale > 2) {
    int PSHUFDMask[4] = {Offset / 2, -1,
                         SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                         DAG.getBitcast(MVT::v4i32, InputV),
                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    int PSHUFWMask[4] = {1, -1, -1, -1};
    unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
    return DAG.getBitcast(
        VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
                        DAG.getBitcast(MVT::v8i16, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
  }

  // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
  // to 64-bit integers.
  if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
    assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
    assert(VT.is128BitVector() && "Unexpected vector width!");

    int LoIdx = Offset * EltBits;
    SDValue Lo = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
                                DAG.getTargetConstant(LoIdx, DL, MVT::i8)));

    if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
      return DAG.getBitcast(VT, Lo);

    int HiIdx = (Offset + 1) * EltBits;
    SDValue Hi = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
                                DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
    return DAG.getBitcast(VT,
                          DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
  }

  // If this would require more than 2 unpack instructions to expand, use
  // pshufb when available. We can only use more than 2 unpack instructions
  // when zero extending i8 elements which also makes it easier to use pshufb.
  if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
    assert(NumElements == 16 && "Unexpected byte vector width!");
    SDValue PSHUFBMask[16];
    for (int i = 0; i < 16; ++i) {
      int Idx = Offset + (i / Scale);
      if (i % Scale == 0 && SafeOffset(Idx)) {
        PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
        continue;
      }
      PSHUFBMask[i] =
          AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
    }
    InputV = DAG.getBitcast(MVT::v16i8, InputV);
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
                        DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
  }

  // If we are extending from an offset, ensure we start on a boundary that
  // we can unpack from.
  int AlignToUnpack = Offset % (NumElements / Scale);
  if (AlignToUnpack) {
    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = AlignToUnpack; i < NumElements; ++i)
      ShMask[i - AlignToUnpack] = i;
    InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
    Offset -= AlignToUnpack;
  }

  // Otherwise emit a sequence of unpacks.
  do {
    unsigned UnpackLoHi = X86ISD::UNPCKL;
    if (Offset >= (NumElements / 2)) {
      UnpackLoHi = X86ISD::UNPCKH;
      Offset -= (NumElements / 2);
    }

    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
                         : getZeroVector(InputVT, Subtarget, DAG, DL);
    InputV = DAG.getBitcast(InputVT, InputV);
    InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getBitcast(VT, InputV);
}

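// For example, zero-extending the low four i8 elements of a v16i8 to i32
// (Scale = 4) on a pre-SSE4.1 target decomposes into two unpacks against a
// zero vector via the loop above:
//   PUNPCKLBW zero -> {x0, 0, x1, 0, ..., x7, 0}       (i8  -> i16)
//   PUNPCKLWD zero -> {x0, 0, 0, 0, x1, 0, 0, 0, ...}  (i16 -> i32)
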
/// Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
/// match this pattern, using all of the micro-architectural details it can
/// to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
static SDValue lowerShuffleAsZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  int Bits = VT.getSizeInBits();
  int NumLanes = Bits / 128;
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = NumElements / NumLanes;
  assert(VT.getScalarSizeInBits() <= 32 &&
         "Exceeds 32-bit integer zero extension limit");
  assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");

  // Define a helper function to check a particular ext-scale and lower to it
  // if valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    int Offset = 0;
    int Matches = 0;
    for (int i = 0; i < NumElements; ++i) {
      int M = Mask[i];
      if (M < 0)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements needs to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }

      // Each of the base elements needs to be consecutive indices into the
      // same input vector.
      SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
      if (!InputV) {
        InputV = V;
        Offset = M - (i / Scale);
      } else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.

      // Offset must start in the lowest 128-bit lane or at the start of an
      // upper lane.
      // FIXME: Is it ever worth allowing a negative base offset?
      if (!((0 <= Offset && Offset < NumEltsPerLane) ||
            (Offset % NumEltsPerLane) == 0))
        return SDValue();

      // If we are offsetting, all referenced entries must come from the same
      // lane.
      if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
        return SDValue();

      if ((M % NumElements) != (Offset + (i / Scale)))
        return SDValue(); // Non-consecutive strided elements.
      Matches++;
    }

    // If we fail to find an input, we have a zero-shuffle which should always
    // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    // If we are offsetting, don't extend if we only match a single input; we
    // can always do better by using a basic PSHUF or PUNPCK.
    if (Offset != 0 && Matches < 2)
      return SDValue();

    return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
                                                 InputV, Mask, Subtarget, DAG);
  };

  // The widest scale possible for extending is to a 64-bit integer.
  assert(Bits % 64 == 0 &&
         "The number of bits in a vector must be divisible by 64 on x86!");
  int NumExtElements = Bits / 64;

  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
  for (; NumExtElements < NumElements; NumExtElements *= 2) {
    assert(NumElements % NumExtElements == 0 &&
           "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }

  // General extends failed, but 128-bit vectors may be able to use MOVQ.
  if (Bits != 128)
    return SDValue();

  // Returns one of the source operands if the shuffle can be reduced to a
  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
  auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };

  if (SDValue V = CanZExtLowHalf()) {
    V = DAG.getBitcast(MVT::v2i64, V);
    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getBitcast(VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}

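// For example, the v4i32 mask {4, Z, 5, Z} matches at Scale = 2 with
// InputV = V2, Offset = 0 and AnyExt = false, and on SSE4.1+ targets it is
// emitted as a ZERO_EXTEND_VECTOR_INREG of V2 to v2i64, i.e. a single
// PMOVZXDQ.
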
/// Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {
  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  V = peekThroughBitcasts(V);

  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
    // Ensure the scalar operand is the same size as the destination.
    // FIXME: Add support for scalar truncation where possible.
    SDValue S = V.getOperand(Idx);
    if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
      return DAG.getBitcast(EltVT, S);
  }

  return SDValue();
}

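// For example, with V = bitcast (v4f32 build_vector a, b, c, d) to v4i32 and
// Idx = 2, this returns bitcast(c) to i32: the bitcast is looked through
// because the 32-bit element size is unchanged.
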
/// Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  return V->hasOneUse() &&
         ISD::isNON_EXTLoad(peekThroughOneUseBitcasts(V).getNode());
}

template<typename T>
static bool isSoftFP16(T VT, const X86Subtarget &Subtarget) {
  return VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16();
}

template<typename T>
bool X86TargetLowering::isSoftFP16(T VT) const {
  return ::isSoftFP16(VT, Subtarget);
}

/// Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern for which we have especially efficient lowering
/// patterns across all subtarget feature sets.
static SDValue lowerShuffleAsElementInsertion(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();

  if (isSoftFP16(EltVT, Subtarget))
    return SDValue();

  int V2Index =
      find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
      Mask.begin();
  bool IsV1Zeroable = true;
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }

  // Check for a single input from a SCALAR_TO_VECTOR node.
  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
  // all the smarts here sunk into that routine. However, the current
  // lowering of BUILD_VECTOR makes that nearly impossible until the old
  // vector shuffle lowering is dead.
  SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
                                               DAG);
  if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
    // We need to zext the scalar if it is smaller than an i32.
    V2S = DAG.getBitcast(EltVT, V2S);
    if (EltVT == MVT::i8 || (EltVT == MVT::i16 && !Subtarget.hasFP16())) {
      // Using zext to expand a narrow element won't work for non-zero
      // insertions.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
             EltVT == MVT::i16) {
    // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }

  if (!IsV1Zeroable) {
    // If V1 can't be treated as a zero vector we have fewer options to lower
    // this. We can't support integer vectors or non-zero targets cheaply, and
    // the V1 elements can't be permuted in any way.
    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
    V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
    if (!VT.is128BitVector())
      return SDValue();

    // Otherwise, use MOVSD, MOVSS or MOVSH.
    unsigned MovOpc = 0;
    if (EltVT == MVT::f16)
      MovOpc = X86ISD::MOVSH;
    else if (EltVT == MVT::f32)
      MovOpc = X86ISD::MOVSS;
    else if (EltVT == MVT::f64)
      MovOpc = X86ISD::MOVSD;
    else
      llvm_unreachable("Unsupported floating point element type to handle!");
    return DAG.getNode(MovOpc, DL, ExtVT, V1, V2);
  }

  // This lowering only works for the low element with floating point vectors.
  if (VT.isFloatingPoint() && V2Index != 0)
    return SDValue();

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getBitcast(VT, V2);

  if (V2Index != 0) {
    // If we have 4 or fewer lanes we can cheaply shuffle the element into
    // the desired position. Otherwise it is more efficient to do a vector
    // shift left. We know that we can do a vector shift left because all
    // the inputs are zero.
    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
      V2Shuffle[V2Index] = 0;
      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
    } else {
      V2 = DAG.getBitcast(MVT::v16i8, V2);
      V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
                       DAG.getTargetConstant(
                           V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
      V2 = DAG.getBitcast(VT, V2);
    }
  }
  return V2;
}

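// For example, the v4i32 mask {4, Z, Z, Z} with V2 = scalar_to_vector(x)
// becomes VZEXT_MOVL(SCALAR_TO_VECTOR(x)), i.e. a single MOVD that places
// x in element 0 and zeros the three upper elements.
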
/// Try to lower broadcast of a single - truncated - integer element,
/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
                                            int BroadcastIdx,
                                            const X86Subtarget &Subtarget,
                                            SelectionDAG &DAG) {
  assert(Subtarget.hasAVX2() &&
         "We can only lower integer broadcasts with AVX2!");

  MVT EltVT = VT.getVectorElementType();
  MVT V0VT = V0.getSimpleValueType();

  assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
  assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");

  MVT V0EltVT = V0VT.getVectorElementType();
  if (!V0EltVT.isInteger())
    return SDValue();

  const unsigned EltSize = EltVT.getSizeInBits();
  const unsigned V0EltSize = V0EltVT.getSizeInBits();

  // This is only a truncation if the original element type is larger.
  if (V0EltSize <= EltSize)
    return SDValue();

  assert(((V0EltSize % EltSize) == 0) &&
         "Scalar type sizes must all be powers of 2 on x86!");

  const unsigned V0Opc = V0.getOpcode();
  const unsigned Scale = V0EltSize / EltSize;
  const unsigned V0BroadcastIdx = BroadcastIdx / Scale;

  if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
      V0Opc != ISD::BUILD_VECTOR)
    return SDValue();

  SDValue Scalar = V0.getOperand(V0BroadcastIdx);

  // If we're extracting non-least-significant bits, shift so we can truncate.
  // Hopefully, we can fold away the trunc/srl/load into the broadcast.
  // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
  // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
  if (const int OffsetIdx = BroadcastIdx % Scale)
    Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
                         DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                     DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
}

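// For example, broadcasting element 3 of a v4i32 build_vector as a v8i16
// splat gives Scale = 2, V0BroadcastIdx = 1 and OffsetIdx = 1, so the i32
// scalar is shifted right by 16 bits, truncated to i16 and fed to a
// VPBROADCASTW.
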
/// Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
  // This routine only handles 128-bit shufps.
  assert(Mask.size() == 4 && "Unsupported mask size!");
  assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");

  // To lower with a single SHUFPS we need to have the low half and high half
  // each requiring a single input.
  if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}

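// For example, {0, 1, 4, 5} passes this test (the low half reads only V1 and
// the high half only V2), while {0, 4, 1, 5} fails because its low half needs
// elements from both inputs, which a single SHUFPS cannot provide.
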
/// If we are extracting two 128-bit halves of a vector and shuffling the
/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
/// multi-shuffle lowering.
static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
                                             SDValue N1, ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  MVT VT = N0.getSimpleValueType();
  assert((VT.is128BitVector() &&
          (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
         "VPERM* family of shuffles requires 32-bit or 64-bit elements");

  // Check that both sources are extracts of the same source vector.
  if (!N0.hasOneUse() || !N1.hasOneUse() ||
      N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N0.getOperand(0) != N1.getOperand(0))
    return SDValue();

  SDValue WideVec = N0.getOperand(0);
  MVT WideVT = WideVec.getSimpleValueType();
  if (!WideVT.is256BitVector())
    return SDValue();

  // Match extracts of each half of the wide source vector. Commute the shuffle
  // if the extract of the low half is N1.
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
  const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
  const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
  if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
    ShuffleVectorSDNode::commuteMask(NewMask);
  else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
    return SDValue();

  // Final bailout: if the mask is simple, we are better off using an extract
  // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
  // because that avoids a constant load from memory.
  if (NumElts == 4 &&
      (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask, DAG)))
    return SDValue();

  // Extend the shuffle mask with undef elements.
  NewMask.append(NumElts, -1);

  // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
  SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
                                      NewMask);
  // This is free: ymm -> xmm.
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
                     DAG.getIntPtrConstant(0, DL));
}

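// For example, a v4f32 shuffle of (extract_subvector %ymm, 0) and
// (extract_subvector %ymm, 4) with mask {0, 7, 2, 5} becomes a single wide
// v8f32 shuffle with mask {0, 7, 2, 5, U, U, U, U} (a VPERMPS on AVX2)
// followed by a free ymm -> xmm extract.
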
/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
                                       SDValue V2, ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
        (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
        (Subtarget.hasAVX2() && VT.isInteger())))
    return SDValue();

  // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
  // we can only broadcast from a register with AVX2.
  unsigned NumEltBits = VT.getScalarSizeInBits();
  unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
                        ? X86ISD::MOVDDUP
                        : X86ISD::VBROADCAST;
  bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();

  // Check that the mask is a broadcast.
  int BroadcastIdx = getSplatIndex(Mask);
  if (BroadcastIdx < 0)
    return SDValue();
  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to find a scalar load that we can
  // combine with the broadcast.
  // TODO: Combine this logic with findEltLoadSrc() used by
  //       EltsFromConsecutiveLoads().
  int BitOffset = BroadcastIdx * NumEltBits;
  SDValue V = V1;
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::BITCAST: {
      V = V.getOperand(0);
      continue;
    }
    case ISD::CONCAT_VECTORS: {
      int OpBitWidth = V.getOperand(0).getValueSizeInBits();
      int OpIdx = BitOffset / OpBitWidth;
      V = V.getOperand(OpIdx);
      BitOffset %= OpBitWidth;
      continue;
    }
    case ISD::EXTRACT_SUBVECTOR: {
      // The extraction index adds to the existing offset.
      unsigned EltBitWidth = V.getScalarValueSizeInBits();
      unsigned Idx = V.getConstantOperandVal(1);
      unsigned BeginOffset = Idx * EltBitWidth;
      BitOffset += BeginOffset;
      V = V.getOperand(0);
      continue;
    }
    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      int EltBitWidth = VOuter.getScalarValueSizeInBits();
      int Idx = (int)V.getConstantOperandVal(2);
      int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
      int BeginOffset = Idx * EltBitWidth;
      int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
      if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
        BitOffset -= BeginOffset;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }
  assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
  BroadcastIdx = BitOffset / NumEltBits;

  // Do we need to bitcast the source to retrieve the original broadcast index?
  bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  // If the original value has a larger element type than the shuffle, the
  // broadcast element is in essence truncated. Make that explicit to ease
  // folding.
  if (BitCastSrc && VT.isInteger())
    if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
            DL, VT, V, BroadcastIdx, Subtarget, DAG))
      return TruncBroadcast;

  // Also check the simpler case, where we can directly reuse the scalar.
  if (!BitCastSrc &&
      ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
       (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
    V = V.getOperand(BroadcastIdx);

    // If we can't broadcast from a register, check that the input is a load.
    if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (ISD::isNormalLoad(V.getNode()) &&
             cast<LoadSDNode>(V)->isSimple()) {
    // We do not check for one-use of the vector load because a broadcast load
    // is expected to be a win for code size, register pressure, and possibly
    // uops even if the original vector load is not eliminated.

    // Reduce the vector load and shuffle to a broadcasted scalar load.
    LoadSDNode *Ld = cast<LoadSDNode>(V);
    SDValue BaseAddr = Ld->getOperand(1);
    MVT SVT = VT.getScalarType();
    unsigned Offset = BroadcastIdx * SVT.getStoreSize();
    assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
    SDValue NewAddr =
        DAG.getMemBasePlusOffset(BaseAddr, TypeSize::Fixed(Offset), DL);

    // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
    // than MOVDDUP.
    // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
    if (Opcode == X86ISD::VBROADCAST) {
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = {Ld->getChain(), NewAddr};
      V = DAG.getMemIntrinsicNode(
          X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
          DAG.getMachineFunction().getMachineMemOperand(
              Ld->getMemOperand(), Offset, SVT.getStoreSize()));
      DAG.makeEquivalentMemoryOrdering(Ld, V);
      return DAG.getBitcast(VT, V);
    }
    assert(SVT == MVT::f64 && "Unexpected VT!");
    V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                    DAG.getMachineFunction().getMachineMemOperand(
                        Ld->getMemOperand(), Offset, SVT.getStoreSize()));
    DAG.makeEquivalentMemoryOrdering(Ld, V);
  } else if (!BroadcastFromReg) {
    // We can't broadcast from a vector register.
    return SDValue();
  } else if (BitOffset != 0) {
    // We can only broadcast from the zero-element of a vector register,
    // but it can be advantageous to broadcast from the zero-element of a
    // subvector.
    if (!VT.is256BitVector() && !VT.is512BitVector())
      return SDValue();

    // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
    if (VT == MVT::v4f64 || VT == MVT::v4i64)
      return SDValue();

    // Only broadcast the zero-element of a 128-bit subvector.
    if ((BitOffset % 128) != 0)
      return SDValue();

    assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
           "Unexpected bit-offset");
    assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
           "Unexpected vector size");
    unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
    V = extract128BitVector(V, ExtractIdx, DAG, DL);
  }

  // On AVX we can use VBROADCAST directly for scalar sources.
  if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector()) {
    V = DAG.getBitcast(MVT::f64, V);
    if (Subtarget.hasAVX()) {
      V = DAG.getNode(X86ISD::VBROADCAST, DL, MVT::v2f64, V);
      return DAG.getBitcast(VT, V);
    }
    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V);
  }

  // If this is a scalar, do the broadcast on this type and bitcast.
  if (!V.getValueType().isVector()) {
    assert(V.getScalarValueSizeInBits() == NumEltBits &&
           "Unexpected scalar size");
    MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
                                       VT.getVectorNumElements());
    return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
  }

  // We only support broadcasting from 128-bit vectors to minimize the
  // number of patterns we need to deal with in isel. So extract down to
  // 128-bits, removing as many bitcasts as possible.
  if (V.getValueSizeInBits() > 128)
    V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);

  // Otherwise cast V to a vector with the same element type as VT, but
  // possibly narrower than VT. Then perform the broadcast.
  unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
  MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
  return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
}

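// For example, splatting element 1 of a simple v4f32 load is reduced to a
// VBROADCAST_LOAD of the f32 at BaseAddr + 4 (a memory-operand VBROADCASTSS),
// even when the original vector load has other uses.
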
// Check whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
                                   unsigned &InsertPSMask,
                                   const APInt &Zeroable,
                                   ArrayRef<int> Mask, SelectionDAG &DAG) {
  assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Attempt to match INSERTPS with one element from VA or VB being
  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
  // are updated.
  auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
                             ArrayRef<int> CandidateMask) {
    unsigned ZMask = 0;
    int VADstIndex = -1;
    int VBDstIndex = -1;
    bool VAUsedInPlace = false;

    for (int i = 0; i < 4; ++i) {
      // Synthesize a zero mask from the zeroable elements (includes undefs).
      if (Zeroable[i]) {
        ZMask |= 1 << i;
        continue;
      }

      // Flag if we use any VA inputs in place.
      if (i == CandidateMask[i]) {
        VAUsedInPlace = true;
        continue;
      }

      // We can only insert a single non-zeroable element.
      if (VADstIndex >= 0 || VBDstIndex >= 0)
        return false;

      if (CandidateMask[i] < 4) {
        // VA input out of place for insertion.
        VADstIndex = i;
      } else {
        // VB input for insertion.
        VBDstIndex = i;
      }
    }

    // Don't bother if we have no (non-zeroable) element for insertion.
    if (VADstIndex < 0 && VBDstIndex < 0)
      return false;

    // Determine element insertion src/dst indices. The src index is from the
    // start of the inserted vector, not the start of the concatenated vector.
    unsigned VBSrcIndex = 0;
    if (VADstIndex >= 0) {
      // If we have a VA input out of place, we use VA as the V2 element
      // insertion and don't use the original V2 at all.
      VBSrcIndex = CandidateMask[VADstIndex];
      VBDstIndex = VADstIndex;
      VB = VA;
    } else {
      VBSrcIndex = CandidateMask[VBDstIndex] - 4;
    }

    // If no V1 inputs are used in place, then the result is created only from
    // the zero mask and the V2 insertion - so remove V1 dependency.
    if (!VAUsedInPlace)
      VA = DAG.getUNDEF(MVT::v4f32);

    // Update V1, V2 and InsertPSMask accordingly.
    V1 = VA;
    V2 = VB;

    // Insert the V2 element into the desired position.
    InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
    return true;
  };

  if (matchAsInsertPS(V1, V2, Mask))
    return true;

  // Commute and try again.
  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
  ShuffleVectorSDNode::commuteMask(CommutedMask);
  if (matchAsInsertPS(V2, V1, CommutedMask))
    return true;

  return false;
}

static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
                                      ArrayRef<int> Mask, const APInt &Zeroable,
                                      SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");

  // Attempt to match the insertps pattern.
  unsigned InsertPSMask = 0;
  if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
    return SDValue();

  // Insert the V2 element into the desired position.
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
}

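// For example, the v4f32 mask {0, 6, 2, 3} inserts element 2 of V2 into
// lane 1 of V1, so matchShuffleAsInsertPS produces the immediate
// (2 << 6) | (1 << 4) = 0xA0 and we emit INSERTPS $0xA0, V2, V1.
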
/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up with alternating between
/// the two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
static SDValue lowerShuffleAsPermuteAndUnpack(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(!VT.isFloatingPoint() &&
         "This routine only supports integer vectors.");
  assert(VT.is128BitVector() &&
         "This routine only works on 128-bit vectors.");
  assert(!V2.isUndef() &&
         "This routine should only be used when blending two inputs.");
  assert(Mask.size() >= 2 && "Single element masks are invalid.");

  int Size = Mask.size();

  int NumLoInputs =
      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
  int NumHiInputs =
      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });

  bool UnpackLo = NumLoInputs >= NumHiInputs;

  auto TryUnpack = [&](int ScalarSize, int Scale) {
    SmallVector<int, 16> V1Mask((unsigned)Size, -1);
    SmallVector<int, 16> V2Mask((unsigned)Size, -1);

    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      // Each element of the unpack contains Scale elements from this mask.
      int UnpackIdx = i / Scale;

      // We only handle the case where V1 feeds the first slots of the unpack.
      // We rely on canonicalization to ensure this is the case.
      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
        return SDValue();

      // Setup the mask for this input. The indexing is tricky as we have to
      // handle the unpack stride.
      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
          Mask[i] % Size;
    }

    // If we will have to shuffle both inputs to use the unpack, check whether
    // we can just unpack first and shuffle the result. If so, skip this unpack.
    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
        !isNoopShuffleMask(V2Mask))
      return SDValue();

    // Shuffle the inputs into place.
    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);

    // Cast the inputs to the type we will use to unpack them.
    MVT UnpackVT =
        MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
    V1 = DAG.getBitcast(UnpackVT, V1);
    V2 = DAG.getBitcast(UnpackVT, V2);

    // Unpack the inputs and cast the result back to the desired type.
    return DAG.getBitcast(
        VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                        UnpackVT, V1, V2));
  };

  // We try each unpack from the largest to the smallest to try and find one
  // that fits this mask.
  int OrigScalarSize = VT.getScalarSizeInBits();
  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
      return Unpack;

  // If we're shuffling with a zero vector then we're better off not doing
  // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
  if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
      ISD::isBuildVectorAllZeros(V2.getNode()))
    return SDValue();

  // If none of the unpack-rooted lowerings worked (or were profitable) try an
  // initial unpack.
  if (NumLoInputs == 0 || NumHiInputs == 0) {
    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
           "We have to have *some* inputs!");
    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;

    // FIXME: We could consider the total complexity of the permute of each
    // possible unpacking. Or at the least we should consider how many
    // half-crossings are created.
    // FIXME: We could consider commuting the unpacks.

    SmallVector<int, 32> PermMask((unsigned)Size, -1);
    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");

      PermMask[i] =
          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
    }
    return DAG.getVectorShuffle(
        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
                            DL, VT, V1, V2),
        DAG.getUNDEF(VT), PermMask);
  }

  return SDValue();
}

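// For example, the v4i32 mask {3, 7, 1, 5} alternates between the inputs,
// so TryUnpack permutes both V1 and V2 by {3, 1, U, U} and a single
// PUNPCKLDQ then interleaves them into {V1[3], V2[3], V1[1], V2[1]}.
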
/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips, though,
/// so it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
    }

    return DAG.getNode(
        X86ISD::SHUFP, DL, MVT::v2f64,
        Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
  }
  assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                        Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(Mask, {0, 3}, V1, V2) ||
      isShuffleEquivalent(Mask, {1, 3}, V1, V2))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          X86ISD::MOVSD, DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget.hasSSE41())
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
    return V;

  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
                     DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
}

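// For example, the single-input v2f64 mask {1, 1} yields SHUFPDMask = 3 and
// is emitted as VPERMILPD $3 on AVX (which can fold a load) or as SHUFPD $3
// with V1 used for both operands otherwise.
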
/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
    V1 = DAG.getBitcast(MVT::v4i32, V1);
    int WidenedMask[4] = {Mask[0] < 0 ? -1 : (Mask[0] * 2),
                          Mask[0] < 0 ? -1 : ((Mask[0] * 2) + 1),
                          Mask[1] < 0 ? -1 : (Mask[1] * 2),
                          Mask[1] < 0 ? -1 : ((Mask[1] * 2) + 1)};
    return DAG.getBitcast(
        MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
  }
  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // If we have direct support for blends, we should lower by decomposing into
  // a permute. That will be faster than the domain cross.
  if (IsBlendSupported)
    return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG);

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles here.
  V1 = DAG.getBitcast(MVT::v2f64, V1);
  V2 = DAG.getBitcast(MVT::v2f64, V2);
  return DAG.getBitcast(MVT::v2i64,
                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}

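// For example, absent SSSE3 and SSE4.1 the v2i64 mask {1, 2} falls through
// to the SHUFPD fallback above and is emitted as SHUFPD $1 on the bitcast
// operands (V1[1] into lane 0, V2[0] into lane 1), accepting the possible
// domain-crossing stall.
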
/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using
/// SHUFPS. It makes no assumptions about whether this is the *best* lowering;
/// it simply uses it.
static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, SelectionDAG &DAG) {
  SDValue LowV = V1, HighV = V2;
  SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 1) {
    int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
    int V2AdjIndex = V2Index ^ 1;

    if (Mask[V2AdjIndex] < 0) {
      // Handles all the cases where we have a single V2 element and an undef.
      // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
      // To make this work, blend them together as the first step.
      int V1Index = V2AdjIndex;
      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
    if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
      // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
      // trying to place elements directly, just blend them and set up the final
      // shuffle to place them.

      // The first two blend mask elements are for V1, the second two are for
      // V2.
      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
                          Mask[2] < 4 ? Mask[2] : Mask[3],
                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // the shuffle.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
      NewMask[1] = Mask[0] < 4 ? 2 : 0;
      NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  } else if (NumV2Elements == 3) {
    // Ideally canonicalizeShuffleMaskWithCommute should have caught this, but
    // we can get here due to other paths (e.g. repeated mask matching) where
    // we don't want to do another round of lowerVECTOR_SHUFFLE.
    ShuffleVectorSDNode::commuteMask(NewMask);
    return lowerShuffleWithSHUFPS(DL, VT, NewMask, V2, V1, DAG);
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}

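// For example, mask {0, 4, 2, 6} mixes both inputs in each half, so the
// code above first blends with BlendMask {0, 2, 0, 2} to form
// {V1[0], V1[2], V2[0], V2[2]} and then shuffles that result against itself
// with NewMask {0, 2, 1, 3}, yielding {V1[0], V2[0], V1[2], V2[2]}.
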
/// Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (Subtarget.hasSSE3()) {
      if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
      if (isShuffleEquivalent(Mask, {1, 1, 3, 3}, V1, V2))
        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
    }

    // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
    // in SSE1 because otherwise they are widened to v2f64 and never get here.
    if (!Subtarget.hasSSE2()) {
      if (isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2))
        return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
      if (isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1, V2))
        return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
    // input vector to both operands to simulate this with a SHUFPS.
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // There are special ways we can lower some single-element blends. However,
  // we have custom ways we can lower more complex single-element blends below
  // that we defer to if both this and BLENDPS fail to match, so restrict this
  // to when the V2 input is targeting element 0 of the mask -- that is the
  // fast case here.
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  if (Subtarget.hasSSE41()) {
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
      return V;

    if (!isSingleSHUFPSMask(Mask))
      if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
                                                            V2, Mask, DAG))
        return BlendPerm;
  }

  // Use low/high mov instructions. These are only valid in SSE1 because
  // otherwise they are widened to v2f64 and never get here.
  if (!Subtarget.hasSSE2()) {
    if (isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2))
      return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
    if (isShuffleEquivalent(Mask, {2, 3, 6, 7}, V1, V2))
      return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
    return V;

  // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}

/// Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Try to use broadcast unless the mask only has one non-undef element.
    if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
      if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
                                                      Mask, Subtarget, DAG))
        return Broadcast;
    }

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We coerce the shuffle pattern to be compatible with UNPCK instructions
    // but we aren't actually going to use the UNPCK instruction because doing
    // so prevents folding a load into this instruction or making a copy.
    const int UnpackLoMask[] = {0, 0, 1, 1};
    const int UnpackHiMask[] = {2, 2, 3, 3};
    if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2))
      Mask = UnpackLoMask;
    else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2))
      Mask = UnpackHiMask;

    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
                                                Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // Assume that a single SHUFPS is faster than an alternative sequence of
  // multiple instructions (even if the CPU has a domain penalty).
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (!isSingleSHUFPSMask(Mask)) {
    // If we have direct support for blends, we should lower by decomposing into
    // a permute. That will be faster than the domain cross.
    if (IsBlendSupported)
      return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG);

    // Try to lower by permuting the inputs into an unpack instruction.
    if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
                                                        Mask, Subtarget, DAG))
      return Unpack;
  }

  // We implement this with SHUFPS because it can blend from two vectors.
  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // needed.
  SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
  SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
  SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
  return DAG.getBitcast(MVT::v4i32, ShufPS);
}
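
// Illustrative sketch (not from the original source, assuming an SSE2-only
// subtarget with no SSE4.1 blends): a v4i32 mask such as <0, 1, 4, 5> is a
// single-SHUFPS mask, so it reaches the tail above and becomes two bitcasts
// around SHUFPS with imm8 = 0x44 (low pair from V1, low pair from V2).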
/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
///
/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
static SDValue lowerV8I16GeneralSingleInputShuffle(
    const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
  MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);

  assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
  MutableArrayRef<int> HiMask = Mask.slice(4, 4);

  // Attempt to directly match PSHUFLW or PSHUFHW.
  if (isUndefOrInRange(LoMask, 0, 4) &&
      isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
    return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                       getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
  }
  if (isUndefOrInRange(HiMask, 4, 8) &&
      isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
    for (int i = 0; i != 4; ++i)
      HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
    return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                       getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
  }
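
  // Illustrative examples (not from the original source): the mask
  // [2,1,0,3, 4,5,6,7] hits the first early-out and becomes a single PSHUFLW
  // with imm8 = 2 | (1 << 2) | (0 << 4) | (3 << 6) = 0xC6, while
  // [0,1,2,3, 7,6,5,4] hits the second and becomes a single PSHUFHW with the
  // high half rebased to [3,2,1,0], i.e. imm8 = 0x1B.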
  SmallVector<int, 4> LoInputs;
  copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
  array_pod_sort(LoInputs.begin(), LoInputs.end());
  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
  SmallVector<int, 4> HiInputs;
  copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
  array_pod_sort(HiInputs.begin(), HiInputs.end());
  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
  int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
  int NumHToH = HiInputs.size() - NumLToH;
  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);

  // If we are shuffling values from one half - check how many different DWORD
  // pairs we need to create. If only 1 or 2 then we can perform this as a
  // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
  auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
                               ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
    V = DAG.getNode(ShufWOp, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
    V = DAG.getBitcast(PSHUFDVT, V);
    V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    return DAG.getBitcast(VT, V);
  };

  if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
    int PSHUFDMask[4] = { -1, -1, -1, -1 };
    SmallVector<std::pair<int, int>, 4> DWordPairs;
    int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);

    // Collect the different DWORD pairs.
    for (int DWord = 0; DWord != 4; ++DWord) {
      int M0 = Mask[2 * DWord + 0];
      int M1 = Mask[2 * DWord + 1];
      M0 = (M0 >= 0 ? M0 % 4 : M0);
      M1 = (M1 >= 0 ? M1 % 4 : M1);
      if (M0 < 0 && M1 < 0)
        continue;

      bool Match = false;
      for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
        auto &DWordPair = DWordPairs[j];
        if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
            (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
          DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
          DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
          PSHUFDMask[DWord] = DOffset + j;
          Match = true;
          break;
        }
      }
      if (!Match) {
        PSHUFDMask[DWord] = DOffset + DWordPairs.size();
        DWordPairs.push_back(std::make_pair(M0, M1));
      }
    }

    if (DWordPairs.size() <= 2) {
      DWordPairs.resize(2, std::make_pair(-1, -1));
      int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
                              DWordPairs[1].first, DWordPairs[1].second};
      if ((NumHToL + NumHToH) == 0)
        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
      if ((NumLToL + NumLToH) == 0)
        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
    }
  }
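
  // Illustrative trace (not from the original source): for the low-half-only
  // mask [0,1, 0,1, 0,1, 2,3] the loop collects two dword pairs, (0,1) and
  // (2,3), giving PSHUFDMask = [0,0,0,1]. The PSHUFLW half mask [0,1,2,3] is
  // a no-op, so the whole shuffle lowers to PSHUFLW + PSHUFD(imm8 = 0x40).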
  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
  // such inputs we can swap two of the dwords across the half mark and end up
  // with <=2 inputs to each half in each half. Once there, we can fall through
  // to the generic code below. For example:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
  //
  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
  // and an existing 2-into-2 on the other half. In this case we may have to
  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
  // because any other situation (including a 3-into-1 or 1-into-3 in the other
  // half than the one we target for fixing) will be fixed when we re-enter this
  // path. We will also combine away any sequence of PSHUFD instructions that
  // result into a single instruction. Here is an example of the tricky case:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
  //
  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
  //
  // Input: [a, b, c, d, e, f, g, h]  PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
  //
  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
  //
  // The result is fine to be handled by the generic logic.
  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
                          int AOffset, int BOffset) {
    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
           "Must call this with A having 3 or 1 inputs from the A half.");
    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
           "Must call this with B having 1 or 3 inputs from the B half.");
    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");

    bool ThreeAInputs = AToAInputs.size() == 3;

    // Compute the index of dword with only one word among the three inputs in
    // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
    int ADWord = 0, BDWord = 0;
    int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
    int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
    int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
    ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
    int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
    int TripleNonInputIdx =
        TripleInputSum -
        std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
    TripleDWord = TripleNonInputIdx / 2;

    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
    OneInputDWord = (OneInput / 2) ^ 1;
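
    // Worked example (illustrative, not from the original source): with
    // AToAInputs = {0, 1, 2} and AOffset = 0, TripleInputSum = 6 and the
    // actual input sum is 3, so TripleNonInputIdx = 3 and TripleDWord = 1. If
    // the lone cross-half input is word 7, OneInputDWord = (7 / 2) ^ 1 = 2.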
    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
    // and BToA inputs. If there is also such a problem with the BToB and AToB
    // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
    // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
    // is essential that we don't *create* a 3<-1 as then we might oscillate.
    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
      int NumFlippedAToBInputs = llvm::count(AToBInputs, 2 * ADWord) +
                                 llvm::count(AToBInputs, 2 * ADWord + 1);
      int NumFlippedBToBInputs = llvm::count(BToBInputs, 2 * BDWord) +
                                 llvm::count(BToBInputs, 2 * BDWord + 1);
      if ((NumFlippedAToBInputs == 1 &&
           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
          (NumFlippedBToBInputs == 1 &&
           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
        // We choose whether to fix the A half or B half based on whether that
        // half has zero flipped inputs. At zero, we may not be able to fix it
        // with that half. We also bias towards fixing the B half because that
        // will more commonly be the high half, and we have to bias one way.
        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
                                                       ArrayRef<int> Inputs) {
          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
          bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
          // Determine whether the free index is in the flipped dword or the
          // unflipped dword based on where the pinned index is. We use this bit
          // in an xor to conditionally select the adjacent dword.
          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
          bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
          if (IsFixIdxInput == IsFixFreeIdxInput)
            FixFreeIdx += 1;
          IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
          assert(IsFixIdxInput != IsFixFreeIdxInput &&
                 "We need to be changing the number of flipped inputs!");
          int PSHUFHalfMask[] = {0, 1, 2, 3};
          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
          V = DAG.getNode(
              FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
              MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
              getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));

          for (int &M : Mask)
            if (M >= 0 && M == FixIdx)
              M = FixFreeIdx;
            else if (M >= 0 && M == FixFreeIdx)
              M = FixIdx;
        };
        if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
        } else {
          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
          int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
        }
      }
    }

    int PSHUFDMask[] = {0, 1, 2, 3};
    PSHUFDMask[ADWord] = BDWord;
    PSHUFDMask[BDWord] = ADWord;
    V = DAG.getBitcast(
        VT,
        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));

    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M >= 0 && M / 2 == ADWord)
        M = 2 * BDWord + M % 2;
      else if (M >= 0 && M / 2 == BDWord)
        M = 2 * ADWord + M % 2;

    // Recurse back into this routine to re-compute state now that this isn't
    // a 3 and 1 problem.
    return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
  };
  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
  if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
  // At this point there are at most two inputs to the low and high halves from
  // each half. That means the inputs can always be grouped into dwords and
  // those dwords can then be moved to the correct half with a dword shuffle.
  // We use at most one low and one high word shuffle to collect these paired
  // inputs into dwords, and finally a dword shuffle to place them.
  int PSHUFLMask[4] = {-1, -1, -1, -1};
  int PSHUFHMask[4] = {-1, -1, -1, -1};
  int PSHUFDMask[4] = {-1, -1, -1, -1};

  // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
  auto fixInPlaceInputs =
      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
                    MutableArrayRef<int> SourceHalfMask,
                    MutableArrayRef<int> HalfMask, int HalfOffset) {
        if (InPlaceInputs.empty())
          return;
        if (InPlaceInputs.size() == 1) {
          SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
              InPlaceInputs[0] - HalfOffset;
          PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
          return;
        }
        if (IncomingInputs.empty()) {
          // Just fix all of the in place inputs.
          for (int Input : InPlaceInputs) {
            SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
            PSHUFDMask[Input / 2] = Input / 2;
          }
          return;
        }

        assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
        SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
            InPlaceInputs[0] - HalfOffset;
        // Put the second input next to the first so that they are packed into
        // a dword. We find the adjacent index by toggling the low bit.
        int AdjIndex = InPlaceInputs[0] ^ 1;
        SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
        std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
        PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
      };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
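
  // Worked example (illustrative, not from the original source): if the
  // in-place low-half inputs are {1, 2}, the code above pins word 1 in place
  // (PSHUFLMask[1] = 1), moves word 2 into the adjacent slot 0
  // (PSHUFLMask[0] = 2), rewrites 2 -> 0 in the low mask, and marks dword 0
  // as staying put in PSHUFDMask.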
  // Now gather the cross-half inputs and place them into a free dword of
  // their target half.
  // FIXME: This operation could almost certainly be simplified dramatically to
  // look more like the 3-1 fixing operation.
  auto moveInputsToRightHalf = [&PSHUFDMask](
      MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
      MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
      MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
      int DestOffset) {
    auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
      return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
    };
    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
                                               int Word) {
      int LowWord = Word & ~1;
      int HighWord = Word | 1;
      return isWordClobbered(SourceHalfMask, LowWord) ||
             isWordClobbered(SourceHalfMask, HighWord);
    };

    if (IncomingInputs.empty())
      return;

    if (ExistingInputs.empty()) {
      // Map any dwords with inputs from them into the right half.
      for (int Input : IncomingInputs) {
        // If the source half mask maps over the inputs, turn those into
        // swaps and use the swapped lane.
        if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
          if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
            SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
                Input - SourceOffset;
            // We have to swap the uses in our half mask in one sweep.
            for (int &M : HalfMask)
              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
                M = Input;
              else if (M == Input)
                M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
          } else {
            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
                       Input - SourceOffset &&
                   "Previous placement doesn't match!");
          }
          // Note that this correctly re-maps both when we do a swap and when
          // we observe the other side of the swap above. We rely on that to
          // avoid swapping the members of the input list directly.
          Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
        }

        // Map the input's dword into the correct half.
        if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
          PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
        else
          assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
                     Input / 2 &&
                 "Previous placement doesn't match!");
      }

      // And just directly shift any other-half mask elements to be same-half
      // as we will have mirrored the dword containing the element into the
      // same position within that half.
      for (int &M : HalfMask)
        if (M >= SourceOffset && M < SourceOffset + 4) {
          M = M - SourceOffset + DestOffset;
          assert(M >= 0 && "This should never wrap below zero!");
        }
      return;
    }

    // Ensure we have the input in a viable dword of its current half. This
    // is particularly tricky because the original position may be clobbered
    // by inputs being moved and *staying* in that half.
    if (IncomingInputs.size() == 1) {
      if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
                         SourceOffset;
        SourceHalfMask[InputFixed - SourceOffset] =
            IncomingInputs[0] - SourceOffset;
        std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
                     InputFixed);
        IncomingInputs[0] = InputFixed;
      }
    } else if (IncomingInputs.size() == 2) {
      if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
          isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        // We have two non-adjacent or clobbered inputs we need to extract from
        // the source half. To do this, we need to map them into some adjacent
        // dword slot in the source mask.
        int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
                              IncomingInputs[1] - SourceOffset};

        // If there is a free slot in the source half mask adjacent to one of
        // the inputs, place the other input in it. We use (Index XOR 1) to
        // compute an adjacent index.
        if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
            SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
          SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          InputsFixed[1] = InputsFixed[0] ^ 1;
        } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
                   SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
          SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
          InputsFixed[0] = InputsFixed[1] ^ 1;
        } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
                   SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
          // The two inputs are in the same DWord but it is clobbered and the
          // adjacent DWord isn't used at all. Move both inputs to the free
          // slot.
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
          InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
          InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
        } else {
          // The only way we hit this point is if there is no clobbering
          // (because there are no off-half inputs to this half) and there is no
          // free slot adjacent to one of the inputs. In this case, we have to
          // swap an input with a non-input.
          for (int i = 0; i < 4; ++i)
            assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
                   "We can't handle any clobbers here!");
          assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
                 "Cannot have adjacent inputs here!");

          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;

          // We also have to update the final source mask in this case because
          // it may need to undo the above swap.
          for (int &M : FinalSourceHalfMask)
            if (M == (InputsFixed[0] ^ 1) + SourceOffset)
              M = InputsFixed[1] + SourceOffset;
            else if (M == InputsFixed[1] + SourceOffset)
              M = (InputsFixed[0] ^ 1) + SourceOffset;

          InputsFixed[1] = InputsFixed[0] ^ 1;
        }

        // Point everything at the fixed inputs.
        for (int &M : HalfMask)
          if (M == IncomingInputs[0])
            M = InputsFixed[0] + SourceOffset;
          else if (M == IncomingInputs[1])
            M = InputsFixed[1] + SourceOffset;

        IncomingInputs[0] = InputsFixed[0] + SourceOffset;
        IncomingInputs[1] = InputsFixed[1] + SourceOffset;
      }
    } else {
      llvm_unreachable("Unhandled input size!");
    }

    // Now hoist the DWord down to the right half.
    int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
    assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
    PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
    for (int &M : HalfMask)
      for (int Input : IncomingInputs)
        if (M == Input)
          M = FreeDWord * 2 + Input % 2;
  };
  moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
                        /*SourceOffset*/ 4, /*DestOffset*/ 0);
  moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
                        /*SourceOffset*/ 0, /*DestOffset*/ 4);
  // Now enact all the shuffles we've computed to move the inputs into their
  // final locations.
  if (!isNoopShuffleMask(PSHUFLMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
  if (!isNoopShuffleMask(PSHUFHMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
  if (!isNoopShuffleMask(PSHUFDMask))
    V = DAG.getBitcast(
        VT,
        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));

  // At this point, each half should contain all its inputs, and we can then
  // just shuffle them into their final position.
  assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
         "Failed to lift all the high half inputs to the low mask!");
  assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
         "Failed to lift all the low half inputs to the high mask!");

  // Do a half shuffle for the low mask.
  if (!isNoopShuffleMask(LoMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));

  // Do a half shuffle with the high mask after shifting its values down.
  for (int &M : HiMask)
    if (M >= 0)
      M -= 4;
  if (!isNoopShuffleMask(HiMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));

  return V;
}
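
// Illustrative end-to-end sketch (not from the original source, worth
// re-checking against the code above): for Mask = [4,5,0,1, 6,7,2,3] the
// in-place fixes pin dwords 0 and 3, the two cross-half moves fill dwords 1
// and 2, and the routine emits PSHUFD([0,2,1,3]) followed by PSHUFLW and
// PSHUFHW with [2,3,0,1] -- three shuffles that rotate the dwords of both
// halves into place.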
/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
/// blend if only one input is used.
static SDValue lowerShuffleAsBlendOfPSHUFBs(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
  assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
         "Lane crossing shuffle masks not supported");

  int NumBytes = VT.getSizeInBits() / 8;
  int Size = Mask.size();
  int Scale = NumBytes / Size;

  SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
  SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));

  V1InUse = false;
  V2InUse = false;

  for (int i = 0; i < NumBytes; ++i) {
    int M = Mask[i / Scale];
    if (M < 0)
      continue;

    const int ZeroMask = 0x80;
    int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
    int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
    if (Zeroable[i / Scale])
      V1Idx = V2Idx = ZeroMask;

    V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
    V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
    V1InUse |= (ZeroMask != V1Idx);
    V2InUse |= (ZeroMask != V2Idx);
  }

  MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
  if (V1InUse)
    V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
                     DAG.getBuildVector(ShufVT, DL, V1Mask));
  if (V2InUse)
    V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
                     DAG.getBuildVector(ShufVT, DL, V2Mask));

  // If we need shuffled inputs from both, blend the two.
  SDValue V;
  if (V1InUse && V2InUse)
    V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
  else
    V = V1InUse ? V1 : V2;

  // Cast the result back to the correct type.
  return DAG.getBitcast(VT, V);
}
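
// Illustrative note (not from the original source): for a v8i16 shuffle,
// Scale is 2, so mask element 9 (element 1 of V2) expands to byte selectors
// 2 and 3 in V2Mask while the matching V1Mask bytes get 0x80. Bit 7 of a
// PSHUFB selector zeroes the destination byte, which is what makes the final
// OR act as the blend.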
/// Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
                                        Subtarget, DAG))
    return V;

  int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });

  if (NumV2Inputs == 0) {
    // Try to use shift instructions.
    if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
                                            Zeroable, Subtarget, DAG))
      return Shift;

    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Try to use bit rotation instructions.
    if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
                                                 Subtarget, DAG))
      return Rotate;

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
      return V;

    // Use dedicated pack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
                                         Subtarget))
      return V;

    // Try to use byte rotation instructions.
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
                                                  Subtarget, DAG))
      return Rotate;

    // Make a copy of the mask so it can be modified.
    SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
    return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
                                               Subtarget, DAG);
  }

  assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
         "All single-input shuffles should be canonicalized to be V1-input "
         "shuffles.");

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // See if we can use SSE4A Extraction / Insertion.
  if (Subtarget.hasSSE4A())
    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
                                          Zeroable, DAG))
      return V;

  // There are special ways we can lower some single-element blends.
  if (NumV2Inputs == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG))
    return V;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (SDValue BitBlend =
          lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
    return BitBlend;

  // Try to use byte shift instructions to mask.
  if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
                                              Zeroable, Subtarget, DAG))
    return V;

  // Attempt to lower using compaction, SSE41 is necessary for PACKUSDW.
  // We could use SIGN_EXTEND_INREG+PACKSSDW for older targets but this seems to
  // be slower than a PSHUFLW+PSHUFHW+PSHUFD chain.
  int NumEvenDrops = canLowerByDroppingElements(Mask, true, false);
  if ((NumEvenDrops == 1 || NumEvenDrops == 2) && Subtarget.hasSSE41() &&
      !Subtarget.hasVLX()) {
    // Check if this is part of a 256-bit vector truncation.
    if (NumEvenDrops == 2 && Subtarget.hasAVX2() &&
        peekThroughBitcasts(V1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        peekThroughBitcasts(V2).getOpcode() == ISD::EXTRACT_SUBVECTOR) {
      SDValue V1V2 = concatSubVectors(V1, V2, DAG, DL);
      V1V2 = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1V2,
                         getZeroVector(MVT::v16i16, Subtarget, DAG, DL),
                         DAG.getTargetConstant(0xEE, DL, MVT::i8));
      V1V2 = DAG.getBitcast(MVT::v8i32, V1V2);
      V1 = extract128BitVector(V1V2, 0, DAG, DL);
      V2 = extract128BitVector(V1V2, 4, DAG, DL);
    } else {
      SmallVector<SDValue, 4> DWordClearOps(4,
                                            DAG.getConstant(0, DL, MVT::i32));
      for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
        DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
      SDValue DWordClearMask =
          DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
      V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
                       DWordClearMask);
      V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
                       DWordClearMask);
    }
    // Now pack things back together.
    SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, V1, V2);
    if (NumEvenDrops == 2) {
      Result = DAG.getBitcast(MVT::v4i32, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, Result, Result);
    }
    return Result;
  }

  // When compacting odd (upper) elements, use PACKSS pre-SSE41.
  int NumOddDrops = canLowerByDroppingElements(Mask, false, false);
  if (NumOddDrops == 1) {
    bool HasSSE41 = Subtarget.hasSSE41();
    V1 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
                     DAG.getBitcast(MVT::v4i32, V1),
                     DAG.getTargetConstant(16, DL, MVT::i8));
    V2 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
                     DAG.getBitcast(MVT::v4i32, V2),
                     DAG.getTargetConstant(16, DL, MVT::i8));
    return DAG.getNode(HasSSE41 ? X86ISD::PACKUS : X86ISD::PACKSS, DL,
                       MVT::v8i16, V1, V2);
  }

  // Try to lower by permuting the inputs into an unpack instruction.
  if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
                                                      Mask, Subtarget, DAG))
    return Unpack;

  // If we can't directly blend but can use PSHUFB, that will be better as it
  // can both shuffle and set up the inefficient blend.
  if (!IsBlendSupported && Subtarget.hasSSSE3()) {
    bool V1InUse, V2InUse;
    return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
                                        Zeroable, DAG, V1InUse, V2InUse);
  }

  // We can always bit-blend if we have to so the fallback strategy is to
  // decompose into single-input permutes and blends/unpacks.
  return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i16, V1, V2,
                                              Mask, Subtarget, DAG);
}
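
// Illustrative note (not from the original source): with NumEvenDrops == 1,
// every DWordClearOps entry is 0xFFFF, so each dword keeps only its low word
// and PACKUSDW(V1 & mask, V2 & mask) realizes the even-element compaction
// [0,2,4,6,8,10,12,14] in two ANDs and one pack.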
/// Lower 8-lane 16-bit floating point shuffles.
static SDValue lowerV8F16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
  int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });

  if (Subtarget.hasFP16()) {
    if (NumV2Elements == 0) {
      // Check for being able to broadcast a single element.
      if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f16, V1, V2,
                                                      Mask, Subtarget, DAG))
        return Broadcast;
    }
    if (NumV2Elements == 1 && Mask[0] >= 8)
      if (SDValue V = lowerShuffleAsElementInsertion(
              DL, MVT::v8f16, V1, V2, Mask, Zeroable, Subtarget, DAG))
        return V;
  }

  V1 = DAG.getBitcast(MVT::v8i16, V1);
  V2 = DAG.getBitcast(MVT::v8i16, V2);
  return DAG.getBitcast(MVT::v8f16,
                        DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
}
// Lowers unary/binary shuffle as VPERMV/VPERMV3, for non-VLX targets,
// sub-512-bit shuffles are padded to 512-bits for the shuffle and then
// the active subvector is extracted.
static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
                                     ArrayRef<int> Mask, SDValue V1, SDValue V2,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT MaskVT = VT.changeTypeToInteger();
  SDValue MaskNode;
  MVT ShuffleVT = VT;
  if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
    V1 = widenSubVector(V1, false, Subtarget, DAG, DL, 512);
    V2 = widenSubVector(V2, false, Subtarget, DAG, DL, 512);
    ShuffleVT = V1.getSimpleValueType();

    // Adjust mask to correct indices for the second input.
    int NumElts = VT.getVectorNumElements();
    unsigned Scale = 512 / VT.getSizeInBits();
    SmallVector<int, 32> AdjustedMask(Mask.begin(), Mask.end());
    for (int &M : AdjustedMask)
      if (M >= NumElts)
        M += (Scale - 1) * NumElts;
    MaskNode = getConstVector(AdjustedMask, MaskVT, DAG, DL, true);
    MaskNode = widenSubVector(MaskNode, false, Subtarget, DAG, DL, 512);
  } else {
    MaskNode = getConstVector(Mask, MaskVT, DAG, DL, true);
  }

  SDValue Result;
  if (V2.isUndef())
    Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
  else
    Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);

  if (VT != ShuffleVT)
    Result = extractSubVector(Result, 0, DAG, DL, VT.getSizeInBits());

  return Result;
}
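
// Illustrative note (not from the original source): for a v16i8 shuffle on a
// VBMI target without VLX, Scale is 512/128 = 4, so a V2 index such as 16
// becomes 16 + 3*16 = 64 -- the first element of the second (widened) source
// of the v64i8 VPERMV3 node -- and the v16i8 result is extracted from
// subvector 0 afterwards.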
/// Generic lowering of v16i8 shuffles.
///
/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
/// detect any complexity reducing interleaving. If that doesn't help, it uses
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use a zext lowering.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
                                        Subtarget, DAG))
    return V;

  if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG))
    return V;

  // See if we can use SSE4A Extraction / Insertion.
  if (Subtarget.hasSSE4A())
    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
                                          Zeroable, DAG))
      return V;

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });

  // For single-input shuffles, there are some nicer lowering tricks we can use.
  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Try to use bit rotation instructions.
    if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
                                                 Subtarget, DAG))
      return Rotate;

    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
      return V;

    // Check whether we can widen this to an i16 shuffle by duplicating bytes.
    // Notably, this handles splat and partial-splat shuffles more efficiently.
    // However, it only makes sense if the pre-duplication shuffle simplifies
    // things significantly. Currently, this means we need to be able to
    // express the pre-duplication shuffle as an i16 shuffle.
    //
    // FIXME: We should check for other patterns which can be widened into an
    // i16 shuffle as well.
    auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
      for (int i = 0; i < 16; i += 2)
        if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
          return false;

      return true;
    };
    auto tryToWidenViaDuplication = [&]() -> SDValue {
      if (!canWidenViaDuplication(Mask))
        return SDValue();
      SmallVector<int, 4> LoInputs;
      copy_if(Mask, std::back_inserter(LoInputs),
              [](int M) { return M >= 0 && M < 8; });
      array_pod_sort(LoInputs.begin(), LoInputs.end());
      LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
                     LoInputs.end());
      SmallVector<int, 4> HiInputs;
      copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
      array_pod_sort(HiInputs.begin(), HiInputs.end());
      HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
                     HiInputs.end());

      bool TargetLo = LoInputs.size() >= HiInputs.size();
      ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
      ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;

      int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
      SmallDenseMap<int, int, 8> LaneMap;
      for (int I : InPlaceInputs) {
        PreDupI16Shuffle[I / 2] = I / 2;
        LaneMap[I] = I;
      }
      int j = TargetLo ? 0 : 4, je = j + 4;
      for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
        // Check if j is already a shuffle of this input. This happens when
        // there are two adjacent bytes after we move the low one.
        if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
          // If we haven't yet mapped the input, search for a slot into which
          // we can map it.
          while (j < je && PreDupI16Shuffle[j] >= 0)
            ++j;

          if (j == je)
            // We can't place the inputs into a single half with a simple i16
            // shuffle, so bail.
            return SDValue();

          // Map this input with the i16 shuffle.
          PreDupI16Shuffle[j] = MovingInputs[i] / 2;
        }

        // Update the lane map based on the mapping we ended up with.
        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
      }
      V1 = DAG.getBitcast(
          MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));

      // Unpack the bytes to form the i16s that will be shuffled into place.
      bool EvenInUse = false, OddInUse = false;
      for (int i = 0; i < 16; i += 2) {
        EvenInUse |= (Mask[i + 0] >= 0);
        OddInUse |= (Mask[i + 1] >= 0);
        if (EvenInUse && OddInUse)
          break;
      }
      V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                       MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
                       OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));

      int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
      for (int i = 0; i < 16; ++i)
        if (Mask[i] >= 0) {
          int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
          assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
          if (PostDupI16Shuffle[i / 2] < 0)
            PostDupI16Shuffle[i / 2] = MappedMask;
          else
            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
                   "Conflicting entries in the original shuffle!");
        }
      return DAG.getBitcast(
          MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
    };
    if (SDValue V = tryToWidenViaDuplication())
      return V;
  }
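
  // Illustrative trace (not from the original source, worth re-checking):
  // a byte splat such as <5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5> passes the
  // pairwise-equality check, keeps word 2 in place in the pre-dup i16
  // shuffle, duplicates byte 5 with UNPCKL, and finishes as a v8i16 splat of
  // the word now holding the two copies.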
  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
    return V;

  // Try to use byte shift instructions to mask.
  if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
                                              Zeroable, Subtarget, DAG))
    return V;

  // Check for compaction patterns.
  bool IsSingleInput = V2.isUndef();
  int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);

  // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
  // with PSHUFB. It is important to do this before we attempt to generate any
  // blends but after all of the single-input lowerings. If the single input
  // lowerings can find an instruction sequence that is faster than a PSHUFB, we
  // want to preserve that and we can DAG combine any longer sequences into
  // a PSHUFB in the end. But once we start blending from multiple inputs,
  // the complexity of DAG combining bad patterns back into PSHUFB is too high,
  // and there are *very* few patterns that would actually be faster than the
  // PSHUFB approach because of its ability to zero lanes.
  //
  // If the mask is a binary compaction, we can more efficiently perform this
  // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
  //
  // FIXME: The only exceptions to the above are blends which are exact
  // interleavings with direct instructions supporting them. We currently don't
  // handle those well here.
  if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
    bool V1InUse = false;
    bool V2InUse = false;

    SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
        DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);

    // If both V1 and V2 are in use and we can use a direct blend or an unpack,
    // do so. This avoids using them to handle blends-with-zero which is
    // important as a single pshufb is significantly faster for that.
    if (V1InUse && V2InUse) {
      if (Subtarget.hasSSE41())
        if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
                                                Zeroable, Subtarget, DAG))
          return Blend;

      // We can use an unpack to do the blending rather than an or in some
      // cases. Even though the or may be (very minorly) more efficient, we
      // prefer this lowering because there are common cases where part of
      // the complexity of the shuffles goes away when we do the final blend as
      // an unpack.
      // FIXME: It might be worth trying to detect if the unpack-feeding
      // shuffles will both be pshufb, in which case we shouldn't bother with
      // the unpack.
      if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
        return Unpack;

      // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
      if (Subtarget.hasVBMI())
        return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, Subtarget,
                                     DAG);

      // If we have XOP we can use one VPPERM instead of multiple PSHUFBs.
      if (Subtarget.hasXOP()) {
        SDValue MaskNode = getConstVector(Mask, MVT::v16i8, DAG, DL, true);
        return DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, V1, V2, MaskNode);
      }

      // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
      // PALIGNR will be cheaper than the second PSHUFB+OR.
      if (SDValue V = lowerShuffleAsByteRotateAndPermute(
              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
        return V;
    }

    return PSHUFB;
  }

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
    return Blend;

  // Check whether a compaction lowering can be done. This handles shuffles
  // which take every Nth element for some even N. See the helper function for
  // details.
  //
  // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
  // rearranging bytes to truncate wide elements.
  if (NumEvenDrops) {
    // NumEvenDrops is the power of two stride of the elements. Another way of
    // thinking about it is that we need to drop the even elements this many
    // times to get the original input.
    //
    // First we need to zero all the dropped bytes.
    assert(NumEvenDrops <= 3 &&
           "No support for dropping even elements more than 3 times.");
    SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
    for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
      WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
    SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
    V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
                     WordClearMask);
    if (!IsSingleInput)
      V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
                       WordClearMask);

    // Now pack things back together.
    SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
                                 IsSingleInput ? V1 : V2);
    for (int i = 1; i < NumEvenDrops; ++i) {
      Result = DAG.getBitcast(MVT::v8i16, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
    }
    return Result;
  }

  // When compacting odd (upper) elements, use PACKSS pre-SSE41.
  int NumOddDrops = canLowerByDroppingElements(Mask, false, IsSingleInput);
  if (NumOddDrops == 1) {
    V1 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
                     DAG.getBitcast(MVT::v8i16, V1),
                     DAG.getTargetConstant(8, DL, MVT::i8));
    if (!IsSingleInput)
      V2 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
                       DAG.getBitcast(MVT::v8i16, V2),
                       DAG.getTargetConstant(8, DL, MVT::i8));
    return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
                       IsSingleInput ? V1 : V2);
  }

  // Handle multi-input cases by blending/unpacking single-input shuffles.
  if (NumV2Elements > 0)
    return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v16i8, V1, V2, Mask,
                                                Subtarget, DAG);

  // The fallback path for single-input shuffles widens this into two v8i16
  // vectors with unpacks, shuffles those, and then pulls them back together
  // with a pack.
  SDValue V = V1;

  std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
  std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
  for (int i = 0; i < 16; ++i)
    if (Mask[i] >= 0)
      (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];

  SDValue VLoHalf, VHiHalf;
  // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
  // them out and avoid using UNPCK{L,H} to extract the elements of V as
  // i16s.
  if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
      none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
    // Use a mask to drop the high bytes.
    VLoHalf = DAG.getBitcast(MVT::v8i16, V);
    VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
                          DAG.getConstant(0x00FF, DL, MVT::v8i16));

    // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
    VHiHalf = DAG.getUNDEF(MVT::v8i16);

    // Squash the masks to point directly into VLoHalf.
    for (int &M : LoBlendMask)
      if (M >= 0)
        M /= 2;
    for (int &M : HiBlendMask)
      if (M >= 0)
        M /= 2;
  } else {
    // Otherwise just unpack the low half of V into VLoHalf and the high half into
    // VHiHalf so that we can blend them as i16s.
    SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);

    VLoHalf = DAG.getBitcast(
        MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
    VHiHalf = DAG.getBitcast(
        MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
  }

  SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
  SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);

  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
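
// Illustrative note (not from the original source): on pre-SSSE3 targets a
// single-input shuffle that only reads even-numbered bytes takes the cheap
// branch above -- one AND with 0x00FF words instead of two unpacks -- and the
// squashed masks then index v8i16 words directly before the final PACKUSWB
// recombines the halves.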
/// Dispatching routine to lower various 128-bit x86 vector shuffles.
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  MVT VT, SDValue V1, SDValue V2,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  switch (VT.SimpleTy) {
  case MVT::v2i64:
    return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v2f64:
    return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4i32:
    return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4f32:
    return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i16:
    return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8f16:
    return lowerV8F16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i8:
    return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Unimplemented!");
  }
}
16549 /// Generic routine to split vector shuffle into half-sized shuffles.
16551 /// This routine just extracts two subvectors, shuffles them independently, and
16552 /// then concatenates them back together. This should work effectively with all
16553 /// AVX vector shuffle types.
16554 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
16555 SDValue V2, ArrayRef<int> Mask,
16556 SelectionDAG &DAG) {
16557 assert(VT.getSizeInBits() >= 256 &&
16558 "Only for 256-bit or wider vector shuffles!");
16559 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
16560 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
16562 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
16563 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
16565 int NumElements = VT.getVectorNumElements();
16566 int SplitNumElements = NumElements / 2;
16567 MVT ScalarVT = VT.getVectorElementType();
16568 MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);
16570 // Use splitVector/extractSubVector so that split build-vectors just build two
16571 // narrower build vectors. This helps shuffling with splats and zeros.
16572 auto SplitVector = [&](SDValue V) {
16574 std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
16575 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
16576 DAG.getBitcast(SplitVT, HiV));
16579 SDValue LoV1, HiV1, LoV2, HiV2;
16580 std::tie(LoV1, HiV1) = SplitVector(V1);
16581 std::tie(LoV2, HiV2) = SplitVector(V2);
16583 // Now create two 4-way blends of these half-width vectors.
16584 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
16585 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
16586 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
16587 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
16588 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
16589 for (int i = 0; i < SplitNumElements; ++i) {
16590 int M = HalfMask[i];
16591 if (M >= NumElements) {
16592 if (M >= NumElements + SplitNumElements)
16596 V2BlendMask[i] = M - NumElements;
16597 BlendMask[i] = SplitNumElements + i;
16598 } else if (M >= 0) {
16599 if (M >= SplitNumElements)
16603 V1BlendMask[i] = M;
16608 // Because the lowering happens after all combining takes place, we need to
16609 // manually combine these blend masks as much as possible so that we create
16610 // a minimal number of high-level vector shuffle nodes.
16612 // First try just blending the halves of V1 or V2.
16613 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
16614 return DAG.getUNDEF(SplitVT);
16615 if (!UseLoV2 && !UseHiV2)
16616 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
16617 if (!UseLoV1 && !UseHiV1)
16618 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
16620 SDValue V1Blend, V2Blend;
16621 if (UseLoV1 && UseHiV1) {
16623 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
16625 // We only use half of V1 so map the usage down into the final blend mask.
16626 V1Blend = UseLoV1 ? LoV1 : HiV1;
16627 for (int i = 0; i < SplitNumElements; ++i)
16628 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
16629 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
16631 if (UseLoV2 && UseHiV2) {
16633 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
16635 // We only use half of V2 so map the usage down into the final blend mask.
16636 V2Blend = UseLoV2 ? LoV2 : HiV2;
16637 for (int i = 0; i < SplitNumElements; ++i)
16638 if (BlendMask[i] >= SplitNumElements)
16639 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
16640 }
16641 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
16642 };
16643 SDValue Lo = HalfBlend(LoMask);
16644 SDValue Hi = HalfBlend(HiMask);
16645 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
16646 }
16648 /// Either split a vector in halves or decompose the shuffles and the
16649 /// blend/unpack.
16651 /// This is provided as a good fallback for many lowerings of non-single-input
16652 /// shuffles with more than one 128-bit lane. In those cases, we want to select
16653 /// between splitting the shuffle into 128-bit components and stitching those
16654 /// back together vs. extracting the single-input shuffles and blending those
16655 /// results.
16656 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
16657 SDValue V2, ArrayRef<int> Mask,
16658 const X86Subtarget &Subtarget,
16659 SelectionDAG &DAG) {
16660 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
16661 "shuffles as it could then recurse on itself.");
16662 int Size = Mask.size();
16664 // If this can be modeled as a broadcast of two elements followed by a blend,
16665 // prefer that lowering. This is especially important because broadcasts can
16666 // often fold with memory operands.
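// Illustrative case: for v4f64, Mask = <0, 6, 0, 6> reads only V1[0] and
// V2[2], so it decomposes into two broadcasts plus a blend.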
16667 auto DoBothBroadcast = [&] {
16668 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
16669 for (int M : Mask)
16670 if (M >= Size) {
16671 if (V2BroadcastIdx < 0)
16672 V2BroadcastIdx = M - Size;
16673 else if (M - Size != V2BroadcastIdx)
16674 return false;
16675 } else if (M >= 0) {
16676 if (V1BroadcastIdx < 0)
16677 V1BroadcastIdx = M;
16678 else if (M != V1BroadcastIdx)
16679 return false;
16680 }
16681 return true;
16682 };
16683 if (DoBothBroadcast())
16684 return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
16685 DAG);
16687 // If the inputs all stem from a single 128-bit lane of each input, then we
16688 // split them rather than blending because the split will decompose to
16689 // unusually few instructions.
16690 int LaneCount = VT.getSizeInBits() / 128;
16691 int LaneSize = Size / LaneCount;
16692 SmallBitVector LaneInputs[2];
16693 LaneInputs[0].resize(LaneCount, false);
16694 LaneInputs[1].resize(LaneCount, false);
16695 for (int i = 0; i < Size; ++i)
16696 if (Mask[i] >= 0)
16697 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
16698 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
16699 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16701 // Otherwise, just fall back to decomposed shuffles and a blend/unpack. This
16702 // requires that the decomposed single-input shuffles don't end up here.
16703 return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
16704 DAG);
16705 }
16707 // Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
16708 // TODO: Extend to support v8f32 (+ 512-bit shuffles).
16709 static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
16710 SDValue V1, SDValue V2,
16711 ArrayRef<int> Mask,
16712 SelectionDAG &DAG) {
16713 assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
16715 int LHSMask[4] = {-1, -1, -1, -1};
16716 int RHSMask[4] = {-1, -1, -1, -1};
16717 unsigned SHUFPMask = 0;
16719 // As SHUFPD uses a single LHS/RHS element per lane, we can always
16720 // perform the shuffle once the lanes have been shuffled in place.
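// Illustrative trace: Mask = <2, 6, 1, 5> gives LHSMask = <2, u, u, 1>,
// RHSMask = <6, u, u, 5> and SHUFPMask = 0b1100, so the SHUFPD takes the
// low double of each source lane for elements 0/1 and the high double for
// elements 2/3.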
16721 for (int i = 0; i != 4; ++i) {
16722 int M = Mask[i];
16723 if (M < 0)
16724 continue;
16725 int LaneBase = i & ~1;
16726 auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
16727 LaneMask[LaneBase + (M & 1)] = M;
16728 SHUFPMask |= (M & 1) << i;
16729 }
16731 SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
16732 SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
16733 return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
16734 DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
16735 }
16737 /// Lower a vector shuffle crossing multiple 128-bit lanes as
16738 /// a lane permutation followed by a per-lane permutation.
16740 /// This is mainly for cases where we can have non-repeating permutes
16741 /// in each lane.
16743 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
16744 /// we should investigate merging them.
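///
/// Illustrative trace: for v4f64 with Mask = <2, 3, 1, 0>, the cross-lane
/// step swaps the two 128-bit lanes (CrossLaneMask <2, 3, 0, 1>) and the
/// in-lane step then swaps only the elements of the upper lane
/// (InLaneMask <0, 1, 3, 2>).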
16745 static SDValue lowerShuffleAsLanePermuteAndPermute(
16746 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
16747 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
16748 int NumElts = VT.getVectorNumElements();
16749 int NumLanes = VT.getSizeInBits() / 128;
16750 int NumEltsPerLane = NumElts / NumLanes;
16751 bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();
16753 /// Attempts to find a sublane permute with the given size
16754 /// that gets all elements into their target lanes.
16756 /// If successful, fills CrossLaneMask and InLaneMask and returns true.
16757 /// If unsuccessful, returns false and may overwrite InLaneMask.
16758 auto getSublanePermute = [&](int NumSublanes) -> SDValue {
16759 int NumSublanesPerLane = NumSublanes / NumLanes;
16760 int NumEltsPerSublane = NumElts / NumSublanes;
16762 SmallVector<int, 16> CrossLaneMask;
16763 SmallVector<int, 16> InLaneMask(NumElts, SM_SentinelUndef);
16764 // CrossLaneMask but one entry == one sublane.
16765 SmallVector<int, 16> CrossLaneMaskLarge(NumSublanes, SM_SentinelUndef);
16767 for (int i = 0; i != NumElts; ++i) {
16768 int M = Mask[i];
16769 if (M < 0)
16770 continue;
16772 int SrcSublane = M / NumEltsPerSublane;
16773 int DstLane = i / NumEltsPerLane;
16775 // We only need to get the elements into the right lane, not sublane.
16776 // So search all sublanes that make up the destination lane.
16777 bool Found = false;
16778 int DstSubStart = DstLane * NumSublanesPerLane;
16779 int DstSubEnd = DstSubStart + NumSublanesPerLane;
16780 for (int DstSublane = DstSubStart; DstSublane < DstSubEnd; ++DstSublane) {
16781 if (!isUndefOrEqual(CrossLaneMaskLarge[DstSublane], SrcSublane))
16782 continue;
16784 Found = true;
16785 CrossLaneMaskLarge[DstSublane] = SrcSublane;
16786 int DstSublaneOffset = DstSublane * NumEltsPerSublane;
16787 InLaneMask[i] = DstSublaneOffset + M % NumEltsPerSublane;
16788 break;
16789 }
16790 if (!Found)
16791 return SDValue();
16792 }
16794 // Fill CrossLaneMask using CrossLaneMaskLarge.
16795 narrowShuffleMaskElts(NumEltsPerSublane, CrossLaneMaskLarge, CrossLaneMask);
16797 if (!CanUseSublanes) {
16798 // If we're only shuffling a single lowest lane and the rest are identity
16799 // then don't bother.
16800 // TODO - isShuffleMaskInputInPlace could be extended to something like
16801 // this.
16802 int NumIdentityLanes = 0;
16803 bool OnlyShuffleLowestLane = true;
16804 for (int i = 0; i != NumLanes; ++i) {
16805 int LaneOffset = i * NumEltsPerLane;
16806 if (isSequentialOrUndefInRange(InLaneMask, LaneOffset, NumEltsPerLane,
16807 i * NumEltsPerLane))
16808 NumIdentityLanes++;
16809 else if (CrossLaneMask[LaneOffset] != 0)
16810 OnlyShuffleLowestLane = false;
16811 }
16812 if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
16813 return SDValue();
16814 }
16816 SDValue CrossLane = DAG.getVectorShuffle(VT, DL, V1, V2, CrossLaneMask);
16817 return DAG.getVectorShuffle(VT, DL, CrossLane, DAG.getUNDEF(VT),
16818 InLaneMask);
16819 };
16821 // First attempt a solution with full lanes.
16822 if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes))
16823 return V;
16825 // The rest of the solutions use sublanes.
16826 if (!CanUseSublanes)
16827 return SDValue();
16829 // Then attempt a solution with 64-bit sublanes (vpermq).
16830 if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes * 2))
16831 return V;
16833 // If that doesn't work and we have fast variable cross-lane shuffle,
16834 // attempt 32-bit sublanes (vpermd).
16835 if (!Subtarget.hasFastVariableCrossLaneShuffle())
16836 return SDValue();
16838 return getSublanePermute(/*NumSublanes=*/NumLanes * 4);
16839 }
16841 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
16842 /// source with a lane permutation.
16844 /// This lowering strategy results in four instructions in the worst case for a
16845 /// single-input cross lane shuffle which is lower than any other fully general
16846 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
16847 /// shuffle pattern should be handled prior to trying this lowering.
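///
/// Illustrative trace: for a v8f32 single-input
/// Mask = <5, 4, 7, 6, 1, 0, 3, 2>, every element comes from the opposite
/// lane, so the lanes are flipped with a <2, 3, 0, 1> permute and the result
/// is shuffled in-lane with the repeating <1, 0, 3, 2> pattern.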
16848 static SDValue lowerShuffleAsLanePermuteAndShuffle(
16849 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
16850 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
16851 // FIXME: This should probably be generalized for 512-bit vectors as well.
16852 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
16853 int Size = Mask.size();
16854 int LaneSize = Size / 2;
16856 // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
16857 // Only do this if the elements aren't all from the lower lane,
16858 // otherwise we're (probably) better off doing a split.
16859 if (VT == MVT::v4f64 &&
16860 !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
16861 return lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG);
16863 // If there are only inputs from one 128-bit lane, splitting will in fact be
16864 // less expensive. The flags track whether the given lane contains an element
16865 // that crosses to another lane.
16866 bool AllLanes;
16867 if (!Subtarget.hasAVX2()) {
16868 bool LaneCrossing[2] = {false, false};
16869 for (int i = 0; i < Size; ++i)
16870 if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
16871 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
16872 AllLanes = LaneCrossing[0] && LaneCrossing[1];
16873 } else {
16874 bool LaneUsed[2] = {false, false};
16875 for (int i = 0; i < Size; ++i)
16876 if (Mask[i] >= 0)
16877 LaneUsed[(Mask[i] % Size) / LaneSize] = true;
16878 AllLanes = LaneUsed[0] && LaneUsed[1];
16879 }
16881 // TODO - we could support shuffling V2 in the Flipped input.
16882 assert(V2.isUndef() &&
16883 "This last part of this routine only works on single input shuffles");
16885 SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
16886 for (int i = 0; i < Size; ++i) {
16887 int &M = InLaneMask[i];
16888 if (M < 0)
16889 continue;
16890 if (((M % Size) / LaneSize) != (i / LaneSize))
16891 M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
16892 }
16893 assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
16894 "In-lane shuffle mask expected");
16896 // If we're not using both lanes in each lane and the inlane mask is not
16897 // repeating, then we're better off splitting.
16898 if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
16899 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16901 // Flip the lanes, and shuffle the results which should now be in-lane.
16902 MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
16903 SDValue Flipped = DAG.getBitcast(PVT, V1);
16904 Flipped =
16905 DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
16906 Flipped = DAG.getBitcast(VT, Flipped);
16907 return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
16908 }
16910 /// Handle lowering 2-lane 128-bit shuffles.
16911 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
16912 SDValue V2, ArrayRef<int> Mask,
16913 const APInt &Zeroable,
16914 const X86Subtarget &Subtarget,
16915 SelectionDAG &DAG) {
16916 if (V2.isUndef()) {
16917 // Attempt to match VBROADCAST*128 subvector broadcast load.
16918 bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
16919 bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
16920 if ((SplatLo || SplatHi) && !Subtarget.hasAVX512() && V1.hasOneUse() &&
16921 X86::mayFoldLoad(peekThroughOneUseBitcasts(V1), Subtarget)) {
16922 MVT MemVT = VT.getHalfNumVectorElementsVT();
16923 unsigned Ofs = SplatLo ? 0 : MemVT.getStoreSize();
16924 auto *Ld = cast<LoadSDNode>(peekThroughOneUseBitcasts(V1));
16925 if (SDValue BcstLd = getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, DL,
16926 VT, MemVT, Ld, Ofs, DAG))
16927 return BcstLd;
16928 }
16930 // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
16931 if (Subtarget.hasAVX2())
16932 return SDValue();
16933 }
16935 bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
16937 SmallVector<int, 4> WidenedMask;
16938 if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
16939 return SDValue();
16941 bool IsLowZero = (Zeroable & 0x3) == 0x3;
16942 bool IsHighZero = (Zeroable & 0xc) == 0xc;
16944 // Try to use an insert into a zero vector.
16945 if (WidenedMask[0] == 0 && IsHighZero) {
16946 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16947 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16948 DAG.getIntPtrConstant(0, DL));
16949 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16950 getZeroVector(VT, Subtarget, DAG, DL), LoV,
16951 DAG.getIntPtrConstant(0, DL));
16952 }
16954 // TODO: If minimizing size and one of the inputs is a zero vector and the
16955 // the zero vector has only one use, we could use a VPERM2X128 to save the
16956 // instruction bytes needed to explicitly generate the zero vector.
16958 // Blends are faster and handle all the non-lane-crossing cases.
16959 if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
16960 Subtarget, DAG))
16961 return Blend;
16963 // If either input operand is a zero vector, use VPERM2X128 because its mask
16964 // allows us to replace the zero input with an implicit zero.
16965 if (!IsLowZero && !IsHighZero) {
16966 // Check for patterns which can be matched with a single insert of a 128-bit
16967 // subvector.
16968 bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2);
16969 if (OnlyUsesV1 || isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2)) {
16971 // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
16972 // this will likely become vinsertf128 which can't fold a 256-bit memop.
16973 if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
16974 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16975 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
16976 OnlyUsesV1 ? V1 : V2,
16977 DAG.getIntPtrConstant(0, DL));
16978 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16979 DAG.getIntPtrConstant(2, DL));
16980 }
16981 }
16983 // Try to use SHUF128 if possible.
16984 if (Subtarget.hasVLX()) {
16985 if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
16986 unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
16987 ((WidenedMask[1] % 2) << 1);
16988 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
16989 DAG.getTargetConstant(PermMask, DL, MVT::i8));
16990 }
16991 }
16992 }
16994 // Otherwise form a 128-bit permutation. After accounting for undefs,
16995 // convert the 64-bit shuffle mask selection values into 128-bit
16996 // selection bits by dividing the indexes by 2 and shifting into positions
16997 // defined by a vperm2*128 instruction's immediate control byte.
16999 // The immediate permute control byte looks like this:
17000 // [1:0] - select 128 bits from sources for low half of destination
17001 // [2] - ignore
17002 // [3] - zero low half of destination
17003 // [5:4] - select 128 bits from sources for high half of destination
17004 // [6] - ignore
17005 // [7] - zero high half of destination
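// For example, a widened mask <0, 2> (low half of V1, low half of V2) gives
// PermMask 0x20; if the high half is known zero, bit 7 is set instead
// (0x80) and the unused source is replaced with undef below.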
17007 assert((WidenedMask[0] >= 0 || IsLowZero) &&
17008 (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
17010 unsigned PermMask = 0;
17011 PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
17012 PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
17014 // Check the immediate mask and replace unused sources with undef.
17015 if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
17016 V1 = DAG.getUNDEF(VT);
17017 if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
17018 V2 = DAG.getUNDEF(VT);
17020 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
17021 DAG.getTargetConstant(PermMask, DL, MVT::i8));
17022 }
17024 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
17025 /// shuffling each lane.
17027 /// This attempts to create a repeated lane shuffle where each lane uses one
17028 /// or two of the lanes of the inputs. The lanes of the input vectors are
17029 /// shuffled in one or two independent shuffles to get the lanes into the
17030 /// position needed by the final shuffle.
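///
/// Illustrative trace: for v8f32 with Mask = <0, 8, 2, 10, 12, 4, 14, 6>,
/// both lanes repeat <0, 8, 2, 10> once the input lanes are rearranged, so
/// this lowers to two lane-fixing shuffles of the V1/V2 halves followed by
/// a single in-lane unpack-style repeated shuffle.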
17031 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
17032 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
17033 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
17034 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
17036 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
17037 return SDValue();
17039 int NumElts = Mask.size();
17040 int NumLanes = VT.getSizeInBits() / 128;
17041 int NumLaneElts = 128 / VT.getScalarSizeInBits();
17042 SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
17043 SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
17045 // First pass will try to fill in the RepeatMask from lanes that need two
17046 // sources.
17047 for (int Lane = 0; Lane != NumLanes; ++Lane) {
17048 int Srcs[2] = {-1, -1};
17049 SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
17050 for (int i = 0; i != NumLaneElts; ++i) {
17051 int M = Mask[(Lane * NumLaneElts) + i];
17052 if (M < 0)
17053 continue;
17054 // Determine which of the possible input lanes (NumLanes from each source)
17055 // this element comes from. Assign that as one of the sources for this
17056 // lane. We can assign up to 2 sources for this lane. If we run out
17057 // sources we can't do anything.
17058 int LaneSrc = M / NumLaneElts;
17059 int Src;
17060 if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
17061 Src = 0;
17062 else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
17063 Src = 1;
17064 else
17065 return SDValue();
17067 Srcs[Src] = LaneSrc;
17068 InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
17069 }
17071 // If this lane has two sources, see if it fits with the repeat mask so far.
17072 if (Srcs[1] < 0)
17073 continue;
17075 LaneSrcs[Lane][0] = Srcs[0];
17076 LaneSrcs[Lane][1] = Srcs[1];
17078 auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
17079 assert(M1.size() == M2.size() && "Unexpected mask size");
17080 for (int i = 0, e = M1.size(); i != e; ++i)
17081 if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
17082 return false;
17083 return true;
17084 };
17086 auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
17087 assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
17088 for (int i = 0, e = MergedMask.size(); i != e; ++i) {
17089 int M = Mask[i];
17090 if (M < 0)
17091 continue;
17092 assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
17093 "Unexpected mask element");
17094 MergedMask[i] = M;
17095 }
17096 };
17098 if (MatchMasks(InLaneMask, RepeatMask)) {
17099 // Merge this lane mask into the final repeat mask.
17100 MergeMasks(InLaneMask, RepeatMask);
17101 continue;
17102 }
17104 // Didn't find a match. Swap the operands and try again.
17105 std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
17106 ShuffleVectorSDNode::commuteMask(InLaneMask);
17108 if (MatchMasks(InLaneMask, RepeatMask)) {
17109 // Merge this lane mask into the final repeat mask.
17110 MergeMasks(InLaneMask, RepeatMask);
17111 continue;
17112 }
17114 // Couldn't find a match with the operands in either order.
17115 return SDValue();
17116 }
17118 // Now handle any lanes with only one source.
17119 for (int Lane = 0; Lane != NumLanes; ++Lane) {
17120 // If this lane has already been processed, skip it.
17121 if (LaneSrcs[Lane][0] >= 0)
17122 continue;
17124 for (int i = 0; i != NumLaneElts; ++i) {
17125 int M = Mask[(Lane * NumLaneElts) + i];
17126 if (M < 0)
17127 continue;
17129 // If RepeatMask isn't defined yet we can define it ourself.
17130 if (RepeatMask[i] < 0)
17131 RepeatMask[i] = M % NumLaneElts;
17133 if (RepeatMask[i] < NumElts) {
17134 if (RepeatMask[i] != M % NumLaneElts)
17135 return SDValue();
17136 LaneSrcs[Lane][0] = M / NumLaneElts;
17137 } else {
17138 if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
17139 return SDValue();
17140 LaneSrcs[Lane][1] = M / NumLaneElts;
17141 }
17142 }
17144 if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
17145 return SDValue();
17146 }
17148 SmallVector<int, 16> NewMask(NumElts, -1);
17149 for (int Lane = 0; Lane != NumLanes; ++Lane) {
17150 int Src = LaneSrcs[Lane][0];
17151 for (int i = 0; i != NumLaneElts; ++i) {
17152 int M = -1;
17153 if (Src >= 0)
17154 M = Src * NumLaneElts + i;
17155 NewMask[Lane * NumLaneElts + i] = M;
17156 }
17157 }
17158 SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17159 // Ensure we didn't get back the shuffle we started with.
17160 // FIXME: This is a hack to make up for some splat handling code in
17161 // getVectorShuffle.
17162 if (isa<ShuffleVectorSDNode>(NewV1) &&
17163 cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
17164 return SDValue();
17166 for (int Lane = 0; Lane != NumLanes; ++Lane) {
17167 int Src = LaneSrcs[Lane][1];
17168 for (int i = 0; i != NumLaneElts; ++i) {
17169 int M = -1;
17170 if (Src >= 0)
17171 M = Src * NumLaneElts + i;
17172 NewMask[Lane * NumLaneElts + i] = M;
17173 }
17174 }
17175 SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17176 // Ensure we didn't get back the shuffle we started with.
17177 // FIXME: This is a hack to make up for some splat handling code in
17178 // getVectorShuffle.
17179 if (isa<ShuffleVectorSDNode>(NewV2) &&
17180 cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
17181 return SDValue();
17183 for (int i = 0; i != NumElts; ++i) {
17184 NewMask[i] = RepeatMask[i % NumLaneElts];
17185 if (NewMask[i] < 0)
17186 continue;
17188 NewMask[i] += (i / NumLaneElts) * NumLaneElts;
17189 }
17190 return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
17191 }
17193 /// If the input shuffle mask results in a vector that is undefined in all upper
17194 /// or lower half elements and that mask accesses only 2 halves of the
17195 /// shuffle's operands, return true. A mask of half the width with mask indexes
17196 /// adjusted to access the extracted halves of the original shuffle operands is
17197 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
17198 /// lower half of each input operand is accessed.
17199 static bool
17200 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
17201 int &HalfIdx1, int &HalfIdx2) {
17202 assert((Mask.size() == HalfMask.size() * 2) &&
17203 "Expected input mask to be twice as long as output");
17205 // Exactly one half of the result must be undef to allow narrowing.
17206 bool UndefLower = isUndefLowerHalf(Mask);
17207 bool UndefUpper = isUndefUpperHalf(Mask);
17208 if (UndefLower == UndefUpper)
17209 return false;
17211 unsigned HalfNumElts = HalfMask.size();
17212 unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
17213 HalfIdx1 = -1;
17214 HalfIdx2 = -1;
17215 for (unsigned i = 0; i != HalfNumElts; ++i) {
17216 int M = Mask[i + MaskIndexOffset];
17217 if (M < 0) {
17218 HalfMask[i] = M;
17219 continue;
17220 }
17222 // Determine which of the 4 half vectors this element is from.
17223 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
17224 int HalfIdx = M / HalfNumElts;
17226 // Determine the element index into its half vector source.
17227 int HalfElt = M % HalfNumElts;
17229 // We can shuffle with up to 2 half vectors, set the new 'half'
17230 // shuffle mask accordingly.
17231 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
17232 HalfMask[i] = HalfElt;
17233 HalfIdx1 = HalfIdx;
17234 continue;
17235 }
17236 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
17237 HalfMask[i] = HalfElt + HalfNumElts;
17238 HalfIdx2 = HalfIdx;
17239 continue;
17240 }
17242 // Too many half vectors referenced.
17243 return false;
17244 }
17246 return true;
17247 }
17249 /// Given the output values from getHalfShuffleMask(), create a half width
17250 /// shuffle of extracted vectors followed by an insert back to full width.
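///
/// For example, with HalfIdx1 = 0, HalfIdx2 = 3 and UndefLower = false this
/// produces shuffle(extract(V1, 0), extract(V2, HalfNumElts), HalfMask)
/// inserted into the low half of an undef full-width vector.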
17251 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
17252 ArrayRef<int> HalfMask, int HalfIdx1,
17253 int HalfIdx2, bool UndefLower,
17254 SelectionDAG &DAG, bool UseConcat = false) {
17255 assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
17256 assert(V1.getValueType().isSimple() && "Expecting only simple types");
17258 MVT VT = V1.getSimpleValueType();
17259 MVT HalfVT = VT.getHalfNumVectorElementsVT();
17260 unsigned HalfNumElts = HalfVT.getVectorNumElements();
17262 auto getHalfVector = [&](int HalfIdx) {
17263 if (HalfIdx < 0)
17264 return DAG.getUNDEF(HalfVT);
17265 SDValue V = (HalfIdx < 2 ? V1 : V2);
17266 HalfIdx = (HalfIdx % 2) * HalfNumElts;
17267 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
17268 DAG.getIntPtrConstant(HalfIdx, DL));
17269 };
17271 // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
17272 SDValue Half1 = getHalfVector(HalfIdx1);
17273 SDValue Half2 = getHalfVector(HalfIdx2);
17274 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
17275 if (UseConcat) {
17276 SDValue Op0 = V;
17277 SDValue Op1 = DAG.getUNDEF(HalfVT);
17278 if (UndefLower)
17279 std::swap(Op0, Op1);
17280 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
17281 }
17283 unsigned Offset = UndefLower ? HalfNumElts : 0;
17284 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
17285 DAG.getIntPtrConstant(Offset, DL));
17286 }
17288 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
17289 /// This allows for fast cases such as subvector extraction/insertion
17290 /// or shuffling smaller vector types which can lower more efficiently.
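///
/// For example, a v8f32 Mask = <4, 5, 6, 7, u, u, u, u> is just the upper
/// 128-bit subvector of V1 moved into the low half, matched directly below
/// before any narrower half-width shuffle is attempted.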
17291 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
17292 SDValue V2, ArrayRef<int> Mask,
17293 const X86Subtarget &Subtarget,
17294 SelectionDAG &DAG) {
17295 assert((VT.is256BitVector() || VT.is512BitVector()) &&
17296 "Expected 256-bit or 512-bit vector");
17298 bool UndefLower = isUndefLowerHalf(Mask);
17299 if (!UndefLower && !isUndefUpperHalf(Mask))
17300 return SDValue();
17302 assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
17303 "Completely undef shuffle mask should have been simplified already");
17305 // Upper half is undef and lower half is whole upper subvector.
17306 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
17307 MVT HalfVT = VT.getHalfNumVectorElementsVT();
17308 unsigned HalfNumElts = HalfVT.getVectorNumElements();
17309 if (!UndefLower &&
17310 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
17311 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
17312 DAG.getIntPtrConstant(HalfNumElts, DL));
17313 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
17314 DAG.getIntPtrConstant(0, DL));
17315 }
17317 // Lower half is undef and upper half is whole lower subvector.
17318 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
17319 if (UndefLower &&
17320 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
17321 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
17322 DAG.getIntPtrConstant(0, DL));
17323 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
17324 DAG.getIntPtrConstant(HalfNumElts, DL));
17325 }
17327 int HalfIdx1, HalfIdx2;
17328 SmallVector<int, 8> HalfMask(HalfNumElts);
17329 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
17330 return SDValue();
17332 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
17334 // Only shuffle the halves of the inputs when useful.
17335 unsigned NumLowerHalves =
17336 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
17337 unsigned NumUpperHalves =
17338 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
17339 assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
17341 // Determine the larger pattern of undef/halves, then decide if it's worth
17342 // splitting the shuffle based on subtarget capabilities and types.
17343 unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
17344 if (!UndefLower) {
17345 // XXXXuuuu: no insert is needed.
17346 // Always extract lowers when setting lower - these are all free subreg ops.
17347 if (NumUpperHalves == 0)
17348 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17349 UndefLower, DAG);
17351 if (NumUpperHalves == 1) {
17352 // AVX2 has efficient 32/64-bit element cross-lane shuffles.
17353 if (Subtarget.hasAVX2()) {
17354 // extract128 + vunpckhps/vshufps, is better than vblend + vpermps.
17355 if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
17356 !is128BitUnpackShuffleMask(HalfMask, DAG) &&
17357 (!isSingleSHUFPSMask(HalfMask) ||
17358 Subtarget.hasFastVariableCrossLaneShuffle()))
17359 return SDValue();
17360 // If this is a unary shuffle (assume that the 2nd operand is
17361 // canonicalized to undef), then we can use vpermpd. Otherwise, we
17362 // are better off extracting the upper half of 1 operand and using a
17363 // narrow shuffle.
17364 if (EltWidth == 64 && V2.isUndef())
17365 return SDValue();
17366 }
17367 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
17368 if (Subtarget.hasAVX512() && VT.is512BitVector())
17369 return SDValue();
17370 // Extract + narrow shuffle is better than the wide alternative.
17371 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17372 UndefLower, DAG);
17373 }
17375 // Don't extract both uppers, instead shuffle and then extract.
17376 assert(NumUpperHalves == 2 && "Half vector count went wrong");
17377 return SDValue();
17378 }
17380 // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
17381 if (NumUpperHalves == 0) {
17382 // AVX2 has efficient 64-bit element cross-lane shuffles.
17383 // TODO: Refine to account for unary shuffle, splat, and other masks?
17384 if (Subtarget.hasAVX2() && EltWidth == 64)
17385 return SDValue();
17386 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
17387 if (Subtarget.hasAVX512() && VT.is512BitVector())
17388 return SDValue();
17389 // Narrow shuffle + insert is better than the wide alternative.
17390 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17391 UndefLower, DAG, /*UseConcat*/ true);
17392 }
17394 // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
17395 return SDValue();
17396 }
17398 /// Test whether the specified input (0 or 1) is in-place blended by the
17399 /// given mask.
17401 /// This returns true if the elements from a particular input are already in the
17402 /// slot required by the given mask and require no permutation.
17403 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
17404 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
17405 int Size = Mask.size();
17406 for (int i = 0; i < Size; ++i)
17407 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
17408 return false;
17410 return true;
17411 }
17413 /// Handle case where shuffle sources are coming from the same 128-bit lane and
17414 /// every lane can be represented as the same repeating mask - allowing us to
17415 /// shuffle the sources with the repeating shuffle and then permute the result
17416 /// to the destination lanes.
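///
/// Illustrative trace: for v8i32 with Mask = <4, 5, 4, 5, 0, 1, 0, 1>, each
/// destination lane repeats <0, 1, 0, 1> relative to its source lane, so we
/// first perform that repeated in-lane shuffle and then swap the two
/// 128-bit lanes into their destination positions.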
17417 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
17418 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
17419 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
17420 int NumElts = VT.getVectorNumElements();
17421 int NumLanes = VT.getSizeInBits() / 128;
17422 int NumLaneElts = NumElts / NumLanes;
17424 // On AVX2 we may be able to just shuffle the lowest elements and then
17425 // broadcast the result.
17426 if (Subtarget.hasAVX2()) {
17427 for (unsigned BroadcastSize : {16, 32, 64}) {
17428 if (BroadcastSize <= VT.getScalarSizeInBits())
17429 continue;
17430 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
17432 // Attempt to match a repeating pattern every NumBroadcastElts,
17433 // accounting for UNDEFs but only references the lowest 128-bit
17434 // lane of the inputs.
17435 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
17436 for (int i = 0; i != NumElts; i += NumBroadcastElts)
17437 for (int j = 0; j != NumBroadcastElts; ++j) {
17438 int M = Mask[i + j];
17439 if (M < 0)
17440 continue;
17441 int &R = RepeatMask[j];
17442 if (0 != ((M % NumElts) / NumLaneElts))
17443 return false;
17444 if (0 <= R && R != M)
17445 return false;
17446 R = M;
17447 }
17448 return true;
17449 };
17451 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
17452 if (!FindRepeatingBroadcastMask(RepeatMask))
17453 continue;
17455 // Shuffle the (lowest) repeated elements in place for broadcast.
17456 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
17458 // Shuffle the actual broadcast.
17459 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
17460 for (int i = 0; i != NumElts; i += NumBroadcastElts)
17461 for (int j = 0; j != NumBroadcastElts; ++j)
17462 BroadcastMask[i + j] = j;
17463 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
17464 BroadcastMask);
17465 }
17466 }
17468 // Bail if the shuffle mask doesn't cross 128-bit lanes.
17469 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
17470 return SDValue();
17472 // Bail if we already have a repeated lane shuffle mask.
17473 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
17474 return SDValue();
17476 // Helper to look for repeated mask in each split sublane, and that those
17477 // sublanes can then be permuted into place.
17478 auto ShuffleSubLanes = [&](int SubLaneScale) {
17479 int NumSubLanes = NumLanes * SubLaneScale;
17480 int NumSubLaneElts = NumLaneElts / SubLaneScale;
17482 // Check that all the sources are coming from the same lane and see if we
17483 // can form a repeating shuffle mask (local to each sub-lane). At the same
17484 // time, determine the source sub-lane for each destination sub-lane.
17485 int TopSrcSubLane = -1;
17486 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
17487 SmallVector<SmallVector<int, 8>> RepeatedSubLaneMasks(
17488 SubLaneScale,
17489 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef));
17491 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
17492 // Extract the sub-lane mask, check that it all comes from the same lane
17493 // and normalize the mask entries to come from the first lane.
17494 int SrcLane = -1;
17495 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
17496 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
17497 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
17498 if (M < 0)
17499 continue;
17500 int Lane = (M % NumElts) / NumLaneElts;
17501 if ((0 <= SrcLane) && (SrcLane != Lane))
17502 return SDValue();
17503 SrcLane = Lane;
17504 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
17505 SubLaneMask[Elt] = LocalM;
17506 }
17508 // Whole sub-lane is UNDEF.
17509 if (SrcLane < 0)
17510 continue;
17512 // Attempt to match against the candidate repeated sub-lane masks.
17513 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
17514 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
17515 for (int i = 0; i != NumSubLaneElts; ++i) {
17516 if (M1[i] < 0 || M2[i] < 0)
17517 continue;
17518 if (M1[i] != M2[i])
17519 return false;
17520 }
17521 return true;
17522 };
17524 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
17525 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
17526 continue;
17528 // Merge the sub-lane mask into the matching repeated sub-lane mask.
17529 for (int i = 0; i != NumSubLaneElts; ++i) {
17530 int M = SubLaneMask[i];
17531 if (M < 0)
17532 continue;
17533 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
17534 "Unexpected mask element");
17535 RepeatedSubLaneMask[i] = M;
17536 }
17538 // Track the top most source sub-lane - by setting the remaining to
17539 // UNDEF we can greatly simplify shuffle matching.
17540 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
17541 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
17542 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
17543 break;
17544 }
17546 // Bail if we failed to find a matching repeated sub-lane mask.
17547 if (Dst2SrcSubLanes[DstSubLane] < 0)
17548 return SDValue();
17549 }
17550 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
17551 "Unexpected source lane");
17553 // Create a repeating shuffle mask for the entire vector.
17554 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
17555 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
17556 int Lane = SubLane / SubLaneScale;
17557 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
17558 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
17559 int M = RepeatedSubLaneMask[Elt];
17560 if (M < 0)
17561 continue;
17562 int Idx = (SubLane * NumSubLaneElts) + Elt;
17563 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
17564 }
17565 }
17566 SDValue RepeatedShuffle =
17567 DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
17569 // Shuffle each source sub-lane to its destination.
17570 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
17571 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
17572 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
17573 if (SrcSubLane < 0)
17574 continue;
17575 for (int j = 0; j != NumSubLaneElts; ++j)
17576 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
17577 }
17579 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
17580 SubLaneMask);
17581 };
17583 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
17584 // (with PERMQ/PERMPD). On AVX2/AVX512BW targets, permuting 32-bit sub-lanes,
17585 // even with a variable shuffle, can be worth it for v32i8/v64i8 vectors.
17586 // Otherwise we can only permute whole 128-bit lanes.
17587 int MinSubLaneScale = 1, MaxSubLaneScale = 1;
17588 if (Subtarget.hasAVX2() && VT.is256BitVector()) {
17589 bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
17590 MinSubLaneScale = 2;
17591 MaxSubLaneScale =
17592 (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
17593 }
17594 if (Subtarget.hasBWI() && VT == MVT::v64i8)
17595 MinSubLaneScale = MaxSubLaneScale = 4;
17597 for (int Scale = MinSubLaneScale; Scale <= MaxSubLaneScale; Scale *= 2)
17598 if (SDValue Shuffle = ShuffleSubLanes(Scale))
17599 return Shuffle;
17601 return SDValue();
17602 }
17604 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
17605 bool &ForceV1Zero, bool &ForceV2Zero,
17606 unsigned &ShuffleImm, ArrayRef<int> Mask,
17607 const APInt &Zeroable) {
17608 int NumElts = VT.getVectorNumElements();
17609 assert(VT.getScalarSizeInBits() == 64 &&
17610 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
17611 "Unexpected data type for VSHUFPD");
17612 assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
17613 "Illegal shuffle mask");
17615 bool ZeroLane[2] = { true, true };
17616 for (int i = 0; i < NumElts; ++i)
17617 ZeroLane[i & 1] &= Zeroable[i];
17619 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
17620 // Mask for V4F64: 0/1, 4/5, 2/3, 6/7..
17621 ShuffleImm = 0;
17622 bool ShufpdMask = true;
17623 bool CommutableMask = true;
17624 for (int i = 0; i < NumElts; ++i) {
17625 if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
17626 continue;
17627 if (Mask[i] < 0)
17628 return false;
17629 int Val = (i & 6) + NumElts * (i & 1);
17630 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
17631 if (Mask[i] < Val || Mask[i] > Val + 1)
17632 ShufpdMask = false;
17633 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
17634 CommutableMask = false;
17635 ShuffleImm |= (Mask[i] % 2) << i;
17636 }
17638 if (!ShufpdMask && !CommutableMask)
17639 return false;
17641 if (!ShufpdMask && CommutableMask)
17642 std::swap(V1, V2);
17644 ForceV1Zero = ZeroLane[0];
17645 ForceV2Zero = ZeroLane[1];
17646 return true;
17647 }
17649 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
17650 SDValue V2, ArrayRef<int> Mask,
17651 const APInt &Zeroable,
17652 const X86Subtarget &Subtarget,
17653 SelectionDAG &DAG) {
17654 assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
17655 "Unexpected data type for VSHUFPD");
17657 unsigned Immediate = 0;
17658 bool ForceV1Zero = false, ForceV2Zero = false;
17659 if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
17660 Mask, Zeroable))
17661 return SDValue();
17663 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
17664 if (ForceV1Zero)
17665 V1 = getZeroVector(VT, Subtarget, DAG, DL);
17666 if (ForceV2Zero)
17667 V2 = getZeroVector(VT, Subtarget, DAG, DL);
17669 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
17670 DAG.getTargetConstant(Immediate, DL, MVT::i8));
17671 }
17673 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
17674 // by zeroable elements in the remaining 24 elements. Turn this into two
17675 // vmovqb instructions shuffled together.
17676 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
17677 SDValue V1, SDValue V2,
17678 ArrayRef<int> Mask,
17679 const APInt &Zeroable,
17680 SelectionDAG &DAG) {
17681 assert(VT == MVT::v32i8 && "Unexpected type!");
17683 // The first 8 indices should be every 8th element.
17684 if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
17685 return SDValue();
17687 // Remaining elements need to be zeroable.
17688 if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
17689 return SDValue();
17691 V1 = DAG.getBitcast(MVT::v4i64, V1);
17692 V2 = DAG.getBitcast(MVT::v4i64, V2);
17694 V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
17695 V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
17697 // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
17698 // the upper bits of the result using an unpckldq.
17699 SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
17700 { 0, 1, 2, 3, 16, 17, 18, 19,
17701 4, 5, 6, 7, 20, 21, 22, 23 });
17702 // Insert the unpckldq into a zero vector to widen to v32i8.
17703 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
17704 DAG.getConstant(0, DL, MVT::v32i8), Unpack,
17705 DAG.getIntPtrConstant(0, DL));
17706 }
17709 /// Handle lowering of 4-lane 64-bit floating point shuffles.
17711 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
17712 /// isn't available.
17713 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17714 const APInt &Zeroable, SDValue V1, SDValue V2,
17715 const X86Subtarget &Subtarget,
17716 SelectionDAG &DAG) {
17717 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
17718 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
17719 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
17721 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
17722 Subtarget, DAG))
17723 return V;
17725 if (V2.isUndef()) {
17726 // Check for being able to broadcast a single element.
17727 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
17728 Mask, Subtarget, DAG))
17729 return Broadcast;
17731 // Use low duplicate instructions for masks that match their pattern.
17732 if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
17733 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
17735 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
17736 // Non-half-crossing single input shuffles can be lowered with an
17737 // interleaved permutation.
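// For example, Mask = <1, 0, 3, 2> yields VPERMILPMask = 0b0101, a
// VPERMILPD that swaps the two doubles inside each 128-bit lane.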
17738 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
17739 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
17740 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
17741 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
17742 }
17744 // With AVX2 we have direct support for this permutation.
17745 if (Subtarget.hasAVX2())
17746 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
17747 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
17749 // Try to create an in-lane repeating shuffle mask and then shuffle the
17750 // results into the target lanes.
17751 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17752 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
17753 return V;
17755 // Try to permute the lanes and then use a per-lane permute.
17756 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
17757 Mask, DAG, Subtarget))
17758 return V;
17760 // Otherwise, fall back.
17761 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
17762 DAG, Subtarget);
17763 }
17765 // Use dedicated unpack instructions for masks that match their pattern.
17766 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
17767 return V;
17769 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
17770 Zeroable, Subtarget, DAG))
17771 return Blend;
17773 // Check if the blend happens to exactly fit that of SHUFPD.
17774 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
17775 Zeroable, Subtarget, DAG))
17776 return Op;
17778 bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
17779 bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
17781 // If we have lane crossing shuffles AND they don't all come from the lower
17782 // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
17783 // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
17784 // canonicalize to a blend of splat which isn't necessary for this combine.
17785 if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
17786 !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
17787 (V1.getOpcode() != ISD::BUILD_VECTOR) &&
17788 (V2.getOpcode() != ISD::BUILD_VECTOR))
17789 return lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2, Mask, DAG);
17791 // If we have one input in place, then we can permute the other input and
17792 // blend the result.
17793 if (V1IsInPlace || V2IsInPlace)
17794 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
17795 Subtarget, DAG);
17797 // Try to create an in-lane repeating shuffle mask and then shuffle the
17798 // results into the target lanes.
17799 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17800 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
17801 return V;
17803 // Try to simplify this by merging 128-bit lanes to enable a lane-based
17804 // shuffle. However, if we have AVX2 and either inputs are already in place,
17805 // we will be able to shuffle even across lanes the other input in a single
17806 // instruction so skip this pattern.
17807 if (!(Subtarget.hasAVX2() && (V1IsInPlace || V2IsInPlace)))
17808 if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
17809 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
17810 return V;
17812 // If we have VLX support, we can use VEXPAND.
17813 if (Subtarget.hasVLX())
17814 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
17815 DAG, Subtarget))
17816 return V;
17818 // If we have AVX2 then we always want to lower with a blend because at v4 we
17819 // can fully permute the elements.
17820 if (Subtarget.hasAVX2())
17821 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
17822 Subtarget, DAG);
17824 // Otherwise fall back on generic lowering.
17825 return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
17826 Subtarget, DAG);
17827 }
17829 /// Handle lowering of 4-lane 64-bit integer shuffles.
17831 /// This routine is only called when we have AVX2 and thus a reasonable
17832 /// instruction set for v4i64 shuffling.
17833 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17834 const APInt &Zeroable, SDValue V1, SDValue V2,
17835 const X86Subtarget &Subtarget,
17836 SelectionDAG &DAG) {
17837 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
17838 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
17839 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
17840 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
17842 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
17843 Subtarget, DAG))
17844 return V;
17846 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
17847 Zeroable, Subtarget, DAG))
17848 return Blend;
17850 // Check for being able to broadcast a single element.
17851 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
17852 Subtarget, DAG))
17853 return Broadcast;
17855 if (V2.isUndef()) {
17856 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
17857 // can use lower latency instructions that will operate on both lanes.
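// For example, Mask = <1, 0, 3, 2> repeats <1, 0> per lane and widens to
// the v8i32 mask <2, 3, 0, 1, 6, 7, 4, 5>, i.e. a PSHUFD with immediate
// 0x4E applied to both lanes at once.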
17858 SmallVector<int, 2> RepeatedMask;
17859 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
17860 SmallVector<int, 4> PSHUFDMask;
17861 narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
17862 return DAG.getBitcast(
17863 MVT::v4i64,
17864 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
17865 DAG.getBitcast(MVT::v8i32, V1),
17866 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
17867 }
17869 // AVX2 provides a direct instruction for permuting a single input across
17870 // lanes.
17871 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
17872 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
17873 }
17875 // Try to use shift instructions.
17876 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
17877 Zeroable, Subtarget, DAG))
17878 return Shift;
17880 // If we have VLX support, we can use VALIGN or VEXPAND.
17881 if (Subtarget.hasVLX()) {
17882 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
17883 Subtarget, DAG))
17884 return Rotate;
17886 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
17887 DAG, Subtarget))
17888 return V;
17889 }
17891 // Try to use PALIGNR.
17892 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
17893 Subtarget, DAG))
17894 return Rotate;
17896 // Use dedicated unpack instructions for masks that match their pattern.
17897 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
17898 return V;
17900 bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
17901 bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
17903 // If we have one input in place, then we can permute the other input and
17904 // blend the result.
17905 if (V1IsInPlace || V2IsInPlace)
17906 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
17907 Subtarget, DAG);
17909 // Try to create an in-lane repeating shuffle mask and then shuffle the
17910 // results into the target lanes.
17911 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17912 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
17913 return V;
17915 // Try to lower to PERMQ(BLENDD(V1,V2)).
17916 if (SDValue V =
17917 lowerShuffleAsBlendAndPermute(DL, MVT::v4i64, V1, V2, Mask, DAG))
17918 return V;
17920 // Try to simplify this by merging 128-bit lanes to enable a lane-based
17921 // shuffle. However, if we have AVX2 and either inputs are already in place,
17922 // we will be able to shuffle even across lanes the other input in a single
17923 // instruction so skip this pattern.
17924 if (!V1IsInPlace && !V2IsInPlace)
17925 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
17926 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
17927 return Result;
17929 // Otherwise fall back on generic blend lowering.
17930 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
17931 Subtarget, DAG);
17932 }
17934 /// Handle lowering of 8-lane 32-bit floating point shuffles.
17936 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
17937 /// isn't available.
17938 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17939 const APInt &Zeroable, SDValue V1, SDValue V2,
17940 const X86Subtarget &Subtarget,
17941 SelectionDAG &DAG) {
17942 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
17943 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
17944 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
17946 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
17947 Zeroable, Subtarget, DAG))
17948 return Blend;
17950 // Check for being able to broadcast a single element.
17951 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
17952 Subtarget, DAG))
17953 return Broadcast;
17955 // If the shuffle mask is repeated in each 128-bit lane, we have many more
17956 // options to efficiently lower the shuffle.
17957 SmallVector<int, 4> RepeatedMask;
17958 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
17959 assert(RepeatedMask.size() == 4 &&
17960 "Repeated masks must be half the mask width!");
17962 // Use even/odd duplicate instructions for masks that match their pattern.
17963 if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
17964 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
17965 if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
17966 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
17968 if (V2.isUndef())
17969 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
17970 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
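// For example, Mask = <1, 0, 3, 2, 5, 4, 7, 6> repeats <1, 0, 3, 2>, which
// encodes as the VPERMILPI immediate 0xB1.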
17972 // Use dedicated unpack instructions for masks that match their pattern.
17973 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
17974 return V;
17976 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
17977 // have already handled any direct blends.
17978 return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
17979 }
17981 // Try to create an in-lane repeating shuffle mask and then shuffle the
17982 // results into the target lanes.
17983 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17984 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
17985 return V;
17987 // If we have a single input shuffle with different shuffle patterns in the
17988 // two 128-bit lanes use the variable mask to VPERMILPS.
17989 if (V2.isUndef()) {
17990 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
17991 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
17992 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
17993 }
17994 if (Subtarget.hasAVX2()) {
17995 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
17996 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
17997 }
17998 // Otherwise, fall back.
17999 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
18000 DAG, Subtarget);
18001 }
18003 // Try to simplify this by merging 128-bit lanes to enable a lane-based
18004 // shuffle.
18005 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18006 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
18007 return Result;
18009 // If we have VLX support, we can use VEXPAND.
18010 if (Subtarget.hasVLX())
18011 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
18012 DAG, Subtarget))
18013 return V;
18015 // For non-AVX512, if the mask matches an in-lane 16-bit unpack pattern then
18016 // try to split, since after the split we get more efficient code using
18017 // vpunpcklwd and vpunpckhwd instructions than with vblend.
18018 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32, DAG))
18019 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, Subtarget,
18020 DAG);
18022 // If we have AVX2 then we always want to lower with a blend because at v8 we
18023 // can fully permute the elements.
18024 if (Subtarget.hasAVX2())
18025 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8f32, V1, V2, Mask,
18026 Subtarget, DAG);
18028 // Otherwise fall back on generic lowering.
18029 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
18030 Subtarget, DAG);
18031 }
18033 /// Handle lowering of 8-lane 32-bit integer shuffles.
18035 /// This routine is only called when we have AVX2 and thus a reasonable
18036 /// instruction set for v8i32 shuffling.
18037 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18038 const APInt &Zeroable, SDValue V1, SDValue V2,
18039 const X86Subtarget &Subtarget,
18040 SelectionDAG &DAG) {
18041 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
18042 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
18043 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
18044 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
18046 // Whenever we can lower this as a zext, that instruction is strictly faster
18047 // than any alternative. It also allows us to fold memory operands into the
18048 // shuffle in many cases.
18049 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
18050 Zeroable, Subtarget, DAG))
18051 return ZExt;
18053 // For non-AVX512, if the mask matches an in-lane 16-bit unpack pattern then
18054 // try to split, since after the split we get more efficient code than with
18055 // vblend by using vpunpcklwd and vpunpckhwd instructions.
18056 if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
18057 !Subtarget.hasAVX512())
18058 return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Subtarget,
18059 DAG);
18061 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
18062 Zeroable, Subtarget, DAG))
18063 return Blend;
18065 // Check for being able to broadcast a single element.
18066 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
18067 Subtarget, DAG))
18068 return Broadcast;
18070 // If the shuffle mask is repeated in each 128-bit lane we can use more
18071 // efficient instructions that mirror the shuffles across the two 128-bit
18072 // lanes.
18073 SmallVector<int, 4> RepeatedMask;
18074 bool Is128BitLaneRepeatedShuffle =
18075 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
18076 if (Is128BitLaneRepeatedShuffle) {
18077 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
18078 if (V2.isUndef())
18079 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
18080 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18082 // Use dedicated unpack instructions for masks that match their pattern.
18083 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
18084 return V;
18085 }
18087 // Try to use shift instructions.
18088 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
18089 Zeroable, Subtarget, DAG))
18090 return Shift;
18092 // If we have VLX support, we can use VALIGN or EXPAND.
18093 if (Subtarget.hasVLX()) {
18094 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
18095 Subtarget, DAG))
18096 return Rotate;
18098 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
18099 DAG, Subtarget))
18100 return V;
18101 }
18103 // Try to use byte rotation instructions.
18104 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
18105 Subtarget, DAG))
18106 return Rotate;
18108 // Try to create an in-lane repeating shuffle mask and then shuffle the
18109 // results into the target lanes.
18110 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18111 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
18112 return V;
18114 if (V2.isUndef()) {
18115 // Try to produce a fixed cross-128-bit lane permute followed by unpack
18116 // because that should be faster than the variable permute alternatives.
18117 if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
18118 return V;
18120 // If the shuffle patterns aren't repeated but it's a single input, directly
18121 // generate a cross-lane VPERMD instruction.
18122 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
18123 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
18124 }
18126 // Assume that a single SHUFPS is faster than an alternative sequence of
18127 // multiple instructions (even if the CPU has a domain penalty).
18128 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
18129 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
18130 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
18131 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
18132 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
18133 CastV1, CastV2, DAG);
18134 return DAG.getBitcast(MVT::v8i32, ShufPS);
18135 }
18137 // Try to simplify this by merging 128-bit lanes to enable a lane-based
18138 // shuffle.
18139 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18140 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
18141 return Result;
18143 // Otherwise fall back on generic blend lowering.
18144 return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i32, V1, V2, Mask,
18145 Subtarget, DAG);
18146 }
18148 /// Handle lowering of 16-lane 16-bit integer shuffles.
18150 /// This routine is only called when we have AVX2 and thus a reasonable
18151 /// instruction set for v16i16 shuffling.
18152 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18153 const APInt &Zeroable, SDValue V1, SDValue V2,
18154 const X86Subtarget &Subtarget,
18155 SelectionDAG &DAG) {
18156 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
18157 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
18158 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
18159 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
18161 // Whenever we can lower this as a zext, that instruction is strictly faster
18162 // than any alternative. It also allows us to fold memory operands into the
18163 // shuffle in many cases.
18164 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
18165 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
18166 return ZExt;
  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // Try to use bit rotation instructions.
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
      return Rotate;

    // Try to produce a fixed cross-128-bit lane permute followed by unpack
    // because that should be faster than the variable permute alternatives.
    if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
      return V;

    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
    if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
      if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
              DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
        return V;

      return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
                                                 DAG, Subtarget);
    }

    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // As this is a single-input shuffle, the repeated mask should be
      // a strictly valid v8i16 mask that we can pass through to the v8i16
      // lowering to handle even the v16 case.
      return lowerV8I16GeneralSingleInputShuffle(
          DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
    }
  }

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // AVX512BW can lower to VPERMW (non-VLX will pad to v32i16).
  if (Subtarget.hasBWI())
    return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, Subtarget, DAG);

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Try to permute the lanes and then use a per-lane permute.
  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
    return V;

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
                                    Subtarget, DAG);
}
/// Handle lowering of 32-lane 8-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v32i8, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to use bit rotation instructions.
  if (V2.isUndef())
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
      return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return V;

  // There are no generalized cross-lane shuffle operations available on i8
  // element types.
  if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
    // Try to produce a fixed cross-128-bit lane permute followed by unpack
    // because that should be faster than the variable permute alternatives.
    if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
      return V;

    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
            DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
      return V;

    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
  if (Subtarget.hasVBMI())
    return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, Subtarget, DAG);

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Try to permute the lanes and then use a per-lane permute.
  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
    return V;

  // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
  // by zeroable elements in the remaining 24 elements. Turn this into two
  // vmovqb instructions shuffled together.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
                                                  Mask, Zeroable, DAG))
      return V;

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
                                    Subtarget, DAG);
}
/// High-level routine to lower various 256-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
                                  SDValue V1, SDValue V2, const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumElts = VT.getVectorNumElements();
  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });

  if (NumV2Elements == 1 && Mask[0] >= NumElts)
    if (SDValue Insertion = lowerShuffleAsElementInsertion(
            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return Insertion;

  // Handle special cases where the lower or upper half is UNDEF.
  if (SDValue V =
          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
    return V;

  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
  // can check for those subtargets here and avoid much of the subtarget
  // querying in the per-vector-type lowering routines. With AVX1 we have
  // essentially *zero* ability to manipulate a 256-bit vector with integer
  // types. Since we'll use floating point types there eventually, just
  // immediately cast everything to a float and operate entirely in that domain.
  if (VT.isInteger() && !Subtarget.hasAVX2()) {
    int ElementBits = VT.getScalarSizeInBits();
    if (ElementBits < 32) {
      // No floating point type available; if we can't use the bit operations
      // for masking/blending then decompose into 128-bit vectors.
      if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                            Subtarget, DAG))
        return V;
      if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
        return V;
      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
    }

    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
                                VT.getVectorNumElements());
    V1 = DAG.getBitcast(FpVT, V1);
    V2 = DAG.getBitcast(FpVT, V2);
    return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  }

  if (VT == MVT::v16f16) {
    V1 = DAG.getBitcast(MVT::v16i16, V1);
    V2 = DAG.getBitcast(MVT::v16i16, V2);
    return DAG.getBitcast(MVT::v16f16,
                          DAG.getVectorShuffle(MVT::v16i16, DL, V1, V2, Mask));
  }

  switch (VT.SimpleTy) {
  case MVT::v4f64:
    return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4i64:
    return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8f32:
    return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i32:
    return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i16:
    return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v32i8:
    return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 256-bit x86 vector type!");
  }
}
/// Try to lower a vector shuffle as a 128-bit shuffle.
static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(VT.getScalarSizeInBits() == 64 &&
         "Unexpected element type size for 128bit shuffle.");

  // Handling a 256-bit vector requires VLX, and lowerV2X128VectorShuffle() is
  // most probably the better solution for that case anyway.
  assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");

  // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
  SmallVector<int, 4> Widened128Mask;
  if (!canWidenShuffleElements(Mask, Widened128Mask))
    return SDValue();
  assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");

  // Try to use an insert into a zero vector.
  if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
      (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
    unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
                       DAG.getIntPtrConstant(0, DL));
  }

  // Check for patterns which can be matched with a single insert of a 256-bit
  // subvector.
  bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3}, V1, V2);
  if (OnlyUsesV1 ||
      isShuffleEquivalent(Mask, {0, 1, 2, 3, 8, 9, 10, 11}, V1, V2)) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
    SDValue SubVec =
        DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
                    DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
                       DAG.getIntPtrConstant(4, DL));
  }

  // See if this is an insertion of the lower 128-bits of V2 into V1.
  bool IsInsert = true;
  int V2Index = -1;
  for (int i = 0; i < 4; ++i) {
    assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
    if (Widened128Mask[i] < 0)
      continue;

    // Make sure all V1 subvectors are in place.
    if (Widened128Mask[i] < 4) {
      if (Widened128Mask[i] != i) {
        IsInsert = false;
        break;
      }
    } else {
      // Make sure we only have a single V2 index and it's the lowest 128-bits.
      if (V2Index >= 0 || Widened128Mask[i] != 4) {
        IsInsert = false;
        break;
      }
      V2Index = i;
    }
  }
  if (IsInsert && V2Index >= 0) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
    SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
                                 DAG.getIntPtrConstant(0, DL));
    return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
  }

  // See if we can widen to a 256-bit lane shuffle. We're going to lose 128-lane
  // UNDEF info by lowering to X86ISD::SHUF128 anyway, so by widening where
  // possible we at least ensure the lanes stay sequential to help later
  // combines.
  SmallVector<int, 2> Widened256Mask;
  if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
    Widened128Mask.clear();
    narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
  }

  // Try to lower to vshuf64x2/vshuf32x4.
  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
  unsigned PermMask = 0;
  // Ensure elements came from the same Op.
  for (int i = 0; i < 4; ++i) {
    assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
    if (Widened128Mask[i] < 0)
      continue;

    SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
    unsigned OpIndex = i / 2;
    if (Ops[OpIndex].isUndef())
      Ops[OpIndex] = Op;
    else if (Ops[OpIndex] != Op)
      return SDValue();

    // Convert the 128-bit shuffle mask selection values into 128-bit selection
    // bits defined by a vshuf64x2 instruction's immediate control byte.
    PermMask |= (Widened128Mask[i] % 4) << (i * 2);
  }
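  // Worked example (illustrative): swapping the two 256-bit halves of the
  // vector gives Widened128Mask = <2,3,0,1>, so PermMask = (2<<0) | (3<<2) |
  // (0<<4) | (1<<6) = 0x4E, the immediate emitted for the SHUF128 below.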
  return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
}
/// Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (V2.isUndef()) {
    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
                              ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
                              ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
    }
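    // Worked example (illustrative): Mask = <1,0,3,2,5,4,7,6> swaps the two
    // doubles inside every 128-bit lane, setting bits 0, 2, 4 and 6 above,
    // so VPERMILPMask = 0x55 and the shuffle becomes VPERMILPD $0x55.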
    SmallVector<int, 4> RepeatedMask;
    if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
  }

  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
                                           V2, Subtarget, DAG))
    return Shuf128;

  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
    return Unpck;

  // Check if the blend happens to exactly fit that of SHUFPD.
  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Op;

  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
    if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);

    if (V2.isUndef())
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
      return V;

    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

    // Otherwise, fall back to a SHUFPS sequence.
    return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
  }

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have a single-input shuffle with different shuffle patterns in the
  // 128-bit lanes and no lane crossing, use a variable mask VPERMILPS.
  if (V2.isUndef() &&
      !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
    SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
    return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
  }

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
                                       V1, V2, DAG, Subtarget))
    return V;

  return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (V2.isUndef()) {
    // When the shuffle is mirrored between the 128-bit lanes of the unit, we
    // can use lower-latency instructions that will operate on all four
    // 128-bit lanes.
    SmallVector<int, 2> Repeated128Mask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
      SmallVector<int, 4> PSHUFDMask;
      narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
      return DAG.getBitcast(
          MVT::v8i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
                      DAG.getBitcast(MVT::v16i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
    }
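    // Worked example (illustrative): Mask = <1,0,3,2,5,4,7,6> repeats <1,0>
    // in every 128-bit lane; narrowed to dwords that is <2,3,0,1>, so the
    // shuffle is emitted as PSHUFD $0x4E on the v16i32 bitcast of V1.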
    SmallVector<int, 4> Repeated256Mask;
    if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
                         getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
  }

  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
                                           V2, Subtarget, DAG))
    return Shuf128;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use VALIGN.
  if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
                                            Subtarget, DAG))
    return Rotate;

  // Try to use PALIGNR.
  if (Subtarget.hasBWI())
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;

  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
    return Unpck;

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the four 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  bool Is128BitLaneRepeatedShuffle =
      is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
  if (Is128BitLaneRepeatedShuffle) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (V2.isUndef())
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
      return V;
  }

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use VALIGN.
  if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
                                            Subtarget, DAG))
    return Rotate;

  // Try to use byte rotation instructions.
  if (Subtarget.hasBWI())
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;

  // Assume that a single SHUFPS is faster than using a permv shuffle.
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
    SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
    SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
                                            CastV1, CastV2, DAG);
    return DAG.getBitcast(MVT::v16i32, ShufPS);
  }

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V =
          lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (V2.isUndef()) {
    // Try to use bit rotation instructions.
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
      return Rotate;

    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
      // As this is a single-input shuffle, the repeated mask should be
      // a strictly valid v8i16 mask that we can pass through to the v8i16
      // lowering to handle even the v32 case.
      return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
                                                 RepeatedMask, Subtarget, DAG);
    }
  }

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
  assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to use bit rotation instructions.
  if (V2.isUndef())
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
      return Rotate;

  // Lower as AND if possible.
  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (SDValue Result = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v64i8, V1, V2, Mask, DAG, Subtarget))
    return Result;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  if (!is128BitLaneCrossingShuffleMask(MVT::v64i8, Mask)) {
    // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
    // PALIGNR will be cheaper than the second PSHUFB+OR.
    if (SDValue V = lowerShuffleAsByteRotateAndPermute(DL, MVT::v64i8, V1, V2,
                                                       Mask, Subtarget, DAG))
      return V;

    // If we can't directly blend but can use PSHUFB, that will be better as it
    // can both shuffle and set up the inefficient blend.
    bool V1InUse, V2InUse;
    return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v64i8, V1, V2, Mask, Zeroable,
                                        DAG, V1InUse, V2InUse);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (!V2.isUndef())
    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // VBMI can use VPERMV/VPERMV3 byte shuffles.
  if (Subtarget.hasVBMI())
    return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, Subtarget, DAG);

  return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}
/// High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  MVT VT, SDValue V1, SDValue V2,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(Subtarget.hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumElts = Mask.size();
  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });

  if (NumV2Elements == 1 && Mask[0] >= NumElts)
    if (SDValue Insertion = lowerShuffleAsElementInsertion(
            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return Insertion;

  // Handle special cases where the lower or upper half is UNDEF.
  if (SDValue V =
          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
    // Try using bit ops for masking and blending before falling back to
    // splitting.
    if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                          Subtarget, DAG))
      return V;
    if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
      return V;

    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
  }

  if (VT == MVT::v32f16) {
    V1 = DAG.getBitcast(MVT::v32i16, V1);
    V2 = DAG.getBitcast(MVT::v32i16, V2);
    return DAG.getBitcast(MVT::v32f16,
                          DAG.getVectorShuffle(MVT::v32i16, DL, V1, V2, Mask));
  }

  // Dispatch to each element type for lowering. If we don't have support for
  // specific element type shuffles at 512 bits, immediately split them and
  // lower them. Each lowering routine of a given type is allowed to assume that
  // the requisite ISA extensions for that element type are available.
  switch (VT.SimpleTy) {
  case MVT::v8f64:
    return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16f32:
    return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i64:
    return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i32:
    return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v32i16:
    return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v64i8:
    return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 512-bit x86 vector type!");
  }
}
static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
                                         MVT VT, SDValue V1, SDValue V2,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {
  // Shuffle should be unary.
  if (!V2.isUndef())
    return SDValue();

  int ShiftAmt = -1;
  int NumElts = Mask.size();
  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
           "Unexpected mask index.");
    if (M < 0)
      continue;

    // The first non-undef element determines our shift amount.
    if (ShiftAmt < 0) {
      ShiftAmt = M - i;
      // Need to be shifting right.
      if (ShiftAmt <= 0)
        return SDValue();
    }
    // All non-undef elements must shift by the same amount.
    if (ShiftAmt != M - i)
      return SDValue();
  }
  assert(ShiftAmt >= 0 && "All undef?");
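  // Illustrative example (not from the original source): for v8i1,
  // Mask = <2,3,4,5,6,7,-1,-1> gives ShiftAmt == 2, since every defined
  // element i reads source element i + 2; that is exactly a KSHIFTR by 2.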
  // Great, we found a shift right.
  MVT WideVT = VT;
  if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
    WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
  SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
                            DAG.getUNDEF(WideVT), V1,
                            DAG.getIntPtrConstant(0, DL));
  Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
                    DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                     DAG.getIntPtrConstant(0, DL));
}
// Determine if this shuffle can be implemented with a KSHIFT instruction.
// Returns the shift amount if possible or -1 if not. This is a simplified
// version of matchShuffleAsShift.
static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
                                    int MaskOffset, const APInt &Zeroable) {
  int Size = Mask.size();

  auto CheckZeros = [&](int Shift, bool Left) {
    for (int j = 0; j < Shift; ++j)
      if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
        return false;

    return true;
  };

  auto MatchShift = [&](int Shift, bool Left) {
    unsigned Pos = Left ? Shift : 0;
    unsigned Low = Left ? 0 : Shift;
    unsigned Len = Size - Shift;
    return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
  };
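  // Illustrative example (not from the original source): with Size == 8 and
  // MaskOffset == 0, Mask = <z,z,0,1,2,3,4,5> (z meaning zeroable) passes
  // CheckZeros(2, /*Left=*/true) and MatchShift(2, true), so the loop below
  // reports a KSHIFTL by 2.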
  for (int Shift = 1; Shift != Size; ++Shift)
    for (bool Left : {true, false})
      if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
        Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
        return Shift;
      }

  return -1;
}
// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
// vector, shuffle and then truncate it back.
static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                MVT VT, SDValue V1, SDValue V2,
                                const APInt &Zeroable,
                                const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  assert(Subtarget.hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  int NumElts = Mask.size();

  // Try to recognize shuffles that are just padding a subvector with zeros.
  int SubvecElts = 0;
  int Src = -1;
  for (int i = 0; i != NumElts; ++i) {
    if (Mask[i] >= 0) {
      // Grab the source from the first valid mask. All subsequent elements
      // need to use this same source.
      if (Src < 0)
        Src = Mask[i] / NumElts;
      if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
        break;
    }

    ++SubvecElts;
  }
  assert(SubvecElts != NumElts && "Identity shuffle?");

  // Clip to a power of 2.
  SubvecElts = PowerOf2Floor(SubvecElts);

  // Make sure the number of zeroable bits in the top at least covers the bits
  // not covered by the subvector.
  if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
    assert(Src >= 0 && "Expected a source!");
    MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
    SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
                                  Src == 0 ? V1 : V2,
                                  DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       DAG.getConstant(0, DL, VT),
                       Extract, DAG.getIntPtrConstant(0, DL));
  }

  // Try a simple shift right with undef elements. Later we'll try with zeros.
  if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
                                                DAG))
    return Shift;

  // Try to match KSHIFTs.
  unsigned Offset = 0;
  for (SDValue V : { V1, V2 }) {
    unsigned Opcode;
    int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
    if (ShiftAmt >= 0) {
      MVT WideVT = VT;
      if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
        WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
      SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
                                DAG.getUNDEF(WideVT), V,
                                DAG.getIntPtrConstant(0, DL));
      // Widened right shifts need two shifts to ensure we shift in zeroes.
      if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
        int WideElts = WideVT.getVectorNumElements();
        // Shift left to put the original vector in the MSBs of the new size.
        Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
                          DAG.getTargetConstant(WideElts - NumElts, DL,
                                                MVT::i8));
        // Increase the shift amount to account for the left shift.
        ShiftAmt += WideElts - NumElts;
      }

      Res = DAG.getNode(Opcode, DL, WideVT, Res,
                        DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                         DAG.getIntPtrConstant(0, DL));
    }
    Offset += NumElts; // Increment for next iteration.
  }
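  // Illustrative note (not from the original source): for a v4i1 right shift
  // on a non-DQI target, VT is widened to v16i1. A lone KSHIFTRW would pull
  // stray bits 4..15 into the result, so the code above first shifts left by
  // 12 to park the four live bits in the MSBs, then shifts right by 12 plus
  // the original amount, guaranteeing zeros are shifted in.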
  // If we're broadcasting a SETCC result, try to broadcast the ops instead.
  // TODO: What other unary shuffles would benefit from this?
  if (isBroadcastShuffleMask(Mask) && V1.getOpcode() == ISD::SETCC &&
      V1->hasOneUse()) {
    SDValue Op0 = V1.getOperand(0);
    SDValue Op1 = V1.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(V1.getOperand(2))->get();
    EVT OpVT = Op0.getValueType();
    return DAG.getSetCC(
        DL, VT, DAG.getVectorShuffle(OpVT, DL, Op0, DAG.getUNDEF(OpVT), Mask),
        DAG.getVectorShuffle(OpVT, DL, Op1, DAG.getUNDEF(OpVT), Mask), CC);
  }

  MVT ExtVT;
  switch (VT.SimpleTy) {
  default:
    llvm_unreachable("Expected a vector of i1 elements");
  case MVT::v2i1:
    ExtVT = MVT::v2i64;
    break;
  case MVT::v4i1:
    ExtVT = MVT::v4i32;
    break;
  case MVT::v8i1:
    // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
    // shuffle.
    ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
    break;
  case MVT::v16i1:
    // Take 512-bit type, unless we are avoiding 512-bit types and have the
    // 256-bit operation available.
    ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
    break;
  case MVT::v32i1:
    // Take 512-bit type, unless we are avoiding 512-bit types and have the
    // 256-bit operation available.
    assert(Subtarget.hasBWI() && "Expected AVX512BW support");
    ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
    break;
  case MVT::v64i1:
    // Fall back to scalarization. FIXME: We can do better if the shuffle
    // can be partitioned cleanly.
    if (!Subtarget.useBWIRegs())
      return SDValue();
    ExtVT = MVT::v64i8;
    break;
  }

  V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
  V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);

  SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
  // i1 was sign-extended, so we can use X86ISD::CVT2MASK.
  int NumElems = VT.getVectorNumElements();
  if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
      (Subtarget.hasDQI() && (NumElems < 32)))
    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
                        Shuffle, ISD::SETGT);

  return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
}
/// Helper function that returns true if the shuffle mask should be
/// commuted to improve canonicalization.
static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
  int NumElements = Mask.size();

  int NumV1Elements = 0, NumV2Elements = 0;
  for (int M : Mask)
    if (M < 0)
      continue;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;

  // Commute the shuffle as needed such that more elements come from V1 than
  // V2. This allows us to match the shuffle pattern strictly on how many
  // elements come from V1 without handling the symmetric cases.
  if (NumV2Elements > NumV1Elements)
    return true;

  assert(NumV1Elements > 0 && "No V1 indices");

  if (NumV2Elements == 0)
    return false;
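  // Illustrative example (not from the original source): the v4i32 mask
  // <4,5,0,1> has two elements from each input, but its low half uses only
  // V2, so the tie-breaking below returns true and the shuffle is commuted
  // to <0,1,4,5>.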
  // When the number of V1 and V2 elements are the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum of
  // indices for V2. When those are equal, try to ensure that the number of odd
  // indices for V1 is lower than the number of odd indices for V2.
  if (NumV1Elements == NumV2Elements) {
    int LowV1Elements = 0, LowV2Elements = 0;
    for (int M : Mask.slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
    if (LowV2Elements > LowV1Elements)
      return true;
    if (LowV2Elements == LowV1Elements) {
      int SumV1Indices = 0, SumV2Indices = 0;
      for (int i = 0, Size = Mask.size(); i < Size; ++i)
        if (Mask[i] >= NumElements)
          SumV2Indices += i;
        else if (Mask[i] >= 0)
          SumV1Indices += i;
      if (SumV2Indices < SumV1Indices)
        return true;
      if (SumV2Indices == SumV1Indices) {
        int NumV1OddIndices = 0, NumV2OddIndices = 0;
        for (int i = 0, Size = Mask.size(); i < Size; ++i)
          if (Mask[i] >= NumElements)
            NumV2OddIndices += i % 2;
          else if (Mask[i] >= 0)
            NumV1OddIndices += i % 2;
        if (NumV2OddIndices < NumV1OddIndices)
          return true;
      }
    }
  }

  return false;
}
// Forward declaration.
static SDValue canonicalizeShuffleMaskWithHorizOp(
    MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
    unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
    const X86Subtarget &Subtarget);
/// Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> OrigMask = SVOp->getMask();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  int NumElements = VT.getVectorNumElements();
  SDLoc DL(Op);
  bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);

  assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
         "Can't lower MMX shuffles");

  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node to second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
  if (V2IsUndef &&
      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
    for (int &M : NewMask)
      if (M >= NumElements)
        M = -1;
    return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  }

  // Check for illegal shuffle mask element index values.
  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
  assert(llvm::all_of(OrigMask,
                      [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");

  // We actually see shuffles that are entirely re-arrangements of a set of
  // zero inputs. This mostly happens while decomposing complex shuffles into
  // simple ones. Directly lower these as a buildvector of zeros.
  APInt KnownUndef, KnownZero;
  computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);

  APInt Zeroable = KnownUndef | KnownZero;
  if (Zeroable.isAllOnes())
    return getZeroVector(VT, Subtarget, DAG, DL);

  bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());

  // Try to collapse shuffles into using a vector type with fewer elements but
  // wider element types. We cap this to not form integers or floating point
  // elements wider than 64 bits. It does not seem beneficial to form i128
  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
  SmallVector<int, 16> WidenedMask;
  if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
      canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
    // Shuffle mask widening should not interfere with a broadcast opportunity
    // by obfuscating the operands with bitcasts.
    // TODO: Avoid lowering directly from this top-level function: make this
    // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
                                                    Subtarget, DAG))
      return Broadcast;

    MVT NewEltVT = VT.isFloatingPoint()
                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
    int NewNumElts = NumElements / 2;
    MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
      if (V2IsZero) {
        // Modify the new Mask to take all zeros from the all-zero vector.
        // Choose indices that are blend-friendly.
        bool UsedZeroVector = false;
        assert(is_contained(WidenedMask, SM_SentinelZero) &&
               "V2's non-undef elements are used?!");
        for (int i = 0; i != NewNumElts; ++i)
          if (WidenedMask[i] == SM_SentinelZero) {
            WidenedMask[i] = i + NewNumElts;
            UsedZeroVector = true;
          }
        // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
        // some elements to be undef.
        if (UsedZeroVector)
          V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
      }
      V1 = DAG.getBitcast(NewVT, V1);
      V2 = DAG.getBitcast(NewVT, V2);
      return DAG.getBitcast(
          VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
    }
  }
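  // Illustrative example (not from the original source): a v16i8 shuffle
  // whose mask pairs off as <2,3, 0,1, 6,7, 4,5, ...> widens here to the
  // v8i16 mask <1,0,3,2,...>, which the 128-bit lowering can then handle
  // with cheap PSHUFLW/PSHUFHW-style instructions.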
  SmallVector<SDValue> Ops = {V1, V2};
  SmallVector<int> Mask(OrigMask.begin(), OrigMask.end());

  // Canonicalize the shuffle with any horizontal ops inputs.
  // NOTE: This may update Ops and Mask.
  if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
          Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
    return DAG.getBitcast(VT, HOp);

  V1 = DAG.getBitcast(VT, Ops[0]);
  V2 = DAG.getBitcast(VT, Ops[1]);
  assert(NumElements == (int)Mask.size() &&
         "canonicalizeShuffleMaskWithHorizOp "
         "shouldn't alter the shuffle mask size");

  // Commute the shuffle if it will improve canonicalization.
  if (canonicalizeShuffleMaskWithCommute(Mask)) {
    ShuffleVectorSDNode::commuteMask(Mask);
    std::swap(V1, V2);
  }

  // For each vector width, delegate to a specialized lowering routine.
  if (VT.is128BitVector())
    return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (VT.is256BitVector())
    return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (VT.is512BitVector())
    return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (Is1BitVector)
    return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  llvm_unreachable("Unimplemented!");
}
/// Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();

  // Only non-legal VSELECTs reach this lowering; convert those into generic
  // shuffles and re-use the shuffle lowering path for blends.
  if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
    SmallVector<int, 32> Mask;
    if (createShuffleMaskFromVSELECT(Mask, Cond))
      return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
  }

  return SDValue();
}
SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  if (isSoftFP16(VT)) {
    MVT NVT = VT.changeVectorElementTypeToInteger();
    return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, dl, NVT, Cond,
                                          DAG.getBitcast(NVT, LHS),
                                          DAG.getBitcast(NVT, RHS)));
  }

  // A vselect where all conditions and data are constants can be optimized into
  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
  if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
    return SDValue();

  // Try to lower this to a blend-style vector shuffle. This can handle all
  // constant condition cases.
  if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
    return BlendOp;

  // If this VSELECT has a vector of i1 as a mask, it will be directly matched
  // with patterns on the mask registers on AVX-512.
  MVT CondVT = Cond.getSimpleValueType();
  unsigned CondEltSize = Cond.getScalarValueSizeInBits();
  if (CondEltSize == 1)
    return Op;

  // Variable blends are only legal from SSE4.1 onward.
  if (!Subtarget.hasSSE41())
    return SDValue();

  unsigned EltSize = VT.getScalarSizeInBits();
  unsigned NumElts = VT.getVectorNumElements();

  // Expand v32i16/v64i8 without BWI.
  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
    return SDValue();

  // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
  // into an i1 condition so that we can use the mask-based 512-bit blend
  // instructions.
  if (VT.getSizeInBits() == 512) {
    // Build a mask by testing the condition against zero.
    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
    SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
                                DAG.getConstant(0, dl, CondVT),
                                ISD::SETNE);
    // Now return a new VSELECT using the mask.
    return DAG.getSelect(dl, VT, Mask, LHS, RHS);
  }
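  // Illustrative note (not from the original source): for a v16i32 select
  // with a v16i32 condition, the SETNE above yields a v16i1 mask in a
  // k-register, and the rebuilt VSELECT can then be matched by the AVX-512
  // masked blend/move patterns.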
  // SEXT/TRUNC cases where the mask doesn't match the destination size.
  if (CondEltSize != EltSize) {
    // If we don't have a sign splat, rely on the expansion.
    if (CondEltSize != DAG.ComputeNumSignBits(Cond))
      return SDValue();

    MVT NewCondSVT = MVT::getIntegerVT(EltSize);
    MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
    Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
    return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
  }

  // Only some types will be legal on some subtargets. If we can emit a legal
  // VSELECT-matching blend, return Op, but if we need to expand, return a null
  // value.
  switch (VT.SimpleTy) {
  default:
    // Most of the vector types have blends past SSE4.1.
    return Op;

  case MVT::v32i8:
    // The byte blends for AVX vectors were introduced only in AVX2.
    if (Subtarget.hasAVX2())
      return Op;

    return SDValue();

  case MVT::v8i16:
  case MVT::v16i16: {
    // Bitcast everything to the vXi8 type and use a vXi8 vselect.
    MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
    Cond = DAG.getBitcast(CastVT, Cond);
    LHS = DAG.getBitcast(CastVT, LHS);
    RHS = DAG.getBitcast(CastVT, RHS);
    SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
    return DAG.getBitcast(VT, Select);
  }
  }
}
static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);
  assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
  SDLoc dl(Op);

  if (!Vec.getSimpleValueType().is128BitVector())
    return SDValue();

  if (VT.getSizeInBits() == 8) {
    // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
    // we're going to zero extend the register or fold the store.
    if (llvm::isNullConstant(Idx) && !X86::mayFoldIntoZeroExtend(Op) &&
        !X86::mayFoldIntoStore(Op))
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getBitcast(MVT::v4i32, Vec), Idx));

    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
                                  DAG.getTargetConstant(IdxVal, dl, MVT::i8));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
  }

  if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to an FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32. And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getBitcast(MVT::v4i32, Vec), Idx);
    return DAG.getBitcast(MVT::f32, Extract);
  }

  if (VT == MVT::i32 || VT == MVT::i64)
    return Op;

  return SDValue();
}
/// Extract one bit from mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  SDValue Vec = Op.getOperand(0);
  SDLoc dl(Vec);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);
  auto *IdxC = dyn_cast<ConstantSDNode>(Idx);
  MVT EltVT = Op.getSimpleValueType();

  assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
         "Unexpected vector type in ExtractBitFromMaskVector");

  // A variable index can't be handled in mask registers; extend the vector
  // to VR512/VR128.
  if (!IdxC) {
    unsigned NumElts = VecVT.getVectorNumElements();
    // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
    // than extending to 128/256-bit.
    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
  }

  unsigned IdxVal = IdxC->getZExtValue();
  if (IdxVal == 0) // the operation is legal
    return Op;

  // Extend to natively supported kshift.
  unsigned NumElems = VecVT.getVectorNumElements();
  MVT WideVecVT = VecVT;
  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
                      DAG.getUNDEF(WideVecVT), Vec,
                      DAG.getIntPtrConstant(0, dl));
  }

  // Use kshiftr instruction to move to the lower element.
  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));
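  // Illustrative example (not from the original source): extracting bit 3 of
  // a v16i1 mask becomes KSHIFTRW $3 followed by an extract of element 0.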
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                     DAG.getIntPtrConstant(0, dl));
}
19698 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
19699 SelectionDAG &DAG) const {
19701 SDValue Vec = Op.getOperand(0);
19702 MVT VecVT = Vec.getSimpleValueType();
19703 SDValue Idx = Op.getOperand(1);
19704 auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
19706 if (VecVT.getVectorElementType() == MVT::i1)
19707 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
19710 // Its more profitable to go through memory (1 cycles throughput)
19711 // than using VMOVD + VPERMV/PSHUFB sequence ( 2/3 cycles throughput)
19712 // IACA tool was used to get performance estimation
19713 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
19715 // example : extractelement <16 x i8> %a, i32 %i
19717 // Block Throughput: 3.00 Cycles
19718 // Throughput Bottleneck: Port5
19720 // | Num Of | Ports pressure in cycles | |
19721 // | Uops | 0 - DV | 5 | 6 | 7 | |
19722 // ---------------------------------------------
19723 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
19724 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
19725 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
19726 // Total Num Of Uops: 4
19729 // Block Throughput: 1.00 Cycles
19730 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
19732 // | | Ports pressure in cycles | |
19733 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
19734 // ---------------------------------------------------------
19735 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
19736 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
19737 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
19738 // Total Num Of Uops: 4
19743 unsigned IdxVal = IdxC->getZExtValue();
19745 // If this is a 256-bit vector result, first extract the 128-bit vector and
19746 // then extract the element from the 128-bit vector.
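// For example, (extract_vector_elt v8f32 %v, 5) extracts the upper 128-bit
// half and then reads element 5 & 3 == 1 from it.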
19747 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
19748 // Get the 128-bit vector.
19749 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
19750 MVT EltVT = VecVT.getVectorElementType();
19752 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
19753 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
19755 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
19756 // this can be done with a mask.
19757 IdxVal &= ElemsPerChunk - 1;
19758 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
19759 DAG.getIntPtrConstant(IdxVal, dl));
19762 assert(VecVT.is128BitVector() && "Unexpected vector length");
19764 MVT VT = Op.getSimpleValueType();
19766 if (VT == MVT::i16) {
19767 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
19768 // we're going to zero extend the register or fold the store (SSE41 only).
19769 if (IdxVal == 0 && !X86::mayFoldIntoZeroExtend(Op) &&
19770 !(Subtarget.hasSSE41() && X86::mayFoldIntoStore(Op))) {
19771 if (Subtarget.hasFP16())
19774 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
19775 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
19776 DAG.getBitcast(MVT::v4i32, Vec), Idx));
19779 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec,
19780 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
19781 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
19784 if (Subtarget.hasSSE41())
19785 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
19788 // TODO: We only extract a single element from v16i8, we can probably afford
19789 // to be more aggressive here before using the default approach of spilling to
19790 // the stack.
19791 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
19792 // Extract either the lowest i32 or any i16, and extract the sub-byte.
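// For example, (extract_vector_elt v16i8 %v, 7) extracts i16 element 3 of
// (bitcast %v to v8i16), shifts right by 8, and truncates to i8.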
19793 int DWordIdx = IdxVal / 4;
19794 if (DWordIdx == 0) {
19795 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
19796 DAG.getBitcast(MVT::v4i32, Vec),
19797 DAG.getIntPtrConstant(DWordIdx, dl));
19798 int ShiftVal = (IdxVal % 4) * 8;
19800 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
19801 DAG.getConstant(ShiftVal, dl, MVT::i8));
19802 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19805 int WordIdx = IdxVal / 2;
19806 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
19807 DAG.getBitcast(MVT::v8i16, Vec),
19808 DAG.getIntPtrConstant(WordIdx, dl));
19809 int ShiftVal = (IdxVal % 2) * 8;
19811 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
19812 DAG.getConstant(ShiftVal, dl, MVT::i8));
19813 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19816 if (VT == MVT::f16 || VT.getSizeInBits() == 32) {
19820 // Shuffle the element to the lowest element, then movss or movsh.
19821 SmallVector<int, 8> Mask(VecVT.getVectorNumElements(), -1);
19822 Mask[0] = static_cast<int>(IdxVal);
19823 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
19824 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
19825 DAG.getIntPtrConstant(0, dl));
19828 if (VT.getSizeInBits() == 64) {
19829 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
19830 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
19831 // to match extract_elt for f64.
19835 // UNPCKHPD the element to the lowest double word, then movsd.
19836 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
19837 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
19838 int Mask[2] = { 1, -1 };
19839 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
19840 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
19841 DAG.getIntPtrConstant(0, dl));
19847 /// Insert one bit to mask vector, like v16i1 or v8i1.
19848 /// AVX-512 feature.
19849 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
19850 const X86Subtarget &Subtarget) {
19852 SDValue Vec = Op.getOperand(0);
19853 SDValue Elt = Op.getOperand(1);
19854 SDValue Idx = Op.getOperand(2);
19855 MVT VecVT = Vec.getSimpleValueType();
19857 if (!isa<ConstantSDNode>(Idx)) {
19858 // Non-constant index: extend the source and destination,
19859 // insert the element, and then truncate the result.
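// For example, a variable-index insert into v16i1 is lowered as (sketch):
//   t0: v16i8 = sign_extend %vec
//   t1: i8    = sign_extend %elt
//   res: v16i1 = truncate (insert_vector_elt t0, t1, %idx)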
19860 unsigned NumElts = VecVT.getVectorNumElements();
19861 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
19862 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
19863 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
19864 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
19865 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
19866 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
19869 // Copy into a k-register, extract to v1i1 and insert_subvector.
19870 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
19871 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
19874 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
19875 SelectionDAG &DAG) const {
19876 MVT VT = Op.getSimpleValueType();
19877 MVT EltVT = VT.getVectorElementType();
19878 unsigned NumElts = VT.getVectorNumElements();
19879 unsigned EltSizeInBits = EltVT.getScalarSizeInBits();
19881 if (EltVT == MVT::i1)
19882 return InsertBitToMaskVector(Op, DAG, Subtarget);
19885 SDValue N0 = Op.getOperand(0);
19886 SDValue N1 = Op.getOperand(1);
19887 SDValue N2 = Op.getOperand(2);
19888 auto *N2C = dyn_cast<ConstantSDNode>(N2);
19891 // For variable insertion indices we're usually better off spilling to the
19892 // stack, but AVX512 can use a variable compare+select by comparing against
19893 // all possible vector indices, and FP insertion has less gpr->simd traffic.
19894 if (!(Subtarget.hasBWI() ||
19895 (Subtarget.hasAVX512() && EltSizeInBits >= 32) ||
19896 (Subtarget.hasSSE41() && VT.isFloatingPoint())))
19899 MVT IdxSVT = MVT::getIntegerVT(EltSizeInBits);
19900 MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
19901 if (!isTypeLegal(IdxSVT) || !isTypeLegal(IdxVT))
19904 SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
19905 SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
19906 SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
19908 SmallVector<SDValue, 16> RawIndices;
19909 for (unsigned I = 0; I != NumElts; ++I)
19910 RawIndices.push_back(DAG.getConstant(I, dl, IdxSVT));
19911 SDValue Indices = DAG.getBuildVector(IdxVT, dl, RawIndices);
19913 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
19914 return DAG.getSelectCC(dl, IdxSplat, Indices, EltSplat, N0,
19915 ISD::CondCode::SETEQ);
19918 if (N2C->getAPIntValue().uge(NumElts))
19920 uint64_t IdxVal = N2C->getZExtValue();
19922 bool IsZeroElt = X86::isZeroNode(N1);
19923 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
19925 if (IsZeroElt || IsAllOnesElt) {
19926 // Lower insertion of -1 elts into v16i8/v32i8/v16i16 as an 'OR' blend.
19927 // We don't deal with i8 0 since it appears to be handled elsewhere.
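// For example, inserting -1 at element 2 of v16i8 without SSE41 emits a
// single 'por' of N0 with the constant vector <0,0,-1,0,...,0> built below.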
19928 if (IsAllOnesElt &&
19929 ((VT == MVT::v16i8 && !Subtarget.hasSSE41()) ||
19930 ((VT == MVT::v32i8 || VT == MVT::v16i16) && !Subtarget.hasInt256()))) {
19931 SDValue ZeroCst = DAG.getConstant(0, dl, VT.getScalarType());
19932 SDValue OnesCst = DAG.getAllOnesConstant(dl, VT.getScalarType());
19933 SmallVector<SDValue, 8> CstVectorElts(NumElts, ZeroCst);
19934 CstVectorElts[IdxVal] = OnesCst;
19935 SDValue CstVector = DAG.getBuildVector(VT, dl, CstVectorElts);
19936 return DAG.getNode(ISD::OR, dl, VT, N0, CstVector);
19938 // See if we can do this more efficiently with a blend shuffle with a
19939 // rematerializable vector.
19940 if (Subtarget.hasSSE41() &&
19941 (EltSizeInBits >= 16 || (IsZeroElt && !VT.is128BitVector()))) {
19942 SmallVector<int, 8> BlendMask;
19943 for (unsigned i = 0; i != NumElts; ++i)
19944 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
19945 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
19946 : getOnesVector(VT, DAG, dl);
19947 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
19951 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
19952 // into that, and then insert the subvector back into the result.
19953 if (VT.is256BitVector() || VT.is512BitVector()) {
19954 // With a 256-bit vector, we can insert into the zero element efficiently
19955 // using a blend if we have AVX or AVX2 and the right data type.
19956 if (VT.is256BitVector() && IdxVal == 0) {
19957 // TODO: It is worthwhile to cast integer to floating point and back
19958 // and incur a domain crossing penalty if that's what we'll end up
19959 // doing anyway after extracting to a 128-bit vector.
19960 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
19961 (Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) {
19962 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
19963 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
19964 DAG.getTargetConstant(1, dl, MVT::i8));
19968 unsigned NumEltsIn128 = 128 / EltSizeInBits;
19969 assert(isPowerOf2_32(NumEltsIn128) &&
19970 "Vectors will always have power-of-two number of elements.");
19972 // If we are not inserting into the low 128-bit vector chunk,
19973 // then prefer the broadcast+blend sequence.
19974 // FIXME: relax the profitability check iff all N1 uses are insertions.
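// For example, inserting i32 %s at element 5 of v8i32 with AVX2 splats %s
// and blends it with N0 (the mask keeps every lane except 5), avoiding the
// extract/insert/reinsert of the upper 128-bit chunk done below.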
19975 if (IdxVal >= NumEltsIn128 &&
19976 ((Subtarget.hasAVX2() && EltSizeInBits != 8) ||
19977 (Subtarget.hasAVX() && (EltSizeInBits >= 32) &&
19978 X86::mayFoldLoad(N1, Subtarget)))) {
19979 SDValue N1SplatVec = DAG.getSplatBuildVector(VT, dl, N1);
19980 SmallVector<int, 8> BlendMask;
19981 for (unsigned i = 0; i != NumElts; ++i)
19982 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
19983 return DAG.getVectorShuffle(VT, dl, N0, N1SplatVec, BlendMask);
19986 // Get the desired 128-bit vector chunk.
19987 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
19989 // Insert the element into the desired chunk.
19990 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
19991 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
19993 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
19994 DAG.getIntPtrConstant(IdxIn128, dl));
19996 // Insert the changed part back into the bigger vector
19997 return insert128BitVector(N0, V, IdxVal, DAG, dl);
19999 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
20001 // This will be just movw/movd/movq/movsh/movss/movsd.
20002 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
20003 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
20004 EltVT == MVT::f16 || EltVT == MVT::i64) {
20005 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
20006 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
20009 // We can't directly insert an i8 or i16 into a vector, so zero extend
20010 // it to i32 first.
20011 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
20012 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
20013 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
20014 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
20015 N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
20016 return DAG.getBitcast(VT, N1);
20020 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
20021 // argument. SSE41 is required for pinsrb.
20022 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
20024 if (VT == MVT::v8i16) {
20025 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
20026 Opc = X86ISD::PINSRW;
20028 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
20029 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
20030 Opc = X86ISD::PINSRB;
20033 assert(N1.getValueType() != MVT::i32 && "Unexpected VT");
20034 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
20035 N2 = DAG.getTargetConstant(IdxVal, dl, MVT::i8);
20036 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
20039 if (Subtarget.hasSSE41()) {
20040 if (EltVT == MVT::f32) {
20041 // Bits [7:6] of the constant are the source select. This will always be
20042 // zero here. The DAG Combiner may combine an extract_elt index into
20043 // these bits. For example (insert (extract, 3), 2) could be matched by
20044 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
20045 // Bits [5:4] of the constant are the destination select. This is the
20046 // value of the incoming immediate.
20047 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
20048 // combine either bitwise AND or insert of float 0.0 to set these bits.
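// For example, inserting at IdxVal == 2 produces an insertps immediate of
// (2 << 4) == 0x20: count_s = 0, count_d = 2, zmask = 0.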
20050 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
20051 if (IdxVal == 0 && (!MinSize || !X86::mayFoldLoad(N1, Subtarget))) {
20052 // If this is an insertion of 32-bits into the low 32-bits of
20053 // a vector, we prefer to generate a blend with immediate rather
20054 // than an insertps. Blends are simpler operations in hardware and so
20055 // will always have equal or better performance than insertps.
20056 // But if optimizing for size and there's a load folding opportunity,
20057 // generate insertps, because blendps does not have a 32-bit memory
20058 // operand.
20059 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
20060 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
20061 DAG.getTargetConstant(1, dl, MVT::i8));
20063 // Create this as a scalar-to-vector.
20064 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
20065 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
20066 DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
20069 // PINSR* works with constant index.
20070 if (EltVT == MVT::i32 || EltVT == MVT::i64)
20077 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
20078 SelectionDAG &DAG) {
20080 MVT OpVT = Op.getSimpleValueType();
20082 // It's always cheaper to replace a xor+movd with xorps, and it simplifies
20083 // further dependency chains.
20084 if (X86::isZeroNode(Op.getOperand(0)))
20085 return getZeroVector(OpVT, Subtarget, DAG, dl);
20087 // If this is a 256-bit vector result, first insert into a 128-bit
20088 // vector and then insert into the 256-bit vector.
20089 if (!OpVT.is128BitVector()) {
20090 // Insert into a 128-bit vector.
20091 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
20092 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
20093 OpVT.getVectorNumElements() / SizeFactor);
20095 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
20097 // Insert the 128-bit vector.
20098 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
20100 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
20101 "Expected an SSE type!");
20103 // Pass through a v4i32 or v8i16 SCALAR_TO_VECTOR as that's what we use in
20104 // the isel patterns.
20105 if (OpVT == MVT::v4i32 || (OpVT == MVT::v8i16 && Subtarget.hasFP16()))
20108 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
20109 return DAG.getBitcast(
20110 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
20113 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
20114 // simple superregister reference or explicit instructions to insert
20115 // the upper bits of a vector.
20116 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
20117 SelectionDAG &DAG) {
20118 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
20120 return insert1BitVector(Op, DAG, Subtarget);
20123 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
20124 SelectionDAG &DAG) {
20125 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
20126 "Only vXi1 extract_subvectors need custom lowering");
20129 SDValue Vec = Op.getOperand(0);
20130 uint64_t IdxVal = Op.getConstantOperandVal(1);
20132 if (IdxVal == 0) // the operation is legal
20135 MVT VecVT = Vec.getSimpleValueType();
20136 unsigned NumElems = VecVT.getVectorNumElements();
20138 // Extend to natively supported kshift.
20139 MVT WideVecVT = VecVT;
20140 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
20141 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
20142 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
20143 DAG.getUNDEF(WideVecVT), Vec,
20144 DAG.getIntPtrConstant(0, dl));
20147 // Shift to the LSB.
20148 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
20149 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20151 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
20152 DAG.getIntPtrConstant(0, dl));
20155 // Returns the appropriate wrapper opcode for a global reference.
20156 unsigned X86TargetLowering::getGlobalWrapperKind(
20157 const GlobalValue *GV, const unsigned char OpFlags) const {
20158 // References to absolute symbols are never PC-relative.
20159 if (GV && GV->isAbsoluteSymbolRef())
20160 return X86ISD::Wrapper;
20162 CodeModel::Model M = getTargetMachine().getCodeModel();
20163 if (Subtarget.isPICStyleRIPRel() &&
20164 (M == CodeModel::Small || M == CodeModel::Kernel))
20165 return X86ISD::WrapperRIP;
20167 // GOTPCREL references must always use RIP.
20168 if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
20169 return X86ISD::WrapperRIP;
20171 return X86ISD::Wrapper;
20174 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
20175 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
20176 // one of the above-mentioned nodes. It has to be wrapped because otherwise
20177 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
20178 // be used to form an addressing mode. These wrapped nodes will be selected
20179 // into MOV32ri.
20181 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
20182 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
20184 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20185 // global base reg.
20186 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
20188 auto PtrVT = getPointerTy(DAG.getDataLayout());
20189 SDValue Result = DAG.getTargetConstantPool(
20190 CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
20192 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
20193 // With PIC, the address is actually $g + Offset.
20196 DAG.getNode(ISD::ADD, DL, PtrVT,
20197 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
20203 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
20204 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
20206 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20207 // global base reg.
20208 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
20210 auto PtrVT = getPointerTy(DAG.getDataLayout());
20211 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
20213 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
20215 // With PIC, the address is actually $g + Offset.
20218 DAG.getNode(ISD::ADD, DL, PtrVT,
20219 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
20224 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
20225 SelectionDAG &DAG) const {
20226 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
20230 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
20231 // Create the TargetBlockAddressAddress node.
20232 unsigned char OpFlags =
20233 Subtarget.classifyBlockAddressReference();
20234 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
20235 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
20237 auto PtrVT = getPointerTy(DAG.getDataLayout());
20238 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
20239 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
20241 // With PIC, the address is actually $g + Offset.
20242 if (isGlobalRelativeToPICBase(OpFlags)) {
20243 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
20244 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
20250 /// Creates target global address or external symbol nodes for calls or
20251 /// other uses.
20252 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
20253 bool ForCall) const {
20254 // Unpack the global address or external symbol.
20255 const SDLoc &dl = SDLoc(Op);
20256 const GlobalValue *GV = nullptr;
20257 int64_t Offset = 0;
20258 const char *ExternalSym = nullptr;
20259 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
20260 GV = G->getGlobal();
20261 Offset = G->getOffset();
20263 const auto *ES = cast<ExternalSymbolSDNode>(Op);
20264 ExternalSym = ES->getSymbol();
20267 // Calculate some flags for address lowering.
20268 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
20269 unsigned char OpFlags;
20271 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
20273 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
20274 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
20275 bool NeedsLoad = isGlobalStubReference(OpFlags);
20277 CodeModel::Model M = DAG.getTarget().getCodeModel();
20278 auto PtrVT = getPointerTy(DAG.getDataLayout());
20282 // Create a target global address if this is a global. If possible, fold the
20283 // offset into the global address reference. Otherwise, ADD it on later.
20284 // Suppress the folding if Offset is negative: movl foo-1, %eax is not
20285 // allowed because if the address of foo is 0, the ELF R_X86_64_32
20286 // relocation will compute to a negative value, which is invalid.
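// For example, with "movl foo-1, %eax": if foo ends up at address 0, the
// 32-bit relocation would have to encode -1, so the -1 is kept as a
// separate ADD below instead of being folded.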
20287 int64_t GlobalOffset = 0;
20288 if (OpFlags == X86II::MO_NO_FLAG && Offset >= 0 &&
20289 X86::isOffsetSuitableForCodeModel(Offset, M, true)) {
20290 std::swap(GlobalOffset, Offset);
20292 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
20294 // If this is not a global address, this must be an external symbol.
20295 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
20298 // If this is a direct call, avoid the wrapper if we don't need to do any
20299 // loads or adds. This allows SDAG ISel to match direct calls.
20300 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
20303 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
20305 // With PIC, the address is actually $g + Offset.
20307 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
20308 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
20311 // For globals that require a load from a stub to get the address, emit the
20312 // load.
20314 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
20315 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
20317 // If there was a non-zero offset that we didn't fold, create an explicit
20318 // addition for it.
20320 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
20321 DAG.getConstant(Offset, dl, PtrVT));
20327 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
20328 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
20332 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
20333 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
20334 unsigned char OperandFlags, bool LocalDynamic = false) {
20335 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
20336 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
20338 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
20339 GA->getValueType(0),
20343 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
20347 SDValue Ops[] = { Chain, TGA, *InFlag };
20348 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
20350 SDValue Ops[] = { Chain, TGA };
20351 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
20354 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
20355 MFI.setAdjustsStack(true);
20356 MFI.setHasCalls(true);
20358 SDValue Flag = Chain.getValue(1);
20359 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
20362 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
20364 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20367 SDLoc dl(GA); // ? function entry point might be better
20368 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
20369 DAG.getNode(X86ISD::GlobalBaseReg,
20370 SDLoc(), PtrVT), InFlag);
20371 InFlag = Chain.getValue(1);
20373 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
20376 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit LP64
20378 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20380 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
20381 X86::RAX, X86II::MO_TLSGD);
20384 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit ILP32
20386 LowerToTLSGeneralDynamicModelX32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20388 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
20389 X86::EAX, X86II::MO_TLSGD);
20392 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
20393 SelectionDAG &DAG, const EVT PtrVT,
20394 bool Is64Bit, bool Is64BitLP64) {
20397 // Get the start address of the TLS block for this module.
20398 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
20399 .getInfo<X86MachineFunctionInfo>();
20400 MFI->incNumLocalDynamicTLSAccesses();
20404 unsigned ReturnReg = Is64BitLP64 ? X86::RAX : X86::EAX;
20405 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, ReturnReg,
20406 X86II::MO_TLSLD, /*LocalDynamic=*/true);
20409 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
20410 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
20411 InFlag = Chain.getValue(1);
20412 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
20413 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
20416 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
20417 // of Base.
20420 unsigned char OperandFlags = X86II::MO_DTPOFF;
20421 unsigned WrapperKind = X86ISD::Wrapper;
20422 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
20423 GA->getValueType(0),
20424 GA->getOffset(), OperandFlags);
20425 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
20427 // Add x@dtpoff with the base.
20428 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
20431 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
20432 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20433 const EVT PtrVT, TLSModel::Model model,
20434 bool is64Bit, bool isPIC) {
20437 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
20438 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
20439 is64Bit ? 257 : 256));
20441 SDValue ThreadPointer =
20442 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
20443 MachinePointerInfo(Ptr));
20445 unsigned char OperandFlags = 0;
20446 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
20447 // the initial-exec model.
20448 unsigned WrapperKind = X86ISD::Wrapper;
20449 if (model == TLSModel::LocalExec) {
20450 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
20451 } else if (model == TLSModel::InitialExec) {
20453 OperandFlags = X86II::MO_GOTTPOFF;
20454 WrapperKind = X86ISD::WrapperRIP;
20456 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
20459 llvm_unreachable("Unexpected model");
20462 // emit "addl x@ntpoff,%eax" (local exec)
20463 // or "addl x@indntpoff,%eax" (initial exec)
20464 // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
20466 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
20467 GA->getOffset(), OperandFlags);
20468 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
20470 if (model == TLSModel::InitialExec) {
20471 if (isPIC && !is64Bit) {
20472 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
20473 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
20477 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
20478 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
20481 // The address of the thread local variable is the add of the thread
20482 // pointer with the offset of the variable.
20483 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
20487 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
20489 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
20491 if (DAG.getTarget().useEmulatedTLS())
20492 return LowerToTLSEmulatedModel(GA, DAG);
20494 const GlobalValue *GV = GA->getGlobal();
20495 auto PtrVT = getPointerTy(DAG.getDataLayout());
20496 bool PositionIndependent = isPositionIndependent();
20498 if (Subtarget.isTargetELF()) {
20499 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
20501 case TLSModel::GeneralDynamic:
20502 if (Subtarget.is64Bit()) {
20503 if (Subtarget.isTarget64BitLP64())
20504 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
20505 return LowerToTLSGeneralDynamicModelX32(GA, DAG, PtrVT);
20507 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
20508 case TLSModel::LocalDynamic:
20509 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT, Subtarget.is64Bit(),
20510 Subtarget.isTarget64BitLP64());
20511 case TLSModel::InitialExec:
20512 case TLSModel::LocalExec:
20513 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
20514 PositionIndependent);
20516 llvm_unreachable("Unknown TLS model.");
20519 if (Subtarget.isTargetDarwin()) {
20520 // Darwin only has one model of TLS. Lower to that.
20521 unsigned char OpFlag = 0;
20522 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
20523 X86ISD::WrapperRIP : X86ISD::Wrapper;
20525 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20526 // global base reg.
20527 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
20529 OpFlag = X86II::MO_TLVP_PIC_BASE;
20531 OpFlag = X86II::MO_TLVP;
20533 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
20534 GA->getValueType(0),
20535 GA->getOffset(), OpFlag);
20536 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
20538 // With PIC32, the address is actually $g + Offset.
20540 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
20541 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
20544 // Lowering the machine ISD will make sure everything is in the right
20545 // location.
20546 SDValue Chain = DAG.getEntryNode();
20547 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
20548 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
20549 SDValue Args[] = { Chain, Offset };
20550 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
20551 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
20552 DAG.getIntPtrConstant(0, DL, true),
20553 Chain.getValue(1), DL);
20555 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
20556 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
20557 MFI.setAdjustsStack(true);
20559 // And our return value (tls address) is in the standard call return value
20561 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
20562 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
20565 if (Subtarget.isOSWindows()) {
20566 // Just use the implicit TLS architecture.
20567 // We need to generate something similar to:
20568 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
20570 // mov ecx, dword [rel _tls_index]; Load index (from C runtime)
20571 // mov rcx, qword [rdx+rcx*8]
20572 // mov eax, .tls$:tlsvar
20573 // [rax+rcx] contains the address
20574 // Windows 64bit: gs:0x58
20575 // Windows 32bit: fs:__tls_array
20578 SDValue Chain = DAG.getEntryNode();
20580 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
20581 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
20582 // use its literal value of 0x2C.
20583 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
20584 ? Type::getInt8PtrTy(*DAG.getContext(),
20586 : Type::getInt32PtrTy(*DAG.getContext(),
20589 SDValue TlsArray = Subtarget.is64Bit()
20590 ? DAG.getIntPtrConstant(0x58, dl)
20591 : (Subtarget.isTargetWindowsGNU()
20592 ? DAG.getIntPtrConstant(0x2C, dl)
20593 : DAG.getExternalSymbol("_tls_array", PtrVT));
20595 SDValue ThreadPointer =
20596 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
20599 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
20600 res = ThreadPointer;
20602 // Load the _tls_index variable
20603 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
20604 if (Subtarget.is64Bit())
20605 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
20606 MachinePointerInfo(), MVT::i32);
20608 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
20610 const DataLayout &DL = DAG.getDataLayout();
20612 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
20613 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
20615 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
20618 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
20620 // Get the offset of the start of the .tls section.
20621 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
20622 GA->getValueType(0),
20623 GA->getOffset(), X86II::MO_SECREL);
20624 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
20626 // The address of the thread local variable is the add of the thread
20627 // pointer with the offset of the variable.
20628 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
20631 llvm_unreachable("TLS not implemented for this target.");
20634 /// Lower SRA_PARTS and friends, which return two i32 values
20635 /// and take a 2 x i32 value to shift plus a shift amount.
20636 /// TODO: Can this be moved to general expansion code?
20637 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
20638 SDValue Lo, Hi;
20639 DAG.getTargetLoweringInfo().expandShiftParts(Op.getNode(), Lo, Hi, DAG);
20640 return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
20643 // Try to use a packed vector operation to handle i64 on 32-bit targets when
20644 // AVX512DQ is enabled.
20645 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
20646 const X86Subtarget &Subtarget) {
20647 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
20648 Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
20649 Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
20650 Op.getOpcode() == ISD::UINT_TO_FP) &&
20651 "Unexpected opcode!");
20652 bool IsStrict = Op->isStrictFPOpcode();
20653 unsigned OpNo = IsStrict ? 1 : 0;
20654 SDValue Src = Op.getOperand(OpNo);
20655 MVT SrcVT = Src.getSimpleValueType();
20656 MVT VT = Op.getSimpleValueType();
20658 if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
20659 (VT != MVT::f32 && VT != MVT::f64))
20662 // Pack the i64 into a vector, do the operation and extract.
20664 // Use at least a 256-bit source so the result is a full 128-bit vector in the f32 case.
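// For example, with VLX, i64 -> f32 is lowered as (sketch):
//   t0: v4i64 = scalar_to_vector %x
//   t1: v4f32 = sint_to_fp t0          ; vcvtqq2ps
//   res: f32  = extract_vector_elt t1, 0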
20665 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
20666 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
20667 MVT VecVT = MVT::getVectorVT(VT, NumElts);
20670 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
20672 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
20673 {Op.getOperand(0), InVec});
20674 SDValue Chain = CvtVec.getValue(1);
20675 SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
20676 DAG.getIntPtrConstant(0, dl));
20677 return DAG.getMergeValues({Value, Chain}, dl);
20680 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
20682 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
20683 DAG.getIntPtrConstant(0, dl));
20686 // Try to use a packed vector operation to handle i64 on 32-bit targets.
20687 static SDValue LowerI64IntToFP16(SDValue Op, SelectionDAG &DAG,
20688 const X86Subtarget &Subtarget) {
20689 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
20690 Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
20691 Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
20692 Op.getOpcode() == ISD::UINT_TO_FP) &&
20693 "Unexpected opcode!");
20694 bool IsStrict = Op->isStrictFPOpcode();
20695 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20696 MVT SrcVT = Src.getSimpleValueType();
20697 MVT VT = Op.getSimpleValueType();
20699 if (SrcVT != MVT::i64 || Subtarget.is64Bit() || VT != MVT::f16)
20702 // Pack the i64 into a vector, do the operation and extract.
20704 assert(Subtarget.hasFP16() && "Expected FP16");
20707 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
20709 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {MVT::v2f16, MVT::Other},
20710 {Op.getOperand(0), InVec});
20711 SDValue Chain = CvtVec.getValue(1);
20712 SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
20713 DAG.getIntPtrConstant(0, dl));
20714 return DAG.getMergeValues({Value, Chain}, dl);
20717 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, MVT::v2f16, InVec);
20719 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
20720 DAG.getIntPtrConstant(0, dl));
20723 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
20724 const X86Subtarget &Subtarget) {
20726 case ISD::SINT_TO_FP:
20727 // TODO: Handle wider types with AVX/AVX512.
20728 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
20730 // CVTDQ2PS or (V)CVTDQ2PD
20731 return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
20733 case ISD::UINT_TO_FP:
20734 // TODO: Handle wider types and i64 elements.
20735 if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
20737 // VCVTUDQ2PS or VCVTUDQ2PD
20738 return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
20745 /// Given a scalar cast operation that is extracted from a vector, try to
20746 /// vectorize the cast op followed by extraction. This will avoid an expensive
20747 /// round-trip between XMM and GPR.
20748 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
20749 const X86Subtarget &Subtarget) {
20750 // TODO: This could be enhanced to handle smaller integer types by peeking
20751 // through an extend.
20752 SDValue Extract = Cast.getOperand(0);
20753 MVT DestVT = Cast.getSimpleValueType();
20754 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
20755 !isa<ConstantSDNode>(Extract.getOperand(1)))
20758 // See if we have a 128-bit vector cast op for this type of cast.
20759 SDValue VecOp = Extract.getOperand(0);
20760 MVT FromVT = VecOp.getSimpleValueType();
20761 unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
20762 MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
20763 MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
20764 if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
20767 // If we are extracting from a non-zero element, first shuffle the source
20768 // vector to allow extracting from element zero.
20770 if (!isNullConstant(Extract.getOperand(1))) {
20771 SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
20772 Mask[0] = Extract.getConstantOperandVal(1);
20773 VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
20775 // If the source vector is wider than 128-bits, extract the low part. Do not
20776 // create an unnecessarily wide vector cast op.
20777 if (FromVT != Vec128VT)
20778 VecOp = extract128BitVector(VecOp, 0, DAG, DL);
20780 // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
20781 // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
20782 SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
20783 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
20784 DAG.getIntPtrConstant(0, DL));
20787 /// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
20788 /// try to vectorize the cast ops. This will avoid an expensive round-trip
20789 /// between XMM and GPR.
20790 static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
20791 const X86Subtarget &Subtarget) {
20792 // TODO: Allow FP_TO_UINT.
20793 SDValue CastToInt = CastToFP.getOperand(0);
20794 MVT VT = CastToFP.getSimpleValueType();
20795 if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
20798 MVT IntVT = CastToInt.getSimpleValueType();
20799 SDValue X = CastToInt.getOperand(0);
20800 MVT SrcVT = X.getSimpleValueType();
20801 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
20804 // See if we have 128-bit vector cast instructions for this type of cast.
20805 // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
20806 if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
20810 unsigned SrcSize = SrcVT.getSizeInBits();
20811 unsigned IntSize = IntVT.getSizeInBits();
20812 unsigned VTSize = VT.getSizeInBits();
20813 MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
20814 MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
20815 MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);
20817 // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
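// For example, f64 -> i32 -> f32 uses X86ISD::CVTTP2SI (cvttpd2dq, since
// v2f64 -> v4i32 changes the element count) followed by plain
// ISD::SINT_TO_FP (cvtdq2ps, as v4i32 -> v4f32 keeps the count).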
20818 unsigned ToIntOpcode =
20819 SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
20820 unsigned ToFPOpcode =
20821 IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
20823 // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
20825 // We are not defining the high elements (for example, zero them) because
20826 // that could nullify any performance advantage that we hoped to gain from
20827 // this vector op hack. We do not expect any adverse effects (like denorm
20828 // penalties) with cast ops.
20829 SDLoc DL(CastToFP);
20830 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
20831 SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
20832 SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
20833 SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
20834 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
20837 static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
20838 const X86Subtarget &Subtarget) {
20840 bool IsStrict = Op->isStrictFPOpcode();
20841 MVT VT = Op->getSimpleValueType(0);
20842 SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
20844 if (Subtarget.hasDQI()) {
20845 assert(!Subtarget.hasVLX() && "Unexpected features");
20847 assert((Src.getSimpleValueType() == MVT::v2i64 ||
20848 Src.getSimpleValueType() == MVT::v4i64) &&
20849 "Unsupported custom type");
20851 // With AVX512DQ but not VLX, we need to widen to get a 512-bit result type.
20852 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
20854 MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20856 // Need to concat with a zero vector for strict fp to avoid spurious
20857 // exceptions.
20858 SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
20859 : DAG.getUNDEF(MVT::v8i64);
20860 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
20861 DAG.getIntPtrConstant(0, DL));
20862 SDValue Res, Chain;
20864 Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
20865 {Op->getOperand(0), Src});
20866 Chain = Res.getValue(1);
20868 Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
20871 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
20872 DAG.getIntPtrConstant(0, DL));
20875 return DAG.getMergeValues({Res, Chain}, DL);
20879 bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
20880 Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
20881 if (VT != MVT::v4f32 || IsSigned)
20884 SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
20885 SDValue One = DAG.getConstant(1, DL, MVT::v4i64);
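// For lanes with the sign bit set, convert (Src >> 1) | (Src & 1) instead:
// a halving with round-to-odd, whose converted result is then doubled with
// an fadd below. This keeps u64 -> f32 correctly rounded while only using
// signed converts.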
20886 SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
20887 DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
20888 DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
20889 SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
20890 SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
20891 SmallVector<SDValue, 4> SignCvts(4);
20892 SmallVector<SDValue, 4> Chains(4);
20893 for (int i = 0; i != 4; ++i) {
20894 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
20895 DAG.getIntPtrConstant(i, DL));
20898 DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
20899 {Op.getOperand(0), Elt});
20900 Chains[i] = SignCvts[i].getValue(1);
20902 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
20905 SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
20907 SDValue Slow, Chain;
20909 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
20910 Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
20911 {Chain, SignCvt, SignCvt});
20912 Chain = Slow.getValue(1);
20914 Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
20917 IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
20918 SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
20921 return DAG.getMergeValues({Cvt, Chain}, DL);
20926 static SDValue promoteXINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
20927 bool IsStrict = Op->isStrictFPOpcode();
20928 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20929 SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
20930 MVT VT = Op.getSimpleValueType();
20931 MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
20934 SDValue Rnd = DAG.getIntPtrConstant(0, dl);
20936 return DAG.getNode(
20937 ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
20939 DAG.getNode(Op.getOpcode(), dl, {NVT, MVT::Other}, {Chain, Src}),
20941 return DAG.getNode(ISD::FP_ROUND, dl, VT,
20942 DAG.getNode(Op.getOpcode(), dl, NVT, Src), Rnd);
20945 static bool isLegalConversion(MVT VT, bool IsSigned,
20946 const X86Subtarget &Subtarget) {
20947 if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
20949 if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
20951 if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
20953 if (Subtarget.useAVX512Regs()) {
20954 if (VT == MVT::v16i32)
20956 if (VT == MVT::v8i64 && Subtarget.hasDQI())
20959 if (Subtarget.hasDQI() && Subtarget.hasVLX() &&
20960 (VT == MVT::v2i64 || VT == MVT::v4i64))
20965 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
20966 SelectionDAG &DAG) const {
20967 bool IsStrict = Op->isStrictFPOpcode();
20968 unsigned OpNo = IsStrict ? 1 : 0;
20969 SDValue Src = Op.getOperand(OpNo);
20970 SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
20971 MVT SrcVT = Src.getSimpleValueType();
20972 MVT VT = Op.getSimpleValueType();
20975 if (isSoftFP16(VT))
20976 return promoteXINT_TO_FP(Op, DAG);
20977 else if (isLegalConversion(SrcVT, true, Subtarget))
20980 if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
20981 return LowerWin64_INT128_TO_FP(Op, DAG);
20983 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
20986 if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
20989 if (SrcVT.isVector()) {
20990 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
20991 // Note: since v2f64 is a legal type, we don't need to zero extend the
20992 // source for strict FP.
20994 return DAG.getNode(
20995 X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
20996 {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
20997 DAG.getUNDEF(SrcVT))});
20998 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
20999 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
21000 DAG.getUNDEF(SrcVT)));
21002 if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
21003 return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
21008 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
21009 "Unknown SINT_TO_FP to lower!");
21011 bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
21013 // These are really Legal; return the operand so the caller accepts it as
21014 // Legal.
21015 if (SrcVT == MVT::i32 && UseSSEReg)
21017 if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
21020 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
21022 if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
21025 // SSE doesn't have an i16 conversion so we need to promote.
21026 if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
21027 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
21029 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
21032 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
21035 if (VT == MVT::f128)
21038 SDValue ValueToStore = Src;
21039 if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
21040 // Bitcasting to f64 here allows us to do a single 64-bit store from
21041 // an SSE register, avoiding the store forwarding penalty that would come
21042 // with two 32-bit stores.
21043 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
21045 unsigned Size = SrcVT.getStoreSize();
21046 Align Alignment(Size);
21047 MachineFunction &MF = DAG.getMachineFunction();
21048 auto PtrVT = getPointerTy(MF.getDataLayout());
21049 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
21050 MachinePointerInfo MPI =
21051 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
21052 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
21053 Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
21054 std::pair<SDValue, SDValue> Tmp =
21055 BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);
21058 return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
21063 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
21064 EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
21065 MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
21068 bool useSSE = isScalarFPTypeInSSEReg(DstVT);
21070 Tys = DAG.getVTList(MVT::f80, MVT::Other);
21072 Tys = DAG.getVTList(DstVT, MVT::Other);
21074 SDValue FILDOps[] = {Chain, Pointer};
21076 DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
21077 Alignment, MachineMemOperand::MOLoad);
21078 Chain = Result.getValue(1);
21081 MachineFunction &MF = DAG.getMachineFunction();
21082 unsigned SSFISize = DstVT.getStoreSize();
21084 MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
21085 auto PtrVT = getPointerTy(MF.getDataLayout());
21086 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
21087 Tys = DAG.getVTList(MVT::Other);
21088 SDValue FSTOps[] = {Chain, Result, StackSlot};
21089 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
21090 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
21091 MachineMemOperand::MOStore, SSFISize, Align(SSFISize));
21094 DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
21095 Result = DAG.getLoad(
21096 DstVT, DL, Chain, StackSlot,
21097 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
21098 Chain = Result.getValue(1);
21101 return { Result, Chain };
21104 /// Horizontal vector math instructions may be slower than normal math with
21105 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
21106 /// implementation, and likely shuffle complexity of the alternate sequence.
21107 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
21108 const X86Subtarget &Subtarget) {
21109 bool IsOptimizingSize = DAG.shouldOptForSize();
21110 bool HasFastHOps = Subtarget.hasFastHorizontalOps();
21111 return !IsSingleSource || IsOptimizingSize || HasFastHOps;
21114 /// 64-bit unsigned integer to double expansion.
21115 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
21116 const X86Subtarget &Subtarget) {
21117 // We can't use this algorithm for strict fp. It produces -0.0 instead of +0.0
21118 // when converting 0 while rounding toward negative infinity. The caller
21119 // will fall back to Expand when i64 is legal, or use FILD in 32-bit mode.
21120 assert(!Op->isStrictFPOpcode() && "Expected non-strict uint_to_fp!");
21121 // This algorithm is not obvious. Here is what we're trying to output:
21124 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
21125 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
21127 haddpd %xmm0, %xmm0
21129 pshufd $0x4e, %xmm0, %xmm1
21135 LLVMContext *Context = DAG.getContext();
21137 // Build some magic constants.
21138 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
21139 Constant *C0 = ConstantDataVector::get(*Context, CV0);
21140 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
21141 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));
21143 SmallVector<Constant*,2> CV1;
21145 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
21146 APInt(64, 0x4330000000000000ULL))));
21148 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
21149 APInt(64, 0x4530000000000000ULL))));
21150 Constant *C1 = ConstantVector::get(CV1);
21151 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));
21153 // Load the 64-bit value into an XMM register.
21155 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(0));
21156 SDValue CLod0 = DAG.getLoad(
21157 MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
21158 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
21160 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
21162 SDValue CLod1 = DAG.getLoad(
21163 MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
21164 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
21165 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
21166 // TODO: Are there any fast-math-flags to propagate here?
21167 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
21170 if (Subtarget.hasSSE3() &&
21171 shouldUseHorizontalOp(true, DAG, Subtarget)) {
21172 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
21174 SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
21175 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
21177 Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
21178 DAG.getIntPtrConstant(0, dl));
21182 /// 32-bit unsigned integer to float expansion.
21183 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
21184 const X86Subtarget &Subtarget) {
21185 unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
21187 // FP constant to bias correct the final result.
21188 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
21191 // Load the 32-bit value into an XMM register.
21193 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
21195 // Zero out the upper parts of the register.
21196 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
21198 // Or the load with the bias.
21199 SDValue Or = DAG.getNode(
21200 ISD::OR, dl, MVT::v2i64,
21201 DAG.getBitcast(MVT::v2i64, Load),
21202 DAG.getBitcast(MVT::v2i64,
21203 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
21205 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
21206 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
21208 if (Op.getNode()->isStrictFPOpcode()) {
21209 // Subtract the bias.
21210 // TODO: Are there any fast-math-flags to propagate here?
21211 SDValue Chain = Op.getOperand(0);
21212 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
21213 {Chain, Or, Bias});
21215 if (Op.getValueType() == Sub.getValueType())
21218 // Handle final rounding.
21219 std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
21220 Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
21222 return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
21225 // Subtract the bias.
21226 // TODO: Are there any fast-math-flags to propagate here?
21227 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
21229 // Handle final rounding.
21230 return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());

static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget,
                                     const SDLoc &DL) {
  if (Op.getSimpleValueType() != MVT::v2f64)
    return SDValue();

  bool IsStrict = Op->isStrictFPOpcode();

  SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
  assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");

  if (Subtarget.hasAVX512()) {
    if (!Subtarget.hasVLX()) {
      // Let generic type legalization widen this.
      if (!IsStrict)
        return SDValue();
      // Otherwise pad the integer input with 0s and widen the operation.
      N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                       DAG.getConstant(0, DL, MVT::v2i32));
      SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
                                {Op.getOperand(0), N0});
      SDValue Chain = Res.getValue(1);
      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
                        DAG.getIntPtrConstant(0, DL));
      return DAG.getMergeValues({Res, Chain}, DL);
    }

    // Legalize to v4i32 type.
    N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                     DAG.getUNDEF(MVT::v2i32));
    if (IsStrict)
      return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
                         {Op.getOperand(0), N0});
    return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
  }

  // Zero extend to 2i64, OR with the floating point representation of 2^52.
  // This gives us the floating point equivalent of 2^52 + the i32 integer
  // since double has 52-bits of mantissa. Then subtract 2^52 in floating
  // point leaving just our i32 integers in double format.
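  // Per lane, this computes the equivalent of (illustrative sketch, not part
  // of the original source):
  //   double d = bit_cast<double>(0x4330000000000000ULL | (uint64_t)x);
  //   result  = d - 0x1.0p52;  // exact, since x < 2^32 fits the mantissa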
  SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
  SDValue VBias =
      DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), DL, MVT::v2f64);
  SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
                           DAG.getBitcast(MVT::v2i64, VBias));
  Or = DAG.getBitcast(MVT::v2f64, Or);

  if (IsStrict)
    return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
                       {Op.getOperand(0), Or, VBias});
  return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
}

static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue V = Op->getOperand(IsStrict ? 1 : 0);
  MVT VecIntVT = V.getSimpleValueType();
  assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
         "Unsupported custom type");

  if (Subtarget.hasAVX512()) {
    // With AVX512, but not VLX we need to widen to get a 512-bit result type.
    assert(!Subtarget.hasVLX() && "Unexpected features");
    MVT VT = Op->getSimpleValueType(0);

    // v8i32->v8f64 is legal with AVX512 so just return it.
    if (VT == MVT::v8f64)
      return Op;

    assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
           "Unexpected VT!");
    MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
    MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
    // Need to concat with zero vector for strict fp to avoid spurious
    // exceptions.
    SDValue Tmp =
        IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
    V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
                    DAG.getIntPtrConstant(0, DL));
    SDValue Res, Chain;
    if (IsStrict) {
      Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
                        {Op->getOperand(0), V});
      Chain = Res.getValue(1);
    } else {
      Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
    }

    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                      DAG.getIntPtrConstant(0, DL));

    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, DL);
    return Res;
  }

  if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
      Op->getSimpleValueType(0) == MVT::v4f64) {
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
    Constant *Bias = ConstantFP::get(
        *DAG.getContext(),
        APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
    auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
    SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
    SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
    SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
    SDValue VBias = DAG.getMemIntrinsicNode(
        X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
        MachineMemOperand::MOLoad);

    SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
                             DAG.getBitcast(MVT::v4i64, VBias));
    Or = DAG.getBitcast(MVT::v4f64, Or);

    if (IsStrict)
      return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
                         {Op.getOperand(0), Or, VBias});
    return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
  }

  // The algorithm is the following:
  // #ifdef __SSE4_1__
  //  uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
  //  uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
  //                                 (uint4) 0x53000000, 0xaa);
  // #else
  //  uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
  //  uint4 hi = (v >> 16) | (uint4) 0x53000000;
  // #endif
  //  float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  //  return (float4) lo + fhi;
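  //
  // Why the magic constants (our reading, not spelled out above): 0x4b000000
  // is the float 2^23, so "lo" reinterpreted as float equals
  // 2^23 + (v & 0xffff); 0x53000000 is the float 2^39, so "hi" equals
  // 2^39 + (v >> 16) * 2^16. fhi is therefore (v >> 16) * 2^16 - 2^23, and
  // adding lo cancels the remaining 2^23 bias, leaving exactly (float)v
  // before rounding of the final add.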
  //
  // We are using the vectorized version of the above algorithm.
  bool Is128 = VecIntVT == MVT::v4i32;
  MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something else than the supported type, e.g., to v4f64,
  // abort early.
  if (VecFloatVT != Op->getSimpleValueType(0))
    return SDValue();

  // In the #ifdef/#else code, we have in common:
  // - The vector of constants:
  // -- 0x4b000000
  // -- 0x53000000
  // - A shift:
  // -- v >> 16

  // Create the splat vector for 0x4b000000.
  SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
  // Create the splat vector for 0x53000000.
  SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);

  // Create the right shift.
  SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
  SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);

  SDValue Low, High;
  if (Subtarget.hasSSE41()) {
    MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
    // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
    SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
    SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
    // Low will be bitcasted right away, so do not bother bitcasting back to its
    // original type.
    Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
                      VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
    // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
    //                                 (uint4) 0x53000000, 0xaa);
    SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
    SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
    // High will be bitcasted right away, so do not bother bitcasting back to
    // its original type.
    High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
                       VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
  } else {
    SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
    // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
    SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
    Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);

    // uint4 hi = (v >> 16) | (uint4) 0x53000000;
    High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
  }

  // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
  SDValue VecCstFSub = DAG.getConstantFP(
      APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);

  // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  // NOTE: By using fsub of a positive constant instead of fadd of a negative
  // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
  // enabled. See PR24512.
  SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
  // return (float4) lo + fhi;
  if (IsStrict) {
    SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
                                {Op.getOperand(0), HighBitcast, VecCstFSub});
    return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
                       {FHigh.getValue(1), LowBitcast, FHigh});
  }

  SDValue FHigh =
      DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
  return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}

static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
  SDValue N0 = Op.getOperand(OpNo);
  MVT SrcVT = N0.getSimpleValueType();
  SDLoc dl(Op);

  switch (SrcVT.SimpleTy) {
  default:
    llvm_unreachable("Custom UINT_TO_FP is not supported!");
  case MVT::v2i32:
    return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
  case MVT::v4i32:
  case MVT::v8i32:
    return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
  case MVT::v2i64:
  case MVT::v4i64:
    return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
  }
}

SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Src = Op.getOperand(OpNo);
  SDLoc dl(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  MVT SrcVT = Src.getSimpleValueType();
  MVT DstVT = Op->getSimpleValueType(0);
  SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

  // Bail out when we don't have native conversion instructions.
  if (DstVT == MVT::f128)
    return SDValue();

  if (isSoftFP16(DstVT))
    return promoteXINT_TO_FP(Op, DAG);
  else if (isLegalConversion(SrcVT, false, Subtarget))
    return Op;

  if (DstVT.isVector())
    return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);

  if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
    return LowerWin64_INT128_TO_FP(Op, DAG);

  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
    return Extract;

  if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
      (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
    // Conversions from unsigned i32 to f32/f64 are legal,
    // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
    return Op;
  }

  // Promote i32 to i64 and use a signed conversion on 64-bit targets.
  if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
    Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
                         {Chain, Src});
    return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
  }

  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
    return V;
  if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
    return V;

  // The transform for i64->f64 isn't correct for 0 when rounding to negative
  // infinity. It produces -0.0, so disable it under strictfp.
  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && Subtarget.hasSSE2() &&
      !IsStrict)
    return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
  // The transform for i32->f64/f32 isn't correct for 0 when rounding to
  // negative infinity, so disable it under strictfp and use FILD instead.
  if (SrcVT == MVT::i32 && Subtarget.hasSSE2() && DstVT != MVT::f80 &&
      !IsStrict)
    return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
  if (Subtarget.is64Bit() && SrcVT == MVT::i64 &&
      (DstVT == MVT::f32 || DstVT == MVT::f64))
    return SDValue();

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  Align SlotAlign(8);
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
  if (SrcVT == MVT::i32) {
    SDValue OffsetSlot =
        DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), dl);
    SDValue Store1 = DAG.getStore(Chain, dl, Src, StackSlot, MPI, SlotAlign);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
                                  OffsetSlot, MPI.getWithOffset(4), SlotAlign);
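    // The i32 value now occupies the low half of the slot and the high half
    // is zero, so the slot holds zext(Src) as an i64; the signed FILD built
    // below is then exact for the whole u32 range (a note on this sequence).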
    std::pair<SDValue, SDValue> Tmp =
        BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, SlotAlign, DAG);
    if (IsStrict)
      return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);

    return Tmp.first;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue ValueToStore = Src;
  if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
    // Bitcasting to f64 here allows us to do a single 64-bit store from
    // an SSE register, avoiding the store forwarding penalty that would come
    // with two 32-bit stores.
    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
  }
  SDValue Store =
      DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, SlotAlign);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative. We must be careful to do the computation in x87 extended
  // precision, not in SSE.
  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot };
  SDValue Fild =
      DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
                              SlotAlign, MachineMemOperand::MOLoad);
  Chain = Fild.getValue(1);

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(
      dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
      Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);

  // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
  APInt FF(64, 0x5F80000000000000ULL);
  SDValue FudgePtr = DAG.getConstantPool(
      ConstantInt::get(*DAG.getContext(), FF), PtrVT);
  Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0, dl);
  SDValue Four = DAG.getIntPtrConstant(4, dl);
  SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
  FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
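  // A note on the layout (our reading): the pool constant is stored
  // little-endian, so offset 0 holds the f32 0.0 and offset 4 holds the bits
  // 0x5F800000, i.e. the f32 value 2^64. FILD treated the u64 input as
  // signed, producing Value - 2^64 when the sign bit was set; adding the
  // selected fudge factor below restores the unsigned value.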

  // Load the value out, extending it from f32 to f80.
  SDValue Fudge = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
      CPAlignment);
  Chain = Fudge.getValue(1);
  // Extend everything to 80 bits to force it to be done on x87.
  // TODO: Are there any fast-math-flags to propagate here?
  if (IsStrict) {
    SDValue Add = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::f80, MVT::Other},
                              {Chain, Fild, Fudge});
    // STRICT_FP_ROUND can't handle equal types.
    if (DstVT == MVT::f80)
      return Add;
    return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
                       {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
  }
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
                     DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
}

// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
// just return an SDValue().
// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
// to i16, i32 or i64, and we lower it to a legal sequence and return the
// result.
SDValue
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                   bool IsSigned, SDValue &Chain) const {
  bool IsStrict = Op->isStrictFPOpcode();
  SDLoc DL(Op);

  EVT DstTy = Op.getValueType();
  SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
  EVT TheVT = Value.getValueType();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
    // f16 must be promoted before using the lowering in this routine.
    // fp128 does not use this lowering.
    return SDValue();
  }

  // If using FIST to compute an unsigned i64, we'll need some fixup
  // to handle values above the maximum signed i64. A FIST is always
  // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
  bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;

  // FIXME: This does not generate an invalid exception if the input does not
  // fit in i32. PR44019
  if (!IsSigned && DstTy != MVT::i64) {
    // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
    // The low 32 bits of the fist result will have the correct uint32 result.
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_INT to lower!");

  // We lower FP->int64 into FISTP64 followed by a load from a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = DstTy.getStoreSize();
  int SSFI =
      MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);

  Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

  SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.

  if (UnsignedFixup) {
    //
    // Conversion to unsigned i64 is implemented with a select,
    // depending on whether the source value fits in the range
    // of a signed i64. Let Thresh be the FP equivalent of
    // 0x8000000000000000ULL.
    //
    //  Adjust = (Value >= Thresh) ? 0x8000000000000000 : 0;  // integer fixup
    //  FltOfs = (Value >= Thresh) ? Thresh : 0.0;            // FP offset
    //  FistSrc = (Value - FltOfs);
    //  Fist-to-mem64 FistSrc
    //  Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
    //  to XOR'ing the high 32 bits with Adjust.
    //
    // Being a power of 2, Thresh is exactly representable in all FP formats.
    // For X87 we'd like to use the smallest FP type for this constant, but
    // for DAG type consistency we have to match the FP operand type.

    APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
    LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
    bool LosesInfo = false;
    if (TheVT == MVT::f64)
      // The rounding mode is irrelevant as the conversion should be exact.
      Status = Thresh.convert(APFloat::IEEEdouble(),
                              APFloat::rmNearestTiesToEven, &LosesInfo);
    else if (TheVT == MVT::f80)
      Status = Thresh.convert(APFloat::x87DoubleExtended(),
                              APFloat::rmNearestTiesToEven, &LosesInfo);

    assert(Status == APFloat::opOK && !LosesInfo &&
           "FP conversion should have been exact");

    SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);

    EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), TheVT);
    SDValue Cmp;
    if (IsStrict) {
      Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE, Chain,
                         /*IsSignaling*/ true);
      Chain = Cmp.getValue(1);
    } else {
      Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE);
    }

    // Our preferred lowering of
    //
    // (Value >= Thresh) ? 0x8000000000000000ULL : 0
    //
    // is
    //
    // (Value >= Thresh) << 63
    //
    // but since we can get here after LegalOperations, DAGCombine might do the
    // wrong thing if we create a select. So, directly create the preferred
    // version.
    SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Cmp);
    SDValue Const63 = DAG.getConstant(63, DL, MVT::i8);
    Adjust = DAG.getNode(ISD::SHL, DL, MVT::i64, Zext, Const63);
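    // Illustrative example of the whole fixup: for Value = 2^63 + 4096
    // (representable as f64), Cmp is true, so FltOfs = Thresh (2^63) and
    // FistSrc = 4096.0; the FIST stores 4096, and XOR'ing with Adjust
    // (1 << 63) restores 2^63 + 4096.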

    SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp, ThreshVal,
                                   DAG.getConstantFP(0.0, DL, TheVT));

    if (IsStrict) {
      Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
                          { Chain, Value, FltOfs });
      Chain = Value.getValue(1);
    } else
      Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
  }

  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);

  // FIXME This causes a redundant load/store if the SSE-class value is already
  // in memory, such as if it is on the callstack.
  if (isScalarFPTypeInSSEReg(TheVT)) {
    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
    SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
    SDValue Ops[] = { Chain, StackSlot };

    unsigned FLDSize = TheVT.getStoreSize();
    assert(FLDSize <= MemSize && "Stack slot not big enough");
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
    Chain = Value.getValue(1);
  }

  // Build the FP_TO_INT*_IN_MEM
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
  SDValue Ops[] = { Chain, Value, StackSlot };
  SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
                                         DAG.getVTList(MVT::Other),
                                         Ops, DstTy, MMO);

  SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
  Chain = Res.getValue(1);

  // If we need an unsigned fixup, XOR the result with Adjust.
  if (UnsignedFixup)
    Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);

  return Res;
}

static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();

  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
  assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
         "Unexpected extension opcode");
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Expected same number of elements");
  assert((VT.getVectorElementType() == MVT::i16 ||
          VT.getVectorElementType() == MVT::i32 ||
          VT.getVectorElementType() == MVT::i64) &&
         "Unexpected element type");
  assert((InVT.getVectorElementType() == MVT::i8 ||
          InVT.getVectorElementType() == MVT::i16 ||
          InVT.getVectorElementType() == MVT::i32) &&
         "Unexpected element type");

  unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);

  if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
    assert(InVT == MVT::v32i8 && "Unexpected VT!");
    return splitVectorIntUnary(Op, DAG);
  }

  if (Subtarget.hasInt256())
    return DAG.getNode(ExtendInVecOpc, dl, VT, In);

  // Optimize vectors in AVX mode:
  //
  //   v8i16 -> v8i32
  //   Use vpmovzwd for 4 lower elements  v8i16 -> v4i32.
  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
  //   Concat upper and lower parts.
  //
  //   v4i32 -> v4i64
  //   Use vpmovzdq for 4 lower elements  v4i32 -> v2i64.
  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
  //   Concat upper and lower parts.
  //
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);

  // Short-circuit if we can determine that each 128-bit half is the same value.
  // Otherwise, this is difficult to match and optimize.
  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
    if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);

  SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
  SDValue Undef = DAG.getUNDEF(InVT);
  bool NeedZero = Opc == ISD::ZERO_EXTEND;
  SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
  OpHi = DAG.getBitcast(HalfVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}

// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
                                   const SDLoc &dl, SelectionDAG &DAG) {
  assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
                           DAG.getIntPtrConstant(0, dl));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
                           DAG.getIntPtrConstant(8, dl));
  Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
  Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}

static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
  SDLoc DL(Op);
  unsigned NumElts = VT.getVectorNumElements();

  // For all vectors but vXi8, we can just emit a sign_extend and a shift.
  // This avoids a constant pool load.
  if (VT.getVectorElementType() != MVT::i8) {
    SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
    return DAG.getNode(ISD::SRL, DL, VT, Extend,
                       DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
  }
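  // Illustrative example: for v8i1 -> v8i32, a true lane sign-extends to
  // 0xFFFFFFFF and the logical shift right by 31 leaves 1, while a false
  // lane stays 0, i.e. exactly a zero extension, without the constant-pool
  // load of ones that the vselect path below would need.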

  // Extend VT if BWI is not supported.
  MVT ExtVT = VT;
  if (!Subtarget.hasBWI()) {
    // If v16i32 is to be avoided, we'll need to split and concatenate.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
      return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);

    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
  }

  // Widen to 512-bits if VLX is not supported.
  MVT WideVT = ExtVT;
  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
    NumElts *= 512 / ExtVT.getSizeInBits();
    InVT = MVT::getVectorVT(MVT::i1, NumElts);
    In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
                     In, DAG.getIntPtrConstant(0, DL));
    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
  }

  SDValue One = DAG.getConstant(1, DL, WideVT);
  SDValue Zero = DAG.getConstant(0, DL, WideVT);

  SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);

  // Truncate if we had to extend above.
  if (VT.getVectorElementType() != ExtVT.getVectorElementType()) {
    WideVT = MVT::getVectorVT(MVT::i8, NumElts);
    SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
  }

  // Extract back to 128/256-bit if we widened.
  if (VT != WideVT)
    SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
                              DAG.getIntPtrConstant(0, DL));

  return SelectedVal;
}

static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  if (SVT.getVectorElementType() == MVT::i1)
    return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);

  assert(Subtarget.hasAVX() && "Expected AVX support");
  return LowerAVXExtend(Op, DAG, Subtarget);
}

/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
/// It makes use of the fact that vectors with enough leading sign/zero bits
/// prevent the PACKSS/PACKUS from saturating the results.
/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
/// within each 128-bit lane.
static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
                                      const SDLoc &DL, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
         "Unexpected PACK opcode");
  assert(DstVT.isVector() && "VT not a vector?");

  // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT SrcVT = In.getValueType();

  // No truncation required, we might get here due to recursive calls.
  if (SrcVT == DstVT)
    return In;

  // We only support vector truncation to 64 bits or greater from a
  // 128 bits or greater source.
  unsigned DstSizeInBits = DstVT.getSizeInBits();
  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
  if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
    return SDValue();

  unsigned NumElems = SrcVT.getVectorNumElements();
  if (!isPowerOf2_32(NumElems))
    return SDValue();

  LLVMContext &Ctx = *DAG.getContext();
  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
  assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");

  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);

  // Pack to the largest type possible:
  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
  EVT InVT = MVT::i16, OutVT = MVT::i8;
  if (SrcVT.getScalarSizeInBits() > 16 &&
      (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
    InVT = MVT::i32;
    OutVT = MVT::i16;
  }

  // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
  if (SrcVT.is128BitVector()) {
    InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
    OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
    In = DAG.getBitcast(InVT, In);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, DAG.getUNDEF(InVT));
    Res = extractSubVector(Res, 0, DAG, DL, 64);
    return DAG.getBitcast(DstVT, Res);
  }
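  // Illustrative example of the 128-bit case above: truncating v4i32 -> v4i16
  // with enough sign bits becomes a single PACKSSDW (or PACKUSDW on SSE4.1)
  // of the source against undef; the low four i16 lanes of the 128-bit result
  // hold the packed values, and the extract keeps just those low 64 bits.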

  // Split lower/upper subvectors.
  SDValue Lo, Hi;
  std::tie(Lo, Hi) = splitVector(In, DAG, DL);

  unsigned SubSizeInBits = SrcSizeInBits / 2;
  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());

  // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
    return DAG.getBitcast(DstVT, Res);
  }

  // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
  // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
  if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);

    // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
    // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
    // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
    SmallVector<int, 64> Mask;
    int Scale = 64 / OutVT.getScalarSizeInBits();
    narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
    Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);

    if (DstVT.is256BitVector())
      return DAG.getBitcast(DstVT, Res);

    // If 512bit -> 128bit truncate another stage.
    EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
    Res = DAG.getBitcast(PackedVT, Res);
    return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
  }

  // Recursively pack lower/upper subvectors, concat result and pack again.
  assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
  Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
  Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);

  PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
  return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
}

static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();

  assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");

  // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
  unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
  if (InVT.getScalarSizeInBits() <= 16) {
    if (Subtarget.hasBWI()) {
      // legal, will go to VPMOVB2M, VPMOVW2M
      if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
        // We need to shift to get the lsb into sign position.
        // Shifting packed bytes is not supported natively, so bitcast to word.
        MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
        In = DAG.getNode(ISD::SHL, DL, ExtVT,
                         DAG.getBitcast(ExtVT, In),
                         DAG.getConstant(ShiftInx, DL, ExtVT));
        In = DAG.getBitcast(InVT, In);
      }
      return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
                          In, ISD::SETGT);
    }
    // Use TESTD/Q, extended vector to packed dword/qword.
    assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
           "Unexpected vector type.");
    unsigned NumElts = InVT.getVectorNumElements();
    assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
    // We need to change to a wider element type that we have support for.
    // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
    // For 16 element vectors we extend to v16i32 unless we are explicitly
    // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
    // we need to split into two 8 element vectors which we can extend to v8i32,
    // truncate and concat the results. There's an additional complication if
    // the original type is v16i8. In that case we can't split the v16i8
    // directly, so we need to shuffle high elements to low and use
    // sign_extend_vector_inreg.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
      SDValue Lo, Hi;
      if (InVT == MVT::v16i8) {
        Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
        Hi = DAG.getVectorShuffle(
            MVT::v16i8, DL, In, In,
            {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
        Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
      } else {
        assert(InVT == MVT::v16i16 && "Unexpected VT!");
        Lo = extract128BitVector(In, 0, DAG, DL);
        Hi = extract128BitVector(In, 8, DAG, DL);
      }
      // We're split now, just emit two truncates and a concat. The two
      // truncates will trigger legalization to come back to this function.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
      Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
    }
    // We either have 8 elements or we're allowed to use 512-bit vectors.
    // If we have VLX, we want to use the narrowest vector that can get the
    // job done so we use vXi32.
    MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
    MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
    In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
    InVT = ExtVT;
    ShiftInx = InVT.getScalarSizeInBits() - 1;
  }

  if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
    // We need to shift to get the lsb into sign position.
    In = DAG.getNode(ISD::SHL, DL, InVT, In,
                     DAG.getConstant(ShiftInx, DL, InVT));
  }
  // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
  if (Subtarget.hasDQI())
    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
  return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
}

SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();
  unsigned InNumEltBits = InVT.getScalarSizeInBits();

  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Invalid TRUNCATE operation");

  // If we're called by the type legalizer, handle a few cases.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(InVT)) {
    if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
        VT.is128BitVector()) {
      assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
             "Unexpected subtarget!");
      // The default behavior is to truncate one step, concatenate, and then
      // truncate the remainder. We'd rather produce two 64-bit results and
      // concatenate those.
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(In, DL);

      EVT LoVT, HiVT;
      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);

      Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
      Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
    }

    // Otherwise let default legalization handle it.
    return SDValue();
  }

  if (VT.getVectorElementType() == MVT::i1)
    return LowerTruncateVecI1(Op, DAG, Subtarget);

  // vpmovqb/w/d, vpmovdb/w, vpmovwb
  if (Subtarget.hasAVX512()) {
    if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
      assert(VT == MVT::v32i8 && "Unexpected VT!");
      return splitVectorIntUnary(Op, DAG);
    }

    // Word-to-byte truncation is only legal with BWI. Otherwise we have to
    // promote to v16i32 and then truncate that. But we should only do that if
    // we haven't been asked to avoid 512-bit vectors. The actual promotion to
    // v16i32 will be handled by isel patterns.
    if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
        Subtarget.canExtendTo512DQ())
      return Op;
  }

  unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;

  // Truncate with PACKUS if we are truncating a vector with leading zero bits
  // that extend all the way to the packed/truncated value.
  // Pre-SSE41 we can only use PACKUSWB.
  KnownBits Known = DAG.computeKnownBits(In);
  if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
    if (SDValue V =
            truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
      return V;

  // Truncate with PACKSS if we are truncating a vector with sign-bits that
  // extend all the way to the packed/truncated value.
  if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
    if (SDValue V =
            truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
      return V;
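  // Illustrative example: a v8i32 -> v8i16 truncate whose source has at least
  // 17 sign bits per element satisfies the check above, so PACKSSDW cannot
  // saturate and behaves as a pure truncation of each lane.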

  // Handle truncation of V256 to V128 using shuffles.
  assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");

  if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
    // On AVX2, v4i64 -> v4i32 becomes VPERMD.
    if (Subtarget.hasInt256()) {
      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
      In = DAG.getBitcast(MVT::v8i32, In);
      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
                         DAG.getIntPtrConstant(0, DL));
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(2, DL));
    static const int ShufMask[] = {0, 2, 4, 6};
    return DAG.getVectorShuffle(VT, DL, DAG.getBitcast(MVT::v4i32, OpLo),
                                DAG.getBitcast(MVT::v4i32, OpHi), ShufMask);
  }

  if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
    if (Subtarget.hasInt256()) {
      // The PSHUFB mask:
      static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
                                      -1, -1, -1, -1, -1, -1, -1, -1,
                                      16, 17, 20, 21, 24, 25, 28, 29,
                                      -1, -1, -1, -1, -1, -1, -1, -1 };
      In = DAG.getBitcast(MVT::v32i8, In);
      In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
      In = DAG.getBitcast(MVT::v4i64, In);

      static const int ShufMask2[] = {0, 2, -1, -1};
      In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                       DAG.getIntPtrConstant(0, DL));
      return DAG.getBitcast(MVT::v8i16, In);
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(4, DL));

    // The PSHUFB mask:
    static const int ShufMask1[] = {0, 2, 4, 6, -1, -1, -1, -1};

    OpLo = DAG.getBitcast(MVT::v8i16, OpLo);
    OpHi = DAG.getBitcast(MVT::v8i16, OpHi);

    OpLo = DAG.getVectorShuffle(MVT::v8i16, DL, OpLo, OpLo, ShufMask1);
    OpHi = DAG.getVectorShuffle(MVT::v8i16, DL, OpHi, OpHi, ShufMask1);

    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);

    // The MOVLHPS mask:
    static const int ShufMask2[] = {0, 1, 4, 5};
    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
    return DAG.getBitcast(MVT::v8i16, res);
  }

  if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
    // Use an AND to zero upper bits for PACKUS.
    In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));

    SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
                               DAG.getIntPtrConstant(8, DL));
    return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
  }

  llvm_unreachable("All 256->128 cases should have been handled above!");
}

// We can leverage the specific way the "cvttps2dq/cvttpd2dq" instruction
// behaves on out of range inputs to generate optimized conversions.
static SDValue expandFP_TO_UINT_SSE(MVT VT, SDValue Src, const SDLoc &dl,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT SrcVT = Src.getSimpleValueType();
  unsigned DstBits = VT.getScalarSizeInBits();
  assert(DstBits == 32 && "expandFP_TO_UINT_SSE - only vXi32 supported");

  // Calculate the converted result for values in the range 0 to
  // 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
  SDValue Small = DAG.getNode(X86ISD::CVTTP2SI, dl, VT, Src);
  SDValue Big =
      DAG.getNode(X86ISD::CVTTP2SI, dl, VT,
                  DAG.getNode(ISD::FSUB, dl, SrcVT, Src,
                              DAG.getConstantFP(2147483648.0f, dl, SrcVT)));

  // The "CVTTP2SI" instruction conveniently sets the sign bit if
  // and only if the value was out of range. So we can use that
  // as our indicator that we should rather use "Big" instead of "Small".
  //
  // Use "Small" if "IsOverflown" has all bits cleared
  // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
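  //
  // Worked example (illustrative values): for a lane holding 2500000000.0f
  // (exactly representable in f32), Small = 0x80000000 (the out-of-range
  // indefinite result) and Big = cvt(2500000000.0f - 2^31) = 352516352. The
  // sign-splat of Small selects Big, and 0x80000000 | 352516352 == 2500000000.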

  // AVX1 can't use the signsplat masking for 256-bit vectors - we have to
  // use the slightly slower blendv select instead.
  if (VT == MVT::v8i32 && !Subtarget.hasAVX2()) {
    SDValue Overflow = DAG.getNode(ISD::OR, dl, VT, Small, Big);
    return DAG.getNode(X86ISD::BLENDV, dl, VT, Small, Overflow, Small);
  }

  SDValue IsOverflown =
      DAG.getNode(X86ISD::VSRAI, dl, VT, Small,
                  DAG.getTargetConstant(DstBits - 1, dl, MVT::i8));
  return DAG.getNode(ISD::OR, dl, VT, Small,
                     DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
}

SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
                  Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
  MVT VT = Op->getSimpleValueType(0);
  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
  SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
  MVT SrcVT = Src.getSimpleValueType();
  SDLoc dl(Op);

  SDValue Res;
  if (isSoftFP16(SrcVT)) {
    MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
    if (IsStrict)
      return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
                         {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
                                             {NVT, MVT::Other}, {Chain, Src})});
    return DAG.getNode(Op.getOpcode(), dl, VT,
                       DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
  } else if (isTypeLegal(SrcVT) && isLegalConversion(VT, IsSigned, Subtarget)) {
    return Op;
  }

  if (VT.isVector()) {
    if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
      MVT ResVT = MVT::v4i32;
      MVT TruncVT = MVT::v4i1;
      unsigned Opc;
      if (IsStrict)
        Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
      else
        Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;

      if (!IsSigned && !Subtarget.hasVLX()) {
        assert(Subtarget.useAVX512Regs() && "Unexpected features!");
        // Widen to 512-bits.
        ResVT = MVT::v8i32;
        TruncVT = MVT::v8i1;
        Opc = Op.getOpcode();
        // Need to concat with zero vector for strict fp to avoid spurious
        // exceptions.
        // TODO: Should we just do this for non-strict as well?
        SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
                               : DAG.getUNDEF(MVT::v8f64);
        Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
                          DAG.getIntPtrConstant(0, dl));
      }
      if (IsStrict) {
        Res = DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(Opc, dl, ResVT, Src);
      }

      Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
                        DAG.getIntPtrConstant(0, dl));
      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
      if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
        return Op;

      MVT ResVT = VT;
      MVT EleVT = VT.getVectorElementType();
      if (EleVT != MVT::i64)
        ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;

      if (SrcVT != MVT::v8f16) {
        SDValue Tmp =
            IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
        SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
        Ops[0] = Src;
        Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
      }

      if (IsStrict) {
        Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
                                   : X86ISD::STRICT_CVTTP2UI,
                          dl, {ResVT, MVT::Other}, {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl,
                          ResVT, Src);
      }

      // TODO: Need to add exception check code for strict FP.
      if (EleVT.getSizeInBits() < 16) {
        ResVT = MVT::getVectorVT(EleVT, 8);
        Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
      }

      if (ResVT != VT)
        Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
                          DAG.getIntPtrConstant(0, dl));

      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    if (VT == MVT::v8i16 && (SrcVT == MVT::v8f32 || SrcVT == MVT::v8f64)) {
      if (IsStrict) {
        Res = DAG.getNode(IsSigned ? ISD::STRICT_FP_TO_SINT
                                   : ISD::STRICT_FP_TO_UINT,
                          dl, {MVT::v8i32, MVT::Other}, {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl,
                          MVT::v8i32, Src);
      }

      // TODO: Need to add exception check code for strict FP.
      Res = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i16, Res);

      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
    if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
      assert(!IsSigned && "Expected unsigned conversion!");
      assert(Subtarget.useAVX512Regs() && "Requires avx512f");
      return Op;
    }

    // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
    if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
        (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32) &&
        Subtarget.useAVX512Regs()) {
      assert(!IsSigned && "Expected unsigned conversion!");
      assert(!Subtarget.hasVLX() && "Unexpected features!");
      MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
      MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
      // Need to concat with zero vector for strict fp to avoid spurious
      // exceptions.
      // TODO: Should we just do this for non-strict as well?
      SDValue Tmp =
          IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
                        DAG.getIntPtrConstant(0, dl));

      if (IsStrict) {
        Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
                          {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
      }

      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
                        DAG.getIntPtrConstant(0, dl));

      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
    if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
        (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32) &&
        Subtarget.useAVX512Regs() && Subtarget.hasDQI()) {
      assert(!Subtarget.hasVLX() && "Unexpected features!");
      MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
      // Need to concat with zero vector for strict fp to avoid spurious
      // exceptions.
      // TODO: Should we just do this for non-strict as well?
      SDValue Tmp =
          IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
                        DAG.getIntPtrConstant(0, dl));

      if (IsStrict) {
        Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
                          {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
      }

      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
                        DAG.getIntPtrConstant(0, dl));

      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
      if (!Subtarget.hasVLX()) {
        // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the
        // type legalizer and then widened again by vector op legalization.
        if (!IsStrict)
          return SDValue();

        SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
        SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
                                  {Src, Zero, Zero, Zero});
        Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
                          {Chain, Tmp});
        SDValue Chain = Tmp.getValue(1);
        Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
                          DAG.getIntPtrConstant(0, dl));
        return DAG.getMergeValues({Tmp, Chain}, dl);
      }

      assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
      SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
                                DAG.getUNDEF(MVT::v2f32));
      if (IsStrict) {
        unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
                                : X86ISD::STRICT_CVTTP2UI;
        return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
      }
      unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
      return DAG.getNode(Opc, dl, VT, Tmp);
    }

    // Generate optimized instructions for pre AVX512 unsigned conversions from
    // vXf32 to vXi32.
    if ((VT == MVT::v4i32 && SrcVT == MVT::v4f32) ||
        (VT == MVT::v4i32 && SrcVT == MVT::v4f64) ||
        (VT == MVT::v8i32 && SrcVT == MVT::v8f32)) {
      assert(!IsSigned && "Expected unsigned conversion!");
      return expandFP_TO_UINT_SSE(VT, Src, dl, DAG, Subtarget);
    }

    return SDValue();
  }

  assert(!VT.isVector());

  bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);

  if (!IsSigned && UseSSEReg) {
    // Conversions from f32/f64 with AVX512 should be legal.
    if (Subtarget.hasAVX512())
      return Op;

    // We can leverage the specific way the "cvttss2si/cvttsd2si" instruction
    // behaves on out of range inputs to generate optimized conversions.
    if (!IsStrict && ((VT == MVT::i32 && !Subtarget.is64Bit()) ||
                      (VT == MVT::i64 && Subtarget.is64Bit()))) {
      unsigned DstBits = VT.getScalarSizeInBits();
      APInt UIntLimit = APInt::getSignMask(DstBits);
      SDValue FloatOffset = DAG.getNode(ISD::UINT_TO_FP, dl, SrcVT,
                                        DAG.getConstant(UIntLimit, dl, VT));
      MVT SrcVecVT = MVT::getVectorVT(SrcVT, 128 / SrcVT.getScalarSizeInBits());

      // Calculate the converted result for values in the range:
      // (i32) 0 to 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
      // (i64) 0 to 2^63-1 ("Small") and from 2^63 to 2^64-1 ("Big").
      SDValue Small =
          DAG.getNode(X86ISD::CVTTS2SI, dl, VT,
                      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT, Src));
      SDValue Big = DAG.getNode(
          X86ISD::CVTTS2SI, dl, VT,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT,
                      DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FloatOffset)));

      // The "CVTTS2SI" instruction conveniently sets the sign bit if
      // and only if the value was out of range. So we can use that
      // as our indicator that we should rather use "Big" instead of "Small".
      //
      // Use "Small" if "IsOverflown" has all bits cleared
      // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
      SDValue IsOverflown = DAG.getNode(
          ISD::SRA, dl, VT, Small, DAG.getConstant(DstBits - 1, dl, MVT::i8));
      return DAG.getNode(ISD::OR, dl, VT, Small,
                         DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
    }

    // Use default expansion for i64.
    if (VT == MVT::i64)
      return SDValue();

    assert(VT == MVT::i32 && "Unexpected VT!");

    // Promote i32 to i64 and use a signed operation on 64-bit targets.
    // FIXME: This does not generate an invalid exception if the input does not
    // fit in i32. PR44019
    if (Subtarget.is64Bit()) {
      if (IsStrict) {
        Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i64, MVT::Other},
                          {Chain, Src});
        Chain = Res.getValue(1);
      } else
        Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);

      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
    // use fisttp which will be handled later.
    if (!Subtarget.hasSSE3())
      return SDValue();
  }

  // Promote i16 to i32 if we can use a SSE operation or the type is f128.
  // FIXME: This does not generate an invalid exception if the input does not
  // fit in i16. PR44019
  if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
    assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
    if (IsStrict) {
      Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i32, MVT::Other},
                        {Chain, Src});
      Chain = Res.getValue(1);
    } else
      Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);

    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, dl);
    return Res;
  }

  // If this is a FP_TO_SINT using SSEReg we're done.
  if (UseSSEReg && IsSigned)
    return Op;

  // fp128 needs to use a libcall.
  if (SrcVT == MVT::f128) {
    RTLIB::Libcall LC;
    if (IsSigned)
      LC = RTLIB::getFPTOSINT(SrcVT, VT);
    else
      LC = RTLIB::getFPTOUINT(SrcVT, VT);

    MakeLibCallOptions CallOptions;
    std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
                                                  dl, Chain);

    if (IsStrict)
      return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);

    return Tmp.first;
  }

  // Fall back to X87.
  if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
    if (IsStrict)
      return DAG.getMergeValues({V, Chain}, dl);
    return V;
  }

  llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
}

SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();

  if (SrcVT == MVT::f16)
    return SDValue();

  // If the source is in an SSE register, the node is Legal.
  if (isScalarFPTypeInSSEReg(SrcVT))
    return Op;

  return LRINT_LLRINTHelper(Op.getNode(), DAG);
}

SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
                                              SelectionDAG &DAG) const {
  EVT DstVT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
    // f16 must be promoted before using the lowering in this routine.
    // fp128 does not use this lowering.
    return SDValue();
  }

  SDLoc DL(N);
  SDValue Chain = DAG.getEntryNode();

  bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);

  // If we're converting from SSE, the stack slot needs to hold both types.
  // Otherwise it only needs to hold the DstVT.
  EVT OtherVT = UseSSE ? SrcVT : DstVT;
  SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);

  if (UseSSE) {
    assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
    Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
    SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
    SDValue Ops[] = { Chain, StackPtr };

    Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
                                  /*Align*/ None, MachineMemOperand::MOLoad);
    Chain = Src.getValue(1);
  }
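  // Our summary of this path: X86ISD::FIST consumes an x87 register, so an
  // SSE-class source takes a round trip through memory above (store the SSE
  // value, FLD it into an f80 register) before the FIST below stores the
  // rounded integer result.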

  SDValue StoreOps[] = { Chain, Src, StackPtr };
  Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
                                  StoreOps, DstVT, MPI, /*Align*/ None,
                                  MachineMemOperand::MOStore);

  return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
}

SDValue
X86TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const {
  // This is based on the TargetLowering::expandFP_TO_INT_SAT implementation,
  // but making use of X86 specifics to produce better instruction sequences.
  SDNode *Node = Op.getNode();
  bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
  unsigned FpToIntOpcode = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
  SDLoc dl(SDValue(Node, 0));
  SDValue Src = Node->getOperand(0);

  // There are three types involved here: SrcVT is the source floating point
  // type, DstVT is the type of the result, and TmpVT is the result of the
  // intermediate FP_TO_*INT operation we'll use (which may be a promotion of
  // DstVT).
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);
  EVT TmpVT = DstVT;

  // This code is only for floats and doubles. Fall back to generic code for
  // anything else.
  if (!isScalarFPTypeInSSEReg(SrcVT) || isSoftFP16(SrcVT))
    return SDValue();

  EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
  unsigned SatWidth = SatVT.getScalarSizeInBits();
  unsigned DstWidth = DstVT.getScalarSizeInBits();
  unsigned TmpWidth = TmpVT.getScalarSizeInBits();
  assert(SatWidth <= DstWidth && SatWidth <= TmpWidth &&
         "Expected saturation width smaller than result width");

  // Promote result of FP_TO_*INT to at least 32 bits.
  if (TmpWidth < 32) {
    TmpVT = MVT::i32;
    TmpWidth = 32;
  }

  // Promote conversions to unsigned 32-bit to 64-bit, because it will allow
  // us to use a native signed conversion instead.
  if (SatWidth == 32 && !IsSigned && Subtarget.is64Bit()) {
    TmpVT = MVT::i64;
    TmpWidth = 64;
  }

  // If the saturation width is smaller than the size of the temporary result,
  // we can always use signed conversion, which is native.
  if (SatWidth < TmpWidth)
    FpToIntOpcode = ISD::FP_TO_SINT;

  // Determine minimum and maximum integer values and their corresponding
  // floating-point values.
  APInt MinInt, MaxInt;
  if (IsSigned) {
    MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
    MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
  } else {
    MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
    MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
  }

  APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
  APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));

  APFloat::opStatus MinStatus =
      MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero);
  APFloat::opStatus MaxStatus =
      MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero);
  bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
                             !(MaxStatus & APFloat::opStatus::opInexact);
22777 SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
22778 SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);
22780 // If the integer bounds are exactly representable as floats, emit a
22781 // min+max+fptoi sequence. Otherwise use comparisons and selects.
22782 if (AreExactFloatBounds) {
22783 if (DstVT != TmpVT) {
22784 // Clamp by MinFloat from below. If Src is NaN, propagate NaN.
22785 SDValue MinClamped = DAG.getNode(
22786 X86ISD::FMAX, dl, SrcVT, MinFloatNode, Src);
22787 // Clamp by MaxFloat from above. If Src is NaN, propagate NaN.
22788 SDValue BothClamped = DAG.getNode(
22789 X86ISD::FMIN, dl, SrcVT, MaxFloatNode, MinClamped);
22790 // Convert clamped value to integer.
22791 SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, BothClamped);
22793 // NaN will become INDVAL, with the top bit set and the rest zero.
22794 // Truncation will discard the top bit, resulting in zero.
22795 return DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
22796 }
22798 // Clamp by MinFloat from below. If Src is NaN, the result is MinFloat.
22799 SDValue MinClamped = DAG.getNode(
22800 X86ISD::FMAX, dl, SrcVT, Src, MinFloatNode);
22801 // Clamp by MaxFloat from above. NaN cannot occur.
22802 SDValue BothClamped = DAG.getNode(
22803 X86ISD::FMINC, dl, SrcVT, MinClamped, MaxFloatNode);
22804 // Convert clamped value to integer.
22805 SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, DstVT, BothClamped);
22808 // In the unsigned case we're done, because we mapped NaN to MinFloat,
22809 // which will cast to zero.
22810 if (!IsSigned)
22811 return FpToInt;
22813 // Otherwise, select zero if Src is NaN.
22814 SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
22815 return DAG.getSelectCC(
22816 dl, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
22817 }
22819 SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
22820 SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);
22822 // Result of direct conversion, which may be selected away.
22823 SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, Src);
22825 if (DstVT != TmpVT) {
22826 // NaN will become INDVAL, with the top bit set and the rest zero.
22827 // Truncation will discard the top bit, resulting in zero.
22828 FpToInt = DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
22829 }
22831 SDValue Select = FpToInt;
22832 // For signed conversions where we saturate to the same size as the
22833 // result type of the fptoi instructions, INDVAL coincides with integer
22834 // minimum, so we don't need to explicitly check it.
22835 if (!IsSigned || SatWidth != TmpVT.getScalarSizeInBits()) {
22836 // If Src ULT MinFloat, select MinInt. In particular, this also selects
22837 // MinInt if Src is NaN.
22838 Select = DAG.getSelectCC(
22839 dl, Src, MinFloatNode, MinIntNode, Select, ISD::CondCode::SETULT);
22840 }
22842 // If Src OGT MaxFloat, select MaxInt.
22843 Select = DAG.getSelectCC(
22844 dl, Src, MaxFloatNode, MaxIntNode, Select, ISD::CondCode::SETOGT);
22846 // In the unsigned case we are done, because we mapped NaN to MinInt, which
22847 // is already zero. The promoted case was already handled above.
22848 if (!IsSigned || DstVT != TmpVT) {
22849 return Select;
22850 }
22852 // Otherwise, select 0 if Src is NaN.
22853 SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
22854 return DAG.getSelectCC(
22855 dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
22856 }
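// For example, a signed f64 -> i32 saturation with exactly representable
// bounds boils down to roughly (illustrative, before scheduling):
//   maxsd MinC, %xmm0      # NaN input yields the MinFloat bound here
//   minsd MaxC, %xmm0      # clamp from above
//   cvttsd2si %xmm0, %eax  # convert
// plus, in the signed same-width case, a SETUO-driven select of zero.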
22858 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
22859 bool IsStrict = Op->isStrictFPOpcode();
22860 SDLoc DL(Op);
22862 MVT VT = Op.getSimpleValueType();
22863 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
22864 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
22865 MVT SVT = In.getSimpleValueType();
22867 if (VT == MVT::f128 || (SVT == MVT::f16 && VT == MVT::f80))
22868 return SDValue();
22870 if (SVT == MVT::f16) {
22871 if (Subtarget.hasFP16())
22872 return Op;
22874 if (VT != MVT::f32) {
22875 if (IsStrict)
22876 return DAG.getNode(
22877 ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other},
22878 {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, DL,
22879 {MVT::f32, MVT::Other}, {Chain, In})});
22881 return DAG.getNode(ISD::FP_EXTEND, DL, VT,
22882 DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, In));
22883 }
22885 if (!Subtarget.hasF16C())
22886 return SDValue();
22888 In = DAG.getBitcast(MVT::i16, In);
22889 In = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i16,
22890 getZeroVector(MVT::v8i16, Subtarget, DAG, DL), In,
22891 DAG.getIntPtrConstant(0, DL));
22892 SDValue Res;
22893 if (IsStrict) {
22894 Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, DL, {MVT::v4f32, MVT::Other},
22895 {Chain, In});
22896 Chain = Res.getValue(1);
22897 } else {
22898 Res = DAG.getNode(X86ISD::CVTPH2PS, DL, MVT::v4f32, In,
22899 DAG.getTargetConstant(4, DL, MVT::i32));
22900 }
22901 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Res,
22902 DAG.getIntPtrConstant(0, DL));
22903 if (IsStrict)
22904 return DAG.getMergeValues({Res, Chain}, DL);
22905 return Res;
22906 }
22908 if (!SVT.isVector())
22909 return Op;
22911 if (SVT.getVectorElementType() == MVT::f16) {
22912 assert(Subtarget.hasF16C() && "Unexpected features!");
22913 if (SVT == MVT::v2f16)
22914 In = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f16, In,
22915 DAG.getUNDEF(MVT::v2f16));
22916 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f16, In,
22917 DAG.getUNDEF(MVT::v4f16));
22918 if (IsStrict)
22919 return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
22920 {Op->getOperand(0), Res});
22921 return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
22922 } else if (VT == MVT::v4f64 || VT == MVT::v8f64) {
22926 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
22928 SDValue Res =
22929 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
22930 if (IsStrict)
22931 return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
22932 {Op->getOperand(0), Res});
22933 return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
22934 }
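// E.g. the scalar f16 -> f32 path above is essentially (a sketch, assuming
// F16C but no AVX512-FP16):
//   vmovd %eax, %xmm0       # the f16 bits, inserted into a zeroed v8i16
//   vcvtph2ps %xmm0, %xmm0  # packed convert; lane 0 holds the result
// Zeroing the upper lanes keeps garbage from being converted alongside.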
22936 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
22937 bool IsStrict = Op->isStrictFPOpcode();
22938 SDLoc DL(Op);
22940 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
22941 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
22942 MVT VT = Op.getSimpleValueType();
22943 MVT SVT = In.getSimpleValueType();
22945 if (SVT == MVT::f128 || (VT == MVT::f16 && SVT == MVT::f80))
22946 return SDValue();
22948 if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
22949 if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
22950 return SDValue();
22952 if (VT.isVector())
22953 return Op;
22955 SDValue Res;
22956 SDValue Rnd = DAG.getTargetConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, DL,
22957 MVT::i32);
22958 if (IsStrict) {
22959 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4f32,
22960 DAG.getConstantFP(0, DL, MVT::v4f32), In,
22961 DAG.getIntPtrConstant(0, DL));
22962 Res = DAG.getNode(X86ISD::STRICT_CVTPS2PH, DL, {MVT::v8i16, MVT::Other},
22963 {Chain, Res, Rnd});
22964 Chain = Res.getValue(1);
22965 } else {
22966 // FIXME: Should we use zeros for upper elements for non-strict?
22967 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, In);
22968 Res = DAG.getNode(X86ISD::CVTPS2PH, DL, MVT::v8i16, Res, Rnd);
22969 }
22971 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
22972 DAG.getIntPtrConstant(0, DL));
22973 Res = DAG.getBitcast(MVT::f16, Res);
22975 if (IsStrict)
22976 return DAG.getMergeValues({Res, Chain}, DL);
22977 return Res;
22978 }
22980 return Op;
22981 }
22984 static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
22985 bool IsStrict = Op->isStrictFPOpcode();
22986 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
22987 assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
22988 "Unexpected VT!");
22990 SDLoc dl(Op);
22991 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
22992 DAG.getConstant(0, dl, MVT::v8i16), Src,
22993 DAG.getIntPtrConstant(0, dl));
22995 SDValue Chain;
22996 if (IsStrict) {
22997 Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
22998 {Op.getOperand(0), Res});
22999 Chain = Res.getValue(1);
23000 } else {
23001 Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
23002 }
23004 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
23005 DAG.getIntPtrConstant(0, dl));
23007 if (IsStrict)
23008 return DAG.getMergeValues({Res, Chain}, dl);
23009 return Res;
23010 }
23013 static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
23014 bool IsStrict = Op->isStrictFPOpcode();
23015 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
23016 assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
23017 "Unexpected VT!");
23019 SDLoc dl(Op);
23020 SDValue Res, Chain;
23021 if (IsStrict) {
23022 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
23023 DAG.getConstantFP(0, dl, MVT::v4f32), Src,
23024 DAG.getIntPtrConstant(0, dl));
23025 Res = DAG.getNode(
23026 X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
23027 {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
23028 Chain = Res.getValue(1);
23029 } else {
23030 // FIXME: Should we use zeros for upper elements for non-strict?
23031 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
23032 Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
23033 DAG.getTargetConstant(4, dl, MVT::i32));
23034 }
23036 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
23037 DAG.getIntPtrConstant(0, dl));
23039 if (IsStrict)
23040 return DAG.getMergeValues({Res, Chain}, dl);
23041 return Res;
23042 }
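// Sketch of the non-strict path above (assuming F16C is available):
//   vcvtps2ph $4, %xmm0, %xmm0  # imm 4 = round via the MXCSR rounding mode
//   vmovd %xmm0, %eax           # the f16 result is in bits [15:0]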
23045 /// Depending on uarch and/or optimizing for size, we might prefer to use a
23046 /// vector operation in place of the typical scalar operation.
23047 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
23048 const X86Subtarget &Subtarget) {
23049 // If both operands have other uses, this is probably not profitable.
23050 SDValue LHS = Op.getOperand(0);
23051 SDValue RHS = Op.getOperand(1);
23052 if (!LHS.hasOneUse() && !RHS.hasOneUse())
23053 return Op;
23055 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
23056 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
23057 if (IsFP && !Subtarget.hasSSE3())
23058 return Op;
23059 if (!IsFP && !Subtarget.hasSSSE3())
23060 return Op;
23062 // Extract from a common vector.
23063 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
23064 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
23065 LHS.getOperand(0) != RHS.getOperand(0) ||
23066 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
23067 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
23068 !shouldUseHorizontalOp(true, DAG, Subtarget))
23069 return Op;
23071 // Allow commuted 'hadd' ops.
23072 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
23073 unsigned HOpcode;
23074 switch (Op.getOpcode()) {
23075 case ISD::ADD: HOpcode = X86ISD::HADD; break;
23076 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
23077 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
23078 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
23080 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
23082 unsigned LExtIndex = LHS.getConstantOperandVal(1);
23083 unsigned RExtIndex = RHS.getConstantOperandVal(1);
23084 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
23085 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
23086 std::swap(LExtIndex, RExtIndex);
23088 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
23089 return Op;
23091 SDValue X = LHS.getOperand(0);
23092 EVT VecVT = X.getValueType();
23093 unsigned BitWidth = VecVT.getSizeInBits();
23094 unsigned NumLanes = BitWidth / 128;
23095 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
23096 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
23097 "Not expecting illegal vector widths here");
23099 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
23100 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
23101 SDLoc DL(Op);
23102 if (BitWidth == 256 || BitWidth == 512) {
23103 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
23104 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
23105 LExtIndex %= NumEltsPerLane;
23106 }
23108 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
23109 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
23110 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
23111 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
23112 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
23113 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
23114 DAG.getIntPtrConstant(LExtIndex / 2, DL));
23115 }
23117 /// Depending on uarch and/or optimizing for size, we might prefer to use a
23118 /// vector operation in place of the typical scalar operation.
23119 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
23120 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
23121 "Only expecting float/double");
23122 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
23123 }
23125 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
23126 /// This mode isn't supported in hardware on X86. But as long as we aren't
23127 /// compiling with trapping math, we can emulate this with
23128 /// trunc(X + copysign(nextafter(0.5, 0.0), X)).
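/// Worked f32 example of why the predecessor of 0.5 is used: for
/// X = 0.49999997f (the largest float below 0.5), X + 0.49999997f is
/// 0.99999994f, which truncates to 0.0 as desired, while X + 0.5f is
/// exactly halfway between 0.99999994f and 1.0f and would round to 1.0f
/// under round-to-nearest-even, giving the wrong answer.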
23129 static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
23130 SDValue N0 = Op.getOperand(0);
23131 SDLoc dl(Op);
23132 MVT VT = Op.getSimpleValueType();
23134 // N0 += copysign(nextafter(0.5, 0.0), N0)
23135 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23136 bool Ignored;
23137 APFloat Point5Pred = APFloat(0.5f);
23138 Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
23139 Point5Pred.next(/*nextDown*/true);
23141 SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
23142 DAG.getConstantFP(Point5Pred, dl, VT), N0);
23143 N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);
23145 // Truncate the result to remove fraction.
23146 return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
23147 }
23149 /// The only differences between FABS and FNEG are the mask and the logic op.
23150 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
23151 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
23152 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
23153 "Wrong opcode for lowering FABS or FNEG.");
23155 bool IsFABS = (Op.getOpcode() == ISD::FABS);
23157 // If this is a FABS and it has an FNEG user, bail out to fold the combination
23158 // into an FNABS. We'll lower the FABS after that if it is still in use.
23159 if (IsFABS)
23160 for (SDNode *User : Op->uses())
23161 if (User->getOpcode() == ISD::FNEG)
23162 return Op;
23165 MVT VT = Op.getSimpleValueType();
23166 SDLoc dl(Op);
23167 bool IsF128 = (VT == MVT::f128);
23168 assert(VT.isFloatingPoint() && VT != MVT::f80 &&
23169 DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
23170 "Unexpected type in LowerFABSorFNEG");
23172 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
23173 // decide if we should generate a 16-byte constant mask when we only need 4 or
23174 // 8 bytes for the scalar case.
23176 // There are no scalar bitwise logical SSE/AVX instructions, so we
23177 // generate a 16-byte vector constant and logic op even for the scalar case.
23178 // Using a 16-byte mask allows folding the load of the mask with
23179 // the logic op, so it can save (~4 bytes) on code size.
23180 bool IsFakeVector = !VT.isVector() && !IsF128;
23181 MVT LogicVT = VT;
23182 if (IsFakeVector)
23183 LogicVT = (VT == MVT::f64) ? MVT::v2f64
23184 : (VT == MVT::f32) ? MVT::v4f32
23185 : MVT::v8f16;
23187 unsigned EltBits = VT.getScalarSizeInBits();
23188 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
23189 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
23190 APInt::getSignMask(EltBits);
23191 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23192 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
23194 SDValue Op0 = Op.getOperand(0);
23195 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
23196 unsigned LogicOp = IsFABS ? X86ISD::FAND :
23197 IsFNABS ? X86ISD::FOR :
23198 X86ISD::FXOR;
23199 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
23201 if (VT.isVector() || IsF128)
23202 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
23204 // For the scalar case extend to a 128-bit vector, perform the logic op,
23205 // and extract the scalar result back out.
23206 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
23207 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
23208 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
23209 DAG.getIntPtrConstant(0, dl));
23210 }
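// Net effect (sketch): fabs(x) --> andps x, [0x7fff...] clears the sign
// bit, fneg(x) --> xorps x, [0x8000...] flips it, and fneg(fabs(x)) folds
// into a single orps with the sign mask (FNABS).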
23212 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
23213 SDValue Mag = Op.getOperand(0);
23214 SDValue Sign = Op.getOperand(1);
23216 SDLoc dl(Op);
23217 // If the sign operand is smaller, extend it first.
23218 MVT VT = Op.getSimpleValueType();
23219 if (Sign.getSimpleValueType().bitsLT(VT))
23220 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
23222 // And if it is bigger, shrink it first.
23223 if (Sign.getSimpleValueType().bitsGT(VT))
23224 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign,
23225 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
23227 // At this point the operands and the result should have the same
23228 // type, and that won't be f80 since that is not custom lowered.
23229 bool IsF128 = (VT == MVT::f128);
23230 assert(VT.isFloatingPoint() && VT != MVT::f80 &&
23231 DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
23232 "Unexpected type in LowerFCOPYSIGN");
23234 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23236 // Perform all scalar logic operations as 16-byte vectors because there are no
23237 // scalar FP logic instructions in SSE.
23238 // TODO: This isn't necessary. If we used scalar types, we might avoid some
23239 // unnecessary splats, but we might miss load folding opportunities. Should
23240 // this decision be based on OptimizeForSize?
23241 bool IsFakeVector = !VT.isVector() && !IsF128;
23242 MVT LogicVT = VT;
23243 if (IsFakeVector)
23244 LogicVT = (VT == MVT::f64) ? MVT::v2f64
23245 : (VT == MVT::f32) ? MVT::v4f32
23246 : MVT::v8f16;
23248 // The mask constants are automatically splatted for vector types.
23249 unsigned EltSizeInBits = VT.getScalarSizeInBits();
23250 SDValue SignMask = DAG.getConstantFP(
23251 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
23252 SDValue MagMask = DAG.getConstantFP(
23253 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
23255 // First, clear all bits but the sign bit from the second operand (sign).
23256 if (IsFakeVector)
23257 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
23258 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
23260 // Next, clear the sign bit from the first operand (magnitude).
23261 // TODO: If we had general constant folding for FP logic ops, this check
23262 // wouldn't be necessary.
23263 SDValue MagBits;
23264 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
23265 APFloat APF = Op0CN->getValueAPF();
23266 APF.clearSign();
23267 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
23268 } else {
23269 // If the magnitude operand wasn't a constant, we need to AND out the sign.
23270 if (IsFakeVector)
23271 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
23272 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
23273 }
23275 // OR the magnitude value with the sign bit.
23276 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
23277 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
23278 DAG.getIntPtrConstant(0, dl));
23279 }
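// Overall (sketch): copysign(mag, sgn) = (mag & ~signmask) | (sgn & signmask),
// i.e. two ANDs and an OR over a 128-bit vector even in the scalar case.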
23281 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
23282 SDValue N0 = Op.getOperand(0);
23283 SDLoc dl(Op);
23284 MVT VT = Op.getSimpleValueType();
23286 MVT OpVT = N0.getSimpleValueType();
23287 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
23288 "Unexpected type for FGETSIGN");
23290 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
23291 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
23292 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
23293 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
23294 Res = DAG.getZExtOrTrunc(Res, dl, VT);
23295 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
23296 return Res;
23297 }
23299 /// Helper for attempting to create a X86ISD::BT node.
23300 static SDValue getBT(SDValue Src, SDValue BitNo, const SDLoc &DL, SelectionDAG &DAG) {
23301 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
23302 // instruction. Since the shift amount is in-range-or-undefined, we know
23303 // that doing a bittest on the i32 value is ok. We extend to i32 because
23304 // the encoding for the i16 version is larger than the i32 version.
23305 // Also promote i16 to i32 for performance / code size reasons.
23306 if (Src.getValueType().getScalarSizeInBits() < 32)
23307 Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
23309 // No legal type found, give up.
23310 if (!DAG.getTargetLoweringInfo().isTypeLegal(Src.getValueType()))
23311 return SDValue();
23313 // See if we can use the 32-bit instruction instead of the 64-bit one for a
23314 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
23315 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
23316 // known to be zero.
23317 if (Src.getValueType() == MVT::i64 &&
23318 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
23319 Src = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
23321 // If the operand types disagree, extend the shift amount to match. Since
23322 // BT ignores high bits (like shifts) we can use anyextend.
23323 if (Src.getValueType() != BitNo.getValueType()) {
23324 // Peek through a mask/modulo operation.
23325 // TODO: DAGCombine fails to do this as it just checks isTruncateFree, but
23326 // we probably need a better IsDesirableToPromoteOp to handle this as well.
23327 if (BitNo.getOpcode() == ISD::AND && BitNo->hasOneUse())
23328 BitNo = DAG.getNode(ISD::AND, DL, Src.getValueType(),
23329 DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
23330 BitNo.getOperand(0)),
23331 DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
23332 BitNo.getOperand(1)));
23333 else
23334 BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
23335 }
23337 return DAG.getNode(X86ISD::BT, DL, MVT::i32, Src, BitNo);
23338 }
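// Typical use (sketch): a pattern like (and (srl X, N), 1) != 0 becomes
//   bt N, X      # CF = bit (N mod width) of X
//   setb/setae   # materialized via getSETCC(X86::COND_B / COND_AE)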
23340 /// Helper for creating a X86ISD::SETCC node.
23341 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
23342 SelectionDAG &DAG) {
23343 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
23344 DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
23345 }
23347 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
23348 /// style scalarized (associative) reduction patterns. Partial reductions
23349 /// are supported when the pointer SrcMask is non-null.
23350 /// TODO - move this to SelectionDAG?
23351 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
23352 SmallVectorImpl<SDValue> &SrcOps,
23353 SmallVectorImpl<APInt> *SrcMask = nullptr) {
23354 SmallVector<SDValue, 8> Opnds;
23355 DenseMap<SDValue, APInt> SrcOpMap;
23356 EVT VT = MVT::Other;
23358 // Recognize a special case where a vector is cast into a wide integer to
23359 // test all 0s.
23360 assert(Op.getOpcode() == unsigned(BinOp) &&
23361 "Unexpected bit reduction opcode");
23362 Opnds.push_back(Op.getOperand(0));
23363 Opnds.push_back(Op.getOperand(1));
23365 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
23366 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
23367 // BFS traverse all BinOp operands.
23368 if (I->getOpcode() == unsigned(BinOp)) {
23369 Opnds.push_back(I->getOperand(0));
23370 Opnds.push_back(I->getOperand(1));
23371 // Re-evaluate the number of nodes to be traversed.
23372 e += 2; // 2 more nodes (LHS and RHS) are pushed.
23373 continue;
23374 }
23376 // Quit if this is not an EXTRACT_VECTOR_ELT.
23377 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23378 return false;
23380 // Quit if the index is not a constant.
23381 auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
23382 if (!Idx)
23383 return false;
23385 SDValue Src = I->getOperand(0);
23386 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
23387 if (M == SrcOpMap.end()) {
23388 VT = Src.getValueType();
23389 // Quit if not the same type.
23390 if (!SrcOpMap.empty() && VT != SrcOpMap.begin()->first.getValueType())
23391 return false;
23392 unsigned NumElts = VT.getVectorNumElements();
23393 APInt EltCount = APInt::getZero(NumElts);
23394 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
23395 SrcOps.push_back(Src);
23398 // Quit if element already used.
23399 unsigned CIdx = Idx->getZExtValue();
23400 if (M->second[CIdx])
23401 return false;
23402 M->second.setBit(CIdx);
23403 }
23405 if (SrcMask) {
23406 // Collect the source partial masks.
23407 for (SDValue &SrcOp : SrcOps)
23408 SrcMask->push_back(SrcOpMap[SrcOp]);
23409 } else {
23410 // Quit if not all elements are used.
23411 for (const auto &I : SrcOpMap)
23412 if (!I.second.isAllOnes())
23413 return false;
23414 }
23416 return true;
23417 }
23419 // Helper function for comparing all bits of a vector against zero.
23420 static SDValue LowerVectorAllZero(const SDLoc &DL, SDValue V, ISD::CondCode CC,
23421 const APInt &Mask,
23422 const X86Subtarget &Subtarget,
23423 SelectionDAG &DAG, X86::CondCode &X86CC) {
23424 EVT VT = V.getValueType();
23425 unsigned ScalarSize = VT.getScalarSizeInBits();
23426 if (Mask.getBitWidth() != ScalarSize) {
23427 assert(ScalarSize == 1 && "Element Mask vs Vector bitwidth mismatch");
23428 return SDValue();
23429 }
23431 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23432 X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
23434 auto MaskBits = [&](SDValue Src) {
23435 if (Mask.isAllOnes())
23436 return Src;
23437 EVT SrcVT = Src.getValueType();
23438 SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
23439 return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
23440 };
23442 // For sub-128-bit vector, cast to (legal) integer and compare with zero.
23443 if (VT.getSizeInBits() < 128) {
23444 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
23445 if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT))
23446 return SDValue();
23447 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
23448 DAG.getBitcast(IntVT, MaskBits(V)),
23449 DAG.getConstant(0, DL, IntVT));
23450 }
23452 // Quit if not splittable to 128/256-bit vector.
23453 if (!isPowerOf2_32(VT.getSizeInBits()))
23454 return SDValue();
23456 // Split down to 128/256-bit vector.
23457 unsigned TestSize = Subtarget.hasAVX() ? 256 : 128;
23458 while (VT.getSizeInBits() > TestSize) {
23459 auto Split = DAG.SplitVector(V, DL);
23460 VT = Split.first.getValueType();
23461 V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
23462 }
23464 bool UsePTEST = Subtarget.hasSSE41();
23465 if (UsePTEST) {
23466 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
23467 V = DAG.getBitcast(TestVT, MaskBits(V));
23468 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
23469 }
23471 // Without PTEST, a masked v2i64 or-reduction is not faster than
23472 // scalarization.
23473 if (!Mask.isAllOnes() && VT.getScalarSizeInBits() > 32)
23474 return SDValue();
23476 V = DAG.getBitcast(MVT::v16i8, MaskBits(V));
23477 V = DAG.getNode(X86ISD::PCMPEQ, DL, MVT::v16i8, V,
23478 getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
23479 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
23480 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
23481 DAG.getConstant(0xFFFF, DL, MVT::i32));
23482 }
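// E.g. with SSE4.1 an all-zeros test is a single "ptest %xmm0, %xmm0"
// (ZF set iff all bits are zero); without it, roughly:
//   pcmpeqb %xmm1, %xmm0   # xmm1 = 0; compare all 16 byte lanes
//   pmovmskb %xmm0, %eax
//   cmp $0xffff, %eax      # did every lane compare equal to zero?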
23484 // Check whether an OR'd reduction tree is PTEST-able, or if we can fallback to
23485 // CMP(MOVMSK(PCMPEQB(X,0))).
23486 static SDValue MatchVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
23487 const SDLoc &DL,
23488 const X86Subtarget &Subtarget,
23489 SelectionDAG &DAG, SDValue &X86CC) {
23490 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23492 if (!Subtarget.hasSSE2() || !Op->hasOneUse())
23493 return SDValue();
23495 // Check whether we're masking/truncating an OR-reduction result, in which
23496 // case track the masked bits.
23497 APInt Mask = APInt::getAllOnes(Op.getScalarValueSizeInBits());
23498 switch (Op.getOpcode()) {
23499 case ISD::TRUNCATE: {
23500 SDValue Src = Op.getOperand(0);
23501 Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
23502 Op.getScalarValueSizeInBits());
23503 Op = Src;
23504 break;
23505 }
23506 case ISD::AND: {
23507 if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
23508 Mask = Cst->getAPIntValue();
23509 Op = Op.getOperand(0);
23510 }
23511 break;
23512 }
23513 }
23515 SmallVector<SDValue, 8> VecIns;
23516 if (Op.getOpcode() == ISD::OR && matchScalarReduction(Op, ISD::OR, VecIns)) {
23517 EVT VT = VecIns[0].getValueType();
23518 assert(llvm::all_of(VecIns,
23519 [VT](SDValue V) { return VT == V.getValueType(); }) &&
23520 "Reduction source vector mismatch");
23522 // Quit if less than 128-bits or not splittable to 128/256-bit vector.
23523 if (VT.getSizeInBits() < 128 || !isPowerOf2_32(VT.getSizeInBits()))
23524 return SDValue();
23526 // If more than one full vector is evaluated, OR them first before PTEST.
23527 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
23528 Slot += 2, e += 1) {
23529 // Each iteration will OR 2 nodes and append the result until there is
23530 // only 1 node left, i.e. the final OR'd value of all vectors.
23531 SDValue LHS = VecIns[Slot];
23532 SDValue RHS = VecIns[Slot + 1];
23533 VecIns.push_back(DAG.getNode(ISD::OR, DL, VT, LHS, RHS));
23534 }
23536 X86::CondCode CCode;
23537 if (SDValue V = LowerVectorAllZero(DL, VecIns.back(), CC, Mask, Subtarget,
23538 DAG, CCode)) {
23539 X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
23540 return V;
23541 }
23542 }
23544 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
23545 ISD::NodeType BinOp;
23546 if (SDValue Match =
23547 DAG.matchBinOpReduction(Op.getNode(), BinOp, {ISD::OR})) {
23548 X86::CondCode CCode;
23549 if (SDValue V =
23550 LowerVectorAllZero(DL, Match, CC, Mask, Subtarget, DAG, CCode)) {
23551 X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
23552 return V;
23553 }
23554 }
23555 }
23557 return SDValue();
23558 }
23560 /// Return true if \c Op has a use that doesn't just read flags.
23561 static bool hasNonFlagsUse(SDValue Op) {
23562 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
23563 ++UI) {
23564 SDNode *User = *UI;
23565 unsigned UOpNo = UI.getOperandNo();
23566 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
23567 // Look past the truncate.
23568 UOpNo = User->use_begin().getOperandNo();
23569 User = *User->use_begin();
23570 }
23572 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
23573 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
23574 return true;
23575 }
23577 return false;
23578 }
23579 // Transform to an x86-specific ALU node with flags if there is a chance of
23580 // using an RMW op or only the flags are used. Otherwise, leave
23581 // the node alone and emit a 'cmp' or 'test' instruction.
23582 static bool isProfitableToUseFlagOp(SDValue Op) {
23583 for (SDNode *U : Op->uses())
23584 if (U->getOpcode() != ISD::CopyToReg &&
23585 U->getOpcode() != ISD::SETCC &&
23586 U->getOpcode() != ISD::STORE)
23587 return false;
23589 return true;
23590 }
23592 /// Emit nodes that will be selected as "test Op0,Op0", or something
23594 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
23595 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
23596 // CF and OF aren't always set the way we want. Determine which
23597 // of these we need.
23598 bool NeedCF = false;
23599 bool NeedOF = false;
23600 switch (X86CC) {
23601 default: break;
23602 case X86::COND_A: case X86::COND_AE:
23603 case X86::COND_B: case X86::COND_BE:
23604 NeedCF = true;
23605 break;
23606 case X86::COND_G: case X86::COND_GE:
23607 case X86::COND_L: case X86::COND_LE:
23608 case X86::COND_O: case X86::COND_NO: {
23609 // Check if we really need to set the
23610 // Overflow flag. If NoSignedWrap is present
23611 // that is not actually needed.
23612 switch (Op->getOpcode()) {
23613 case ISD::ADD:
23614 case ISD::SUB:
23615 case ISD::MUL:
23616 case ISD::SHL:
23617 if (Op.getNode()->getFlags().hasNoSignedWrap())
23618 break;
23619 NeedOF = true;
23620 break;
23621 }
23622 break;
23623 }
23624 }
23627 // See if we can use the EFLAGS value from the operand instead of
23628 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
23629 // we prove that the arithmetic won't overflow, we can't use OF or CF.
23630 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
23631 // Emit a CMP with 0, which is the TEST pattern.
23632 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
23633 DAG.getConstant(0, dl, Op.getValueType()));
23634 }
23635 unsigned Opcode = 0;
23636 unsigned NumOperands = 0;
23638 SDValue ArithOp = Op;
23640 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
23641 // which may be the result of a CAST. We use the variable 'Op', which is the
23642 // non-casted variable when we check for possible users.
23643 switch (ArithOp.getOpcode()) {
23644 case ISD::AND:
23645 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
23646 // because a TEST instruction will be better.
23647 if (!hasNonFlagsUse(Op))
23648 break;
23650 LLVM_FALLTHROUGH;
23651 case ISD::ADD:
23652 case ISD::SUB:
23653 case ISD::OR:
23654 case ISD::XOR:
23655 if (!isProfitableToUseFlagOp(Op))
23656 break;
23658 // Otherwise use a regular EFLAGS-setting instruction.
23659 switch (ArithOp.getOpcode()) {
23660 default: llvm_unreachable("unexpected operator!");
23661 case ISD::ADD: Opcode = X86ISD::ADD; break;
23662 case ISD::SUB: Opcode = X86ISD::SUB; break;
23663 case ISD::XOR: Opcode = X86ISD::XOR; break;
23664 case ISD::AND: Opcode = X86ISD::AND; break;
23665 case ISD::OR: Opcode = X86ISD::OR; break;
23666 }
23668 NumOperands = 2;
23669 break;
23670 case X86ISD::ADD:
23671 case X86ISD::SUB:
23672 case X86ISD::OR:
23673 case X86ISD::XOR:
23674 case X86ISD::AND:
23675 return SDValue(Op.getNode(), 1);
23676 case ISD::USUBO:
23677 case ISD::SSUBO:
23678 // USUBO/SSUBO will become a X86ISD::SUB and we can use its Z flag.
23679 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
23680 return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
23681 Op->getOperand(1)).getValue(1);
23682 default:
23683 break;
23684 }
23686 if (Opcode == 0)
23688 // Emit a CMP with 0, which is the TEST pattern.
23689 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
23690 DAG.getConstant(0, dl, Op.getValueType()));
23692 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
23693 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
23695 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
23696 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
23697 return SDValue(New.getNode(), 1);
23698 }
23700 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
23701 /// equivalent.
23702 static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
23703 const SDLoc &dl, SelectionDAG &DAG,
23704 const X86Subtarget &Subtarget) {
23705 if (isNullConstant(Op1))
23706 return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
23708 EVT CmpVT = Op0.getValueType();
23710 assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
23711 CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
23713 // Only promote the compare up to i32 if it is a 16-bit operation
23714 // with an immediate; 16-bit immediates are to be avoided.
23715 if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
23716 !DAG.getMachineFunction().getFunction().hasMinSize()) {
23717 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
23718 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
23719 // Don't do this if the immediate can fit in 8-bits.
23720 if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
23721 (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
23722 unsigned ExtendOp =
23723 isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
23724 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
23725 // For equality comparisons try to use SIGN_EXTEND if the input was
23726 // truncate from something with enough sign bits.
23727 if (Op0.getOpcode() == ISD::TRUNCATE) {
23728 if (DAG.ComputeMaxSignificantBits(Op0.getOperand(0)) <= 16)
23729 ExtendOp = ISD::SIGN_EXTEND;
23730 } else if (Op1.getOpcode() == ISD::TRUNCATE) {
23731 if (DAG.ComputeMaxSignificantBits(Op1.getOperand(0)) <= 16)
23732 ExtendOp = ISD::SIGN_EXTEND;
23733 }
23734 }
23736 CmpVT = MVT::i32;
23737 Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
23738 Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
23739 }
23740 }
23742 // Try to shrink i64 compares if the input has enough zero bits.
23743 // FIXME: Do this for non-constant compares for constant on LHS?
23744 if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
23745 Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
23746 cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
23747 DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
23748 CmpVT = MVT::i32;
23749 Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
23750 Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
23751 }
23753 // 0-x == y --> x+y == 0
23754 // 0-x != y --> x+y != 0
23755 if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
23756 Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
23757 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
23758 SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
23759 return Add.getValue(1);
23760 }
23762 // x == 0-y --> x+y == 0
23763 // x != 0-y --> x+y != 0
23764 if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
23765 Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
23766 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
23767 SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
23768 return Add.getValue(1);
23769 }
23771 // Use SUB instead of CMP to enable CSE between SUB and CMP.
23772 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
23773 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
23774 return Sub.getValue(1);
23775 }
23777 /// Check if replacement of SQRT with RSQRT should be disabled.
23778 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
23779 EVT VT = Op.getValueType();
23781 // We don't need to replace SQRT with RSQRT for half type.
23782 if (VT.getScalarType() == MVT::f16)
23783 return true;
23785 // We never want to use both SQRT and RSQRT instructions for the same input.
23786 if (DAG.doesNodeExist(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
23787 return false;
23789 if (VT.isVector())
23790 return Subtarget.hasFastVectorFSQRT();
23791 return Subtarget.hasFastScalarFSQRT();
23792 }
23794 /// The minimum architected relative accuracy is 2^-12. We need one
23795 /// Newton-Raphson step to have a good float result (24 bits of precision).
23796 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
23797 SelectionDAG &DAG, int Enabled,
23798 int &RefinementSteps,
23799 bool &UseOneConstNR,
23800 bool Reciprocal) const {
23801 SDLoc DL(Op);
23802 EVT VT = Op.getValueType();
23804 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
23805 // It is likely not profitable to do this for f64 because a double-precision
23806 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
23807 // instructions: convert to single, rsqrtss, convert back to double, refine
23808 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
23809 // along with FMA, this could be a throughput win.
23810 // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
23811 // after legalize types.
23812 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
23813 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
23814 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
23815 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
23816 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
23817 if (RefinementSteps == ReciprocalEstimate::Unspecified)
23818 RefinementSteps = 1;
23820 UseOneConstNR = false;
23821 // There is no FSQRT for 512-bits, but there is RSQRT14.
23822 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
23823 SDValue Estimate = DAG.getNode(Opcode, DL, VT, Op);
23824 if (RefinementSteps == 0 && !Reciprocal)
23825 Estimate = DAG.getNode(ISD::FMUL, DL, VT, Op, Estimate);
23826 return Estimate;
23827 }
23829 if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
23830 Subtarget.hasFP16()) {
23831 assert(Reciprocal && "Don't replace SQRT with RSQRT for half type");
23832 if (RefinementSteps == ReciprocalEstimate::Unspecified)
23833 RefinementSteps = 0;
23835 if (VT == MVT::f16) {
23836 SDValue Zero = DAG.getIntPtrConstant(0, DL);
23837 SDValue Undef = DAG.getUNDEF(MVT::v8f16);
23838 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
23839 Op = DAG.getNode(X86ISD::RSQRT14S, DL, MVT::v8f16, Undef, Op);
23840 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
23841 }
23843 return DAG.getNode(X86ISD::RSQRT14, DL, VT, Op);
23844 }
23846 return SDValue();
23847 }
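// The refinement step mentioned above is the standard Newton-Raphson
// iteration for 1/sqrt(a): x1 = x0 * (1.5 - 0.5 * a * x0 * x0); it is
// emitted by the generic combiner from the estimate returned here (a note
// on the math, not code produced by this function).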
23848 /// The minimum architected relative accuracy is 2^-12. We need one
23849 /// Newton-Raphson step to have a good float result (24 bits of precision).
23850 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
23851 int Enabled,
23852 int &RefinementSteps) const {
23853 SDLoc DL(Op);
23854 EVT VT = Op.getValueType();
23856 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
23857 // It is likely not profitable to do this for f64 because a double-precision
23858 // reciprocal estimate with refinement on x86 prior to FMA requires
23859 // 15 instructions: convert to single, rcpss, convert back to double, refine
23860 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
23861 // along with FMA, this could be a throughput win.
23863 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
23864 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
23865 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
23866 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
23867 // Enable estimate codegen with 1 refinement step for vector division.
23868 // Scalar division estimates are disabled because they break too much
23869 // real-world code. These defaults are intended to match GCC behavior.
23870 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
23871 return SDValue();
23873 if (RefinementSteps == ReciprocalEstimate::Unspecified)
23874 RefinementSteps = 1;
23876 // There is no FSQRT for 512-bits, but there is RCP14.
23877 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
23878 return DAG.getNode(Opcode, DL, VT, Op);
23879 }
23881 if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
23882 Subtarget.hasFP16()) {
23883 if (RefinementSteps == ReciprocalEstimate::Unspecified)
23884 RefinementSteps = 0;
23886 if (VT == MVT::f16) {
23887 SDValue Zero = DAG.getIntPtrConstant(0, DL);
23888 SDValue Undef = DAG.getUNDEF(MVT::v8f16);
23889 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
23890 Op = DAG.getNode(X86ISD::RCP14S, DL, MVT::v8f16, Undef, Op);
23891 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
23892 }
23894 return DAG.getNode(X86ISD::RCP14, DL, VT, Op);
23895 }
23897 return SDValue();
23898 }
23899 /// If we have at least two divisions that use the same divisor, convert to
23900 /// multiplication by a reciprocal. This may need to be adjusted for a given
23901 /// CPU if a division's cost is not at least twice the cost of a multiplication.
23902 /// This is because we still need one division to calculate the reciprocal and
23903 /// then we need two multiplies by that reciprocal as replacements for the
23904 /// original divisions.
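/// For example, a/d and b/d become t = 1.0/d; a*t; b*t: one divide plus two
/// multiplies instead of two divides, which pays off precisely when a divide
/// costs at least two multiplies (hence the threshold returned below).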
23905 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
23906 return 2;
23907 }
23909 SDValue
23910 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
23911 SelectionDAG &DAG,
23912 SmallVectorImpl<SDNode *> &Created) const {
23913 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
23914 if (isIntDivCheap(N->getValueType(0), Attr))
23915 return SDValue(N,0); // Lower SDIV as SDIV
23917 assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
23918 "Unexpected divisor!");
23920 // Only perform this transform if CMOV is supported otherwise the select
23921 // below will become a branch.
23922 if (!Subtarget.canUseCMOV())
23923 return SDValue();
23925 // fold (sdiv X, pow2)
23926 EVT VT = N->getValueType(0);
23927 // FIXME: Support i8.
23928 if (VT != MVT::i16 && VT != MVT::i32 &&
23929 !(Subtarget.is64Bit() && VT == MVT::i64))
23930 return SDValue();
23932 unsigned Lg2 = Divisor.countTrailingZeros();
23934 // If the divisor is 2 or -2, the default expansion is better.
23935 if (Lg2 == 1)
23936 return SDValue();
23938 SDLoc DL(N);
23939 SDValue N0 = N->getOperand(0);
23940 SDValue Zero = DAG.getConstant(0, DL, VT);
23941 APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
23942 SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
23944 // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
23945 SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
23946 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
23947 SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
23949 Created.push_back(Cmp.getNode());
23950 Created.push_back(Add.getNode());
23951 Created.push_back(CMov.getNode());
23953 SDValue SRA =
23955 DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));
23957 // If we're dividing by a positive value, we're done. Otherwise, we must
23958 // negate the result.
23959 if (Divisor.isNonNegative())
23960 return SRA;
23962 Created.push_back(SRA.getNode());
23963 return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
23964 }
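// E.g. for i32 "x sdiv 8" this builds roughly (a sketch, pre-scheduling):
//   cmp/test + cmov          # add Pow2-1 (here 7) only when x < 0
//   sar $3, %eax             # arithmetic shift right by log2(8)
// with an extra negate appended when dividing by -8.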
23966 /// Result of 'and' is compared against zero. Change to a BT node if possible.
23967 /// Returns the BT node and the condition code needed to use it.
23968 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
23969 SelectionDAG &DAG, X86::CondCode &X86CC) {
23970 assert(And.getOpcode() == ISD::AND && "Expected AND node!");
23971 SDValue Op0 = And.getOperand(0);
23972 SDValue Op1 = And.getOperand(1);
23973 if (Op0.getOpcode() == ISD::TRUNCATE)
23974 Op0 = Op0.getOperand(0);
23975 if (Op1.getOpcode() == ISD::TRUNCATE)
23976 Op1 = Op1.getOperand(0);
23978 SDValue Src, BitNo;
23979 if (Op1.getOpcode() == ISD::SHL)
23980 std::swap(Op0, Op1);
23981 if (Op0.getOpcode() == ISD::SHL) {
23982 if (isOneConstant(Op0.getOperand(0))) {
23983 // If we looked past a truncate, check that it's only truncating away
23984 // known zeros.
23985 unsigned BitWidth = Op0.getValueSizeInBits();
23986 unsigned AndBitWidth = And.getValueSizeInBits();
23987 if (BitWidth > AndBitWidth) {
23988 KnownBits Known = DAG.computeKnownBits(Op0);
23989 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
23990 return SDValue();
23991 }
23992 Src = Op1;
23993 BitNo = Op0.getOperand(1);
23994 }
23995 } else if (Op1.getOpcode() == ISD::Constant) {
23996 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
23997 uint64_t AndRHSVal = AndRHS->getZExtValue();
23998 SDValue AndLHS = Op0;
24000 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
24001 Src = AndLHS.getOperand(0);
24002 BitNo = AndLHS.getOperand(1);
24003 } else {
24004 // Use BT if the immediate can't be encoded in a TEST instruction or we
24005 // are optimizing for size and the immediate won't fit in a byte.
24006 bool OptForSize = DAG.shouldOptForSize();
24007 if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
24008 isPowerOf2_64(AndRHSVal)) {
24009 Src = AndLHS;
24010 BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
24011 Src.getValueType());
24012 }
24013 }
24014 }
24016 // No patterns found, give up.
24017 if (!Src.getNode())
24018 return SDValue();
24020 // Remove any bit flip.
24021 if (isBitwiseNot(Src)) {
24022 Src = Src.getOperand(0);
24023 CC = CC == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
24026 // Attempt to create the X86ISD::BT node.
24027 if (SDValue BT = getBT(Src, BitNo, dl, DAG)) {
24028 X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
24029 return BT;
24030 }
24032 return SDValue();
24033 }
24035 // Check if pre-AVX condcode can be performed by a single FCMP op.
24036 static bool cheapX86FSETCC_SSE(ISD::CondCode SetCCOpcode) {
24037 return (SetCCOpcode != ISD::SETONE) && (SetCCOpcode != ISD::SETUEQ);
24038 }
24040 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
24041 /// CC.
24042 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
24043 SDValue &Op1, bool &IsAlwaysSignaling) {
24045 unsigned SSECC;
24046 bool Swap = false;
24047 // SSE Condition code mapping:
24048 //  0 - EQ
24049 //  1 - LT
24050 //  2 - LE
24051 //  3 - UNORD
24052 //  4 - NEQ
24053 //  5 - NLT
24054 //  6 - NLE
24055 //  7 - ORD
24056 switch (SetCCOpcode) {
24057 default: llvm_unreachable("Unexpected SETCC condition");
24059 case ISD::SETEQ: SSECC = 0; break;
24061 case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
24063 case ISD::SETOLT: SSECC = 1; break;
24065 case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
24067 case ISD::SETOLE: SSECC = 2; break;
24068 case ISD::SETUO: SSECC = 3; break;
24070 case ISD::SETNE: SSECC = 4; break;
24071 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
24072 case ISD::SETUGE: SSECC = 5; break;
24073 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
24074 case ISD::SETUGT: SSECC = 6; break;
24075 case ISD::SETO: SSECC = 7; break;
24076 case ISD::SETUEQ: SSECC = 8; break;
24077 case ISD::SETONE: SSECC = 12; break;
24080 std::swap(Op0, Op1);
24082 switch (SetCCOpcode) {
24083 default:
24084 IsAlwaysSignaling = true;
24085 break;
24086 case ISD::SETEQ:
24087 case ISD::SETOEQ:
24088 case ISD::SETUEQ:
24089 case ISD::SETNE:
24090 case ISD::SETONE:
24091 case ISD::SETUNE:
24092 case ISD::SETO:
24093 case ISD::SETUO:
24094 IsAlwaysSignaling = false;
24095 break;
24096 }
24098 return SSECC;
24099 }
24101 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
24102 /// concatenate the result back.
24103 static SDValue splitIntVSETCC(EVT VT, SDValue LHS, SDValue RHS,
24104 ISD::CondCode Cond, SelectionDAG &DAG,
24105 const SDLoc &dl) {
24106 assert(VT.isInteger() && VT == LHS.getValueType() &&
24107 VT == RHS.getValueType() && "Unsupported VTs!");
24109 SDValue CC = DAG.getCondCode(Cond);
24111 // Extract the LHS Lo/Hi vectors
24112 SDValue LHS1, LHS2;
24113 std::tie(LHS1, LHS2) = splitVector(LHS, DAG, dl);
24115 // Extract the RHS Lo/Hi vectors
24116 SDValue RHS1, RHS2;
24117 std::tie(RHS1, RHS2) = splitVector(RHS, DAG, dl);
24119 // Issue the operation on the smaller types and concatenate the result back
24120 EVT LoVT, HiVT;
24121 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
24122 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24123 DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
24124 DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
24125 }
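// E.g. on AVX1 (no 256-bit integer compares) a v32i8 setcc is split into
// two v16i8 compares whose results are concatenated back into 256 bits.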
24127 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
24129 SDValue Op0 = Op.getOperand(0);
24130 SDValue Op1 = Op.getOperand(1);
24131 SDValue CC = Op.getOperand(2);
24132 MVT VT = Op.getSimpleValueType();
24133 SDLoc dl(Op);
24135 assert(VT.getVectorElementType() == MVT::i1 &&
24136 "Cannot set masked compare for this operation");
24138 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
24140 // Prefer SETGT over SETLT.
24141 if (SetCCOpcode == ISD::SETLT) {
24142 SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
24143 std::swap(Op0, Op1);
24144 }
24146 return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
24147 }
24149 /// Given a buildvector constant, return a new vector constant with each element
24150 /// incremented or decremented. If incrementing or decrementing would result in
24151 /// unsigned overflow or underflow or this is not a simple vector constant,
24152 /// return an empty value.
24153 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
24154 auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
24155 if (!BV)
24156 return SDValue();
24158 MVT VT = V.getSimpleValueType();
24159 MVT EltVT = VT.getVectorElementType();
24160 unsigned NumElts = VT.getVectorNumElements();
24161 SmallVector<SDValue, 8> NewVecC;
24162 SDLoc DL(V);
24163 for (unsigned i = 0; i < NumElts; ++i) {
24164 auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
24165 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
24166 return SDValue();
24168 // Avoid overflow/underflow.
24169 const APInt &EltC = Elt->getAPIntValue();
24170 if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isZero()))
24171 return SDValue();
24173 NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
24174 }
24176 return DAG.getBuildVector(VT, DL, NewVecC);
24177 }
24179 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
24180 ///   Op0 u<= Op1:
24181 ///   t = psubus Op0, Op1
24182 ///   pcmpeq t, <0..0>
24183 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
24184 ISD::CondCode Cond, const SDLoc &dl,
24185 const X86Subtarget &Subtarget,
24186 SelectionDAG &DAG) {
24187 if (!Subtarget.hasSSE2())
24188 return SDValue();
24190 MVT VET = VT.getVectorElementType();
24191 if (VET != MVT::i8 && VET != MVT::i16)
24192 return SDValue();
24194 switch (Cond) {
24195 default:
24196 return SDValue();
24197 case ISD::SETULT: {
24198 // If the comparison is against a constant we can turn this into a
24199 // setule. With psubus, setule does not require a swap. This is
24200 // beneficial because the constant in the register is no longer
24201 // clobbered as the destination, so it can be hoisted out of a loop.
24202 // Only do this pre-AVX since vpcmp* is no longer destructive.
24203 if (Subtarget.hasAVX())
24205 SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
24206 if (!ULEOp1)
24207 return SDValue();
24208 Op1 = ULEOp1;
24209 break;
24210 }
24211 case ISD::SETUGT: {
24212 // If the comparison is against a constant, we can turn this into a setuge.
24213 // This is beneficial because materializing a constant 0 for the PCMPEQ is
24214 // probably cheaper than XOR+PCMPGT using 2 different vector constants:
24215 // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
24216 SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
24217 if (!UGEOp1)
24218 return SDValue();
24219 Op1 = Op0;
24220 Op0 = UGEOp1;
24221 break;
24222 }
24223 // Psubus is better than flip-sign because it requires no inversion.
24224 case ISD::SETUGE:
24225 std::swap(Op0, Op1);
24226 break;
24227 case ISD::SETULE:
24228 break;
24229 }
24231 SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
24232 return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
24233 DAG.getConstant(0, dl, VT));
24234 }
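// E.g. "x u< 42" on v16i8 first becomes "x u<= 41", then (sketch):
//   psubusb C41, %xmm0     # usubsat(x, 41) is zero iff x <= 41
//   pcmpeqb %xmm1, %xmm0   # compare the saturated difference with zero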
24236 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
24237 SelectionDAG &DAG) {
24238 bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
24239 Op.getOpcode() == ISD::STRICT_FSETCCS;
24240 SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
24241 SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
24242 SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
24243 MVT VT = Op->getSimpleValueType(0);
24244 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
24245 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
24246 SDLoc dl(Op);
24248 if (isFP) {
24249 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
24250 assert(EltVT == MVT::f16 || EltVT == MVT::f32 || EltVT == MVT::f64);
24251 if (isSoftFP16(EltVT, Subtarget))
24252 return SDValue();
24254 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
24255 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
24257 // If we have a strict compare with a vXi1 result and the input is 128/256
24258 // bits we can't use a masked compare unless we have VLX. If we use a wider
24259 // compare like we do for non-strict, we might trigger spurious exceptions
24260 // from the upper elements. Instead emit an AVX compare and convert to mask.
24261 unsigned Opc;
24262 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
24263 (!IsStrict || Subtarget.hasVLX() ||
24264 Op0.getSimpleValueType().is512BitVector())) {
24265 #ifndef NDEBUG
24266 unsigned Num = VT.getVectorNumElements();
24267 assert(Num <= 16 || (Num == 32 && EltVT == MVT::f16));
24268 #endif
24269 Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
24270 } else {
24271 Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
24272 // The SSE/AVX packed FP comparison nodes are defined with a
24273 // floating-point vector result that matches the operand type. This allows
24274 // them to work with an SSE1 target (integer vector types are not legal).
24275 VT = Op0.getSimpleValueType();
24276 }
24278 SDValue Cmp;
24279 bool IsAlwaysSignaling;
24280 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
24281 if (!Subtarget.hasAVX()) {
24282 // TODO: We could use following steps to handle a quiet compare with
24283 // signaling encodings.
24284 // 1. Get ordered masks from a quiet ISD::SETO
24285 // 2. Use the masks to mask potential unordered elements in operand A, B
24286 // 3. Get the compare results of masked A, B
24287 // 4. Calculate the final result using the mask and the result from 3
24288 // But currently, we just fall back to scalar operations.
24289 if (IsStrict && IsAlwaysSignaling && !IsSignaling)
24290 return SDValue();
24292 // Insert an extra signaling instruction to raise exception.
24293 if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
24294 SDValue SignalCmp = DAG.getNode(
24295 Opc, dl, {VT, MVT::Other},
24296 {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
24297 // FIXME: It seems we need to update the flags of all new strict nodes.
24298 // Otherwise, mayRaiseFPException in MI will return false due to
24299 // NoFPExcept = false by default. However, I didn't find it in other
24300 // places.
24301 SignalCmp->setFlags(Op->getFlags());
24302 Chain = SignalCmp.getValue(1);
24303 }
24305 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
24306 // emit two comparisons and a logic op to tie them together.
24307 if (!cheapX86FSETCC_SSE(Cond)) {
24308 // LLVM predicate is SETUEQ or SETONE.
24309 unsigned CC0, CC1;
24310 unsigned CombineOpc;
24311 if (Cond == ISD::SETUEQ) {
24312 CC0 = 3; // UNORD
24313 CC1 = 0; // EQ
24314 CombineOpc = X86ISD::FOR;
24315 } else {
24316 assert(Cond == ISD::SETONE);
24317 CC0 = 7; // ORD
24318 CC1 = 4; // NEQ
24319 CombineOpc = X86ISD::FAND;
24320 }
24322 SDValue Cmp0, Cmp1;
24323 if (IsStrict) {
24324 Cmp0 = DAG.getNode(
24325 Opc, dl, {VT, MVT::Other},
24326 {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
24327 Cmp1 = DAG.getNode(
24328 Opc, dl, {VT, MVT::Other},
24329 {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
24330 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
24331 Cmp1.getValue(1));
24332 } else {
24333 Cmp0 = DAG.getNode(
24334 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
24335 Cmp1 = DAG.getNode(
24336 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
24337 }
24338 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
24339 } else {
24340 if (IsStrict) {
24341 Cmp = DAG.getNode(
24342 Opc, dl, {VT, MVT::Other},
24343 {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
24344 Chain = Cmp.getValue(1);
24345 } else
24346 Cmp = DAG.getNode(
24347 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
24348 }
24349 } else {
24350 // Handle all other FP comparisons here.
24351 if (IsStrict) {
24352 // Make a flip on already signaling CCs before setting bit 4 of AVX CC.
24353 SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
24354 Cmp = DAG.getNode(
24355 Opc, dl, {VT, MVT::Other},
24356 {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
24357 Chain = Cmp.getValue(1);
24358 } else
24359 Cmp = DAG.getNode(
24360 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
24361 }
24363 if (VT.getFixedSizeInBits() >
24364 Op.getSimpleValueType().getFixedSizeInBits()) {
24365 // We emitted a compare with an XMM/YMM result. Finish converting to a
24366 // mask register using a vptestm.
24367 EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
24368 Cmp = DAG.getBitcast(CastVT, Cmp);
24369 Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
24370 DAG.getConstant(0, dl, CastVT), ISD::SETNE);
24371 } else {
24372 // If this is SSE/AVX CMPP, bitcast the result back to integer to match
24373 // the result type of SETCC. The bitcast is expected to be optimized
24374 // away during combining/isel.
24375 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
24376 }
24378 if (IsStrict)
24379 return DAG.getMergeValues({Cmp, Chain}, dl);
24381 return Cmp;
24382 }
24384 assert(!IsStrict && "Strict SETCC only handles FP operands.");
24386 MVT VTOp0 = Op0.getSimpleValueType();
24388 assert(VTOp0 == Op1.getSimpleValueType() &&
24389 "Expected operands with same type!");
24390 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
24391 "Invalid number of packed elements for source and destination!");
24393 // The non-AVX512 code below works under the assumption that source and
24394 // destination types are the same.
24395 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
24396 "Value types for source and destination must be the same!");
24398 // The result is boolean, but operands are int/float
24399 if (VT.getVectorElementType() == MVT::i1) {
    // In the AVX-512 architecture setcc returns a mask with i1 elements, but
    // there is no compare instruction for i8 and i16 elements in KNL.
24402 assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
24403 "Unexpected operand type");
    return LowerIntVSETCC_AVX512(Op, DAG);
  }
24407 // Lower using XOP integer comparisons.
24408 if (VT.is128BitVector() && Subtarget.hasXOP()) {
24409 // Translate compare code to XOP PCOM compare mode.
24410 unsigned CmpMode = 0;
    switch (Cond) {
    default: llvm_unreachable("Unexpected SETCC condition");
    case ISD::SETULT:
    case ISD::SETLT: CmpMode = 0x00; break;
    case ISD::SETULE:
    case ISD::SETLE: CmpMode = 0x01; break;
    case ISD::SETUGT:
    case ISD::SETGT: CmpMode = 0x02; break;
    case ISD::SETUGE:
    case ISD::SETGE: CmpMode = 0x03; break;
    case ISD::SETEQ: CmpMode = 0x04; break;
    case ISD::SETNE: CmpMode = 0x05; break;
    }

    // Are we comparing unsigned or signed integers?
    unsigned Opc =
        ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
24429 return DAG.getNode(Opc, dl, VT, Op0, Op1,
                       DAG.getTargetConstant(CmpMode, dl, MVT::i8));
  }
24433 // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
24434 // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
24435 if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
24436 SDValue BC0 = peekThroughBitcasts(Op0);
    if (BC0.getOpcode() == ISD::AND) {
      APInt UndefElts;
      SmallVector<APInt, 64> EltBits;
24440 if (getTargetConstantBitsFromNode(BC0.getOperand(1),
24441 VT.getScalarSizeInBits(), UndefElts,
24442 EltBits, false, false)) {
        if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
          Cond = ISD::SETEQ;
          Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
        }
      }
    }
  }
24451 // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
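  // E.g. with i8 elements and C == 0x20: BW == 8, LOG2(C) == 5, so we emit
  // SRA(SHL(X, 2), 7): the SHL moves bit 5 into the sign bit and the SRA
  // broadcasts it, giving the same all-ones/all-zeros mask as the compare.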
24452 if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
24453 Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
24454 ConstantSDNode *C1 = isConstOrConstSplat(Op1);
24455 if (C1 && C1->getAPIntValue().isPowerOf2()) {
24456 unsigned BitWidth = VT.getScalarSizeInBits();
24457 unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
24459 SDValue Result = Op0.getOperand(0);
24460 Result = DAG.getNode(ISD::SHL, dl, VT, Result,
24461 DAG.getConstant(ShiftAmt, dl, VT));
24462 Result = DAG.getNode(ISD::SRA, dl, VT, Result,
                           DAG.getConstant(BitWidth - 1, dl, VT));
      return Result;
    }
  }
24468 // Break 256-bit integer vector compare into smaller ones.
24469 if (VT.is256BitVector() && !Subtarget.hasInt256())
24470 return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
24472 // Break 512-bit integer vector compare into smaller ones.
24473 // TODO: Try harder to use VPCMPx + VPMOV2x?
24474 if (VT.is512BitVector())
24475 return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
  // If we have a limit constant, try to form PCMPGT (signed cmp) to avoid
  // not-of-PCMPEQ:
24479 // X != INT_MIN --> X >s INT_MIN
24480 // X != INT_MAX --> X <s INT_MAX --> INT_MAX >s X
24481 // +X != 0 --> +X >s 0
  APInt ConstValue;
  if (Cond == ISD::SETNE &&
24484 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
    if (ConstValue.isMinSignedValue())
      Cond = ISD::SETGT;
    else if (ConstValue.isMaxSignedValue())
      Cond = ISD::SETLT;
    else if (ConstValue.isZero() && DAG.SignBitIsZero(Op0))
      Cond = ISD::SETGT;
  }
24493 // If both operands are known non-negative, then an unsigned compare is the
24494 // same as a signed compare and there's no need to flip signbits.
24495 // TODO: We could check for more general simplifications here since we're
24496 // computing known bits.
24497 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
24498 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
24500 // Special case: Use min/max operations for unsigned compares.
24501 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24502 if (ISD::isUnsignedIntSetCC(Cond) &&
24503 (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
24504 TLI.isOperationLegal(ISD::UMIN, VT)) {
24505 // If we have a constant operand, increment/decrement it and change the
24506 // condition to avoid an invert.
24507 if (Cond == ISD::SETUGT) {
24508 // X > C --> X >= (C+1) --> X == umax(X, C+1)
      if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
        Op1 = UGTOp1;
        Cond = ISD::SETUGE;
      }
    }
24514 if (Cond == ISD::SETULT) {
24515 // X < C --> X <= (C-1) --> X == umin(X, C-1)
      if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
        Op1 = ULTOp1;
        Cond = ISD::SETULE;
      }
    }
    bool Invert = false;
    unsigned Opc;
    switch (Cond) {
    default: llvm_unreachable("Unexpected condition code");
24525 case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
24526 case ISD::SETULE: Opc = ISD::UMIN; break;
24527 case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETUGE: Opc = ISD::UMAX; break;
    }
24531 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
24532 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
24534 // If the logical-not of the result is required, perform that now.
    if (Invert)
      Result = DAG.getNOT(dl, Result, VT);

    return Result;
  }
24541 // Try to use SUBUS and PCMPEQ.
  if (SDValue V =
          LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
    return V;
24547 // We are handling one of the integer comparisons here. Since SSE only has
24548 // GT and EQ comparisons for integer, swapping operands and multiple
24549 // operations may be required for some comparisons.
  unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
                                                            : X86ISD::PCMPGT;
24552 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
24553 Cond == ISD::SETGE || Cond == ISD::SETUGE;
24554 bool Invert = Cond == ISD::SETNE ||
24555 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
  if (Swap)
    std::swap(Op0, Op1);
24560 // Check that the operation in question is available (most are plain SSE2,
24561 // but PCMPGTQ and PCMPEQQ have different requirements).
24562 if (VT == MVT::v2i64) {
24563 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
24564 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
24566 // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
24567 // the odd elements over the even elements.
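      // E.g. for (setgt 0, X) on v2i64 only each element's sign bit matters,
      // and that bit lives in the odd (high) v4i32 lane. So do a v4i32 PCMPGT
      // against zero and copy each high lane over its low neighbour to
      // rebuild a full 64-bit mask.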
24568 if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
24569 Op0 = DAG.getConstant(0, dl, MVT::v4i32);
24570 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
24572 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
24573 static const int MaskHi[] = { 1, 1, 3, 3 };
24574 SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
        return DAG.getBitcast(VT, Result);
      }
24579 if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
24580 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
24581 Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
24583 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
24584 static const int MaskHi[] = { 1, 1, 3, 3 };
24585 SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
        return DAG.getBitcast(VT, Result);
      }
24590 // Since SSE has no unsigned integer comparisons, we need to flip the sign
24591 // bits of the inputs before performing those operations. The lower
24592 // compare is always unsigned.
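      // The usual trick: x <u y iff (x ^ SignMask) <s (y ^ SignMask); e.g. on
      // i8, 0x01 <u 0xFF becomes 0x81 (-127) <s 0x7F (127). PCMPGT is signed
      // and the low-half compare below must be unsigned, so bit 31 of each
      // element is always flipped; an unsigned i64 compare flips bit 63 too.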
24593 SDValue SB = DAG.getConstant(FlipSigns ? 0x8000000080000000ULL
                                             : 0x0000000080000000ULL,
                                   dl, MVT::v2i64);
24597 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
24598 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
24600 // Cast everything to the right type.
24601 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
24602 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
24604 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
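      // E.g. for 0x00000001FFFFFFFF >s 0x0000000200000000 the high halves
      // decide (1 > 2 fails, 1 == 2 fails), so the result is false regardless
      // of the low halves; the low compare only matters when the highs tie.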
24605 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
24606 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
24608 // Create masks for only the low parts/high parts of the 64 bit integers.
24609 static const int MaskHi[] = { 1, 1, 3, 3 };
24610 static const int MaskLo[] = { 0, 0, 2, 2 };
24611 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
24612 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
24613 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
24615 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
24616 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);
      return DAG.getBitcast(VT, Result);
    }
24624 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
24625 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
24626 // pcmpeqd + pshufd + pand.
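      // Two i64 elements are equal iff both of their i32 halves are equal:
      // compare as v4i32, swap neighbouring lanes with pshufd {1, 0, 3, 2},
      // and AND the two results so each half also requires its partner.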
24627 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
24629 // First cast everything to the right type.
24630 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
24631 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
24634 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
24636 // Make sure the lower and upper halves are both all-ones.
24637 static const int Mask[] = { 1, 0, 3, 2 };
24638 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
24639 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);
      return DAG.getBitcast(VT, Result);
    }
  }
24648 // Since SSE has no unsigned integer comparisons, we need to flip the sign
24649 // bits of the inputs before performing those operations.
  if (FlipSigns) {
    MVT EltVT = VT.getVectorElementType();
    SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
                                 VT);
24654 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
  }
24658 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
24660 // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
24667 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
24668 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
24669 const SDLoc &dl, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget,
                              SDValue &X86CC) {
24672 // Only support equality comparisons.
  if (CC != ISD::SETEQ && CC != ISD::SETNE)
    return SDValue();
24676 // Must be a bitcast from vXi1.
  if (Op0.getOpcode() != ISD::BITCAST)
    return SDValue();
24680 Op0 = Op0.getOperand(0);
24681 MVT VT = Op0.getSimpleValueType();
24682 if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
24683 !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
      !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
    return SDValue();
24687 X86::CondCode X86Cond;
24688 if (isNullConstant(Op1)) {
24689 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
24690 } else if (isAllOnesConstant(Op1)) {
24691 // C flag is set for all ones.
    X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
  } else
    return SDValue();
  // If the input is an AND, we can combine its operands into the KTEST.
24697 bool KTestable = false;
  if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
    KTestable = true;
  if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
    KTestable = true;
  if (!isNullConstant(Op1))
    KTestable = false;
24704 if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
24705 SDValue LHS = Op0.getOperand(0);
24706 SDValue RHS = Op0.getOperand(1);
24707 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
  }

  // If the input is an OR, we can combine its operands into the KORTEST.
  SDValue LHS = Op0;
  SDValue RHS = Op0;
  if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
    LHS = Op0.getOperand(0);
    RHS = Op0.getOperand(1);
  }
24719 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
  return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
}
24723 /// Emit flags for the given setcc condition and operands. Also returns the
24724 /// corresponding X86 condition code constant in X86CC.
24725 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
24726 ISD::CondCode CC, const SDLoc &dl,
                                             SelectionDAG &DAG,
                                             SDValue &X86CC) const {
24729 // Optimize to BT if possible.
24730 // Lower (X & (1 << N)) == 0 to BT(X, N).
24731 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
24732 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
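  // E.g. "(X & (1 << 5)) != 0" or "((X >> 5) & 1) != 0" becomes "BT X, 5":
  // BT copies bit 5 of X into CF, so the result is just SETB/SETAE (or the
  // corresponding JCC) with no AND or TEST needed.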
24733 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
24734 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
24735 X86::CondCode X86CondCode;
24736 if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CondCode)) {
      X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
      return BT;
    }
  }
24742 // Try to use PTEST/PMOVMSKB for a tree ORs equality compared with 0.
24743 // TODO: We could do AND tree with all 1s as well by using the C flag.
24744 if (isNullConstant(Op1) && (CC == ISD::SETEQ || CC == ISD::SETNE))
    if (SDValue V =
            MatchVectorAllZeroTest(Op0, CC, dl, Subtarget, DAG, X86CC))
      return V;
24749 // Try to lower using KORTEST or KTEST.
  if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
    return Test;
  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
24755 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
24756 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
24757 // If the input is a setcc, then reuse the input setcc or use a new one with
24758 // the inverted condition.
24759 if (Op0.getOpcode() == X86ISD::SETCC) {
24760 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
      X86CC = Op0.getOperand(0);
      if (Invert) {
        X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
        CCode = X86::GetOppositeBranchCondition(CCode);
        X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
      }
      return Op0.getOperand(1);
    }
  }
  // Try to use the carry flag from the add in place of a separate CMP for:
24774 // (seteq (add X, -1), -1). Similar for setne.
24775 if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
24776 Op0.getOperand(1) == Op1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
24777 if (isProfitableToUseFlagOp(Op0)) {
24778 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
24780 SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
24781 Op0.getOperand(1));
24782 DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
24783 X86::CondCode CCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
24784 X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
      return SDValue(New.getNode(), 1);
    }
  }
24789 X86::CondCode CondCode =
24790 TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
24791 assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");
24793 SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
  X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
  return EFLAGS;
}
24798 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
24800 bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
24801 Op.getOpcode() == ISD::STRICT_FSETCCS;
24802 MVT VT = Op->getSimpleValueType(0);
24804 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
24806 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
24807 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
24808 SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
24809 SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
  SDLoc dl(Op);
  ISD::CondCode CC =
      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
  if (isSoftFP16(Op0.getValueType()))
    return SDValue();
24817 // Handle f128 first, since one possible outcome is a normal integer
24818 // comparison which gets handled by emitFlagsForSetcc.
24819 if (Op0.getValueType() == MVT::f128) {
24820 softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
24821 Op.getOpcode() == ISD::STRICT_FSETCCS);
24823 // If softenSetCCOperands returned a scalar, use it.
24824 if (!Op1.getNode()) {
24825 assert(Op0.getValueType() == Op.getValueType() &&
24826 "Unexpected setcc expansion!");
      if (IsStrict)
        return DAG.getMergeValues({Op0, Chain}, dl);
      return Op0;
    }
  }
24833 if (Op0.getSimpleValueType().isInteger()) {
24834 // Attempt to canonicalize SGT/UGT -> SGE/UGE compares with constant which
24835 // reduces the number of EFLAGs bit reads (the GE conditions don't read ZF),
24836 // this may translate to less uops depending on uarch implementation. The
24837 // equivalent for SLE/ULE -> SLT/ULT isn't likely to happen as we already
24838 // canonicalize to that CondCode.
24839 // NOTE: Only do this if incrementing the constant doesn't increase the bit
24840 // encoding size - so it must either already be a i8 or i32 immediate, or it
24841 // shrinks down to that. We don't do this for any i64's to avoid additional
24842 // constant materializations.
24843 // TODO: Can we move this to TranslateX86CC to handle jumps/branches too?
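    // E.g. "setgt X, 9" becomes "setge X, 10": same "cmp" immediate size, but
    // COND_GE reads only SF and OF, whereas COND_G also reads ZF.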
24844 if (auto *Op1C = dyn_cast<ConstantSDNode>(Op1)) {
24845 const APInt &Op1Val = Op1C->getAPIntValue();
24846 if (!Op1Val.isZero()) {
24847 // Ensure the constant+1 doesn't overflow.
24848 if ((CC == ISD::CondCode::SETGT && !Op1Val.isMaxSignedValue()) ||
24849 (CC == ISD::CondCode::SETUGT && !Op1Val.isMaxValue())) {
24850 APInt Op1ValPlusOne = Op1Val + 1;
24851 if (Op1ValPlusOne.isSignedIntN(32) &&
24852 (!Op1Val.isSignedIntN(8) || Op1ValPlusOne.isSignedIntN(8))) {
24853 Op1 = DAG.getConstant(Op1ValPlusOne, dl, Op0.getValueType());
          CC = CC == ISD::CondCode::SETGT ? ISD::CondCode::SETGE
                                          : ISD::CondCode::SETUGE;
        }
      }
    }
  }
    SDValue X86CC;
    SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
24863 SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
    return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
  }
24867 // Handle floating point.
24868 X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
  if (CondCode == X86::COND_INVALID)
    return SDValue();
  SDValue EFLAGS;
  if (IsStrict) {
    bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
    EFLAGS =
24876 DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
24877 dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
24878 Chain = EFLAGS.getValue(1);
  } else {
    EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
  }
24883 SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
24884 SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
  return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
}
24888 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
24889 SDValue LHS = Op.getOperand(0);
24890 SDValue RHS = Op.getOperand(1);
24891 SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);
24895 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
24896 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
24898 // Recreate the carry if needed.
24899 EVT CarryVT = Carry.getValueType();
24900 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
24901 Carry, DAG.getAllOnesConstant(DL, CarryVT));
24903 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
24904 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
  return getSETCC(CC, Cmp.getValue(1), DL, DAG);
}
24908 // This function returns three things: the arithmetic computation itself
24909 // (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
24910 // flag and the condition code define the case in which the arithmetic
24911 // computation overflows.
24912 static std::pair<SDValue, SDValue>
24913 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
24914 assert(Op.getResNo() == 0 && "Unexpected result number!");
24915 SDValue Value, Overflow;
24916 SDValue LHS = Op.getOperand(0);
24917 SDValue RHS = Op.getOperand(1);
  unsigned BaseOp = 0;
  SDLoc DL(Op);
24920 switch (Op.getOpcode()) {
24921 default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
    break;
  case ISD::SSUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO:
    BaseOp = X86ISD::UMUL;
    Cond = X86::COND_O;
    break;
  }
24949 // Also sets EFLAGS.
24950 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
24951 Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
24952 Overflow = Value.getValue(1);
  return std::make_pair(Value, Overflow);
}
24958 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
24959 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
24960 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
24961 // looks for this combo and may remove the "setcc" instruction if the "setcc"
24962 // has only one use.
  SDLoc DL(Op);
  X86::CondCode Cond;
24965 SDValue Value, Overflow;
24966 std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
24968 SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
24969 assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
}
24973 /// Return true if opcode is a X86 logical comparison.
24974 static bool isX86LogicalCmp(SDValue Op) {
24975 unsigned Opc = Op.getOpcode();
24976 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::FCMP)
    return true;
24979 if (Op.getResNo() == 1 &&
24980 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
24981 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
       Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
    return true;

  return false;
}
24988 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;
24992 SDValue VOp0 = V.getOperand(0);
24993 unsigned InBits = VOp0.getValueSizeInBits();
24994 unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}
24998 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
24999 bool AddTest = true;
25000 SDValue Cond = Op.getOperand(0);
25001 SDValue Op1 = Op.getOperand(1);
25002 SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  MVT VT = Op1.getSimpleValueType();
  SDValue CC;
25007 if (isSoftFP16(VT)) {
25008 MVT NVT = VT.changeTypeToInteger();
25009 return DAG.getBitcast(VT, DAG.getNode(ISD::SELECT, DL, NVT, Cond,
25010 DAG.getBitcast(NVT, Op1),
                                          DAG.getBitcast(NVT, Op2)));
  }
25014 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
25015 // are available or VBLENDV if AVX is available.
25016 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
25017 if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
25018 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
25019 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
25020 bool IsAlwaysSignaling;
    unsigned SSECC =
        translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
25023 CondOp0, CondOp1, IsAlwaysSignaling);
25025 if (Subtarget.hasAVX512()) {
      SDValue Cmp =
          DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
25028 DAG.getTargetConstant(SSECC, DL, MVT::i8));
25029 assert(!VT.isVector() && "Not a scalar type?");
      return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
    }
25033 if (SSECC < 8 || Subtarget.hasAVX()) {
25034 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
25035 DAG.getTargetConstant(SSECC, DL, MVT::i8));
25037 // If we have AVX, we can use a variable vector select (VBLENDV) instead
25038 // of 3 logic instructions for size savings and potentially speed.
25039 // Unfortunately, there is no scalar form of VBLENDV.
25041 // If either operand is a +0.0 constant, don't try this. We can expect to
25042 // optimize away at least one of the logic instructions later in that
25043 // case, so that sequence would be faster than a variable blend.
25045 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
25046 // uses XMM0 as the selection register. That may need just as many
      // instructions as the AND/ANDN/OR sequence due to register moves, so
      // don't bother.
25049 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
25050 !isNullFPConstant(Op2)) {
25051 // Convert to vectors, do a VSELECT, and convert back to scalar.
25052 // All of the conversions should be optimized away.
25053 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
25054 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
25055 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
25056 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
25058 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
25059 VCmp = DAG.getBitcast(VCmpVT, VCmp);
25061 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
25063 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                             VSel, DAG.getIntPtrConstant(0, DL));
        }
25066 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
25067 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
    }
  }
25072 // AVX512 fallback is to lower selects of scalar floats to masked moves.
25073 if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
25074 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
    return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
  }
25078 if (Cond.getOpcode() == ISD::SETCC &&
25079 !isSoftFP16(Cond.getOperand(0).getSimpleValueType())) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
      Cond = NewCond;
25082 // If the condition was updated, it's possible that the operands of the
25083 // select were also updated (for example, EmitTest has a RAUW). Refresh
25084 // the local references to the select operands in case they got stale.
25085 Op1 = Op.getOperand(1);
      Op2 = Op.getOperand(2);
    }
  }
25090 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
25091 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
25092 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
25093 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
25094 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
25095 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
25096 // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
25097 // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
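  // E.g. for i32 "(select (x < 0), x, 0)": "x >> 31" (arithmetic) is all ones
  // exactly when x is negative, so "(x >> 31) & x" yields x for negative x
  // and 0 otherwise, without a branch or cmov.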
25098 if (Cond.getOpcode() == X86ISD::SETCC &&
25099 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
25100 isNullConstant(Cond.getOperand(1).getOperand(1))) {
25101 SDValue Cmp = Cond.getOperand(1);
25102 SDValue CmpOp0 = Cmp.getOperand(0);
25103 unsigned CondCode = Cond.getConstantOperandVal(0);
25105 // Special handling for __builtin_ffs(X) - 1 pattern which looks like
25106 // (select (seteq X, 0), -1, (cttz_zero_undef X)). Disable the special
    // handling to keep the CMP with 0. This should be removed by
25108 // optimizeCompareInst by using the flags from the BSR/TZCNT used for the
25109 // cttz_zero_undef.
25110 auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
25111 return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
              Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
    };
25114 if (Subtarget.canUseCMOV() && (VT == MVT::i32 || VT == MVT::i64) &&
25115 ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
         (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
      // Keep Cmp.
25118 } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
25119 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
25120 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
25121 SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
25123 // 'X - 1' sets the carry flag if X == 0.
25124 // '0 - X' sets the carry flag if X != 0.
25125 // Convert the carry flag to a -1/0 mask with sbb:
25126 // select (X != 0), -1, Y --> 0 - X; or (sbb), Y
25127 // select (X == 0), Y, -1 --> 0 - X; or (sbb), Y
25128 // select (X != 0), Y, -1 --> X - 1; or (sbb), Y
25129 // select (X == 0), -1, Y --> X - 1; or (sbb), Y
      SDValue Sub;
      if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
25132 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
25133 Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
      } else {
        SDValue One = DAG.getConstant(1, DL, CmpOp0.getValueType());
        Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, CmpOp0, One);
      }
25138 SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                                Sub.getValue(1));
25141 return DAG.getNode(ISD::OR, DL, VT, SBB, Y);
25142 } else if (!Subtarget.canUseCMOV() && CondCode == X86::COND_E &&
25143 Cmp.getOperand(0).getOpcode() == ISD::AND &&
25144 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
25145 SDValue Src1, Src2;
      // true if Op2 is XOR or OR operator and one of its operands
      // is equal to Op1
25148 // ( a , a op b) || ( b , a op b)
25149 auto isOrXorPattern = [&]() {
25150 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
25151 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
          Src1 =
              Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
          Src2 = Op1;
          return true;
        }
        return false;
      };
25160 if (isOrXorPattern()) {
        SDValue Neg;
        unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
        // We need a mask of all zeros or all ones with the same size as the
        // other operands of the select.
25165 if (CmpSz > VT.getSizeInBits())
25166 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
25167 else if (CmpSz < VT.getSizeInBits())
25168 Neg = DAG.getNode(ISD::AND, DL, VT,
25169 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
                            DAG.getConstant(1, DL, VT));
        else
          Neg = CmpOp0;
25173 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
25174 Neg); // -(and (x, 0x1))
25175 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
          return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
        }
25178 } else if ((VT == MVT::i32 || VT == MVT::i64) && isNullConstant(Op2) &&
25179 Cmp.getNode()->hasOneUse() && (CmpOp0 == Op1) &&
25180 ((CondCode == X86::COND_S) || // smin(x, 0)
25181 (CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
25182 // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
25184 // If the comparison is testing for a positive value, we have to invert
25185 // the sign bit mask, so only do that transform if the target has a
25186 // bitwise 'and not' instruction (the invert is free).
25187 // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
25188 unsigned ShCt = VT.getSizeInBits() - 1;
25189 SDValue ShiftAmt = DAG.getConstant(ShCt, DL, VT);
25190 SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, Op1, ShiftAmt);
25191 if (CondCode == X86::COND_G)
25192 Shift = DAG.getNOT(DL, Shift, VT);
        return DAG.getNode(ISD::AND, DL, VT, Shift, Op1);
      }
    }
25197 // Look past (and (setcc_carry (cmp ...)), 1).
25198 if (Cond.getOpcode() == ISD::AND &&
25199 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
25200 isOneConstant(Cond.getOperand(1)))
25201 Cond = Cond.getOperand(0);
25203 // If condition flag is set by a X86ISD::CMP, then use it as the condition
25204 // setting operand in place of the X86ISD::SETCC.
25205 unsigned CondOpcode = Cond.getOpcode();
25206 if (CondOpcode == X86ISD::SETCC ||
25207 CondOpcode == X86ISD::SETCC_CARRY) {
25208 CC = Cond.getOperand(0);
25210 SDValue Cmp = Cond.getOperand(1);
25211 bool IllegalFPCMov = false;
25212 if (VT.isFloatingPoint() && !VT.isVector() &&
25213 !isScalarFPTypeInSSEReg(VT) && Subtarget.canUseCMOV()) // FPStack?
25214 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
25216 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Cmp.getOpcode() == X86ISD::BT) { // FIXME
      Cond = Cmp;
      AddTest = false;
    }
25221 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
25222 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
25223 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
    SDValue Value;
    X86::CondCode X86Cond;
25226 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
    CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
    AddTest = false;
  }

  if (AddTest) {
25233 // Look past the truncate if the high bits are known zero.
25234 if (isTruncWithZeroHighBitsInput(Cond, DAG))
25235 Cond = Cond.getOperand(0);
    // We know the result of AND is compared against zero. Try to match
    // it to BT.
25239 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
25240 X86::CondCode X86CondCode;
25241 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, X86CondCode)) {
        CC = DAG.getTargetConstant(X86CondCode, DL, MVT::i8);
        Cond = BT;
        AddTest = false;
      }
    }
  }

  if (AddTest) {
25250 CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
  }
25254 // a < b ? -1 : 0 -> RES = ~setcc_carry
25255 // a < b ? 0 : -1 -> RES = setcc_carry
25256 // a >= b ? -1 : 0 -> RES = setcc_carry
25257 // a >= b ? 0 : -1 -> RES = ~setcc_carry
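  // (SETCC_CARRY is essentially "sbb reg, reg", broadcasting CF to every bit:
  // all ones when the SUB borrowed, i.e. a <u b, and all zeros otherwise; the
  // four forms above then differ only by a trailing NOT.)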
25258 if (Cond.getOpcode() == X86ISD::SUB) {
25259 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
25261 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
25262 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
      (isNullConstant(Op1) || isNullConstant(Op2))) {
    SDValue Res =
25265 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
25266 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
25267 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
      return DAG.getNOT(DL, Res, Res.getValueType());
    return Res;
  }
25273 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
25274 // widen the cmov and push the truncate through. This avoids introducing a new
25275 // branch during isel and doesn't add any extensions.
25276 if (Op.getValueType() == MVT::i8 &&
25277 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
25278 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
25279 if (T1.getValueType() == T2.getValueType() &&
25280 // Exclude CopyFromReg to avoid partial register stalls.
25281 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
      SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
                                 CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }
25288 // Or finally, promote i8 cmovs if we have CMOV,
25289 // or i16 cmovs if it won't prevent folding a load.
25290 // FIXME: we should not limit promotion of i8 case to only when the CMOV is
25291 // legal, but EmitLoweredSelect() can not deal with these extensions
25292 // being inserted between two CMOV's. (in i16 case too TBN)
25293 // https://bugs.llvm.org/show_bug.cgi?id=40974
25294 if ((Op.getValueType() == MVT::i8 && Subtarget.canUseCMOV()) ||
25295 (Op.getValueType() == MVT::i16 && !X86::mayFoldLoad(Op1, Subtarget) &&
25296 !X86::mayFoldLoad(Op2, Subtarget))) {
25297 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
25298 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
25299 SDValue Ops[] = { Op2, Op1, CC, Cond };
25300 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
25301 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
25304 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
25305 // condition is true.
25306 SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
}
25310 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
25311 const X86Subtarget &Subtarget,
25312 SelectionDAG &DAG) {
25313 MVT VT = Op->getSimpleValueType(0);
25314 SDValue In = Op->getOperand(0);
25315 MVT InVT = In.getSimpleValueType();
25316 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
  MVT VTElt = VT.getVectorElementType();
  SDLoc dl(Op);
25320 unsigned NumElts = VT.getVectorNumElements();
25322 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
  MVT ExtVT = VT;
  if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
25325 // If v16i32 is to be avoided, we'll need to split and concatenate.
25326 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
25327 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
  }
25332 // Widen to 512-bits if VLX is not supported.
25333 MVT WideVT = ExtVT;
25334 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
25335 NumElts *= 512 / ExtVT.getSizeInBits();
25336 InVT = MVT::getVectorVT(MVT::i1, NumElts);
25337 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
25338 In, DAG.getIntPtrConstant(0, dl));
    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
  }

  SDValue V;
25343 MVT WideEltVT = WideVT.getVectorElementType();
25344 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
25345 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
    V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
  } else {
25348 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
25349 SDValue Zero = DAG.getConstant(0, dl, WideVT);
    V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
  }
25353 // Truncate if we had to extend i16/i8 above.
  if (VT != ExtVT) {
    WideVT = MVT::getVectorVT(VTElt, NumElts);
    V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
  }
25359 // Extract back to 128/256-bit if we widened.
  if (WideVT != VT)
    V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
                    DAG.getIntPtrConstant(0, dl));

  return V;
}
25367 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
25368 SelectionDAG &DAG) {
25369 SDValue In = Op->getOperand(0);
25370 MVT InVT = In.getSimpleValueType();
25372 if (InVT.getVectorElementType() == MVT::i1)
25373 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
25375 assert(Subtarget.hasAVX() && "Expected AVX support");
  return LowerAVXExtend(Op, DAG, Subtarget);
}
25379 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
25380 // For sign extend this needs to handle all vector sizes and SSE4.1 and
25381 // non-SSE4.1 targets. For zero extend this should only handle inputs of
25382 // MVT::v64i8 when BWI is not supported, but AVX512 is.
25383 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
25384 const X86Subtarget &Subtarget,
25385 SelectionDAG &DAG) {
25386 SDValue In = Op->getOperand(0);
25387 MVT VT = Op->getSimpleValueType(0);
25388 MVT InVT = In.getSimpleValueType();
25390 MVT SVT = VT.getVectorElementType();
25391 MVT InSVT = InVT.getVectorElementType();
25392 assert(SVT.getFixedSizeInBits() > InSVT.getFixedSizeInBits());
  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
    return SDValue();
  if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
    return SDValue();
25398 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
25399 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
      !(VT.is512BitVector() && Subtarget.hasAVX512()))
    return SDValue();

  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();
25405 unsigned NumElts = VT.getVectorNumElements();
25407 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
25408 // For 512-bit vectors, we need 128-bits or 256-bits.
25409 if (InVT.getSizeInBits() > 128) {
25410 // Input needs to be at least the same number of elements as output, and
25411 // at least 128-bits.
25412 int InSize = InSVT.getSizeInBits() * NumElts;
25413 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
    InVT = In.getSimpleValueType();
  }
25417 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit results,
25418 // so are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions still
25419 // need to be handled here for 256/512-bit results.
25420 if (Subtarget.hasInt256()) {
25421 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
25423 if (InVT.getVectorNumElements() != NumElts)
25424 return DAG.getNode(Op.getOpcode(), dl, VT, In);
25426 // FIXME: Apparently we create inreg operations that could be regular
    // extends.
    unsigned ExtOpc =
        Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
25430 : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, dl, VT, In);
  }
25434 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
25435 if (Subtarget.hasAVX()) {
25436 assert(VT.is256BitVector() && "256-bit vector expected");
25437 MVT HalfVT = VT.getHalfNumVectorElementsVT();
25438 int HalfNumElts = HalfVT.getVectorNumElements();
25440 unsigned NumSrcElts = InVT.getVectorNumElements();
25441 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
25442 for (int i = 0; i != HalfNumElts; ++i)
25443 HiMask[i] = HalfNumElts + i;
25445 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
25446 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
25447 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
  }
25451 // We should only get here for sign extend.
25452 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
25453 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
25455 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
  SDValue Curr = In;
  SDValue SignExt = Curr;
25459 // As SRAI is only available on i16/i32 types, we expand only up to i32
25460 // and handle i64 separately.
25461 if (InVT != MVT::v4i32) {
25462 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
25464 unsigned DestWidth = DestVT.getScalarSizeInBits();
25465 unsigned Scale = DestWidth / InSVT.getSizeInBits();
25467 unsigned InNumElts = InVT.getVectorNumElements();
25468 unsigned DestElts = DestVT.getVectorNumElements();
25470 // Build a shuffle mask that takes each input element and places it in the
25471 // MSBs of the new element size.
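    // E.g. for v8i16 -> v4i32 (Scale == 2) the mask is <u,0, u,1, u,2, u,3>,
    // placing each i16 in the high half of a lane so the arithmetic shift
    // below can sign-fill the low half.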
25472 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
25473 for (unsigned i = 0; i != DestElts; ++i)
25474 Mask[i * Scale + (Scale - 1)] = i;
25476 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
25477 Curr = DAG.getBitcast(DestVT, Curr);
25479 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
25480 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
                          DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
  }
25484 if (VT == MVT::v2i64) {
25485 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
25486 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
25487 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
25488 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
    SignExt = DAG.getBitcast(VT, SignExt);
  }

  return SignExt;
}
25495 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
25496 SelectionDAG &DAG) {
25497 MVT VT = Op->getSimpleValueType(0);
25498 SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);
25502 if (InVT.getVectorElementType() == MVT::i1)
25503 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
25505 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
25506 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
25507 "Expected same number of elements");
25508 assert((VT.getVectorElementType() == MVT::i16 ||
25509 VT.getVectorElementType() == MVT::i32 ||
25510 VT.getVectorElementType() == MVT::i64) &&
25511 "Unexpected element type");
25512 assert((InVT.getVectorElementType() == MVT::i8 ||
25513 InVT.getVectorElementType() == MVT::i16 ||
25514 InVT.getVectorElementType() == MVT::i32) &&
25515 "Unexpected element type");
25517 if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
25518 assert(InVT == MVT::v32i8 && "Unexpected VT!");
    return splitVectorIntUnary(Op, DAG);
  }
  if (Subtarget.hasInt256())
    return Op;
25525 // Optimize vectors in AVX mode
  // Sign extend v8i16 to v8i32 and
  //             v4i32 to v4i64
  //
25529 // Divide input vector into two parts
25530 // for v4i32 the high shuffle mask will be {2, 3, -1, -1}
25531 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
25532 // concat the vectors to original VT
25533 MVT HalfVT = VT.getHalfNumVectorElementsVT();
25534 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
25536 unsigned NumElems = InVT.getVectorNumElements();
25537 SmallVector<int,8> ShufMask(NumElems, -1);
25538 for (unsigned i = 0; i != NumElems/2; ++i)
25539 ShufMask[i] = i + NumElems/2;
25541 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
25542 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
25547 /// Change a vector store into a pair of half-size vector stores.
25548 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
25549 SDValue StoredVal = Store->getValue();
25550 assert((StoredVal.getValueType().is256BitVector() ||
25551 StoredVal.getValueType().is512BitVector()) &&
25552 "Expecting 256/512-bit op");
25554 // Splitting volatile memory ops is not allowed unless the operation was not
25555 // legal to begin with. Assume the input store is legal (this transform is
25556 // only used for targets with AVX). Note: It is possible that we have an
25557 // illegal type like v2i128, and so we could allow splitting a volatile store
25558 // in that case if that is important.
  if (!Store->isSimple())
    return SDValue();

  SDLoc DL(Store);
25563 SDValue Value0, Value1;
25564 std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
25565 unsigned HalfOffset = Value0.getValueType().getStoreSize();
25566 SDValue Ptr0 = Store->getBasePtr();
  SDValue Ptr1 =
      DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(HalfOffset), DL);
  SDValue Ch0 =
      DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
25571 Store->getOriginalAlign(),
25572 Store->getMemOperand()->getFlags());
25573 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
25574 Store->getPointerInfo().getWithOffset(HalfOffset),
25575 Store->getOriginalAlign(),
25576 Store->getMemOperand()->getFlags());
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
}
/// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
/// type.
25582 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
25583 SelectionDAG &DAG) {
25584 SDValue StoredVal = Store->getValue();
25585 assert(StoreVT.is128BitVector() &&
25586 StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
25587 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
25589 // Splitting volatile memory ops is not allowed unless the operation was not
25590 // legal to begin with. We are assuming the input op is legal (this transform
25591 // is only used for targets with AVX).
  if (!Store->isSimple())
    return SDValue();
25595 MVT StoreSVT = StoreVT.getScalarType();
25596 unsigned NumElems = StoreVT.getVectorNumElements();
  unsigned ScalarSize = StoreSVT.getStoreSize();
  SDLoc DL(Store);
25600 SmallVector<SDValue, 4> Stores;
25601 for (unsigned i = 0; i != NumElems; ++i) {
25602 unsigned Offset = i * ScalarSize;
25603 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(),
25604 TypeSize::Fixed(Offset), DL);
25605 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
25606 DAG.getIntPtrConstant(i, DL));
25607 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
25608 Store->getPointerInfo().getWithOffset(Offset),
25609 Store->getOriginalAlign(),
25610 Store->getMemOperand()->getFlags());
25611 Stores.push_back(Ch);
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
}
25616 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
25617 SelectionDAG &DAG) {
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
  SDLoc dl(St);
25620 SDValue StoredVal = St->getValue();
25622 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
25623 if (StoredVal.getValueType().isVector() &&
25624 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
25625 unsigned NumElts = StoredVal.getValueType().getVectorNumElements();
25626 assert(NumElts <= 8 && "Unexpected VT");
25627 assert(!St->isTruncatingStore() && "Expected non-truncating store");
25628 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
25629 "Expected AVX512F without AVX512DQI");
25631 // We must pad with zeros to ensure we store zeroes to any unused bits.
25632 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
25633 DAG.getUNDEF(MVT::v16i1), StoredVal,
25634 DAG.getIntPtrConstant(0, dl));
25635 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
25636 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
25637 // Make sure we store zeros in the extra bits.
    if (NumElts < 8)
      StoredVal = DAG.getZeroExtendInReg(
25640 StoredVal, dl, EVT::getIntegerVT(*DAG.getContext(), NumElts));
25642 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
25643 St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }
  if (St->isTruncatingStore())
    return SDValue();
25650 // If this is a 256-bit store of concatenated ops, we are better off splitting
25651 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
25652 // and each half can execute independently. Some cores would split the op into
25653 // halves anyway, so the concat (vinsertf128) is purely an extra op.
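  // E.g. a v8f32 store whose value is concat(v4f32 A, v4f32 B) becomes a
  // 128-bit store of A at offset 0 and of B at offset 16, and the
  // vinsertf128 that built the concat is no longer needed.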
25654 MVT StoreVT = StoredVal.getSimpleValueType();
25655 if (StoreVT.is256BitVector() ||
25656 ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
25657 !Subtarget.hasBWI())) {
25658 SmallVector<SDValue, 4> CatOps;
25659 if (StoredVal.hasOneUse() &&
25660 collectConcatOps(StoredVal.getNode(), CatOps, DAG))
      return splitVectorStore(St, DAG);
    return SDValue();
  }
  if (StoreVT.is32BitVector())
    return SDValue();
25668 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25669 assert(StoreVT.is64BitVector() && "Unexpected VT");
25670 assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
25671 TargetLowering::TypeWidenVector &&
25672 "Unexpected type action!");
25674 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
25675 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
25676 DAG.getUNDEF(StoreVT));
25678 if (Subtarget.hasSSE2()) {
25679 // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
25681 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
25682 MVT CastVT = MVT::getVectorVT(StVT, 2);
25683 StoredVal = DAG.getBitcast(CastVT, StoredVal);
25684 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
25685 DAG.getIntPtrConstant(0, dl));
25687 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
25688 St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }
25691 assert(Subtarget.hasSSE1() && "Expected SSE");
25692 SDVTList Tys = DAG.getVTList(MVT::Other);
25693 SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
25694 return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
                                 St->getMemOperand());
}
25698 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
25699 // may emit an illegal shuffle but the expansion is still better than scalar
25700 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
// we'll emit a shuffle and an arithmetic shift.
25702 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
25703 // TODO: It is possible to support ZExt by zeroing the undef values during
25704 // the shuffle phase or after the shuffle.
25705 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
25706 SelectionDAG &DAG) {
25707 MVT RegVT = Op.getSimpleValueType();
25708 assert(RegVT.isVector() && "We only custom lower vector loads.");
25709 assert(RegVT.isInteger() &&
25710 "We only custom lower integer vector loads.");
  LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
  SDLoc dl(Ld);
25715 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
25716 if (RegVT.getVectorElementType() == MVT::i1) {
25717 assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
25718 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
25719 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
25720 "Expected AVX512F without AVX512DQI");
25722 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
25723 Ld->getPointerInfo(), Ld->getOriginalAlign(),
25724 Ld->getMemOperand()->getFlags());
25726 // Replace chain users with the new chain.
25727 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
25729 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
25730 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
25731 DAG.getBitcast(MVT::v16i1, Val),
25732 DAG.getIntPtrConstant(0, dl));
    return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
  }

  return SDValue();
}
25739 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
25740 /// each of which has no other use apart from the AND / OR.
25741 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
25742 Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
25745 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
25746 Op.getOperand(0).hasOneUse() &&
25747 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(1).hasOneUse());
}
25751 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
25752 SDValue Chain = Op.getOperand(0);
25753 SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc dl(Op);
25757 // Bail out when we don't have native compare instructions.
25758 if (Cond.getOpcode() == ISD::SETCC &&
25759 Cond.getOperand(0).getValueType() != MVT::f128 &&
25760 !isSoftFP16(Cond.getOperand(0).getValueType())) {
25761 SDValue LHS = Cond.getOperand(0);
25762 SDValue RHS = Cond.getOperand(1);
25763 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
25765 // Special case for
25766 // setcc([su]{add,sub,mul}o == 0)
25767 // setcc([su]{add,sub,mul}o != 1)
25768 if (ISD::isOverflowIntrOpRes(LHS) &&
25769 (CC == ISD::SETEQ || CC == ISD::SETNE) &&
25770 (isNullConstant(RHS) || isOneConstant(RHS))) {
25771 SDValue Value, Overflow;
25772 X86::CondCode X86Cond;
25773 std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);
25775 if ((CC == ISD::SETEQ) == isNullConstant(RHS))
25776 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
25778 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
      return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                         Overflow);
    }
25783 if (LHS.getSimpleValueType().isInteger()) {
      SDValue CCVal;
      SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
      return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                         EFLAGS);
    }
25790 if (CC == ISD::SETOEQ) {
25791 // For FCMP_OEQ, we can emit
25792 // two branches instead of an explicit AND instruction with a
25793 // separate test. However, we only do this if this block doesn't
25794 // have a fall-through edge, because this requires an explicit
25795 // jmp when the condition is false.
25796 if (Op.getNode()->hasOneUse()) {
25797 SDNode *User = *Op.getNode()->use_begin();
25798 // Look for an unconditional branch following this conditional branch.
25799 // We need this because we need to reverse the successors in order
25800 // to implement FCMP_OEQ.
25801 if (User->getOpcode() == ISD::BR) {
25802 SDValue FalseBB = User->getOperand(1);
          SDNode *NewBR =
              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
          assert(NewBR == User);
          (void)NewBR;
          Dest = FalseBB;

          SDValue Cmp =
25810 DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
25811 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
                              CCVal, Cmp);
25814 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
          return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                             Cmp);
        }
      }
25819 } else if (CC == ISD::SETUNE) {
25820 // For FCMP_UNE, we can emit
      // two branches instead of an explicit OR instruction with a
      // separate test.
25823 SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
25824 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
      Chain =
          DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
25827 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
      return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                         Cmp);
    }
25831 X86::CondCode X86Cond =
25832 TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
25833 SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
25834 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                       Cmp);
  }
25840 if (ISD::isOverflowIntrOpRes(Cond)) {
25841 SDValue Value, Overflow;
25842 X86::CondCode X86Cond;
25843 std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
25845 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                       Overflow);
  }
25850 // Look past the truncate if the high bits are known zero.
25851 if (isTruncWithZeroHighBitsInput(Cond, DAG))
25852 Cond = Cond.getOperand(0);
25854 EVT CondVT = Cond.getValueType();
25856 // Add an AND with 1 if we don't already have one.
25857 if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
    Cond =
        DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));
25861 SDValue LHS = Cond;
25862 SDValue RHS = DAG.getConstant(0, dl, CondVT);
  SDValue CCVal;
  SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
  return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                     EFLAGS);
}
25870 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
25871 // Calls to _alloca are needed to probe the stack when allocating more than 4k
25872 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
25873 // that the guard pages used by the OS virtual memory manager are allocated in
25874 // correct sequence.
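// For example, a single 16K allocation would otherwise move the stack pointer
// past the guard page in one step; probing at each 4K increment faults in and
// commits every new page in order.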
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
25877 SelectionDAG &DAG) const {
25878 MachineFunction &MF = DAG.getMachineFunction();
25879 bool SplitStack = MF.shouldSplitStack();
25880 bool EmitStackProbeCall = hasStackProbeSymbol(MF);
25881 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
25882 SplitStack || EmitStackProbeCall;
25886 SDNode *Node = Op.getNode();
25887 SDValue Chain = Op.getOperand(0);
25888 SDValue Size = Op.getOperand(1);
25889 MaybeAlign Alignment(Op.getConstantOperandVal(2));
25890 EVT VT = Node->getValueType(0);
25892 // Chain the dynamic stack allocation so that it doesn't modify the stack
25893 // pointer when other instructions are using the stack.
25894 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
25896 bool Is64Bit = Subtarget.is64Bit();
25897 MVT SPTy = getPointerTy(DAG.getDataLayout());
25901 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25902 Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
25903 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
25904 " not tell us which reg is the stack pointer!");
25906 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25907 const Align StackAlign = TFI.getStackAlign();
25908 if (hasInlineStackProbe(MF)) {
25909 MachineRegisterInfo &MRI = MF.getRegInfo();
25911 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
25912 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
25913 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
25914 Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
25915 DAG.getRegister(Vreg, SPTy));
25917 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
25918 Chain = SP.getValue(1);
25919 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
25921 if (Alignment && *Alignment > StackAlign)
25923 DAG.getNode(ISD::AND, dl, VT, Result,
25924 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
25925 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
25926 } else if (SplitStack) {
25927 MachineRegisterInfo &MRI = MF.getRegInfo();
25930 // The 64 bit implementation of segmented stacks needs to clobber both r10
25931 // r11. This makes it impossible to use it along with nested parameters.
25932 const Function &F = MF.getFunction();
25933 for (const auto &A : F.args()) {
25934 if (A.hasNestAttr())
25935 report_fatal_error("Cannot use segmented stacks with functions that "
25936 "have nested arguments.");
25940 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
25941 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
25942 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
25943 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
25944 DAG.getRegister(Vreg, SPTy));
25946 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
25947 Chain = DAG.getNode(X86ISD::DYN_ALLOCA, dl, NodeTys, Chain, Size);
25948 MF.getInfo<X86MachineFunctionInfo>()->setHasDynAlloca(true);
25950 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25951 Register SPReg = RegInfo->getStackRegister();
25952 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
25953 Chain = SP.getValue(1);
25956 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
25957 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
25958 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
25964 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
25965 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
25967 SDValue Ops[2] = {Result, Chain};
25968 return DAG.getMergeValues(Ops, dl);
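// Roughly, the va_list initialized by LowerVASTART below corresponds to this
// C layout from the System V x86-64 ABI (shown here for orientation):
//   typedef struct {
//     unsigned int gp_offset;   // byte offset into reg_save_area for GPR args
//     unsigned int fp_offset;   // byte offset into reg_save_area for XMM args
//     void *overflow_arg_area;  // next stack-passed (memory) argument
//     void *reg_save_area;      // where register arguments were spilled
//   } va_list[1];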
SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  if (!Subtarget.is64Bit() ||
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory).
  //   reg_save_area
  SmallVector<SDValue, 8> MemOps;
  SDValue FIN = Op.getOperand(1);
  // Store gp_offset
  SDValue Store = DAG.getStore(
      Op.getOperand(0), DL,
      DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
      MachinePointerInfo(SV));
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::Fixed(4), DL);
  Store = DAG.getStore(
      Op.getOperand(0), DL,
      DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
      MachinePointerInfo(SV, 4));
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  Store =
      DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
      Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
  SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
  Store = DAG.getStore(
      Op.getOperand(0), DL, RSFIN, FIN,
      MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
  MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
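// Note: the VAARG_64/VAARG_X32 pseudo built below is not a real instruction;
// it is expanded after instruction selection into the usual compare/branch
// sequence that reads either reg_save_area or overflow_arg_area and bumps the
// corresponding offset/pointer field.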
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget.is64Bit() &&
         "LowerVAARG only handles 64-bit va_arg!");
  assert(Op.getNumOperands() == 4);

  MachineFunction &MF = DAG.getMachineFunction();
  if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
    // The Win64 ABI uses char* instead of a structure.
    return DAG.expandVAArg(Op.getNode());

  SDValue Chain = Op.getOperand(0);
  SDValue SrcPtr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  unsigned Align = Op.getConstantOperandVal(3);
  SDLoc dl(Op);

  EVT ArgVT = Op.getNode()->getValueType(0);
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
  uint8_t ArgMode;

  // Decide which area this value should be read from.
  // TODO: Implement the AMD64 ABI in its entirety. This simple
  // selection mechanism works only for the basic types.
  assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
  if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
    ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
  } else {
    assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
           "Unhandled argument type in LowerVAARG");
    ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
  }

  if (ArgMode == 2) {
    // Make sure using fp_offset makes sense.
    assert(!Subtarget.useSoftFloat() &&
           !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
           Subtarget.hasSSE1());
  }

  // Insert VAARG node into the DAG.
  // VAARG returns two values: Variable Argument Address, Chain.
  SDValue InstOps[] = {Chain, SrcPtr,
                       DAG.getTargetConstant(ArgSize, dl, MVT::i32),
                       DAG.getTargetConstant(ArgMode, dl, MVT::i8),
                       DAG.getTargetConstant(Align, dl, MVT::i32)};
  SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
  SDValue VAARG = DAG.getMemIntrinsicNode(
      Subtarget.isTarget64BitLP64() ? X86ISD::VAARG_64 : X86ISD::VAARG_X32, dl,
      VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
      /*Alignment=*/None,
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
  Chain = VAARG.getValue(1);

  // Load the next argument and return it.
  return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
}
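// va_copy on SysV x86-64 is just a structure copy: 24 bytes for the LP64
// __va_list_tag above (i32 + i32 + i8* + i8*), 16 bytes on x32. Win64
// va_lists are plain pointers and take the generic expansion path.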
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
  // where a va_list is still an i8*.
  assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
  if (Subtarget.isCallingConvWin64(
          DAG.getMachineFunction().getFunction().getCallingConv()))
    // Probably a Win64 va_copy.
    return DAG.expandVACopy(Op.getNode());

  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(
      Chain, DL, DstPtr, SrcPtr,
      DAG.getIntPtrConstant(Subtarget.isTarget64BitLP64() ? 24 : 16, DL),
      Align(Subtarget.isTarget64BitLP64() ? 8 : 4), /*isVolatile*/ false, false,
      false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
// Helper to get immediate/variable SSE shift opcode from other shift opcodes.
static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
  switch (Opc) {
  case ISD::SHL:
  case X86ISD::VSHL:
  case X86ISD::VSHLI:
    return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
  case ISD::SRL:
  case X86ISD::VSRL:
  case X86ISD::VSRLI:
    return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
  case ISD::SRA:
  case X86ISD::VSRA:
  case X86ISD::VSRAI:
    return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
  }
  llvm_unreachable("Unknown target vector shift node");
}
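// For example, a VSHLI (shift all lanes by an immediate, e.g. psllw $3)
// becomes a VSHL (shift all lanes by the low 64 bits of an XMM register,
// e.g. psllw %xmm1) once the amount is not a compile-time constant.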
/// Handle vector element shifts where the shift amount is a constant.
/// Takes immediate version of shift as input.
static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
                                          SDValue SrcOp, uint64_t ShiftAmt,
                                          SelectionDAG &DAG) {
  MVT ElementType = VT.getVectorElementType();

  // Bitcast the source vector to the output type, this is mainly necessary for
  // vXi8/vXi64 shifts.
  if (VT != SrcOp.getSimpleValueType())
    SrcOp = DAG.getBitcast(VT, SrcOp);

  // Fold this packed shift into its first operand if ShiftAmt is 0.
  if (ShiftAmt == 0)
    return SrcOp;

  // Check for ShiftAmt >= element width.
  if (ShiftAmt >= ElementType.getSizeInBits()) {
    if (Opc == X86ISD::VSRAI)
      ShiftAmt = ElementType.getSizeInBits() - 1;
    else
      return DAG.getConstant(0, dl, VT);
  }

  assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
         && "Unknown target vector shift-by-constant node");

  // Fold this packed vector shift into a build vector if SrcOp is a
  // vector of Constants or UNDEFs.
  if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
    unsigned ShiftOpc;
    switch (Opc) {
    default: llvm_unreachable("Unknown opcode!");
    case X86ISD::VSHLI:
      ShiftOpc = ISD::SHL;
      break;
    case X86ISD::VSRLI:
      ShiftOpc = ISD::SRL;
      break;
    case X86ISD::VSRAI:
      ShiftOpc = ISD::SRA;
      break;
    }

    SDValue Amt = DAG.getConstant(ShiftAmt, dl, VT);
    if (SDValue C = DAG.FoldConstantArithmetic(ShiftOpc, dl, VT, {SrcOp, Amt}))
      return C;
  }

  return DAG.getNode(Opc, dl, VT, SrcOp,
                     DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
}
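// Note for the splat-amount helper below: the SSE/AVX shift-by-register forms
// consume the entire low 64 bits of the amount operand, so a splatted element
// narrower than 64 bits must be zero-extended, not merely moved to lane 0.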
/// Handle vector element shifts by a splat shift amount
static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
                                   SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  MVT AmtVT = ShAmt.getSimpleValueType();
  assert(AmtVT.isVector() && "Vector shift type mismatch");
  assert(0 <= ShAmtIdx && ShAmtIdx < (int)AmtVT.getVectorNumElements() &&
         "Illegal vector splat index");

  // Move the splat element to the bottom element.
  if (ShAmtIdx != 0) {
    SmallVector<int> Mask(AmtVT.getVectorNumElements(), -1);
    Mask[0] = ShAmtIdx;
    ShAmt = DAG.getVectorShuffle(AmtVT, dl, ShAmt, DAG.getUNDEF(AmtVT), Mask);
  }

  // Peek through any zext node if we can get back to a 128-bit source.
  if (AmtVT.getScalarSizeInBits() == 64 &&
      (ShAmt.getOpcode() == ISD::ZERO_EXTEND ||
       ShAmt.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
      ShAmt.getOperand(0).getValueType().isSimple() &&
      ShAmt.getOperand(0).getValueType().is128BitVector()) {
    ShAmt = ShAmt.getOperand(0);
    AmtVT = ShAmt.getSimpleValueType();
  }

  // See if we can mask off the upper elements using the existing source node.
  // The shift uses the entire lower 64-bits of the amount vector, so no need to
  // do this for vXi64 types.
  bool IsMasked = false;
  if (AmtVT.getScalarSizeInBits() < 64) {
    if (ShAmt.getOpcode() == ISD::BUILD_VECTOR ||
        ShAmt.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      // If the shift amount has come from a scalar, then zero-extend the scalar
      // before moving to the vector.
      ShAmt = DAG.getZExtOrTrunc(ShAmt.getOperand(0), dl, MVT::i32);
      ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
      ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, ShAmt);
      AmtVT = MVT::v4i32;
      IsMasked = true;
    } else if (ShAmt.getOpcode() == ISD::AND) {
      // See if the shift amount is already masked (e.g. for rotation modulo),
      // then we can zero-extend it by setting all the other mask elements to
      // zero.
      SmallVector<SDValue> MaskElts(
          AmtVT.getVectorNumElements(),
          DAG.getConstant(0, dl, AmtVT.getScalarType()));
      MaskElts[0] = DAG.getAllOnesConstant(dl, AmtVT.getScalarType());
      SDValue Mask = DAG.getBuildVector(AmtVT, dl, MaskElts);
      if ((Mask = DAG.FoldConstantArithmetic(ISD::AND, dl, AmtVT,
                                             {ShAmt.getOperand(1), Mask}))) {
        ShAmt = DAG.getNode(ISD::AND, dl, AmtVT, ShAmt.getOperand(0), Mask);
        IsMasked = true;
      }
    }
  }

  // Extract if the shift amount vector is larger than 128-bits.
  if (AmtVT.getSizeInBits() > 128) {
    ShAmt = extract128BitVector(ShAmt, 0, DAG, dl);
    AmtVT = ShAmt.getSimpleValueType();
  }

  // Zero-extend bottom element to v2i64 vector type, either by extension or
  // shuffle masking.
  if (!IsMasked && AmtVT.getScalarSizeInBits() < 64) {
    if (AmtVT == MVT::v4i32 && (ShAmt.getOpcode() == X86ISD::VBROADCAST ||
                                ShAmt.getOpcode() == X86ISD::VBROADCAST_LOAD)) {
      ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, SDLoc(ShAmt), MVT::v4i32, ShAmt);
    } else if (Subtarget.hasSSE41()) {
      ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
                          MVT::v2i64, ShAmt);
    } else {
      SDValue ByteShift = DAG.getTargetConstant(
          (128 - AmtVT.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
      ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
      ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
                          ByteShift);
      ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
                          ByteShift);
    }
  }

  // Change opcode to non-immediate version.
  Opc = getTargetVShiftUniformOpcode(Opc, true);

  // The return type has to be a 128-bit type with the same element
  // type as the input type.
  MVT EltVT = VT.getVectorElementType();
  MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());

  ShAmt = DAG.getBitcast(ShVT, ShAmt);
  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}
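// Example of what getMaskNode (below) produces: an i8 mask driving a v4i1
// operation is bitcast to v8i1 and then narrowed to v4i1 with an
// EXTRACT_SUBVECTOR of the low 4 elements.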
/// Return Mask with the necessary casting or extending
/// for \p Mask according to \p MaskVT when lowering masking intrinsics
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
                           const SDLoc &dl) {
  if (isAllOnesConstant(Mask))
    return DAG.getConstant(1, dl, MaskVT);
  if (X86::isZeroNode(Mask))
    return DAG.getConstant(0, dl, MaskVT);

  assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");

  if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
    assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
    assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
    // In 32-bit mode, a bitcast of i64 is illegal, so extend/split it instead.
    SDValue Lo, Hi;
    Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
                     DAG.getConstant(0, dl, MVT::i32));
    Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
                     DAG.getConstant(1, dl, MVT::i32));

    Lo = DAG.getBitcast(MVT::v32i1, Lo);
    Hi = DAG.getBitcast(MVT::v32i1, Hi);

    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
  } else {
    MVT BitcastVT = MVT::getVectorVT(MVT::i1,
                                     Mask.getSimpleValueType().getSizeInBits());
    // In case when MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
    // are extracted by EXTRACT_SUBVECTOR.
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                       DAG.getBitcast(BitcastVT, Mask),
                       DAG.getIntPtrConstant(0, dl));
  }
}
/// Return (and \p Op, \p Mask) for compare instructions or
/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
/// necessary casting or extending for \p Mask when lowering masking intrinsics
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
  unsigned OpcodeSelect = ISD::VSELECT;
  SDLoc dl(Op);

  if (isAllOnesConstant(Mask))
    return Op;

  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  if (PreservedSrc.isUndef())
    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
}
/// Creates an SDNode for a predicated scalar operation.
/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask is coming as MVT::i8 and it should be transformed
/// to MVT::v1i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is using
/// "X86select" instead of "vselect". We just can't create the "vselect" node
/// for a scalar instruction.
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
    if (MaskConst->getZExtValue() & 0x1)
      return Op;

  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
  SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
                              DAG.getBitcast(MVT::v8i1, Mask),
                              DAG.getIntPtrConstant(0, dl));
  if (Op.getOpcode() == X86ISD::FSETCCM ||
      Op.getOpcode() == X86ISD::FSETCCM_SAE ||
      Op.getOpcode() == X86ISD::VFPCLASSS)
    return DAG.getNode(ISD::AND, dl, VT, Op, IMask);

  if (PreservedSrc.isUndef())
    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
}
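/// Returns the size in bytes of the EH registration node that WinEHStatePass
/// allocates in the parent frame (only meaningful for 32-bit MSVC EH).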
static int getSEHRegistrationNodeSize(const Function *Fn) {
  if (!Fn->hasPersonalityFn())
    report_fatal_error(
        "querying registration node size for function without personality");
  // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
  // WinEHStatePass for the full struct definition.
  switch (classifyEHPersonality(Fn->getPersonalityFn())) {
  case EHPersonality::MSVC_X86SEH: return 24;
  case EHPersonality::MSVC_CXX: return 16;
  default: break;
  }
  report_fatal_error(
      "can only recover FP for 32-bit MSVC EH personality functions");
}
/// When the MSVC runtime transfers control to us, either to an outlined
/// function or when returning to a parent frame after catching an exception, we
/// recover the parent frame pointer by doing arithmetic on the incoming EBP.
/// Here's the math:
///   RegNodeBase = EntryEBP - RegNodeSize
///   ParentFP = RegNodeBase - ParentFrameOffset
/// Subtracting RegNodeSize takes us to the offset of the registration node, and
/// subtracting the offset (negative on x86) takes us back to the parent FP.
static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
                                   SDValue EntryEBP) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDLoc dl;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // It's possible that the parent function no longer has a personality function
  // if the exceptional code was optimized away, in which case we just return
  // the incoming EBP.
  if (!Fn->hasPersonalityFn())
    return EntryEBP;

  // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
  // registration, or the .set_setframe offset.
  MCSymbol *OffsetSym =
      MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
          GlobalValue::dropLLVMManglingEscape(Fn->getName()));
  SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
  SDValue ParentFrameOffset =
      DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);

  // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
  // prologue to RBP in the parent function.
  const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
  if (Subtarget.is64Bit())
    return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);

  int RegNodeSize = getSEHRegistrationNodeSize(Fn);
  // RegNodeBase = EntryEBP - RegNodeSize
  // ParentFP = RegNodeBase - ParentFrameOffset
  SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
                                    DAG.getConstant(RegNodeSize, dl, PtrVT));
  return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
}
SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  // Helper to detect if the operand is CUR_DIRECTION rounding mode.
  auto isRoundModeCurDirection = [](SDValue Rnd) {
    if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
      return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;

    return false;
  };
  auto isRoundModeSAE = [](SDValue Rnd) {
    if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
      unsigned RC = C->getZExtValue();
      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
        // Clear the NO_EXC bit and check remaining bits.
        RC ^= X86::STATIC_ROUNDING::NO_EXC;
        // As a convenience we allow no other bits or explicitly
        // current direction.
        return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
      }
    }

    return false;
  };
  auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
    if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
      RC = C->getZExtValue();
      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
        // Clear the NO_EXC bit and check remaining bits.
        RC ^= X86::STATIC_ROUNDING::NO_EXC;
        return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
               RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
               RC == X86::STATIC_ROUNDING::TO_POS_INF ||
               RC == X86::STATIC_ROUNDING::TO_ZERO;
      }
    }

    return false;
  };

  SDLoc dl(Op);
  unsigned IntNo = Op.getConstantOperandVal(0);
  MVT VT = Op.getSimpleValueType();
  const IntrinsicData *IntrData = getIntrinsicWithoutChain(IntNo);

  // Propagate flags from original node to transformed node(s).
  SelectionDAG::FlagInserter FlagsInserter(DAG, Op->getFlags());

  if (IntrData) {
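    // IntrData describes how to lower the intrinsic: Opc0 is the default DAG
    // opcode, and Opc1, when non-zero, is the variant taking an explicit
    // rounding-mode or SAE operand.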
    switch (IntrData->Type) {
    case INTR_TYPE_1OP: {
      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      if (IntrWithRoundingModeOpcode != 0) {
        SDValue Rnd = Op.getOperand(2);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
                             Op.getOperand(1),
                             DAG.getTargetConstant(RC, dl, MVT::i32));
        if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1));
    }
    case INTR_TYPE_1OP_SAE: {
      SDValue Sae = Op.getOperand(2);

      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
    }
    case INTR_TYPE_2OP: {
      SDValue Src2 = Op.getOperand(2);

      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      if (IntrWithRoundingModeOpcode != 0) {
        SDValue Rnd = Op.getOperand(3);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
                             Op.getOperand(1), Src2,
                             DAG.getTargetConstant(RC, dl, MVT::i32));
        if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }

      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Src2);
    }
    case INTR_TYPE_2OP_SAE: {
      SDValue Sae = Op.getOperand(3);

      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
                         Op.getOperand(2));
    }
    case INTR_TYPE_3OP:
    case INTR_TYPE_3OP_IMM8: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);

      if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
          Src3.getValueType() != MVT::i8) {
        Src3 = DAG.getTargetConstant(
            cast<ConstantSDNode>(Src3)->getZExtValue() & 0xff, dl, MVT::i8);
      }

      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      if (IntrWithRoundingModeOpcode != 0) {
        SDValue Rnd = Op.getOperand(4);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
                             Src1, Src2, Src3,
                             DAG.getTargetConstant(RC, dl, MVT::i32));
        if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }

      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         {Src1, Src2, Src3});
    }
    case INTR_TYPE_4OP_IMM8: {
      assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
      SDValue Src4 = Op.getOperand(4);
      if (Src4.getValueType() != MVT::i8) {
        Src4 = DAG.getTargetConstant(
            cast<ConstantSDNode>(Src4)->getZExtValue() & 0xff, dl, MVT::i8);
      }

      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                         Src4);
    }
    case INTR_TYPE_1OP_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);
      // We add rounding mode to the Node when
      //   - RC Opcode is specified and
      //   - RC is not "current direction".
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      if (IntrWithRoundingModeOpcode != 0) {
        SDValue Rnd = Op.getOperand(4);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          return getVectorMaskingNode(
              DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
                          Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
              Mask, PassThru, Subtarget, DAG);
        if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_1OP_MASK_SAE: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);
      SDValue Rnd = Op.getOperand(4);

      unsigned Opc;
      if (isRoundModeCurDirection(Rnd))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Rnd))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask,
                                  PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      // There are 2 kinds of intrinsics in this group:
      // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
      // (2) With rounding mode and sae - 7 operands.
      bool HasRounding = IntrWithRoundingModeOpcode != 0;
      if (Op.getNumOperands() == (5U + HasRounding)) {
        if (HasRounding) {
          SDValue Rnd = Op.getOperand(5);
          unsigned RC = 0;
          if (isRoundModeSAEToX(Rnd, RC))
            return getScalarMaskingNode(
                DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
                            DAG.getTargetConstant(RC, dl, MVT::i32)),
                Mask, passThru, Subtarget, DAG);
          if (!isRoundModeCurDirection(Rnd))
            return SDValue();
        }
        return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
                                                Src2),
                                    Mask, passThru, Subtarget, DAG);
      }

      assert(Op.getNumOperands() == (6U + HasRounding) &&
             "Unexpected intrinsic form");
      SDValue RoundingMode = Op.getOperand(5);
      unsigned Opc = IntrData->Opc0;
      if (HasRounding) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrWithRoundingModeOpcode;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
                                              Src2, RoundingMode),
                                  Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK_RND: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue Rnd = Op.getOperand(5);

      SDValue NewOp;
      unsigned RC = 0;
      if (isRoundModeCurDirection(Rnd))
        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
      else if (isRoundModeSAEToX(Rnd, RC))
        NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
                            DAG.getTargetConstant(RC, dl, MVT::i32));
      else
        return SDValue();

      return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue Sae = Op.getOperand(5);
      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
                                  Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_2OP_MASK: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue NewOp;
      if (IntrData->Opc1 != 0) {
        SDValue Rnd = Op.getOperand(5);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
                              DAG.getTargetConstant(RC, dl, MVT::i32));
        else if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      if (!NewOp)
        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
      return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_2OP_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue PassThru = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);
      SDValue Sae = Op.getOperand(6);
      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_3OP_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue PassThru = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case BLENDV: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);

      EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
      Src3 = DAG.getBitcast(MaskVT, Src3);

      // Reverse the operands to match VSELECT order.
      return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
    }
    case VPERM_2OP: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);

      // Swap Src1 and Src2 in the node creation.
      return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
    }
    case CFMA_OP_MASKZ:
    case CFMA_OP_MASK: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      MVT VT = Op.getSimpleValueType();

      SDValue PassThru = Src3;
      if (IntrData->Type == CFMA_OP_MASKZ)
        PassThru = getZeroVector(VT, Subtarget, DAG, dl);

      // We add rounding mode to the Node when
      //   - RC Opcode is specified and
      //   - RC is not "current direction".
      SDValue NewOp;
      if (IntrData->Opc1 != 0) {
        SDValue Rnd = Op.getOperand(5);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2, Src3,
                              DAG.getTargetConstant(RC, dl, MVT::i32));
        else if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      if (!NewOp)
        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
      return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
    }
    case IFMA_OP:
      // NOTE: We need to swizzle the operands to pass the multiply operands
      // first.
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
    case FPCLASSS: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Imm = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);
      SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
      SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
                                                 Subtarget, DAG);
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
                                DAG.getConstant(0, dl, MVT::v8i1),
                                FPclassMask, DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(MVT::i8, Ins);
    }
    case CMP_MASK_CC: {
      MVT MaskVT = Op.getSimpleValueType();
      SDValue CC = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
                             Op.getOperand(2), CC, Mask, Sae);
        if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      // Default rounding mode.
      return DAG.getNode(IntrData->Opc0, dl, MaskVT,
                         {Op.getOperand(1), Op.getOperand(2), CC, Mask});
    }
    case CMP_MASK_SCALAR_CC: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue CC = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      SDValue Cmp;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      // Default rounding mode.
      if (!Cmp.getNode())
        Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);

      SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
                                             Subtarget, DAG);
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
                                DAG.getConstant(0, dl, MVT::v8i1),
                                CmpMask, DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(MVT::i8, Ins);
    }
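    // COMI/UCOMI set ZF/PF/CF from the compare (all three are 1 on an
    // unordered result), so SETEQ below must also check PF == 0 and SETNE
    // PF == 1 to treat NaN operands correctly.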
    case COMI: { // Comparison intrinsics
      ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
      SDValue LHS = Op.getOperand(1);
      SDValue RHS = Op.getOperand(2);
      // Some conditions require the operands to be swapped.
      if (CC == ISD::SETLT || CC == ISD::SETLE)
        std::swap(LHS, RHS);

      SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
      SDValue SetCC;
      switch (CC) {
      case ISD::SETEQ: { // (ZF = 0 and PF = 0)
        SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
        SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
        SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
        break;
      }
      case ISD::SETNE: { // (ZF = 1 or PF = 1)
        SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
        SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
        SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
        break;
      }
      case ISD::SETGT:   // (CF = 0 and ZF = 0)
      case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
        SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
        break;
      }
      case ISD::SETGE: // CF = 0
      case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
        SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
        break;
      default:
        llvm_unreachable("Unexpected illegal condition!");
      }
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
    }
    case COMI_RM: { // Comparison intrinsics with Sae.
      SDValue LHS = Op.getOperand(1);
      SDValue RHS = Op.getOperand(2);
      unsigned CondVal = Op.getConstantOperandVal(3);
      SDValue Sae = Op.getOperand(4);

      SDValue FCmp;
      if (isRoundModeCurDirection(Sae))
        FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
                           DAG.getTargetConstant(CondVal, dl, MVT::i8));
      else if (isRoundModeSAE(Sae))
        FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
                           DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
      else
        return SDValue();
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
                                DAG.getConstant(0, dl, MVT::v16i1),
                                FCmp, DAG.getIntPtrConstant(0, dl));
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
                         DAG.getBitcast(MVT::i16, Ins));
    }
    case VSHIFT: {
      SDValue SrcOp = Op.getOperand(1);
      SDValue ShAmt = Op.getOperand(2);
      assert(ShAmt.getValueType() == MVT::i32 &&
             "Unexpected VSHIFT amount type");

      // Catch shift-by-constant.
      if (auto *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
        return getTargetVShiftByConstNode(IntrData->Opc0, dl,
                                          Op.getSimpleValueType(), SrcOp,
                                          CShAmt->getZExtValue(), DAG);

      ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                                 SrcOp, ShAmt, 0, Subtarget, DAG);
    }
    case COMPRESS_EXPAND_IN_REG: {
      SDValue Mask = Op.getOperand(3);
      SDValue DataToCompress = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
        return Op.getOperand(1);

      // Avoid false dependency.
      if (PassThru.isUndef())
        PassThru = DAG.getConstant(0, dl, VT);

      return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
                         Mask);
    }
    case FIXUPIMM:
    case FIXUPIMM_MASKZ: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue Imm = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);
      SDValue Passthru = (IntrData->Type == FIXUPIMM)
                             ? Src1
                             : getZeroVector(VT, Subtarget, DAG, dl);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);

      if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
        return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);

      return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
    }
    case ROUNDP: {
      assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
      // Clear the upper bits of the rounding immediate so that the legacy
      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
      auto *Round = cast<ConstantSDNode>(Op.getOperand(2));
      SDValue RoundingMode =
          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), RoundingMode);
    }
    case ROUNDS: {
      assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
      // Clear the upper bits of the rounding immediate so that the legacy
      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
      auto *Round = cast<ConstantSDNode>(Op.getOperand(3));
      SDValue RoundingMode =
          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2), RoundingMode);
    }
    case BEXTRI: {
      assert(IntrData->Opc0 == X86ISD::BEXTRI && "Unexpected opcode");

      uint64_t Imm = Op.getConstantOperandVal(2);
      SDValue Control = DAG.getTargetConstant(Imm & 0xffff, dl,
                                              Op.getValueType());
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Control);
    }
    case ADX: {
      SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
      SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);

      SDValue Res;
      // If the carry-in is zero, then we should just use ADD/SUB instead of
      // ADC/SBB.
      if (isNullConstant(Op.getOperand(1))) {
        Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
                          Op.getOperand(3));
      } else {
        SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
                                    DAG.getConstant(-1, dl, MVT::i8));
        Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
                          Op.getOperand(3), GenCF.getValue(1));
      }
      SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
      SDValue Results[] = { SetCC, Res };
      return DAG.getMergeValues(Results, dl);
    }
    case CVTPD2PS_MASK:
    case CVTPD2DQ_MASK:
    case CVTQQ2PS_MASK:
    case TRUNCATE_TO_REG: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);

      if (isAllOnesConstant(Mask))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);

      MVT SrcVT = Src.getSimpleValueType();
      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
                         {Src, PassThru, Mask});
    }
    case CVTPS2PH_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue Rnd = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      if (isAllOnesConstant(Mask))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);

      MVT SrcVT = Src.getSimpleValueType();
      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
                         PassThru, Mask);
    }
    case CVTNEPS2BF16_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);

      if (ISD::isBuildVectorAllOnes(Mask.getNode()))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);

      // Break false dependency.
      if (PassThru.isUndef())
        PassThru = DAG.getConstant(0, dl, PassThru.getValueType());

      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
                         Mask);
    }
    default:
      break;
    }
  }

  switch (IntNo) {
  default: return SDValue(); // Don't custom lower most intrinsics.
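  // Reminder on the flag semantics used below: PTEST/TESTP set
  // ZF = ((LHS & RHS) == 0) and CF = ((~LHS & RHS) == 0), so 'testz' maps to
  // COND_E, 'testc' to COND_B, and 'testnzc' to COND_A (CF = 0 and ZF = 0).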
  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower it to the ptest
  // or testp pattern and a setcc for the result.
  case Intrinsic::x86_avx512_ktestc_b:
  case Intrinsic::x86_avx512_ktestc_w:
  case Intrinsic::x86_avx512_ktestc_d:
  case Intrinsic::x86_avx512_ktestc_q:
  case Intrinsic::x86_avx512_ktestz_b:
  case Intrinsic::x86_avx512_ktestz_w:
  case Intrinsic::x86_avx512_ktestz_d:
  case Intrinsic::x86_avx512_ktestz_q:
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    unsigned TestOpc = X86ISD::PTEST;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx512_ktestc_b:
    case Intrinsic::x86_avx512_ktestc_w:
    case Intrinsic::x86_avx512_ktestc_d:
    case Intrinsic::x86_avx512_ktestc_q:
      // CF = 1
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx512_ktestz_b:
    case Intrinsic::x86_avx512_ktestz_w:
    case Intrinsic::x86_avx512_ktestz_d:
    case Intrinsic::x86_avx512_ktestz_q:
      // ZF = 1
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
    SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistria128:
  case Intrinsic::x86_sse42_pcmpestria128:
  case Intrinsic::x86_sse42_pcmpistric128:
  case Intrinsic::x86_sse42_pcmpestric128:
  case Intrinsic::x86_sse42_pcmpistrio128:
  case Intrinsic::x86_sse42_pcmpestrio128:
  case Intrinsic::x86_sse42_pcmpistris128:
  case Intrinsic::x86_sse42_pcmpestris128:
  case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
    SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTR;
    else
      Opcode = X86ISD::PCMPESTR;

    SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps);
  }

  case Intrinsic::x86_sse42_pcmpistrm128:
  case Intrinsic::x86_sse42_pcmpestrm128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
      Opcode = X86ISD::PCMPISTR;
    else
      Opcode = X86ISD::PCMPESTR;

    SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
  }
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(getGlobalWrapperKind(), dl, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }

  case Intrinsic::x86_seh_lsda: {
    // Compute the symbol for the LSDA. We know it'll get emitted later.
    MachineFunction &MF = DAG.getMachineFunction();
    SDValue Op1 = Op.getOperand(1);
    auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
    MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
        GlobalValue::dropLLVMManglingEscape(Fn->getName()));

    // Generate a simple absolute symbol reference. This intrinsic is only
    // supported on 32-bit Windows, which isn't PIC.
    SDValue Result = DAG.getMCSymbol(LSDASym, VT);
    return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
  }
  case Intrinsic::eh_recoverfp: {
    SDValue FnOp = Op.getOperand(1);
    SDValue IncomingFPOp = Op.getOperand(2);
    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
          "llvm.eh.recoverfp must take a function as the first argument");
    return recoverFramePointer(DAG, Fn, IncomingFPOp);
  }

  case Intrinsic::localaddress: {
    // Returns one of the stack, base, or frame pointer registers, depending on
    // which is used to reference local variables.
    MachineFunction &MF = DAG.getMachineFunction();
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    Register Reg;
    if (RegInfo->hasBasePointer(MF))
      Reg = RegInfo->getBaseRegister();
    else { // Handles the SP or FP case.
      bool CantUseFP = RegInfo->hasStackRealignment(MF);
      if (CantUseFP)
        Reg = RegInfo->getPtrSizedStackRegister(MF);
      else
        Reg = RegInfo->getPtrSizedFrameRegister(MF);
    }
    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
  }
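  // VP2INTERSECT writes an adjacent pair of mask registers; it is modelled as
  // a single MVT::Untyped result, and the two masks are recovered through the
  // sub_mask_0/sub_mask_1 subregister indices.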
  case Intrinsic::x86_avx512_vp2intersect_q_512:
  case Intrinsic::x86_avx512_vp2intersect_q_256:
  case Intrinsic::x86_avx512_vp2intersect_q_128:
  case Intrinsic::x86_avx512_vp2intersect_d_512:
  case Intrinsic::x86_avx512_vp2intersect_d_256:
  case Intrinsic::x86_avx512_vp2intersect_d_128: {
    MVT MaskVT = Op.getSimpleValueType();

    SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
    SDLoc DL(Op);

    SDValue Operation =
        DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
                    Op->getOperand(1), Op->getOperand(2));

    SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
                                                 MaskVT, Operation);
    SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
                                                 MaskVT, Operation);
    return DAG.getMergeValues({Result0, Result1}, DL);
  }
  case Intrinsic::x86_mmx_pslli_w:
  case Intrinsic::x86_mmx_pslli_d:
  case Intrinsic::x86_mmx_pslli_q:
  case Intrinsic::x86_mmx_psrli_w:
  case Intrinsic::x86_mmx_psrli_d:
  case Intrinsic::x86_mmx_psrli_q:
  case Intrinsic::x86_mmx_psrai_w:
  case Intrinsic::x86_mmx_psrai_d: {
    SDLoc DL(Op);
    SDValue ShAmt = Op.getOperand(2);
    // If the argument is a constant, convert it to a target constant.
    if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
      // Clamp out of bounds shift amounts since they will otherwise be masked
      // to 8-bits which may make it no longer out of bounds.
      unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
      if (ShiftAmount == 0)
        return Op.getOperand(1);

      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                         Op.getOperand(0), Op.getOperand(1),
                         DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
    }

    unsigned NewIntrinsic;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_mmx_pslli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psll_w;
      break;
    case Intrinsic::x86_mmx_pslli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psll_d;
      break;
    case Intrinsic::x86_mmx_pslli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psll_q;
      break;
    case Intrinsic::x86_mmx_psrli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
      break;
    case Intrinsic::x86_mmx_psrli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
      break;
    case Intrinsic::x86_mmx_psrli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
      break;
    case Intrinsic::x86_mmx_psrai_w:
      NewIntrinsic = Intrinsic::x86_mmx_psra_w;
      break;
    case Intrinsic::x86_mmx_psrai_d:
      NewIntrinsic = Intrinsic::x86_mmx_psra_d;
      break;
    }

    // The vector shift intrinsics with scalars use 32b shift amounts but
    // the sse2/mmx shift instructions read 64 bits. Copy the 32 bits to an
    // MMX register.
    ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                       DAG.getTargetConstant(NewIntrinsic, DL,
                                             getPointerTy(DAG.getDataLayout())),
                       Op.getOperand(1), ShAmt);
  }
  case Intrinsic::thread_pointer: {
    if (Subtarget.isTargetELF()) {
      EVT PtrVT = getPointerTy(DAG.getDataLayout());
      // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
      Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(
          *DAG.getContext(), Subtarget.is64Bit() ? X86AS::FS : X86AS::GS));
      return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                         DAG.getIntPtrConstant(0, dl), MachinePointerInfo(Ptr));
    }
    report_fatal_error(
        "Target OS doesn't support __builtin_thread_pointer() yet.");
  }
  }
  return SDValue();
}
static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                                 SDValue Src, SDValue Mask, SDValue Base,
                                 SDValue Index, SDValue ScaleOp, SDValue Chain,
                                 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
  // If source is undef or we know it won't be used, use a zero vector
  // to break register dependency.
  // TODO: use undef instead and let BreakFalseDeps deal with it?
  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);

  // Cast mask to an integer type.
  Mask = DAG.getBitcast(MaskVT, Mask);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
                              MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
}
static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
                             SDValue Src, SDValue Mask, SDValue Base,
                             SDValue Index, SDValue ScaleOp, SDValue Chain,
                             const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
                              VT.getVectorNumElements());
  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);

  // We support two versions of the gather intrinsics. One with scalar mask and
  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
  if (Mask.getValueType() != MaskVT)
    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
  // If source is undef or we know it won't be used, use a zero vector
  // to break register dependency.
  // TODO: use undef instead and let BreakFalseDeps deal with it?
  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
                              MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
}

static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                              SDValue Src, SDValue Mask, SDValue Base,
                              SDValue Index, SDValue ScaleOp, SDValue Chain,
                              const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
                              Src.getSimpleValueType().getVectorNumElements());
  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);

  // We support two versions of the scatter intrinsics. One with scalar mask and
  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
  if (Mask.getValueType() != MaskVT)
    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
                              MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return Res;
}

static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                               SDValue Mask, SDValue Base, SDValue Index,
                               SDValue ScaleOp, SDValue Chain,
                               const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  MVT MaskVT =
      MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
  SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
  return SDValue(Res, 0);
}

/// Handles the lowering of builtin intrinsics with chain that return their
/// value into registers EDX:EAX.
/// If operand SrcReg is a valid register identifier, then operand 2 of N is
/// copied to SrcReg. The assumption is that SrcReg is an implicit input to
/// TargetOpcode.
/// Returns a Glue value which can be used to add an extra copy-from-reg if the
/// expanded intrinsic implicitly defines extra registers (i.e. not just
/// EDX:EAX).
static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
                                           SelectionDAG &DAG,
                                           unsigned TargetOpcode,
                                           unsigned SrcReg,
                                           const X86Subtarget &Subtarget,
                                           SmallVectorImpl<SDValue> &Results) {
  SDValue Chain = N->getOperand(0);
  SDValue Glue;

  if (SrcReg) {
    assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
    Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
    Glue = Chain.getValue(1);
  }

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue N1Ops[] = {Chain, Glue};
  SDNode *N1 = DAG.getMachineNode(
      TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
  Chain = SDValue(N1, 0);

  // Reads the content of XCR and returns it in registers EDX:EAX.
  SDValue LO, HI;
  if (Subtarget.is64Bit()) {
    LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);
  Glue = HI.getValue(2);

  if (Subtarget.is64Bit()) {
    // Merge the two 32-bit values into a 64-bit one.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, DL, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return Glue;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
  return Glue;
}

/// Handles the lowering of builtin intrinsics that read the time stamp counter
/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
/// READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget,
                                    SmallVectorImpl<SDValue> &Results) {
  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
  // and the EAX register is loaded with the low-order 32 bits.
  SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
                                             /* NoRegister */0, Subtarget,
                                             Results);
  if (Opcode != X86::RDTSCP)
    return;

  SDValue Chain = Results[1];
  // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
  // the ECX register. Add 'ecx' explicitly to the chain.
  SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
  Results[1] = ecx;
  Results.push_back(ecx.getValue(1));
}

static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  SmallVector<SDValue, 3> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
                          Results);
  return DAG.getMergeValues(Results, DL);
}

static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue RegNode = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EH registrations only live in functions using WinEH");

  // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
  EHInfo->EHRegNodeFrameIndex = FINode->getIndex();

  // Return the chain operand without making any DAG nodes.
  return Chain;
}

static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue EHGuard = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EHGuard only live in functions using WinEH");

  // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
  EHInfo->EHGuardFrameIndex = FINode->getIndex();

  // Return the chain operand without making any DAG nodes.
  return Chain;
}

/// Emit Truncating Store with signed or unsigned saturation.
static SDValue
EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
                SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
                SelectionDAG &DAG) {
  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
  return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
}

/// Emit Masked Truncating Store with signed or unsigned saturation.
static SDValue
EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
                      SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
                      MachineMemOperand *MMO, SelectionDAG &DAG) {
  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Val, Ptr, Mask };
  unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
  return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
}

static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  unsigned IntNo = Op.getConstantOperandVal(1);
  const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData) {
    switch (IntNo) {

    case Intrinsic::swift_async_context_addr: {
      SDLoc dl(Op);
      auto &MF = DAG.getMachineFunction();
      auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
      if (Subtarget.is64Bit()) {
        MF.getFrameInfo().setFrameAddressIsTaken(true);
        X86FI->setHasSwiftAsyncContext(true);
        SDValue Chain = Op->getOperand(0);
        SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
        SDValue Result =
            SDValue(DAG.getMachineNode(X86::SUB64ri8, dl, MVT::i64, CopyRBP,
                                       DAG.getTargetConstant(8, dl, MVT::i32)),
                    0);
        // Return { result, chain }.
        return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
                           CopyRBP.getValue(1));
      } else {
        // 32-bit so no special extended frame, create or reuse an existing
        // stack slot.
        if (!X86FI->getSwiftAsyncContextFrameIdx())
          X86FI->setSwiftAsyncContextFrameIdx(
              MF.getFrameInfo().CreateStackObject(4, Align(4), false));
        SDValue Result =
            DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
        // Return { result, chain }.
        return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
                           Op->getOperand(0));
      }
    }
    case llvm::Intrinsic::x86_seh_ehregnode:
      return MarkEHRegistrationNode(Op, DAG);
    case llvm::Intrinsic::x86_seh_ehguard:
      return MarkEHGuard(Op, DAG);
    case llvm::Intrinsic::x86_rdpkru: {
      SDLoc dl(Op);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      // Create a RDPKRU node and pass 0 to the ECX parameter.
      return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
                         DAG.getConstant(0, dl, MVT::i32));
    }
    case llvm::Intrinsic::x86_wrpkru: {
      SDLoc dl(Op);
      // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
      // to the EDX and ECX parameters.
      return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
                         Op.getOperand(0), Op.getOperand(2),
                         DAG.getConstant(0, dl, MVT::i32),
                         DAG.getConstant(0, dl, MVT::i32));
    }
    case llvm::Intrinsic::asan_check_memaccess: {
      // Mark this as adjustsStack because it will be lowered to a call.
      DAG.getMachineFunction().getFrameInfo().setAdjustsStack(true);
      // Don't do anything here, we will expand these intrinsics out later.
      return Op;
    }
    case llvm::Intrinsic::x86_flags_read_u32:
    case llvm::Intrinsic::x86_flags_read_u64:
    case llvm::Intrinsic::x86_flags_write_u32:
    case llvm::Intrinsic::x86_flags_write_u64: {
      // We need a frame pointer because this will get lowered to a PUSH/POP
      // sequence.
      MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
      MFI.setHasCopyImplyingStackAdjustment(true);
      // Don't do anything here, we will expand these intrinsics out later
      // during FinalizeISel in EmitInstrWithCustomInserter.
      return Op;
    }
    case Intrinsic::x86_lwpins32:
    case Intrinsic::x86_lwpins64:
    case Intrinsic::x86_umwait:
    case Intrinsic::x86_tpause: {
      SDLoc dl(Op);
      SDValue Chain = Op->getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      unsigned Opcode;

      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_umwait:
        Opcode = X86ISD::UMWAIT;
        break;
      case Intrinsic::x86_tpause:
        Opcode = X86ISD::TPAUSE;
        break;
      case Intrinsic::x86_lwpins32:
      case Intrinsic::x86_lwpins64:
        Opcode = X86ISD::LWPINS;
        break;
      }

      SDValue Operation =
          DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
                      Op->getOperand(3), Op->getOperand(4));
      SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    case Intrinsic::x86_enqcmd:
    case Intrinsic::x86_enqcmds: {
      SDLoc dl(Op);
      SDValue Chain = Op.getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      unsigned Opcode;

      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic!");
      case Intrinsic::x86_enqcmd:
        Opcode = X86ISD::ENQCMD;
        break;
      case Intrinsic::x86_enqcmds:
        Opcode = X86ISD::ENQCMDS;
        break;
      }

      SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
                                      Op.getOperand(3));
      SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    case Intrinsic::x86_aesenc128kl:
    case Intrinsic::x86_aesdec128kl:
    case Intrinsic::x86_aesenc256kl:
    case Intrinsic::x86_aesdec256kl: {
      SDLoc DL(Op);
      SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::i32, MVT::Other);
      SDValue Chain = Op.getOperand(0);
      unsigned Opcode;

      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_aesenc128kl:
        Opcode = X86ISD::AESENC128KL;
        break;
      case Intrinsic::x86_aesdec128kl:
        Opcode = X86ISD::AESDEC128KL;
        break;
      case Intrinsic::x86_aesenc256kl:
        Opcode = X86ISD::AESENC256KL;
        break;
      case Intrinsic::x86_aesdec256kl:
        Opcode = X86ISD::AESDEC256KL;
        break;
      }

      MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
      MachineMemOperand *MMO = MemIntr->getMemOperand();
      EVT MemVT = MemIntr->getMemoryVT();
      SDValue Operation = DAG.getMemIntrinsicNode(
          Opcode, DL, VTs, {Chain, Op.getOperand(2), Op.getOperand(3)}, MemVT,
          MMO);
      SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(1), DL, DAG);

      return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
                         {ZF, Operation.getValue(0), Operation.getValue(2)});
    }
    case Intrinsic::x86_aesencwide128kl:
    case Intrinsic::x86_aesdecwide128kl:
    case Intrinsic::x86_aesencwide256kl:
    case Intrinsic::x86_aesdecwide256kl: {
      SDLoc DL(Op);
      SDVTList VTs = DAG.getVTList(
          {MVT::i32, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64,
           MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::Other});
      SDValue Chain = Op.getOperand(0);
      unsigned Opcode;

      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_aesencwide128kl:
        Opcode = X86ISD::AESENCWIDE128KL;
        break;
      case Intrinsic::x86_aesdecwide128kl:
        Opcode = X86ISD::AESDECWIDE128KL;
        break;
      case Intrinsic::x86_aesencwide256kl:
        Opcode = X86ISD::AESENCWIDE256KL;
        break;
      case Intrinsic::x86_aesdecwide256kl:
        Opcode = X86ISD::AESDECWIDE256KL;
        break;
      }

      MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
      MachineMemOperand *MMO = MemIntr->getMemOperand();
      EVT MemVT = MemIntr->getMemoryVT();
      SDValue Operation = DAG.getMemIntrinsicNode(
          Opcode, DL, VTs,
          {Chain, Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
           Op.getOperand(5), Op.getOperand(6), Op.getOperand(7),
           Op.getOperand(8), Op.getOperand(9), Op.getOperand(10)},
          MemVT, MMO);
      SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(0), DL, DAG);

      return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
                         {ZF, Operation.getValue(1), Operation.getValue(2),
                          Operation.getValue(3), Operation.getValue(4),
                          Operation.getValue(5), Operation.getValue(6),
                          Operation.getValue(7), Operation.getValue(8),
                          Operation.getValue(9)});
    }
    case Intrinsic::x86_testui: {
      SDLoc dl(Op);
      SDValue Chain = Op.getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      SDValue Operation = DAG.getNode(X86ISD::TESTUI, dl, VTs, Chain);
      SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    case Intrinsic::x86_atomic_bts:
    case Intrinsic::x86_atomic_btc:
    case Intrinsic::x86_atomic_btr: {
      SDLoc DL(Op);
      MVT VT = Op.getSimpleValueType();
      SDValue Chain = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(2);
      SDValue Op2 = Op.getOperand(3);
      unsigned Opc = IntNo == Intrinsic::x86_atomic_bts   ? X86ISD::LBTS
                     : IntNo == Intrinsic::x86_atomic_btc ? X86ISD::LBTC
                                                          : X86ISD::LBTR;
      SDValue Size = DAG.getConstant(VT.getScalarSizeInBits(), DL, MVT::i32);
      MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
      SDValue Res =
          DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
                                  {Chain, Op1, Op2, Size}, VT, MMO);
      Chain = Res.getValue(1);
      Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
      unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
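      // The carry flag holds the previous value of the tested bit; shift it
      // back to the bit position given by the constant Op2 so the intrinsic
      // returns the bit in its original position.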
      if (Imm)
        Res = DAG.getNode(ISD::SHL, DL, VT, Res,
                          DAG.getShiftAmountConstant(Imm, VT, DL));
      return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
    }
    }
    return SDValue();
  }

  SDLoc dl(Op);
  switch(IntrData->Type) {
  default: llvm_unreachable("Unknown Intrinsic Type");
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
    SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, cast to i32.
    SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
                     DAG.getConstant(1, dl, Op->getValueType(1)),
                     DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
                     SDValue(Result.getNode(), 1)};
    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);

    // Return { result, isValid, chain }.
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
                       SDValue(Result.getNode(), 2));
  }
  case GATHER_AVX2: {
    SDValue Chain = Op.getOperand(0);
    SDValue Src = Op.getOperand(2);
    SDValue Base = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                             Scale, Chain, Subtarget);
  }
  case GATHER: {
    // gather(v1, mask, index, base, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Src = Op.getOperand(2);
    SDValue Base = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
                         Chain, Subtarget);
  }
  case SCATTER: {
    // scatter(base, mask, index, v1, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Base = Op.getOperand(2);
    SDValue Mask = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Src = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain, Subtarget);
  }
  case PREFETCH: {
    const APInt &HintVal = Op.getConstantOperandAPInt(6);
    assert((HintVal == 2 || HintVal == 3) &&
           "Wrong prefetch hint in intrinsic: should be 2 or 3");
    unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
    SDValue Chain = Op.getOperand(0);
    SDValue Mask = Op.getOperand(2);
    SDValue Index = Op.getOperand(3);
    SDValue Base = Op.getOperand(4);
    SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
                           Subtarget);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
                            Results);
    return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC:
  // Read Processor Register.
  case RDPRU:
  // Get Extended Control Register.
  case XGETBV: {
    SmallVector<SDValue, 2> Results;

    // RDPMC uses ECX to select the index of the performance counter to read.
    // RDPRU uses ECX to select the processor register to read.
    // XGETBV uses ECX to select the index of the XCR register to return.
    // The result is stored into registers EDX:EAX.
    expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
                                Subtarget, Results);
    return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
    SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
                       Ret, SDValue(InTrans.getNode(), 1));
  }
  case TRUNCATE_TO_MEM_VI8:
  case TRUNCATE_TO_MEM_VI16:
  case TRUNCATE_TO_MEM_VI32: {
    SDValue Mask = Op.getOperand(4);
    SDValue DataToTruncate = Op.getOperand(3);
    SDValue Addr = Op.getOperand(2);
    SDValue Chain = Op.getOperand(0);

    MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
    assert(MemIntr && "Expected MemIntrinsicSDNode!");

    EVT MemVT = MemIntr->getMemoryVT();

    uint16_t TruncationOp = IntrData->Opc0;
    switch (TruncationOp) {
    case X86ISD::VTRUNC: {
      if (isAllOnesConstant(Mask)) // return just a truncate store
        return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
                                 MemIntr->getMemOperand());

      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      SDValue Offset = DAG.getUNDEF(VMask.getValueType());

      return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
                                MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
                                true /* truncating */);
    }
    case X86ISD::VTRUNCUS:
    case X86ISD::VTRUNCS: {
      bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
      if (isAllOnesConstant(Mask))
        return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
                               MemIntr->getMemOperand(), DAG);

      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

      return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
                                   VMask, MemVT, MemIntr->getMemOperand(), DAG);
    }
    default:
      llvm_unreachable("Unsupported truncstore intrinsic");
    }
  }
  }
}

SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
  return getReturnAddressFrameIndex(DAG);
}

SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  EVT VT = Op.getValueType();

  MFI.setFrameAddressIsTaken(true);

  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
    // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind codes
    // simultaneously.
    int FrameAddrIndex = FuncInfo->getFAIndex();
    if (!FrameAddrIndex) {
      // Set up a frame object for the return address.
      unsigned SlotSize = RegInfo->getSlotSize();
      FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
          SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
      FuncInfo->setFAIndex(FrameAddrIndex);
    }
    return DAG.getFrameIndex(FrameAddrIndex, VT);
  }

  unsigned FrameReg =
      RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
  SDLoc dl(Op); // FIXME probably not meaningful
  unsigned Depth = Op.getConstantOperandVal(0);
  assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
          (FrameReg == X86::EBP && VT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();

  Register Reg = StringSwitch<unsigned>(RegName)
                     .Case("esp", X86::ESP)
                     .Case("rsp", X86::RSP)
                     .Case("ebp", X86::EBP)
                     .Case("rbp", X86::RBP)
                     .Default(0);

  if (Reg == X86::EBP || Reg == X86::RBP) {
    if (!TFI.hasFP(MF))
      report_fatal_error("register " + StringRef(RegName) +
                         " is allocatable: function has no frame pointer");
#ifndef NDEBUG
    else {
      const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
      Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
      assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
             "Invalid Frame Register!");
    }
#endif
  }

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}

Register X86TargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
    return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;

  return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
}

Register X86TargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Funclet personalities don't use selectors (the runtime does the selection).
  if (isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)))
    return X86::NoRegister;
  return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
}

bool X86TargetLowering::needsFixedCatchObjects() const {
  return Subtarget.isTargetWin64();
}

SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
  assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
          (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
  Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
                                  DAG.getIntPtrConstant(RegInfo->getSlotSize(),
                                                        dl));
  StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);

  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StoreAddrReg, PtrVT));
}

SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If the subtarget is not 64-bit, we may need the global base reg
  // after isel expand pseudo, i.e., after the CGBR pass has run.
  // Therefore, ask for the GlobalBaseReg now, so that the pass
  // inserts the code for us in case we need it.
  // Otherwise, we will end up in a situation where we will
  // reference a virtual register that is not defined!
  if (!Subtarget.is64Bit()) {
    const X86InstrInfo *TII = Subtarget.getInstrInfo();
    (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
  }
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                       SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
                     Op.getOperand(0));
}

static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}

SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Root = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  if (Subtarget.is64Bit()) {
    SDValue OutChains[6];

    // Large code-model.
    const unsigned char JMP64r = 0xFF;  // 64-bit jmp through register opcode.
    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.

    const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
    const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
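
    // For reference, the bytes stored below assemble, in effect, to the
    // following sequence (byte offsets into the trampoline on the left):
    //    0: movabsq $<nested function>, %r11
    //   10: movabsq $<nest value>, %r10
    //   20: jmpq *%r11
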
    // Load the pointer to the nested function into R11.
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, dl, MVT::i64));
    OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
                                MachinePointerInfo(TrmpAddr, 2), Align(2));

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td.
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, dl, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, dl, MVT::i64));
    OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 12), Align(2));

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, dl, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20));

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, dl, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
                                Addr, MachinePointerInfo(TrmpAddr, 22));

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
    const Function *Func =
        cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td.
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      FunctionType *FTy = Func->getFunctionType();
      const AttributeList &Attrs = Func->getAttributes();

      if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 0;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs.hasParamAttr(Idx, Attribute::InReg)) {
            const DataLayout &DL = DAG.getDataLayout();
            // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
          }

        if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::Fast:
    case CallingConv::Tail:
    case CallingConv::SwiftTail:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td.
      NestReg = X86::EAX;
      break;
    }

    SDValue OutChains[4];
    SDValue Addr, Disp;

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(10, dl, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);

    // This is storing the opcode for MOV32ri.
    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
    const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
    OutChains[0] =
        DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
                     Trmp, MachinePointerInfo(TrmpAddr));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(1, dl, MVT::i32));
    OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 1), Align(1));

    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(5, dl, MVT::i32));
    OutChains[2] =
        DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), Addr,
                     MachinePointerInfo(TrmpAddr, 5), Align(1));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(6, dl, MVT::i32));
    OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
                                MachinePointerInfo(TrmpAddr, 6), Align(1));

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}

SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we use a packed lookup table of the four 2-bit
  values that we can index by FPSR[11:10]
    0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPSR[11:10]

    (0x2d >> ((FPSR & 0xc00) >> 9)) & 3
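
  For example, with FPSR[11:10] = 10 (round to +inf): FPSR & 0xc00 = 0x800,
  0x800 >> 9 = 4, and (0x2d >> 4) & 3 = 2, which is the expected FLT_ROUNDS
  value for round to +inf.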
  */

  MachineFunction &MF = DAG.getMachineFunction();
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
  SDValue StackSlot =
      DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));

  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);

  SDValue Chain = Op.getOperand(0);
  SDValue Ops[] = {Chain, StackSlot};
  Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
                                  DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
                                  Align(2), MachineMemOperand::MOStore);

  // Load FP Control Word from stack slot
  SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
  Chain = CWD.getValue(1);

  // Mask and turn the control bits into a shift for the lookup table.
  SDValue Shift =
      DAG.getNode(ISD::SRL, DL, MVT::i16,
                  DAG.getNode(ISD::AND, DL, MVT::i16,
                              CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
                  DAG.getConstant(9, DL, MVT::i8));
  Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);

  SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
  SDValue RetVal =
      DAG.getNode(ISD::AND, DL, MVT::i32,
                  DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
                  DAG.getConstant(3, DL, MVT::i32));

  RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);

  return DAG.getMergeValues({RetVal, Chain}, DL);
}

SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SDLoc DL(Op);
  SDValue Chain = Op.getNode()->getOperand(0);

  // FP control word may be set only from data in memory. So we need to allocate
  // stack space to save/load FP control word.
  int OldCWFrameIdx = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
  SDValue StackSlot =
      DAG.getFrameIndex(OldCWFrameIdx, getPointerTy(DAG.getDataLayout()));
  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, OldCWFrameIdx);
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 2, Align(2));

  // Store FP control word into memory.
  SDValue Ops[] = {Chain, StackSlot};
  Chain = DAG.getMemIntrinsicNode(
      X86ISD::FNSTCW16m, DL, DAG.getVTList(MVT::Other), Ops, MVT::i16, MMO);

  // Load FP Control Word from stack slot and clear RM field (bits 11:10).
  SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI);
  Chain = CWD.getValue(1);
  CWD = DAG.getNode(ISD::AND, DL, MVT::i16, CWD.getValue(0),
                    DAG.getConstant(0xf3ff, DL, MVT::i16));

  // Calculate new rounding mode.
  SDValue NewRM = Op.getNode()->getOperand(1);
  SDValue RMBits;
  if (auto *CVal = dyn_cast<ConstantSDNode>(NewRM)) {
    uint64_t RM = CVal->getZExtValue();
    int FieldVal;
    switch (static_cast<RoundingMode>(RM)) {
    case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break;
    case RoundingMode::TowardNegative:    FieldVal = X86::rmDownward; break;
    case RoundingMode::TowardPositive:    FieldVal = X86::rmUpward; break;
    case RoundingMode::TowardZero:        FieldVal = X86::rmTowardZero; break;
    default:
      llvm_unreachable("rounding mode is not supported by X86 hardware");
    }
    RMBits = DAG.getConstant(FieldVal, DL, MVT::i16);
  } else {
    // Need to convert argument into bits of control word:
    //    0 Round to 0       -> 11
    //    1 Round to nearest -> 00
    //    2 Round to +inf    -> 10
    //    3 Round to -inf    -> 01
    // The 2-bit value then needs to be shifted so that it occupies bits 11:10.
    // To make the conversion, put all these values into a value 0xc9 and shift
    // it left depending on the rounding mode:
    //    (0xc9 << 4) & 0xc00 = X86::rmTowardZero
    //    (0xc9 << 6) & 0xc00 = X86::rmToNearest
    //    ...
    // (0xc9 << (2 * NewRM + 4)) & 0xc00
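    // For instance, NewRM = 3 (round to -inf) gives
    // (0xc9 << (2*3 + 4)) & 0xc00 = (0xc9 << 10) & 0xc00 = 0x400,
    // i.e. bits 11:10 = 01, matching the table above.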
    SDValue ShiftValue =
        DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                    DAG.getNode(ISD::ADD, DL, MVT::i32,
                                DAG.getNode(ISD::SHL, DL, MVT::i32, NewRM,
                                            DAG.getConstant(1, DL, MVT::i8)),
                                DAG.getConstant(4, DL, MVT::i32)));
    SDValue Shifted =
        DAG.getNode(ISD::SHL, DL, MVT::i16, DAG.getConstant(0xc9, DL, MVT::i16),
                    ShiftValue);
    RMBits = DAG.getNode(ISD::AND, DL, MVT::i16, Shifted,
                         DAG.getConstant(0xc00, DL, MVT::i16));
  }

  // Update rounding mode bits and store the new FP Control Word into stack.
  CWD = DAG.getNode(ISD::OR, DL, MVT::i16, CWD, RMBits);
  Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, /* Alignment = */ 2);

  // Load FP control word from the slot.
  SDValue OpsLD[] = {Chain, StackSlot};
  MachineMemOperand *MMOL =
      MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 2, Align(2));
  Chain = DAG.getMemIntrinsicNode(
      X86ISD::FLDCW16m, DL, DAG.getVTList(MVT::Other), OpsLD, MVT::i16, MMOL);

  // If target supports SSE, set MXCSR as well. Rounding mode is encoded in the
  // same way but in bits 14:13.
  if (Subtarget.hasSSE1()) {
    // Store MXCSR into memory.
    Chain = DAG.getNode(
        ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
        DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
        StackSlot);

    // Load MXCSR from stack slot and clear RM field (bits 14:13).
    SDValue CWD = DAG.getLoad(MVT::i32, DL, Chain, StackSlot, MPI);
    Chain = CWD.getValue(1);
    CWD = DAG.getNode(ISD::AND, DL, MVT::i32, CWD.getValue(0),
                      DAG.getConstant(0xffff9fff, DL, MVT::i32));

    // Shift X87 RM bits from 11:10 to 14:13.
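    // (A left shift by 3 moves bit 10 to bit 13 and bit 11 to bit 14.)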
    RMBits = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, RMBits);
    RMBits = DAG.getNode(ISD::SHL, DL, MVT::i32, RMBits,
                         DAG.getConstant(3, DL, MVT::i8));

    // Update rounding mode bits and store the new FP Control Word into stack.
    CWD = DAG.getNode(ISD::OR, DL, MVT::i32, CWD, RMBits);
    Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, /* Alignment = */ 4);

    // Load MXCSR from the slot.
    Chain = DAG.getNode(
        ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
        DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
        StackSlot);
  }

  return Chain;
}

/// Lower a vector CTLZ using native supported vector CTLZ instruction.
//
// i8/i16 vectors are implemented using the dword LZCNT vector instruction
// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
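// For example, an i8 element zero-extended to i32 gains 24 extra leading
// zeros, so the i8 count is lzcnt32(zext32(x)) - (32 - 8).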
static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  assert(Op.getOpcode() == ISD::CTLZ);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElems = VT.getVectorNumElements();

  assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
         "Unsupported element type");

  // Split the vector; its Lo and Hi parts will be handled in the next iteration.
  if (NumElems > 16 ||
      (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
    return splitVectorIntUnary(Op, DAG);

  MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
  assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
         "Unsupported value type for operation");

  // Use the natively supported vector instruction vplzcntd.
  Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
  SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
  SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
  SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);

  return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
}

// Lower CTLZ using a PSHUFB lookup table implementation.
static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  int NumElts = VT.getVectorNumElements();
  int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
  MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);

  // Per-nibble leading zero PSHUFB lookup table.
  const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
                       /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
                       /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
                       /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};

  SmallVector<SDValue, 64> LUTVec;
  for (int i = 0; i < NumBytes; ++i)
    LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
  SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);

  // Begin by bitcasting the input to byte vector, then split those bytes
  // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
  // If the hi input nibble is zero then we add both results together, otherwise
  // we just take the hi result (by masking the lo result to zero before the
  // add).
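  // e.g. for the byte 0x0A the hi nibble is zero, so the result is
  // LUT[0x0] + LUT[0xA] = 4 + 0 = 4 leading zeros, while for 0x1A the hi
  // nibble is nonzero and the result is just LUT[0x1] = 3.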
  SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
  SDValue Zero = DAG.getConstant(0, DL, CurrVT);

  SDValue NibbleMask = DAG.getConstant(0xF, DL, CurrVT);
  SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
  SDValue Lo = DAG.getNode(ISD::AND, DL, CurrVT, Op0, NibbleMask);
  SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
  SDValue HiZ;
  if (CurrVT.is512BitVector()) {
    MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
    HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
    HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
  } else {
    HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
  }

  Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
  Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
  Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
  SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);

  // Merge the result back from vXi8 to VT, working on the lo/hi halves
  // of the current vector width in the same way we did for the nibbles.
  // If the upper half of the input element is zero then add the halves'
  // leading zero counts together, otherwise just use the upper half's.
  // Double the width of the result until we are at target width.
  while (CurrVT != VT) {
    int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
    int CurrNumElts = CurrVT.getVectorNumElements();
    MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
    MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
    SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);

    // Check if the upper half of the input element is zero.
    if (CurrVT.is512BitVector()) {
      MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
      HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
      HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
    } else {
      HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
    }
    HiZ = DAG.getBitcast(NextVT, HiZ);

    // Move the upper/lower halves to the lower bits as we'll be extending to
    // NextVT. Mask the lower result to zero if HiZ is true and add the results
    // together.
    SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
    SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
    SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
    R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
    Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
    CurrVT = NextVT;
  }

  return Res;
}

static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
                               const X86Subtarget &Subtarget,
                               SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  if (Subtarget.hasCDI() &&
      // vXi8 vectors need to be promoted to 512-bits for vXi32.
      (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
    return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);

  // Decompose 256-bit ops into smaller 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntUnary(Op, DAG);

  // Decompose 512-bit ops into smaller 256-bit ops.
  if (VT.is512BitVector() && !Subtarget.hasBWI())
    return splitVectorIntUnary(Op, DAG);

  assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
  return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
}

static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();

  if (VT.isVector())
    return LowerVectorCTLZ(Op, dl, Subtarget, DAG);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  if (Opc == ISD::CTLZ) {
    // If src is zero (i.e. bsr sets ZF), returns NumBits.
    SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
                     DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
                     Op.getValue(1)};
    Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
  }

  // Finally xor with NumBits-1.
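  // (BSR returns the index of the highest set bit, so CTLZ is NumBits-1 minus
  // that; for a power-of-two bit width this subtraction is the same as an XOR
  // with NumBits-1, e.g. for i32 input 0x00F0: BSR = 7 and 7 ^ 31 = 24.)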
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
                   DAG.getConstant(NumBits - 1, dl, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  unsigned NumBits = VT.getScalarSizeInBits();
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);

  assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
         "Only scalar CTTZ requires custom lowering");

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);

  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
                   DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
                   Op.getValue(1)};
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}

static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  if (VT == MVT::i16 || VT == MVT::i32)
    return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);

  if (VT == MVT::v32i16 || VT == MVT::v64i8)
    return splitVectorIntBinary(Op, DAG);

  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return splitVectorIntBinary(Op, DAG);
}

static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
  unsigned Opcode = Op.getOpcode();
  SDLoc DL(Op);

  if (VT == MVT::v32i16 || VT == MVT::v64i8 ||
      (VT.is256BitVector() && !Subtarget.hasInt256())) {
    assert(Op.getSimpleValueType().isInteger() &&
           "Only handle AVX vector integer operation");
    return splitVectorIntBinary(Op, DAG);
  }

  // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT SetCCResultType =
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  unsigned BitWidth = VT.getScalarSizeInBits();
  if (Opcode == ISD::USUBSAT) {
    if (!TLI.isOperationLegal(ISD::UMAX, VT) || useVPTERNLOG(Subtarget, VT)) {
      // Handle a special-case with a bit-hack instead of cmp+select:
      // usubsat X, SMIN --> (X ^ SMIN) & (X s>> BW-1)
      // If the target can use VPTERNLOG, DAGToDAG will match this as
      // "vpsra + vpternlog" which is better than "vpmax + vpsub" with a
      // "broadcast" constant load.
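      // e.g. for i8: usubsat X, 0x80 is X - 0x80 when the sign bit of X is
      // set and 0 otherwise; X ^ 0x80 equals X - 0x80 (mod 256), and the
      // arithmetic shift produces an all-ones mask exactly when X >= 0x80.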
      ConstantSDNode *C = isConstOrConstSplat(Y, true);
      if (C && C->getAPIntValue().isSignMask()) {
        SDValue SignMask = DAG.getConstant(C->getAPIntValue(), DL, VT);
        SDValue ShiftAmt = DAG.getConstant(BitWidth - 1, DL, VT);
        SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, SignMask);
        SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShiftAmt);
        return DAG.getNode(ISD::AND, DL, VT, Xor, Sra);
      }
    }
    if (!TLI.isOperationLegal(ISD::UMAX, VT)) {
      // usubsat X, Y --> (X >u Y) ? X - Y : 0
      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
      SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
      // TODO: Move this to DAGCombiner?
      if (SetCCResultType == VT &&
          DAG.ComputeNumSignBits(Cmp) == VT.getScalarSizeInBits())
        return DAG.getNode(ISD::AND, DL, VT, Cmp, Sub);
      return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
    }
  }

  if ((Opcode == ISD::SADDSAT || Opcode == ISD::SSUBSAT) &&
      (!VT.isVector() || VT == MVT::v2i64)) {
    APInt MinVal = APInt::getSignedMinValue(BitWidth);
    APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
    SDValue Zero = DAG.getConstant(0, DL, VT);
    SDValue Result =
        DAG.getNode(Opcode == ISD::SADDSAT ? ISD::SADDO : ISD::SSUBO, DL,
                    DAG.getVTList(VT, SetCCResultType), X, Y);
    SDValue SumDiff = Result.getValue(0);
    SDValue Overflow = Result.getValue(1);
    SDValue SatMin = DAG.getConstant(MinVal, DL, VT);
    SDValue SatMax = DAG.getConstant(MaxVal, DL, VT);
    SDValue SumNeg =
        DAG.getSetCC(DL, SetCCResultType, SumDiff, Zero, ISD::SETLT);
    Result = DAG.getSelect(DL, VT, SumNeg, SatMax, SatMin);
    return DAG.getSelect(DL, VT, Overflow, Result, SumDiff);
  }

  // Use default expansion.
  return SDValue();
}

static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
    // Since X86 does not have CMOV for 8-bit integer, we don't convert
    // 8-bit integer abs to NEG and CMOV.
    SDLoc DL(Op);
    SDValue N0 = Op.getOperand(0);
    SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                              DAG.getConstant(0, DL, VT), N0);
    SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_NS, DL, MVT::i8),
                     SDValue(Neg.getNode(), 1)};
    return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
  }

  // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
  if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
    SDLoc DL(Op);
    SDValue Src = Op.getOperand(0);
    SDValue Sub =
        DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
    return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
  }

  if (VT.is256BitVector() && !Subtarget.hasInt256()) {
    assert(VT.isInteger() &&
           "Only handle AVX 256-bit vector integer operation");
    return splitVectorIntUnary(Op, DAG);
  }

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
    return splitVectorIntUnary(Op, DAG);

  // Default to expand.
  return SDValue();
}

static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // For AVX1 cases, split to use legal ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if (VT == MVT::v32i16 || VT == MVT::v64i8)
    return splitVectorIntBinary(Op, DAG);

  // Default to expand.
  return SDValue();
}

static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // For AVX1 cases, split to use legal ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if (VT == MVT::v32i16 || VT == MVT::v64i8)
    return splitVectorIntBinary(Op, DAG);

  // Default to expand.
  return SDValue();
}

static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  // Decompose 256-bit ops into 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
    return splitVectorIntBinary(Op, DAG);

  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
  // vector pairs, multiply and truncate.
  if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
    unsigned NumElts = VT.getVectorNumElements();

    if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
        (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
      MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
      return DAG.getNode(
          ISD::TRUNCATE, dl, VT,
          DAG.getNode(ISD::MUL, dl, ExVT,
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
    }

    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

    // Extract the lo/hi parts to any extend to i16.
    // We're going to mask off the low byte of each result element of the
    // pmullw, so it doesn't matter what's in the high byte of each 16-bit
    // element.
    SDValue Undef = DAG.getUNDEF(VT);
29012 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
29013 SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
29016 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
29017 // If the RHS is a constant, manually unpackl/unpackh.
29018 SmallVector<SDValue, 16> LoOps, HiOps;
29019 for (unsigned i = 0; i != NumElts; i += 16) {
29020 for (unsigned j = 0; j != 8; ++j) {
29021 LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
29023 HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
29028 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
29029 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
29031 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
29032 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
29035 // Multiply, mask the lower 8bits of the lo/hi results and pack.
29036 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
29037 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
29038 return getPack(DAG, Subtarget, dl, VT, RLo, RHi);
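  // Worked example of the pack step (illustrative, not part of the original
  // comments): pmullw on the unpacked halves produces full 16-bit products,
  // e.g. 0x12 * 0x34 = 0x03A8; only the low byte (0xA8 = 936 mod 256)
  // survives the pack, which is exactly i8 wraparound multiplication.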
  // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
  if (VT == MVT::v4i32) {
    assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
           "Should not custom lower when pmulld is available!");

    // Extract the odd parts.
    static const int UnpackMask[] = { 1, -1, 3, -1 };
    SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
    SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);

    // Multiply the even parts.
    SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
                                DAG.getBitcast(MVT::v2i64, A),
                                DAG.getBitcast(MVT::v2i64, B));
    // Now multiply odd parts.
    SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
                               DAG.getBitcast(MVT::v2i64, Aodds),
                               DAG.getBitcast(MVT::v2i64, Bodds));

    Evens = DAG.getBitcast(VT, Evens);
    Odds = DAG.getBitcast(VT, Odds);

    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
    static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }
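  // Illustrative trace (not part of the original source): PMULUDQ on the
  // bitcast inputs yields <a0*b0|a2*b2> as two i64s, the odd elements are
  // shuffled down to even positions and multiplied the same way to get
  // <a1*b1|a3*b3>, and the final <0,4,2,6> shuffle interleaves the low
  // 32 bits of each product back into <a0*b0|a1*b1|a2*b2|a3*b3>.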
  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
         "Only know how to lower V2I64/V4I64/V8I64 multiply");
  assert(!Subtarget.hasDQI() && "DQI should use MULLQ");

  //  Ahi = psrlqi(a, 32);
  //  Bhi = psrlqi(b, 32);
  //
  //  AloBlo = pmuludq(a, b);
  //  AloBhi = pmuludq(a, Bhi);
  //  AhiBlo = pmuludq(Ahi, b);
  //
  //  Hi = psllqi(AloBhi + AhiBlo, 32);
  //  return AloBlo + Hi;
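  //
  // This is the standard 32x32->64 decomposition (a sketch of the reasoning,
  // not part of the original comment): writing a = Ahi*2^32 + Alo and
  // b = Bhi*2^32 + Blo gives
  //   a*b = Alo*Blo + (Alo*Bhi + Ahi*Blo)*2^32 + Ahi*Bhi*2^64,
  // and the Ahi*Bhi term vanishes modulo 2^64, so three PMULUDQs suffice.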
  KnownBits AKnown = DAG.computeKnownBits(A);
  KnownBits BKnown = DAG.computeKnownBits(B);

  APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
  bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
  bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);

  APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
  bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
  bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);

  SDValue Zero = DAG.getConstant(0, dl, VT);

  // Only multiply lo/hi halves that aren't known to be zero.
  SDValue AloBlo = Zero;
  if (!ALoIsZero && !BLoIsZero)
    AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);

  SDValue AloBhi = Zero;
  if (!ALoIsZero && !BHiIsZero) {
    SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
    AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
  }

  SDValue AhiBlo = Zero;
  if (!AHiIsZero && !BLoIsZero) {
    SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
    AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
  }

  SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
  Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);

  return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
}
static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
                                     MVT VT, bool IsSigned,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG,
                                     SDValue *Low = nullptr) {
  unsigned NumElts = VT.getVectorNumElements();

  // For vXi8 we will unpack the low and high half of each 128 bit lane to widen
  // to a vXi16 type. Do the multiplies, shift the results and pack the half
  // lane results back together.

  // We'll take different approaches for signed and unsigned.
  // For unsigned we'll use punpcklbw/punpckhbw to zero extend the bytes to
  // words and use pmullw to calculate the full 16-bit product.
  // For signed we'll use punpcklbw/punpckhbw to extend the bytes to words and
  // shift them left into the upper byte of each word. This allows us to use
  // pmulhw to calculate the full 16-bit product. This trick means we don't
  // need to sign extend the bytes to use pmullw.
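  //
  // Why the signed trick works (a sketch, not part of the original comment):
  // with x and y in the upper bytes the words hold x*2^8 and y*2^8, so the
  // full 32-bit product is (x*y)*2^16 and pmulhw's upper-16 result is
  // exactly the 16-bit signed product x*y.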
  MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
  SDValue Zero = DAG.getConstant(0, dl, VT);

  SDValue ALo, AHi;
  if (IsSigned) {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, A));
    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
  } else {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
  }

  SDValue BLo, BHi;
  if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
    // If the RHS is a constant, manually unpackl/unpackh and extend.
    SmallVector<SDValue, 16> LoOps, HiOps;
    for (unsigned i = 0; i != NumElts; i += 16) {
      for (unsigned j = 0; j != 8; ++j) {
        SDValue LoOp = B.getOperand(i + j);
        SDValue HiOp = B.getOperand(i + j + 8);

        if (IsSigned) {
          LoOp = DAG.getAnyExtOrTrunc(LoOp, dl, MVT::i16);
          HiOp = DAG.getAnyExtOrTrunc(HiOp, dl, MVT::i16);
          LoOp = DAG.getNode(ISD::SHL, dl, MVT::i16, LoOp,
                             DAG.getConstant(8, dl, MVT::i16));
          HiOp = DAG.getNode(ISD::SHL, dl, MVT::i16, HiOp,
                             DAG.getConstant(8, dl, MVT::i16));
        } else {
          LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
          HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
        }

        LoOps.push_back(LoOp);
        HiOps.push_back(HiOp);
      }
    }

    BLo = DAG.getBuildVector(ExVT, dl, LoOps);
    BHi = DAG.getBuildVector(ExVT, dl, HiOps);
  } else if (IsSigned) {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, B));
    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, B));
  } else {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Zero));
    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Zero));
  }

  // Multiply, lshr the upper 8 bits down to the lower 8 bits of the lo/hi
  // results and pack back to vXi8.
  unsigned MulOpc = IsSigned ? ISD::MULHS : ISD::MUL;
  SDValue RLo = DAG.getNode(MulOpc, dl, ExVT, ALo, BLo);
  SDValue RHi = DAG.getNode(MulOpc, dl, ExVT, AHi, BHi);

  if (Low)
    *Low = getPack(DAG, Subtarget, dl, VT, RLo, RHi);

  return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf*/ true);
}
static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  bool IsSigned = Op->getOpcode() == ISD::MULHS;
  unsigned NumElts = VT.getVectorNumElements();
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Decompose 256-bit ops into 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
    return splitVectorIntBinary(Op, DAG);

  if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
    assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
           (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
           (VT == MVT::v16i32 && Subtarget.hasAVX512()));

    // PMULxD operations multiply each even value (starting at 0) of LHS with
    // the related value of RHS and produce a widened result.
    // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
    //       => <2 x i64> <ae|cg>
    //
    // In other words, to have all the results, we need to perform two PMULxD:
    // 1. one with the even values.
    // 2. one with the odd values.
    // To achieve #2, we need to place the odd values at an even position.
    //
    // Place the odd value at an even position (basically, shift all values 1
    // step to the left):
    const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
                        9, -1, 11, -1, 13, -1, 15, -1};
    // <a|b|c|d> => <b|undef|d|undef>
    SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
                                        makeArrayRef(&Mask[0], NumElts));
    // <e|f|g|h> => <f|undef|h|undef>
    SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
                                        makeArrayRef(&Mask[0], NumElts));
    // Emit two multiplies, one for the lower 2 ints and one for the higher 2
    // ints.
    MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
    unsigned Opcode =
        (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
    // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
    //         => <2 x i64> <ae|cg>
    SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
                                                  DAG.getBitcast(MulVT, A),
                                                  DAG.getBitcast(MulVT, B)));
    // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
    //         => <2 x i64> <bf|dh>
    SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
                                                  DAG.getBitcast(MulVT, Odd0),
                                                  DAG.getBitcast(MulVT, Odd1)));

    // Shuffle it back into the right order.
    SmallVector<int, 16> ShufMask(NumElts);
    for (int i = 0; i != (int)NumElts; ++i)
      ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;

    SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);

    // If we have a signed multiply but no PMULDQ, fix up the result of an
    // unsigned multiply.
    if (IsSigned && !Subtarget.hasSSE41()) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
      SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);

      SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
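      // The fixup uses the identity (a sketch, not part of the original
      // comment): mulhs(a,b) = mulhu(a,b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
      // because reinterpreting a negative 32-bit a as unsigned adds 2^32,
      // which contributes exactly b (resp. a) to the high half.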
    }

    return Res;
  }

  // Only i8 vectors should need custom lowering after this.
  assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
          (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
         "Unsupported vector type");

  // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
  // logical shift down the upper half and pack back to i8.
  //
  // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
  // and then ashr/lshr the upper bits down to the lower bits before multiply.

  if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
      (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
    SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
    Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
  }

  return LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG);
}
// Custom lowering for SMULO/UMULO.
static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // Scalars defer to LowerXALUO.
  if (!VT.isVector())
    return LowerXALUO(Op, DAG);

  SDLoc dl(Op);
  bool IsSigned = Op->getOpcode() == ISD::SMULO;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);
  EVT OvfVT = Op->getValueType(1);

  if ((VT == MVT::v32i8 && !Subtarget.hasInt256()) ||
      (VT == MVT::v64i8 && !Subtarget.hasBWI())) {
    // Extract the LHS Lo/Hi vectors
    SDValue LHSLo, LHSHi;
    std::tie(LHSLo, LHSHi) = splitVector(A, DAG, dl);

    // Extract the RHS Lo/Hi vectors
    SDValue RHSLo, RHSHi;
    std::tie(RHSLo, RHSHi) = splitVector(B, DAG, dl);

    EVT LoOvfVT, HiOvfVT;
    std::tie(LoOvfVT, HiOvfVT) = DAG.GetSplitDestVTs(OvfVT);
    SDVTList LoVTs = DAG.getVTList(LHSLo.getValueType(), LoOvfVT);
    SDVTList HiVTs = DAG.getVTList(LHSHi.getValueType(), HiOvfVT);

    // Issue the split operations.
    SDValue Lo = DAG.getNode(Op.getOpcode(), dl, LoVTs, LHSLo, RHSLo);
    SDValue Hi = DAG.getNode(Op.getOpcode(), dl, HiVTs, LHSHi, RHSHi);

    // Join the separate data results and the overflow results.
    SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
    SDValue Ovf = DAG.getNode(ISD::CONCAT_VECTORS, dl, OvfVT, Lo.getValue(1),
                              Hi.getValue(1));

    return DAG.getMergeValues({Res, Ovf}, dl);
  }
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT SetccVT =
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
      (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
    unsigned NumElts = VT.getVectorNumElements();
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
    SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);

    SDValue Low = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);

    SDValue Ovf;
    if (IsSigned) {
      SDValue High, LowSign;
      if (OvfVT.getVectorElementType() == MVT::i1 &&
          (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
        // Rather than truncating, try to do the compare on vXi16 or vXi32.
        // Shift the high down filling with sign bits.
        High = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Mul, 8, DAG);
        // Fill all 16 bits with the sign bit from the low.
        LowSign =
            getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExVT, Mul, 8, DAG);
        LowSign = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, LowSign,
                                             15, DAG);
        SetccVT = OvfVT;
        if (!Subtarget.hasBWI()) {
          // We can't do a vXi16 compare so sign extend to v16i32.
          High = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, High);
          LowSign = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, LowSign);
        }
      } else {
        // Otherwise do the compare at vXi8.
        High = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
        High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
        LowSign =
            DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
      }

      Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
    } else {
      SDValue High =
          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
      if (OvfVT.getVectorElementType() == MVT::i1 &&
          (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
        // Rather than truncating, try to do the compare on vXi16 or vXi32.
        SetccVT = OvfVT;
        if (!Subtarget.hasBWI()) {
          // We can't do a vXi16 compare so sign extend to v16i32.
          High = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, High);
        }
      } else {
        // Otherwise do the compare at vXi8.
        High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
      }

      Ovf =
          DAG.getSetCC(dl, SetccVT, High,
                       DAG.getConstant(0, dl, High.getValueType()), ISD::SETNE);
    }

    Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);

    return DAG.getMergeValues({Low, Ovf}, dl);
  }
  SDValue Low;
  SDValue High =
      LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG, &Low);

  SDValue Ovf;
  if (IsSigned) {
    // SMULO overflows if the high bits don't match the sign of the low.
    SDValue LowSign =
        DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
    Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
  } else {
    // UMULO overflows if the high bits are non-zero.
    Ovf =
        DAG.getSetCC(dl, SetccVT, High, DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);

  return DAG.getMergeValues({Low, Ovf}, dl);
}
SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget.isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case ISD::SDIV: isSigned = true;  LC = RTLIB::SDIV_I128; break;
  case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
  case ISD::SREM: isSigned = true;  LC = RTLIB::SREM_I128; break;
  case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op->getOperand(i).getValueType();
    assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
           "Unexpected argument type for lowering");
    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
    int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
    MachinePointerInfo MPI =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
    Entry.Node = StackPtr;
    InChain =
        DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MPI, Align(16));
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Ty = PointerType::get(ArgTy, 0);
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(
          getLibcallCallingConv(LC),
          static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
          std::move(Args))
      .setInRegister()
      .setSExtResult(isSigned)
      .setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getBitcast(VT, CallInfo.first);
}
SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   SDValue &Chain) const {
  assert(Subtarget.isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  bool IsStrict = Op->isStrictFPOpcode();

  SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
  EVT ArgVT = Arg.getValueType();

  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  RTLIB::Libcall LC;
  if (Op->getOpcode() == ISD::FP_TO_SINT ||
      Op->getOpcode() == ISD::STRICT_FP_TO_SINT)
    LC = RTLIB::getFPTOSINT(ArgVT, VT);
  else
    LC = RTLIB::getFPTOUINT(ArgVT, VT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");

  SDLoc dl(Op);
  MakeLibCallOptions CallOptions;
  Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

  SDValue Result;
  // Expect the i128 result to be returned as a v2i64 in xmm0; cast back to
  // the expected VT (i128).
  std::tie(Result, Chain) =
      makeLibCall(DAG, LC, MVT::v2i64, Arg, CallOptions, dl, Chain);
  Result = DAG.getBitcast(VT, Result);
  return Result;
}
SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
                                                   SelectionDAG &DAG) const {
  assert(Subtarget.isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  bool IsStrict = Op->isStrictFPOpcode();

  SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
  EVT ArgVT = Arg.getValueType();

  assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
         "Unexpected argument type for lowering");

  RTLIB::Libcall LC;
  if (Op->getOpcode() == ISD::SINT_TO_FP ||
      Op->getOpcode() == ISD::STRICT_SINT_TO_FP)
    LC = RTLIB::getSINTTOFP(ArgVT, VT);
  else
    LC = RTLIB::getUINTTOFP(ArgVT, VT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");

  SDLoc dl(Op);
  MakeLibCallOptions CallOptions;
  SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

  // Pass the i128 argument as an indirect argument on the stack.
  SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
  Chain = DAG.getStore(Chain, dl, Arg, StackPtr, MPI, Align(16));

  SDValue Result;
  std::tie(Result, Chain) =
      makeLibCall(DAG, LC, VT, StackPtr, CallOptions, dl, Chain);
  return IsStrict ? DAG.getMergeValues({Result, Chain}, dl) : Result;
}
// Return true if the required (according to Opcode) shift-imm form is natively
// supported by the Subtarget
static bool supportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
                                        unsigned Opcode) {
  if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
    return false;

  if (VT.getScalarSizeInBits() < 16)
    return false;

  if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
      (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
    return true;

  bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
                (VT.is256BitVector() && Subtarget.hasInt256());

  bool AShift = LShift && (Subtarget.hasAVX512() ||
                           (VT != MVT::v2i64 && VT != MVT::v4i64));
  return (Opcode == ISD::SRA) ? AShift : LShift;
}
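// For reference (illustrative, not part of the original comments): the
// imm-form instructions modelled here are PSLLW/D/Q and PSRLW/D/Q (SSE2 for
// 128-bit, AVX2 for 256-bit, AVX-512 for 512-bit) plus PSRAW/D; an
// arithmetic i64 shift (VPSRAQ) only exists on AVX-512, which is why AShift
// excludes v2i64/v4i64 on older subtargets.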
// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static bool supportedVectorShiftWithBaseAmnt(MVT VT,
                                             const X86Subtarget &Subtarget,
                                             unsigned Opcode) {
  return supportedVectorShiftWithImm(VT, Subtarget, Opcode);
}
// Return true if the required (according to Opcode) variable-shift form is
// natively supported by the Subtarget
static bool supportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
                                    unsigned Opcode) {
  if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
    return false;

  if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
    return false;

  // vXi16 supported only on AVX-512, BWI
  if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
    return false;

  if (Subtarget.hasAVX512() &&
      (Subtarget.useAVX512Regs() || !VT.is512BitVector()))
    return true;

  bool LShift = VT.is128BitVector() || VT.is256BitVector();
  bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
  return (Opcode == ISD::SRA) ? AShift : LShift;
}
static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);

  auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
    assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
    MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
    SDValue Ex = DAG.getBitcast(ExVT, R);

    // ashr(R, 63) === cmp_slt(R, 0)
    if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
      assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
             "Unsupported PCMPGT op");
      return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
    }

    if (ShiftAmt >= 32) {
      // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
      SDValue Upper =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
      SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt - 32, DAG);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {9, 1, 11, 3, 13, 5, 15, 7});
    } else {
      // SRA upper i32, SRL whole i64 and select lower i32.
      SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt, DAG);
      SDValue Lower =
          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
      Lower = DAG.getBitcast(ExVT, Lower);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {8, 1, 10, 3, 12, 5, 14, 7});
    }
    return DAG.getBitcast(VT, Ex);
  };

  // Optimize shl/srl/sra with constant shift amount.
  APInt APIntShiftAmt;
  if (!X86::isConstantSplat(Amt, APIntShiftAmt))
    return SDValue();

  // If the shift amount is out of range, return undef.
  if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
    return DAG.getUNDEF(VT);

  uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();

  if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
    return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);

  // i64 SRA needs to be performed as partial shifts.
  if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
       (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
      Op.getOpcode() == ISD::SRA)
    return ArithmeticShiftRight64(ShiftAmt);
  if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
      (Subtarget.hasBWI() && VT == MVT::v64i8)) {
    unsigned NumElts = VT.getVectorNumElements();
    MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

    // Simple i8 add case
    if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
      // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
      // must be 0). (add undef, undef) however can be any value. To make this
      // safe, we must freeze R to ensure that register allocation uses the same
      // register for an undefined value. This ensures that the result will
      // still be even and preserves the original semantics.
      R = DAG.getNode(ISD::FREEZE, dl, VT, R);
      return DAG.getNode(ISD::ADD, dl, VT, R, R);
    }

    // ashr(R, 7) === cmp_slt(R, 0)
    if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
      SDValue Zeros = DAG.getConstant(0, dl, VT);
      if (VT.is512BitVector()) {
        assert(VT == MVT::v64i8 && "Unexpected element type!");
        SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
        return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
      }
      return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
    }
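    // Illustrative check (not part of the original comment): ashr(x, 7)
    // replicates an i8's sign bit across the lane, giving 0xFF for negative
    // x and 0x00 otherwise - the same all-ones/all-zeros mask that
    // PCMPGT(0, x) produces.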
    // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
    if (VT == MVT::v16i8 && Subtarget.hasXOP())
      return SDValue();

    if (Op.getOpcode() == ISD::SHL) {
      // Make a large shift.
      SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
                                               ShiftAmt, DAG);
      SHL = DAG.getBitcast(VT, SHL);
      // Zero out the rightmost bits.
      APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
      return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
    }
    if (Op.getOpcode() == ISD::SRL) {
      // Make a large shift.
      SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
                                               ShiftAmt, DAG);
      SRL = DAG.getBitcast(VT, SRL);
      // Zero out the leftmost bits.
      APInt Mask = APInt::getLowBitsSet(8, 8 - ShiftAmt);
      return DAG.getNode(ISD::AND, dl, VT, SRL, DAG.getConstant(Mask, dl, VT));
    }
    if (Op.getOpcode() == ISD::SRA) {
      // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
      SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);

      SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
      Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
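      // Worked example of the identity (illustrative): for x = 0xF0 (-16)
      // and ShiftAmt = 4, lshr gives 0x0F and Mask = 0x08; then
      // 0x0F ^ 0x08 = 0x07 and 0x07 - 0x08 = 0xFF (-1) = ashr(-16, 4).
      // The xor/sub pair sign-extends the shifted-in sign bit.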
      return Res;
    }
    llvm_unreachable("Unknown shift opcode.");
  }

  return SDValue();
}
static SDValue LowerShiftByScalarVariable(SDValue Op, SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned Opcode = Op.getOpcode();
  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);

  int BaseShAmtIdx = -1;
  if (SDValue BaseShAmt = DAG.getSplatSourceVector(Amt, BaseShAmtIdx)) {
    if (supportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode))
      return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, BaseShAmtIdx,
                                 Subtarget, DAG);

    // vXi8 shifts - shift as v8i16 + mask result.
    if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
         (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
         VT == MVT::v64i8) &&
        !Subtarget.hasXOP()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
      if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
        unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
        unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);

        // Create the mask using vXi16 shifts. For shift-rights we need to move
        // the upper byte down before splatting the vXi8 mask.
        SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
        BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
                                      BaseShAmt, BaseShAmtIdx, Subtarget, DAG);
        if (Opcode != ISD::SHL)
          BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
                                               8, DAG);
        BitMask = DAG.getBitcast(VT, BitMask);
        BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
                                       SmallVector<int, 64>(NumElts, 0));

        SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
                                          DAG.getBitcast(ExtVT, R), BaseShAmt,
                                          BaseShAmtIdx, Subtarget, DAG);
        Res = DAG.getBitcast(VT, Res);
        Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);

        if (Opcode == ISD::SRA) {
          // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
          // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
          SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
          SignMask =
              getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask, BaseShAmt,
                                  BaseShAmtIdx, Subtarget, DAG);
          SignMask = DAG.getBitcast(VT, SignMask);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
        }
        return Res;
      }
    }
  }

  return SDValue();
}
// Convert a shift/rotate left amount to a multiplication scale factor.
static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  MVT VT = Amt.getSimpleValueType();
  if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
        (Subtarget.hasInt256() && VT == MVT::v16i16) ||
        (Subtarget.hasAVX512() && VT == MVT::v32i16) ||
        (!Subtarget.hasAVX512() && VT == MVT::v16i8) ||
        (Subtarget.hasInt256() && VT == MVT::v32i8) ||
        (Subtarget.hasBWI() && VT == MVT::v64i8)))
    return SDValue();

  MVT SVT = VT.getVectorElementType();
  unsigned SVTBits = SVT.getSizeInBits();
  unsigned NumElems = VT.getVectorNumElements();

  APInt UndefElts;
  SmallVector<APInt> EltBits;
  if (getTargetConstantBitsFromNode(Amt, SVTBits, UndefElts, EltBits)) {
    APInt One(SVTBits, 1);
    SmallVector<SDValue> Elts(NumElems, DAG.getUNDEF(SVT));
    for (unsigned I = 0; I != NumElems; ++I) {
      if (UndefElts[I] || EltBits[I].uge(SVTBits))
        continue;
      uint64_t ShAmt = EltBits[I].getZExtValue();
      Elts[I] = DAG.getConstant(One.shl(ShAmt), dl, SVT);
    }
    return DAG.getBuildVector(VT, dl, Elts);
  }

  // If the target doesn't support variable shifts, use either FP conversion
  // or integer multiplication to avoid shifting each element individually.
  if (VT == MVT::v4i32) {
    Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
                      DAG.getConstant(0x3f800000U, dl, VT));
    Amt = DAG.getBitcast(MVT::v4f32, Amt);
    return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
  }
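  // Why the FP trick works (a sketch, not part of the original comment):
  // shifting the amount into the exponent field (bit 23) and adding the
  // bias 127 (0x3f800000) constructs the float 2^amt exactly; e.g. amt = 3
  // encodes exponent 130, i.e. 8.0f, and FP_TO_SINT recovers 1 << 3.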
  // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
  if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
    SDValue Z = DAG.getConstant(0, dl, VT);
    SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
    SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
    Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
    Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
    if (Subtarget.hasSSE41())
      return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
    return getPack(DAG, Subtarget, dl, VT, Lo, Hi);
  }

  return SDValue();
}
static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());

  unsigned Opc = Op.getOpcode();
  unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);

  assert(VT.isVector() && "Custom lowering only for vector shifts!");
  assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");

  if (SDValue V = LowerShiftByScalarImmediate(Op, DAG, Subtarget))
    return V;

  if (SDValue V = LowerShiftByScalarVariable(Op, DAG, Subtarget))
    return V;

  if (supportedVectorVarShift(VT, Subtarget, Opc))
    return Op;

  // i64 vector arithmetic shift can be emulated with the transform:
  // M = lshr(SIGN_MASK, Amt)
  // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
  if (((VT == MVT::v2i64 && !Subtarget.hasXOP()) ||
       (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
      Opc == ISD::SRA) {
    SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
    SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
    R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
    R = DAG.getNode(ISD::XOR, dl, VT, R, M);
    R = DAG.getNode(ISD::SUB, dl, VT, R, M);
    return R;
  }

  // XOP has 128-bit variable logical/arithmetic shifts.
  // +ve/-ve Amt = shift left/right.
  if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
                             VT == MVT::v8i16 || VT == MVT::v16i8)) {
    if (Opc == ISD::SRL || Opc == ISD::SRA) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
    }
    if (Opc == ISD::SHL || Opc == ISD::SRL)
      return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
    if (Opc == ISD::SRA)
      return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
  }

  // v2i64 vector logical shifts can efficiently avoid scalarization - do the
  // shifts per-lane and then shuffle the partial results back together.
  if (VT == MVT::v2i64 && Opc != ISD::SRA) {
    // Splat the shift amounts so the scalar shifts above will catch it.
    SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
    SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
    SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
    SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
    return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
  }
  // If possible, lower this shift as a sequence of two shifts by
  // constant plus a BLENDing shuffle instead of scalarizing it.
  // Example:
  //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
  //
  // Could be rewritten as:
  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
  //
  // The advantage is that the two shifts from the example would be
  // lowered as X86ISD::VSRLI nodes in parallel before blending.
  if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
                      (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
    SDValue Amt1, Amt2;
    unsigned NumElts = VT.getVectorNumElements();
    SmallVector<int, 8> ShuffleMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue A = Amt->getOperand(i);
      if (A.isUndef()) {
        ShuffleMask.push_back(SM_SentinelUndef);
        continue;
      }
      if (!Amt1 || Amt1 == A) {
        ShuffleMask.push_back(i);
        Amt1 = A;
        continue;
      }
      if (!Amt2 || Amt2 == A) {
        ShuffleMask.push_back(i + NumElts);
        Amt2 = A;
        continue;
      }
      break;
    }

    // Only perform this blend if we can perform it without loading a mask.
    if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
        (VT != MVT::v16i16 ||
         is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
        (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
         canWidenShuffleElements(ShuffleMask))) {
      auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
      auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
      if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
          Cst2->getAPIntValue().ult(EltSizeInBits)) {
        SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
                                                    Cst1->getZExtValue(), DAG);
        SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
                                                    Cst2->getZExtValue(), DAG);
        return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
      }
    }
  }
  // If possible, lower this packed shift into a vector multiply instead of
  // expanding it into a sequence of scalar shifts.
  // For v32i8 cases, it might be quicker to split/extend to vXi16 shifts.
  if (Opc == ISD::SHL && !(VT == MVT::v32i8 && (Subtarget.hasXOP() ||
                                                Subtarget.canExtendTo512BW())))
    if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
      return DAG.getNode(ISD::MUL, dl, VT, R, Scale);

  // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
  // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
  if (Opc == ISD::SRL && ConstantAmt &&
      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
      SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
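      // Identity used here (a sketch, not part of the original comment):
      // for 16-bit lanes, lshr(x, a) == mulhu(x, 1 << (16 - a)) since
      // (x * 2^(16-a)) >> 16 == x >> a; the a == 0 case needs the select
      // below because 1 << 16 doesn't fit in the lane.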
      return DAG.getSelect(dl, VT, ZAmt, R, Res);
    }
  }

  // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
  // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
  // TODO: Special case handling for shift by 0/1, really we can afford either
  // of these cases in pre-SSE41/XOP/AVX512 but not both.
  if (Opc == ISD::SRA && ConstantAmt &&
      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
      ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
        !Subtarget.hasAVX512()) ||
       DAG.isKnownNeverZero(Amt))) {
    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
      SDValue Amt0 =
          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
      SDValue Amt1 =
          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
      SDValue Sra1 =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
      SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
      Res = DAG.getSelect(dl, VT, Amt0, R, Res);
      return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
    }
  }
  // v4i32 non-uniform shifts.
  // If the shift amount is constant we can shift each lane using the SSE2
  // immediate shifts, else we need to zero-extend each lane to the lower i64
  // and shift using the SSE2 variable shifts.
  // The separate results can then be blended together.
  if (VT == MVT::v4i32) {
    SDValue Amt0, Amt1, Amt2, Amt3;
    if (ConstantAmt) {
      Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
      Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
      Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
      Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
    } else {
      // The SSE2 shifts use the lower i64 as the same shift amount for
      // all lanes and the upper i64 is ignored. On AVX we're better off
      // just zero-extending, but for SSE just duplicating the top 16-bits is
      // cheaper and has the same effect for out of range values.
      if (Subtarget.hasAVX()) {
        SDValue Z = DAG.getConstant(0, dl, VT);
        Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
        Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
        Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
        Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
      } else {
        SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
        SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
                                             {4, 5, 6, 7, -1, -1, -1, -1});
        SDValue Msk02 = getV4X86ShuffleImm8ForMask({0, 1, 1, 1}, dl, DAG);
        SDValue Msk13 = getV4X86ShuffleImm8ForMask({2, 3, 3, 3}, dl, DAG);
        Amt0 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk02);
        Amt1 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk13);
        Amt2 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk02);
        Amt3 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk13);
      }
    }

    unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
    SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
    SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
    SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
    SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));

    // Merge the shifted lane results optimally with/without PBLENDW.
    // TODO - ideally shuffle combining would handle this.
    if (Subtarget.hasSSE41()) {
      SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
      SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
      return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
    }
    SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
    SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
    return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
  }
  // It's worth extending once and using the vXi16/vXi32 shifts for smaller
  // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
  // make the existing SSE solution better.
  // NOTE: We honor the preferred vector width before promoting to 512-bits.
  if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
      (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
      (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
    assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
           "Unexpected vector type");
    MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
    MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
    unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    R = DAG.getNode(ExtOpc, dl, ExtVT, R);
    Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Opc, dl, ExtVT, R, Amt));
  }
  // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
  // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
  if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
      (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
       (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
      !Subtarget.hasXOP()) {
    int NumElts = VT.getVectorNumElements();
    SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);

    // Extend constant shift amount to vXi16 (it doesn't matter if the type
    // isn't legal).
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
    Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
    Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
    assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
           "Constant build vector expected");
    if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
      R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
                          : DAG.getZExtOrTrunc(R, dl, ExVT);
      R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
      R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
      return DAG.getZExtOrTrunc(R, dl, VT);
    }

    SmallVector<SDValue, 16> LoAmt, HiAmt;
    for (int i = 0; i != NumElts; i += 16) {
      for (int j = 0; j != 8; ++j) {
        LoAmt.push_back(Amt.getOperand(i + j));
        HiAmt.push_back(Amt.getOperand(i + j + 8));
      }
    }

    MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
    SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
    SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);

    SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
    SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
    LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
    HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
    LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
    HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
    LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
    HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
    return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
  }
  if (VT == MVT::v16i8 ||
      (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
      (VT == MVT::v64i8 && Subtarget.hasBWI())) {
    MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);

    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
      if (VT.is512BitVector()) {
        // On AVX512BW targets we make use of the fact that VSELECT lowers
        // to a masked blend which selects bytes based just on the sign bit
        // extracted to a mask.
        MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
        V0 = DAG.getBitcast(VT, V0);
        V1 = DAG.getBitcast(VT, V1);
        Sel = DAG.getBitcast(VT, Sel);
        Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
                           ISD::SETGT);
        return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
      } else if (Subtarget.hasSSE41()) {
        // On SSE41 targets we can use PBLENDVB which selects bytes based just
        // on the sign bit.
        V0 = DAG.getBitcast(VT, V0);
        V1 = DAG.getBitcast(VT, V1);
        Sel = DAG.getBitcast(VT, Sel);
        return DAG.getBitcast(SelVT,
                              DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
      }
      // On pre-SSE41 targets we test for the sign bit by comparing to
      // zero - a negative value will set all bits of the lanes to true
      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue Z = DAG.getConstant(0, dl, SelVT);
      SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
      return DAG.getSelect(dl, SelVT, C, V0, V1);
    };
    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
    // We can safely do this using i16 shifts as we're only interested in
    // the 3 lower bits of each byte.
    Amt = DAG.getBitcast(ExtVT, Amt);
    Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
    Amt = DAG.getBitcast(VT, Amt);
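    // How the bit-serial selection below works (a sketch, not part of the
    // original comment): after a << 5 the MSB of each byte holds bit 2 of
    // the shift amount, so blending in shift(r, 4) applies the "4" bit;
    // doubling Amt exposes bit 1 for the shift-by-2 round and then bit 0
    // for the shift-by-1 round - three blends cover any amount 0-7.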
    if (Opc == ISD::SHL || Opc == ISD::SRL) {
      // r = VSELECT(r, shift(r, 4), a);
      SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);

      // a += a
      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

      // r = VSELECT(r, shift(r, 2), a);
      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);

      // a += a
      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

      // return VSELECT(r, shift(r, 1), a);
      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);
      return R;
    }
    if (Opc == ISD::SRA) {
      // For SRA we need to unpack each byte to the higher byte of a i16 vector
      // so we can correctly sign extend. We don't care what happens to the
      // lower byte.
      SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
      SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
      SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
      SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
      ALo = DAG.getBitcast(ExtVT, ALo);
      AHi = DAG.getBitcast(ExtVT, AHi);
      RLo = DAG.getBitcast(ExtVT, RLo);
      RHi = DAG.getBitcast(ExtVT, RHi);

      // r = VSELECT(r, shift(r, 4), a);
      SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
      SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // a += a
      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);

      // r = VSELECT(r, shift(r, 2), a);
      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // a += a
      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);

      // r = VSELECT(r, shift(r, 1), a);
      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // Logical shift the result back to the lower byte, leaving a zero upper
      // byte meaning that we can safely pack with PACKUSWB.
      RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
      RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
      return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
    }
  }
  if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
    MVT ExtVT = MVT::v8i32;
    SDValue Z = DAG.getConstant(0, dl, VT);
    SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
    SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
    SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
    SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
    ALo = DAG.getBitcast(ExtVT, ALo);
    AHi = DAG.getBitcast(ExtVT, AHi);
    RLo = DAG.getBitcast(ExtVT, RLo);
    RHi = DAG.getBitcast(ExtVT, RHi);
    SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
    SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
    return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
  }
  if (VT == MVT::v8i16) {
    // If we have a constant shift amount, the non-SSE41 path is best as
    // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
    bool UseSSE41 = Subtarget.hasSSE41() &&
                    !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());

    auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
      // On SSE41 targets we can use PBLENDVB which selects bytes based just on
      // the sign bit.
      if (UseSSE41) {
        MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
        V0 = DAG.getBitcast(ExtVT, V0);
        V1 = DAG.getBitcast(ExtVT, V1);
        Sel = DAG.getBitcast(ExtVT, Sel);
        return DAG.getBitcast(
            VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
      }
      // On pre-SSE41 targets we splat the sign bit - a negative value will
      // set all bits of the lanes to true and VSELECT uses that in
      // its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue C =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
      return DAG.getSelect(dl, VT, C, V0, V1);
    };

    // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
    if (UseSSE41) {
      // On SSE41 targets we need to replicate the shift mask in both
      // bytes for PBLENDVB.
      Amt = DAG.getNode(
          ISD::OR, dl, VT,
          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
    } else {
      Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
    }

    // r = VSELECT(r, shift(r, 8), a);
    SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // r = VSELECT(r, shift(r, 4), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // r = VSELECT(r, shift(r, 2), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // return VSELECT(r, shift(r, 1), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
    R = SignBitSelect(Amt, M, R);
    return R;
  }
  // Decompose 256-bit shifts into 128-bit shifts.
  if (VT.is256BitVector())
    return splitVectorIntBinary(Op, DAG);

  if (VT == MVT::v32i16 || VT == MVT::v64i8)
    return splitVectorIntBinary(Op, DAG);

  return SDValue();
}
static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
         "Unexpected funnel shift opcode!");

  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  bool IsFSHR = Op.getOpcode() == ISD::FSHR;

  if (VT.isVector()) {
    APInt APIntShiftAmt;
    bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);

    if (Subtarget.hasVBMI2() && EltSizeInBits > 8) {
      if (IsFSHR)
        std::swap(Op0, Op1);

      if (IsCstSplat) {
        uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
        SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
        return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
                             {Op0, Op1, Imm}, DAG, Subtarget);
      }
      return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
                           {Op0, Op1, Amt}, DAG, Subtarget);
    }
    assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
            VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16 ||
            VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) &&
           "Unexpected funnel shift type!");
    // fshl(x,y,z) -> (unpack(y,x) << (z & (bw-1))) >> bw.
    // fshr(x,y,z) -> unpack(y,x) >> (z & (bw-1)).
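    // Concretely (illustrative, not part of the original comment): for
    // bw = 8, unpack(y,x) forms the 16-bit value (x:y), so a single wide
    // shift by z & 7 leaves (x << z) | (y >> (8 - z)) in the high byte for
    // fshl, or (y >> z) | (x << (8 - z)) in the low byte for fshr.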
    if (IsCstSplat)
      return SDValue();

    SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
    SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
    bool IsCst = ISD::isBuildVectorOfConstantSDNodes(AmtMod.getNode());

    // Constant vXi16 funnel shifts can be efficiently handled by default.
    if (IsCst && EltSizeInBits == 16)
      return SDValue();

    unsigned ShiftOpc = IsFSHR ? ISD::SRL : ISD::SHL;
    unsigned NumElts = VT.getVectorNumElements();
    MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
    MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);

    // Split 256-bit integers on XOP/pre-AVX2 targets.
    // Split 512-bit integers on non 512-bit BWI targets.
    if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 16) ||
                                 !Subtarget.hasAVX2())) ||
        (VT.is512BitVector() && !Subtarget.useBWIRegs() &&
         EltSizeInBits < 32)) {
      // Pre-mask the amount modulo using the wider vector.
      Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
      return splitVectorOp(Op, DAG);
    }

    // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
    if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
      int ScalarAmtIdx = -1;
      if (SDValue ScalarAmt = DAG.getSplatSourceVector(AmtMod, ScalarAmtIdx)) {
        // Uniform vXi16 funnel shifts can be efficiently handled by default.
        if (EltSizeInBits == 16)
          return SDValue();

        SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
        SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
        Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt,
                                 ScalarAmtIdx, Subtarget, DAG);
        Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt,
                                 ScalarAmtIdx, Subtarget, DAG);
        return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
      }
    }
30435 MVT WideSVT = MVT::getIntegerVT(
30436 std::min<unsigned>(EltSizeInBits * 2, Subtarget.hasBWI() ? 16 : 32));
30437 MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);
30439 // If per-element shifts are legal, fallback to generic expansion.
30440 if (supportedVectorVarShift(VT, Subtarget, ShiftOpc) || Subtarget.hasXOP())
30443 // Attempt to fold as:
30444 // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
30445 // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
30446 if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
30447 supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
30448 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Op0);
30449 Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op1);
30450 AmtMod = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
30451 Op0 = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, Op0,
30452 EltSizeInBits, DAG);
30453 SDValue Res = DAG.getNode(ISD::OR, DL, WideVT, Op0, Op1);
30454 Res = DAG.getNode(ShiftOpc, DL, WideVT, Res, AmtMod);
30455 if (!IsFSHR)
30456 Res = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, Res,
30457 EltSizeInBits, DAG);
30458 return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
30459 }
30461 // Attempt to fold per-element (ExtVT) shift as unpack(y,x) << zext(z)
30462 if (((IsCst || !Subtarget.hasAVX512()) && !IsFSHR && EltSizeInBits <= 16) ||
30463 supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
30464 SDValue Z = DAG.getConstant(0, DL, VT);
30465 SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
30466 SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
30467 SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
30468 SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
30469 SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
30470 SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
30471 return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
30474 // Fallback to generic expansion.
30475 return SDValue();
30476 }
30477 assert(
30478 (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
30479 "Unexpected funnel shift type!");
30481 // Expand slow SHLD/SHRD cases if we are not optimizing for size.
30482 bool OptForSize = DAG.shouldOptForSize();
30483 bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();
30485 // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
30486 // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
30487 if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
30488 !isa<ConstantSDNode>(Amt)) {
30489 SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
30490 SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
30491 Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
30492 Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
30493 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
30494 SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
30495 Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
30496 if (IsFSHR) {
30497 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
30498 } else {
30499 Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
30500 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
30501 }
30502 return DAG.getZExtOrTrunc(Res, DL, VT);
30503 }
30505 if (VT == MVT::i8 || ExpandFunnel)
30506 return SDValue();
30508 // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
30509 if (VT == MVT::i16) {
30510 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
30511 DAG.getConstant(15, DL, Amt.getValueType()));
30512 unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
30513 return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
30514 }
30516 return Op;
30517 }
30519 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
30520 SelectionDAG &DAG) {
30521 MVT VT = Op.getSimpleValueType();
30522 assert(VT.isVector() && "Custom lowering only for vector rotates!");
30524 SDLoc DL(Op);
30525 SDValue R = Op.getOperand(0);
30526 SDValue Amt = Op.getOperand(1);
30527 unsigned Opcode = Op.getOpcode();
30528 unsigned EltSizeInBits = VT.getScalarSizeInBits();
30529 int NumElts = VT.getVectorNumElements();
30530 bool IsROTL = Opcode == ISD::ROTL;
30532 // Check for constant splat rotation amount.
30533 APInt CstSplatValue;
30534 bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
30536 // Check for splat rotate by zero.
30537 if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
30538 return R;
30540 // AVX512 implicitly uses modulo rotation amounts.
30541 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
30542 // Attempt to rotate by immediate.
30543 if (IsCstSplat) {
30544 unsigned RotOpc = IsROTL ? X86ISD::VROTLI : X86ISD::VROTRI;
30545 uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
30546 return DAG.getNode(RotOpc, DL, VT, R,
30547 DAG.getTargetConstant(RotAmt, DL, MVT::i8));
30548 }
30550 // Else, fall back on VPROLV/VPRORV.
30551 return Op;
30552 }
30554 // AVX512 VBMI2 vXi16 - lower to funnel shifts.
30555 if (Subtarget.hasVBMI2() && 16 == EltSizeInBits) {
30556 unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
30557 return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
30558 }
30560 SDValue Z = DAG.getConstant(0, DL, VT);
30562 if (!IsROTL) {
30563 // If the ISD::ROTR amount is constant, we're always better converting to
30564 // ISD::ROTL.
30565 if (SDValue NegAmt = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {Z, Amt}))
30566 return DAG.getNode(ISD::ROTL, DL, VT, R, NegAmt);
30568 // XOP targets always prefer ISD::ROTL.
30569 if (Subtarget.hasXOP())
30570 return DAG.getNode(ISD::ROTL, DL, VT, R,
30571 DAG.getNode(ISD::SUB, DL, VT, Z, Amt));
30572 }
30574 // Split 256-bit integers on XOP/pre-AVX2 targets.
30575 if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2()))
30576 return splitVectorIntBinary(Op, DAG);
30578 // XOP has 128-bit vector variable + immediate rotates.
30579 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
30580 // XOP implicitly uses modulo rotation amounts.
30581 if (Subtarget.hasXOP()) {
30582 assert(IsROTL && "Only ROTL expected");
30583 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
30585 // Attempt to rotate by immediate.
30586 if (IsCstSplat) {
30587 uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
30588 return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
30589 DAG.getTargetConstant(RotAmt, DL, MVT::i8));
30590 }
30592 // Use general rotate by variable (per-element).
30593 return Op;
30594 }
30596 // Rotate by a uniform constant - expand back to shifts.
30597 if (IsCstSplat)
30598 return SDValue();
30600 // Split 512-bit integers on non 512-bit BWI targets.
30601 if (VT.is512BitVector() && !Subtarget.useBWIRegs())
30602 return splitVectorIntBinary(Op, DAG);
30604 assert(
30605 (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
30606 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
30607 Subtarget.hasAVX2()) ||
30608 ((VT == MVT::v32i16 || VT == MVT::v64i8) && Subtarget.useBWIRegs())) &&
30609 "Only vXi32/vXi16/vXi8 vector rotates supported");
30611 MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
30612 MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
30614 SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
30615 SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30617 // Attempt to fold as unpack(x,x) << zext(splat(y)):
30618 // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
30619 // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
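// e.g. for vXi8, unpack(x,x) forms 16-bit lanes holding x:x, so shifting
// left by (y & 7) leaves rotl(x,y) in the high byte of each lane, which
// getPack then extracts.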
30620 if (EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) {
30621 int BaseRotAmtIdx = -1;
30622 if (SDValue BaseRotAmt = DAG.getSplatSourceVector(AmtMod, BaseRotAmtIdx)) {
30623 if (EltSizeInBits == 16 && Subtarget.hasSSE41()) {
30624 unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
30625 return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
30626 }
30627 unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
30628 SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
30629 SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
30630 Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
30631 BaseRotAmtIdx, Subtarget, DAG);
30632 Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
30633 BaseRotAmtIdx, Subtarget, DAG);
30634 return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
30635 }
30636 }
30638 // v16i8/v32i8/v64i8: Split rotation into rot4/rot2/rot1 stages and select by
30639 // the amount bit.
30640 // TODO: We're doing nothing here that we couldn't do for funnel shifts.
30641 if (EltSizeInBits == 8) {
30642 bool IsConstAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
30643 MVT WideVT =
30644 MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);
30645 unsigned ShiftOpc = IsROTL ? ISD::SHL : ISD::SRL;
30647 // Attempt to fold as:
30648 // rotl(x,y) -> (((aext(x) << bw) | zext(x)) << (y & (bw-1))) >> bw.
30649 // rotr(x,y) -> (((aext(x) << bw) | zext(x)) >> (y & (bw-1))).
30650 if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
30651 supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
30652 // If we're rotating by constant, just use default promotion.
30653 if (IsConstAmt)
30654 return SDValue();
30655 // See if we can perform this by widening to vXi16 or vXi32.
30656 R = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, R);
30657 R = DAG.getNode(
30658 ISD::OR, DL, WideVT, R,
30659 getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, R, 8, DAG));
30660 Amt = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
30661 R = DAG.getNode(ShiftOpc, DL, WideVT, R, Amt);
30662 if (IsROTL)
30663 R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
30664 return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
30665 }
30667 // Attempt to fold as unpack(x,x) << zext(y):
30668 // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
30669 // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
30670 if (IsConstAmt || supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
30671 // See if we can perform this by unpacking to lo/hi vXi16.
30672 SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
30673 SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
30674 SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
30675 SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
30676 SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
30677 SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
30678 return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
30679 }
30680 assert((VT == MVT::v16i8 || VT == MVT::v32i8) && "Unsupported vXi8 type");
30682 // We don't need ModuloAmt here as we just peek at individual bits.
30683 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
30684 if (Subtarget.hasSSE41()) {
30685 // On SSE41 targets we can use PBLENDVB which selects bytes based just
30686 // on the sign bit.
30687 V0 = DAG.getBitcast(VT, V0);
30688 V1 = DAG.getBitcast(VT, V1);
30689 Sel = DAG.getBitcast(VT, Sel);
30690 return DAG.getBitcast(SelVT,
30691 DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
30692 }
30693 // On pre-SSE41 targets we test for the sign bit by comparing to
30694 // zero - a negative value will set all bits of the lanes to true
30695 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
30696 SDValue Z = DAG.getConstant(0, DL, SelVT);
30697 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
30698 return DAG.getSelect(DL, SelVT, C, V0, V1);
30699 };
30701 // ISD::ROTR is currently only profitable on AVX512 targets with VPTERNLOG.
30702 if (!IsROTL && !useVPTERNLOG(Subtarget, VT)) {
30703 Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
30704 IsROTL = true;
30705 }
30707 unsigned ShiftLHS = IsROTL ? ISD::SHL : ISD::SRL;
30708 unsigned ShiftRHS = IsROTL ? ISD::SRL : ISD::SHL;
30710 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
30711 // We can safely do this using i16 shifts as we're only interested in
30712 // the 3 lower bits of each byte.
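// After this shift, amount bit 2 sits in each byte's sign bit, so the first
// blend below selects between r and rot(r, 4); each "a += a" then moves the
// next lower amount bit into the sign position for the rot2/rot1 stages.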
30713 Amt = DAG.getBitcast(ExtVT, Amt);
30714 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
30715 Amt = DAG.getBitcast(VT, Amt);
30717 // r = VSELECT(r, rot(r, 4), a);
30718 SDValue M;
30719 M = DAG.getNode(
30720 ISD::OR, DL, VT,
30721 DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(4, DL, VT)),
30722 DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(4, DL, VT)));
30723 R = SignBitSelect(VT, Amt, M, R);
30725 // a += a;
30726 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
30728 // r = VSELECT(r, rot(r, 2), a);
30729 M = DAG.getNode(
30730 ISD::OR, DL, VT,
30731 DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(2, DL, VT)),
30732 DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(6, DL, VT)));
30733 R = SignBitSelect(VT, Amt, M, R);
30735 // a += a;
30736 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
30738 // return VSELECT(r, rot(r, 1), a);
30739 M = DAG.getNode(
30740 ISD::OR, DL, VT,
30741 DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(1, DL, VT)),
30742 DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(7, DL, VT)));
30743 return SignBitSelect(VT, Amt, M, R);
30744 }
30746 bool IsSplatAmt = DAG.isSplatValue(Amt);
30747 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
30748 bool LegalVarShifts = supportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
30749 supportedVectorVarShift(VT, Subtarget, ISD::SRL);
30751 // Fallback for splats + all supported variable shifts.
30752 // Also fall back for non-constant amounts on AVX2 vXi16.
30753 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
30754 Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30755 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
30756 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
30757 SDValue SHL = DAG.getNode(IsROTL ? ISD::SHL : ISD::SRL, DL, VT, R, Amt);
30758 SDValue SRL = DAG.getNode(IsROTL ? ISD::SRL : ISD::SHL, DL, VT, R, AmtR);
30759 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
30760 }
30762 // Everything below assumes ISD::ROTL.
30763 if (!IsROTL) {
30764 Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
30765 IsROTL = true;
30766 }
30768 // ISD::ROT* uses modulo rotate amounts.
30769 Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30771 assert(IsROTL && "Only ROTL supported");
30773 // As with shifts, attempt to convert the rotation amount to a multiplication
30774 // factor, fallback to general expansion.
30775 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
30776 if (!Scale)
30777 return SDValue();
30779 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
30780 if (EltSizeInBits == 16) {
30781 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
30782 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
30783 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
30786 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
30787 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
30788 // that can then be OR'd with the lower 32-bits.
30789 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
30790 static const int OddMask[] = {1, -1, 3, -1};
30791 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
30792 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
30794 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
30795 DAG.getBitcast(MVT::v2i64, R),
30796 DAG.getBitcast(MVT::v2i64, Scale));
30797 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
30798 DAG.getBitcast(MVT::v2i64, R13),
30799 DAG.getBitcast(MVT::v2i64, Scale13));
30800 Res02 = DAG.getBitcast(VT, Res02);
30801 Res13 = DAG.getBitcast(VT, Res13);
30803 return DAG.getNode(ISD::OR, DL, VT,
30804 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
30805 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
30806 }
30808 /// Returns true if the operand type is exactly twice the native width, and
30809 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
30810 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
30811 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
30812 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
30813 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
30815 if (OpWidth == 64)
30816 return Subtarget.canUseCMPXCHG8B() && !Subtarget.is64Bit();
30817 if (OpWidth == 128)
30818 return Subtarget.canUseCMPXCHG16B();
30820 return false;
30821 }
30823 TargetLoweringBase::AtomicExpansionKind
30824 X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
30825 Type *MemType = SI->getValueOperand()->getType();
30827 bool NoImplicitFloatOps =
30828 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
30829 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
30830 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
30831 (Subtarget.hasSSE1() || Subtarget.hasX87()))
30832 return AtomicExpansionKind::None;
30834 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
30835 : AtomicExpansionKind::None;
30836 }
30838 // Note: this turns large loads into lock cmpxchg8b/16b.
30839 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
30840 TargetLowering::AtomicExpansionKind
30841 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
30842 Type *MemType = LI->getType();
30844 // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
30845 // can use movq to do the load. If we have X87 we can load into an 80-bit
30846 // X87 register and store it to a stack temporary.
30847 bool NoImplicitFloatOps =
30848 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
30849 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
30850 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
30851 (Subtarget.hasSSE1() || Subtarget.hasX87()))
30852 return AtomicExpansionKind::None;
30854 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
30855 : AtomicExpansionKind::None;
30856 }
30858 TargetLowering::AtomicExpansionKind
30859 X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
30860 // If the atomicrmw's result isn't actually used, we can just add a "lock"
30861 // prefix to a normal instruction for these operations.
30862 if (AI->use_empty())
30863 return AtomicExpansionKind::None;
30865 // If the atomicrmw's result is used by a single bit AND, we may use
30866 // bts/btr/btc instruction for these operations.
30867 auto *C1 = dyn_cast<ConstantInt>(AI->getValOperand());
30868 Instruction *I = AI->user_back();
30869 if (!C1 || !AI->hasOneUse() || I->getOpcode() != Instruction::And ||
30870 AI->getParent() != I->getParent())
30871 return AtomicExpansionKind::CmpXChg;
30872 // The following instruction must be an AND with a single bit constant.
30873 auto *C2 = dyn_cast<ConstantInt>(I->getOperand(1));
30874 unsigned Bits = AI->getType()->getPrimitiveSizeInBits();
30875 if (!C2 || Bits == 8 || !isPowerOf2_64(C2->getZExtValue()))
30876 return AtomicExpansionKind::CmpXChg;
30878 if (AI->getOperation() == AtomicRMWInst::And)
30879 return ~C1->getValue() == C2->getValue()
30880 ? AtomicExpansionKind::BitTestIntrinsic
30881 : AtomicExpansionKind::CmpXChg;
30883 return C1 == C2 ? AtomicExpansionKind::BitTestIntrinsic
30884 : AtomicExpansionKind::CmpXChg;
30885 }
30887 void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
30888 IRBuilder<> Builder(AI);
30889 Intrinsic::ID IID = Intrinsic::not_intrinsic;
30890 switch (AI->getOperation()) {
30891 default:
30892 llvm_unreachable("Unknown atomic operation");
30893 case AtomicRMWInst::Or:
30894 IID = Intrinsic::x86_atomic_bts;
30895 break;
30896 case AtomicRMWInst::Xor:
30897 IID = Intrinsic::x86_atomic_btc;
30898 break;
30899 case AtomicRMWInst::And:
30900 IID = Intrinsic::x86_atomic_btr;
30901 break;
30902 }
30903 Instruction *I = AI->user_back();
30904 LLVMContext &Ctx = AI->getContext();
30905 unsigned Imm =
30906 countTrailingZeros(cast<ConstantInt>(I->getOperand(1))->getZExtValue());
30907 Function *BitTest =
30908 Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
30909 Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
30910 Type::getInt8PtrTy(Ctx));
30911 Value *Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
30912 I->replaceAllUsesWith(Result);
30913 I->eraseFromParent();
30914 AI->eraseFromParent();
30915 }
30917 TargetLowering::AtomicExpansionKind
30918 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
30919 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
30920 Type *MemType = AI->getType();
30922 // If the operand is too big, we must see if cmpxchg8/16b is available
30923 // and default to library calls otherwise.
30924 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
30925 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
30926 : AtomicExpansionKind::None;
30929 AtomicRMWInst::BinOp Op = AI->getOperation();
30930 switch (Op) {
30931 default:
30932 llvm_unreachable("Unknown atomic operation");
30933 case AtomicRMWInst::Xchg:
30934 case AtomicRMWInst::Add:
30935 case AtomicRMWInst::Sub:
30936 // It's better to use xadd, xsub or xchg for these in all cases.
30937 return AtomicExpansionKind::None;
30938 case AtomicRMWInst::Or:
30939 case AtomicRMWInst::And:
30940 case AtomicRMWInst::Xor:
30941 return shouldExpandLogicAtomicRMWInIR(AI);
30942 case AtomicRMWInst::Nand:
30943 case AtomicRMWInst::Max:
30944 case AtomicRMWInst::Min:
30945 case AtomicRMWInst::UMax:
30946 case AtomicRMWInst::UMin:
30947 case AtomicRMWInst::FAdd:
30948 case AtomicRMWInst::FSub:
30949 case AtomicRMWInst::FMax:
30950 case AtomicRMWInst::FMin:
30951 // These always require a non-trivial set of data operations on x86. We must
30952 // use a cmpxchg loop.
30953 return AtomicExpansionKind::CmpXChg;
30954 }
30955 }
30957 LoadInst *
30958 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
30959 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
30960 Type *MemType = AI->getType();
30961 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
30962 // there is no benefit in turning such RMWs into loads, and it is actually
30963 // harmful as it introduces a mfence.
30964 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
30965 return nullptr;
30967 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
30968 // lowering available in lowerAtomicArith.
30969 // TODO: push more cases through this path.
30970 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
30971 if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
30972 AI->use_empty())
30973 return nullptr;
30975 IRBuilder<> Builder(AI);
30976 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
30977 auto SSID = AI->getSyncScopeID();
30978 // We must restrict the ordering to avoid generating loads with Release or
30979 // ReleaseAcquire orderings.
30980 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
30982 // Before the load we need a fence. Here is an example lifted from
30983 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
30984 // is required:
30985 // Thread 0:
30986 // x.store(1, relaxed);
30987 // r1 = y.fetch_add(0, release);
30988 // Thread 1:
30989 // y.fetch_add(42, acquire);
30990 // r2 = x.load(relaxed);
30991 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
30992 // lowered to just a load without a fence. A mfence flushes the store buffer,
30993 // making the optimization clearly correct.
30994 // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
30995 // otherwise, we might be able to be more aggressive on relaxed idempotent
30996 // rmw. In practice, they do not look useful, so we don't try to be
30997 // especially clever.
30998 if (SSID == SyncScope::SingleThread)
30999 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
31000 // the IR level, so we must wrap it in an intrinsic.
31001 return nullptr;
31003 if (!Subtarget.hasMFence())
31004 // FIXME: it might make sense to use a locked operation here but on a
31005 // different cache-line to prevent cache-line bouncing. In practice it
31006 // is probably a small win, and x86 processors without mfence are rare
31007 // enough that we do not bother.
31008 return nullptr;
31010 Function *MFence =
31011 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
31012 Builder.CreateCall(MFence, {});
31014 // Finally we can emit the atomic load.
31015 LoadInst *Loaded = Builder.CreateAlignedLoad(
31016 AI->getType(), AI->getPointerOperand(), AI->getAlign());
31017 Loaded->setAtomic(Order, SSID);
31018 AI->replaceAllUsesWith(Loaded);
31019 AI->eraseFromParent();
31021 return Loaded;
31022 }
31023 bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
31024 if (!SI.isUnordered())
31025 return false;
31026 return ExperimentalUnorderedISEL;
31027 }
31028 bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
31029 if (!LI.isUnordered())
31030 return false;
31031 return ExperimentalUnorderedISEL;
31032 }
31035 /// Emit a locked operation on a stack location which does not change any
31036 /// memory location, but does involve a lock prefix. Location is chosen to be
31037 /// a) very likely accessed only by a single thread to minimize cache traffic,
31038 /// and b) definitely dereferenceable. Returns the new Chain result.
31039 static SDValue emitLockedStackOp(SelectionDAG &DAG,
31040 const X86Subtarget &Subtarget, SDValue Chain,
31041 const SDLoc &DL) {
31042 // Implementation notes:
31043 // 1) LOCK prefix creates a full read/write reordering barrier for memory
31044 // operations issued by the current processor. As such, the location
31045 // referenced is not relevant for the ordering properties of the instruction.
31046 // See: Intel® 64 and IA-32 Architectures Software Developer's Manual,
31047 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
31048 // 2) Using an immediate operand appears to be the best encoding choice
31049 // here since it doesn't require an extra register.
31050 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
31051 // is small enough it might just be measurement noise.)
31052 // 4) When choosing offsets, there are several contributing factors:
31053 // a) If there's no redzone, we default to TOS. (We could allocate a cache
31054 // line aligned stack object to improve this case.)
31055 // b) To minimize our chances of introducing a false dependence, we prefer
31056 // to offset the stack usage from TOS slightly.
31057 // c) To minimize concerns about cross thread stack usage - in particular,
31058 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
31059 // captures state in the TOS frame and accesses it from many threads -
31060 // we want to use an offset such that the offset is in a distinct cache
31061 // line from the TOS frame.
31063 // For a general discussion of the tradeoffs and benchmark results, see:
31064 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
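// With a 128-byte red zone this emits, e.g., "lock orl $0, -64(%rsp)";
// without one it targets the top of stack directly, e.g. "lock orl $0, (%esp)".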
31066 auto &MF = DAG.getMachineFunction();
31067 auto &TFL = *Subtarget.getFrameLowering();
31068 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
31070 if (Subtarget.is64Bit()) {
31071 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
31072 SDValue Ops[] = {
31073 DAG.getRegister(X86::RSP, MVT::i64), // Base
31074 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
31075 DAG.getRegister(0, MVT::i64), // Index
31076 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
31077 DAG.getRegister(0, MVT::i16), // Segment.
31078 Zero,
31079 Chain};
31080 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
31081 MVT::Other, Ops);
31082 return SDValue(Res, 1);
31083 }
31085 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
31086 SDValue Ops[] = {
31087 DAG.getRegister(X86::ESP, MVT::i32), // Base
31088 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
31089 DAG.getRegister(0, MVT::i32), // Index
31090 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
31091 DAG.getRegister(0, MVT::i16), // Segment.
31092 Zero,
31093 Chain};
31095 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
31096 MVT::Other, Ops);
31097 return SDValue(Res, 1);
31098 }
31100 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
31101 SelectionDAG &DAG) {
31102 SDLoc dl(Op);
31103 AtomicOrdering FenceOrdering =
31104 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
31105 SyncScope::ID FenceSSID =
31106 static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
31108 // The only fence that needs an instruction is a sequentially-consistent
31109 // cross-thread fence.
31110 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
31111 FenceSSID == SyncScope::System) {
31112 if (Subtarget.hasMFence())
31113 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
31115 SDValue Chain = Op.getOperand(0);
31116 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
31117 }
31119 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
31120 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
31121 }
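// Lower cmpxchg into X86's LCMPXCHG: the expected value is pinned to the
// width-matched accumulator register (AL/AX/EAX/RAX), success is reported
// in ZF, and the SETCC below materializes it for the second result.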
31123 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
31124 SelectionDAG &DAG) {
31125 MVT T = Op.getSimpleValueType();
31126 SDLoc DL(Op);
31127 unsigned Reg = 0;
31128 unsigned size = 0;
31129 switch(T.SimpleTy) {
31130 default: llvm_unreachable("Invalid value type!");
31131 case MVT::i8: Reg = X86::AL; size = 1; break;
31132 case MVT::i16: Reg = X86::AX; size = 2; break;
31133 case MVT::i32: Reg = X86::EAX; size = 4; break;
31134 case MVT::i64:
31135 assert(Subtarget.is64Bit() && "Node not type legal!");
31136 Reg = X86::RAX; size = 8;
31137 break;
31138 }
31139 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
31140 Op.getOperand(2), SDValue());
31141 SDValue Ops[] = { cpIn.getValue(0),
31142 Op.getOperand(1),
31143 Op.getOperand(3),
31144 DAG.getTargetConstant(size, DL, MVT::i8),
31145 cpIn.getValue(1) };
31146 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
31147 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
31148 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
31149 Ops, T, MMO);
31151 SDValue cpOut =
31152 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
31153 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
31154 MVT::i32, cpOut.getValue(2));
31155 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
31157 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
31158 cpOut, Success, EFLAGS.getValue(1));
31159 }
31161 // Create MOVMSKB, taking into account whether we need to split for AVX1.
31162 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
31163 const X86Subtarget &Subtarget) {
31164 MVT InVT = V.getSimpleValueType();
31166 if (InVT == MVT::v64i8) {
31167 SDValue Lo, Hi;
31168 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
31169 Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
31170 Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
31171 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
31172 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
31173 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
31174 DAG.getConstant(32, DL, MVT::i8));
31175 return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
31176 }
31177 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
31178 SDValue Lo, Hi;
31179 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
31180 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
31181 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
31182 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
31183 DAG.getConstant(16, DL, MVT::i8));
31184 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
31185 }
31187 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
31188 }
31190 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
31191 SelectionDAG &DAG) {
31192 SDValue Src = Op.getOperand(0);
31193 MVT SrcVT = Src.getSimpleValueType();
31194 MVT DstVT = Op.getSimpleValueType();
31196 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
31197 // half to v32i1 and concatenating the result.
31198 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
31199 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
31200 assert(Subtarget.hasBWI() && "Expected BWI target");
31201 SDLoc dl(Op);
31202 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
31203 DAG.getIntPtrConstant(0, dl));
31204 Lo = DAG.getBitcast(MVT::v32i1, Lo);
31205 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
31206 DAG.getIntPtrConstant(1, dl));
31207 Hi = DAG.getBitcast(MVT::v32i1, Hi);
31208 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
31209 }
31211 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
31212 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
31213 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
31214 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
31215 SDLoc DL(Op);
31216 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
31217 V = getPMOVMSKB(DL, V, DAG, Subtarget);
31218 return DAG.getZExtOrTrunc(V, DL, DstVT);
31219 }
31221 assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
31222 SrcVT == MVT::i64) && "Unexpected VT!");
31224 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
31225 if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
31226 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
31227 // This conversion needs to be expanded.
31228 return SDValue();
31230 SDLoc dl(Op);
31231 if (SrcVT.isVector()) {
31232 // Widen the input vector in the case of MVT::v2i32.
31233 // Example: from MVT::v2i32 to MVT::v4i32.
31234 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
31235 SrcVT.getVectorNumElements() * 2);
31236 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
31237 DAG.getUNDEF(SrcVT));
31238 } else {
31239 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
31240 "Unexpected source type in LowerBITCAST");
31241 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
31242 }
31244 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
31245 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
31247 if (DstVT == MVT::x86mmx)
31248 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
31250 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
31251 DAG.getIntPtrConstant(0, dl));
31252 }
31254 /// Compute the horizontal sum of bytes in V for the elements of VT.
31256 /// Requires V to be a byte vector and VT to be an integer vector type with
31257 /// wider elements than V's type. The width of the elements of VT determines
31258 /// how many bytes of V are summed horizontally to produce each element of the
31260 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
31261 const X86Subtarget &Subtarget,
31262 SelectionDAG &DAG) {
31263 SDLoc DL(V);
31264 MVT ByteVecVT = V.getSimpleValueType();
31265 MVT EltVT = VT.getVectorElementType();
31266 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
31267 "Expected value to have byte element type.");
31268 assert(EltVT != MVT::i8 &&
31269 "Horizontal byte sum only makes sense for wider elements!");
31270 unsigned VecSize = VT.getSizeInBits();
31271 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
31273 // The PSADBW instruction horizontally adds all bytes and leaves the result
31274 // in i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
31275 if (EltVT == MVT::i64) {
31276 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
31277 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
31278 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
31279 return DAG.getBitcast(VT, V);
31280 }
31282 if (EltVT == MVT::i32) {
31283 // We unpack the low half and high half into i32s interleaved with zeros so
31284 // that we can use PSADBW to horizontally sum them. The most useful part of
31285 // this is that it lines up the results of two PSADBW instructions to be
31286 // two v2i64 vectors which concatenated are the 4 population counts. We can
31287 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
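// Roughly: after unpacking, each i64 lane of Low/High holds one original
// i32 lane's four bytes (plus four zero bytes), PSADBW yields that lane's
// byte sum, and the final pack merges the two v2i64 results into a v4i32.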
31288 SDValue Zeros = DAG.getConstant(0, DL, VT);
31289 SDValue V32 = DAG.getBitcast(VT, V);
31290 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
31291 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
31293 // Do the horizontal sums into two v2i64s.
31294 Zeros = DAG.getConstant(0, DL, ByteVecVT);
31295 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
31296 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
31297 DAG.getBitcast(ByteVecVT, Low), Zeros);
31298 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
31299 DAG.getBitcast(ByteVecVT, High), Zeros);
31301 // Merge them together.
31302 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
31303 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
31304 DAG.getBitcast(ShortVecVT, Low),
31305 DAG.getBitcast(ShortVecVT, High));
31307 return DAG.getBitcast(VT, V);
31308 }
31310 // The only element type left is i16.
31311 assert(EltVT == MVT::i16 && "Unknown how to handle type");
31313 // To obtain pop count for each i16 element starting from the pop count for
31314 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
31315 // right by 8. It is important to shift as i16s as i8 vector shift isn't
31316 // directly supported.
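// e.g. an i16 lane [cntHi:cntLo] becomes [cntLo:0] after the shift; the i8
// add yields [cntHi+cntLo:cntLo], and the final i16 shift right leaves the
// per-i16 sum cntHi+cntLo.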
31317 SDValue ShifterV = DAG.getConstant(8, DL, VT);
31318 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
31319 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
31320 DAG.getBitcast(ByteVecVT, V));
31321 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
31322 }
31324 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
31325 const X86Subtarget &Subtarget,
31326 SelectionDAG &DAG) {
31327 MVT VT = Op.getSimpleValueType();
31328 MVT EltVT = VT.getVectorElementType();
31329 int NumElts = VT.getVectorNumElements();
31331 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
31333 // Implement a lookup table in register by using an algorithm based on:
31334 // http://wm.ite.pl/articles/sse-popcount.html
31336 // The general idea is that every lower byte nibble in the input vector is an
31337 // index into an in-register pre-computed pop count table. We then split up
31338 // the input vector in two new ones: (1) a vector with only the shifted-right
31339 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
31340 // masked out higher ones) for each byte. PSHUFB is used separately with both
31341 // to index the in-register table. Next, both are added and the result is an
31342 // i8 vector where each element contains the pop count for its input byte.
31343 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
31344 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
31345 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
31346 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
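// e.g. for input byte 0x5B: LUT[0x5] + LUT[0xB] == 2 + 3 == 5 set bits.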
31348 SmallVector<SDValue, 64> LUTVec;
31349 for (int i = 0; i < NumElts; ++i)
31350 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
31351 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
31352 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
31354 // High nibbles
31355 SDValue FourV = DAG.getConstant(4, DL, VT);
31356 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
31358 // Low nibbles
31359 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
31361 // The input vector is used as the shuffle mask that index elements into the
31362 // LUT. After counting low and high nibbles, add the vector to obtain the
31363 // final pop count per i8 element.
31364 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
31365 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
31366 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
31367 }
31369 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
31370 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
31371 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
31372 SelectionDAG &DAG) {
31373 MVT VT = Op.getSimpleValueType();
31374 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
31375 "Unknown CTPOP type to handle");
31376 SDLoc DL(Op.getNode());
31377 SDValue Op0 = Op.getOperand(0);
31379 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
31380 if (Subtarget.hasVPOPCNTDQ()) {
31381 unsigned NumElems = VT.getVectorNumElements();
31382 assert((VT.getVectorElementType() == MVT::i8 ||
31383 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
31384 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
31385 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
31386 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
31387 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
31388 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
31389 }
31390 }
31392 // Decompose 256-bit ops into smaller 128-bit ops.
31393 if (VT.is256BitVector() && !Subtarget.hasInt256())
31394 return splitVectorIntUnary(Op, DAG);
31396 // Decompose 512-bit ops into smaller 256-bit ops.
31397 if (VT.is512BitVector() && !Subtarget.hasBWI())
31398 return splitVectorIntUnary(Op, DAG);
31400 // For element types greater than i8, do vXi8 pop counts and a bytesum.
31401 if (VT.getScalarType() != MVT::i8) {
31402 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
31403 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
31404 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
31405 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
31406 }
31408 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
31409 if (!Subtarget.hasSSSE3())
31410 return SDValue();
31412 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
31413 }
31415 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
31416 SelectionDAG &DAG) {
31417 assert(Op.getSimpleValueType().isVector() &&
31418 "We only do custom lowering for vector population count.");
31419 return LowerVectorCTPOP(Op, Subtarget, DAG);
31420 }
31422 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
31423 MVT VT = Op.getSimpleValueType();
31424 SDValue In = Op.getOperand(0);
31425 SDLoc DL(Op);
31427 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
31428 // perform the BITREVERSE.
31429 if (!VT.isVector()) {
31430 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
31431 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
31432 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
31433 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
31434 DAG.getIntPtrConstant(0, DL));
31437 int NumElts = VT.getVectorNumElements();
31438 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
31440 // Decompose 256-bit ops into smaller 128-bit ops.
31441 if (VT.is256BitVector())
31442 return splitVectorIntUnary(Op, DAG);
31444 assert(VT.is128BitVector() &&
31445 "Only 128-bit vector bitreverse lowering supported.");
31447 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
31448 // perform the BSWAP in the shuffle.
31449 // Its best to shuffle using the second operand as this will implicitly allow
31450 // memory folding for multiple vectors.
31451 SmallVector<SDValue, 16> MaskElts;
31452 for (int i = 0; i != NumElts; ++i) {
31453 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
31454 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
31455 int PermuteByte = SourceByte | (2 << 5);
31456 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
31457 }
31458 }
31460 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
31461 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
31462 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
31463 Res, Mask);
31464 return DAG.getBitcast(VT, Res);
31465 }
31467 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
31468 SelectionDAG &DAG) {
31469 MVT VT = Op.getSimpleValueType();
31471 if (Subtarget.hasXOP() && !VT.is512BitVector())
31472 return LowerBITREVERSE_XOP(Op, DAG);
31474 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
31476 SDValue In = Op.getOperand(0);
31477 SDLoc DL(Op);
31479 assert(VT.getScalarType() == MVT::i8 &&
31480 "Only byte vector BITREVERSE supported");
31482 // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
31483 if (VT == MVT::v64i8 && !Subtarget.hasBWI())
31484 return splitVectorIntUnary(Op, DAG);
31486 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
31487 if (VT == MVT::v32i8 && !Subtarget.hasInt256())
31488 return splitVectorIntUnary(Op, DAG);
31490 unsigned NumElts = VT.getVectorNumElements();
31492 // If we have GFNI, we can use GF2P8AFFINEQB to reverse the bits.
31493 if (Subtarget.hasGFNI()) {
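// 0x8040201008040201 is the 8x8 GF(2) matrix that reverses the bit order
// within each byte; GF2P8AFFINEQB applies it to every byte lane at once.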
31494 MVT MatrixVT = MVT::getVectorVT(MVT::i64, NumElts / 8);
31495 SDValue Matrix = DAG.getConstant(0x8040201008040201ULL, DL, MatrixVT);
31496 Matrix = DAG.getBitcast(VT, Matrix);
31497 return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, In, Matrix,
31498 DAG.getTargetConstant(0, DL, MVT::i8));
31501 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
31502 // two nibbles and a PSHUFB lookup to find the bitreverse of each
31503 // 0-15 value (moved to the other nibble).
31504 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
31505 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
31506 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
31508 const int LoLUT[16] = {
31509 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
31510 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
31511 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
31512 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
31513 const int HiLUT[16] = {
31514 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
31515 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
31516 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
31517 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
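// e.g. for byte 0xB1 (0b10110001): LoLUT[0x1] | HiLUT[0xB] == 0x80 | 0x0D
// == 0x8D (0b10001101), its bit-reversal.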
31519 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
31520 for (unsigned i = 0; i < NumElts; ++i) {
31521 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
31522 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
31525 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
31526 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
31527 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
31528 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
31529 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
31530 }
31532 static SDValue LowerPARITY(SDValue Op, const X86Subtarget &Subtarget,
31533 SelectionDAG &DAG) {
31534 SDLoc DL(Op);
31535 SDValue X = Op.getOperand(0);
31536 MVT VT = Op.getSimpleValueType();
31538 // Special case. If the input fits in 8-bits we can use a single 8-bit TEST.
31539 if (VT == MVT::i8 ||
31540 DAG.MaskedValueIsZero(X, APInt::getBitsSetFrom(VT.getSizeInBits(), 8))) {
31541 X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
31542 SDValue Flags = DAG.getNode(X86ISD::CMP, DL, MVT::i32, X,
31543 DAG.getConstant(0, DL, MVT::i8));
31544 // Copy the inverse of the parity flag into a register with setcc.
31545 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31546 // Extend to the original type.
31547 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
31548 }
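// PF only tracks the low 8 bits of a result, so for inputs that fit in a
// byte a single TEST plus SETNP (PF clear <=> odd number of set bits)
// computes the parity directly.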
31550 // If we have POPCNT, use the default expansion.
31551 if (Subtarget.hasPOPCNT())
31552 return SDValue();
31554 if (VT == MVT::i64) {
31555 // Xor the high and low 16-bits together using a 32-bit operation.
31556 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
31557 DAG.getNode(ISD::SRL, DL, MVT::i64, X,
31558 DAG.getConstant(32, DL, MVT::i8)));
31559 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
31560 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
31561 }
31563 if (VT != MVT::i16) {
31564 // Xor the high and low 16-bits together using a 32-bit operation.
31565 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, MVT::i32, X,
31566 DAG.getConstant(16, DL, MVT::i8));
31567 X = DAG.getNode(ISD::XOR, DL, MVT::i32, X, Hi16);
31568 } else {
31569 // If the input is 16-bits, we need to extend to use an i32 shift below.
31570 X = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, X);
31571 }
31573 // Finally xor the low 2 bytes together and use a 8-bit flag setting xor.
31574 // This should allow an h-reg to be used to save a shift.
31575 SDValue Hi = DAG.getNode(
31576 ISD::TRUNCATE, DL, MVT::i8,
31577 DAG.getNode(ISD::SRL, DL, MVT::i32, X, DAG.getConstant(8, DL, MVT::i8)));
31578 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
31579 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
31580 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
31582 // Copy the inverse of the parity flag into a register with setcc.
31583 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31584 // Extend to the original type.
31585 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
31586 }
31588 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
31589 const X86Subtarget &Subtarget) {
31590 unsigned NewOpc = 0;
31591 switch (N->getOpcode()) {
31592 case ISD::ATOMIC_LOAD_ADD:
31593 NewOpc = X86ISD::LADD;
31594 break;
31595 case ISD::ATOMIC_LOAD_SUB:
31596 NewOpc = X86ISD::LSUB;
31597 break;
31598 case ISD::ATOMIC_LOAD_OR:
31599 NewOpc = X86ISD::LOR;
31600 break;
31601 case ISD::ATOMIC_LOAD_XOR:
31602 NewOpc = X86ISD::LXOR;
31603 break;
31604 case ISD::ATOMIC_LOAD_AND:
31605 NewOpc = X86ISD::LAND;
31606 break;
31607 default:
31608 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
31609 }
31611 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
31613 return DAG.getMemIntrinsicNode(
31614 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
31615 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
31616 /*MemVT=*/N->getSimpleValueType(0), MMO);
31617 }
31619 /// Lower atomic_load_ops into LOCK-prefixed operations.
31620 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
31621 const X86Subtarget &Subtarget) {
31622 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
31623 SDValue Chain = N->getOperand(0);
31624 SDValue LHS = N->getOperand(1);
31625 SDValue RHS = N->getOperand(2);
31626 unsigned Opc = N->getOpcode();
31627 MVT VT = N->getSimpleValueType(0);
31628 SDLoc DL(N);
31630 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
31631 // can only be lowered when the result is unused. They should have already
31632 // been transformed into a cmpxchg loop in AtomicExpand.
31633 if (N->hasAnyUseOfValue(0)) {
31634 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
31635 // select LXADD if LOCK_SUB can't be selected.
31636 if (Opc == ISD::ATOMIC_LOAD_SUB) {
31637 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
31638 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
31639 RHS, AN->getMemOperand());
31640 }
31641 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
31642 "Used AtomicRMW ops other than Add should have been expanded!");
31646 // Specialized lowering for the canonical form of an idemptotent atomicrmw.
31647 // The core idea here is that since the memory location isn't actually
31648 // changing, all we need is a lowering for the *ordering* impacts of the
31649 // atomicrmw. As such, we can choose a different operation and memory
31650 // location to minimize impact on other code.
31651 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
31652 // On X86, the only ordering which actually requires an instruction is
31653 // seq_cst which isn't SingleThread, everything just needs to be preserved
31654 // during codegen and then dropped. Note that we expect (but don't assume),
31655 // that orderings other than seq_cst and acq_rel have been canonicalized to
31656 // a store or load.
31657 if (AN->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent &&
31658 AN->getSyncScopeID() == SyncScope::System) {
31659 // Prefer a locked operation against a stack location to minimize cache
31660 // traffic. This assumes that stack locations are very likely to be
31661 // accessed only by the owning thread.
31662 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
31663 assert(!N->hasAnyUseOfValue(0));
31664 // NOTE: The getUNDEF is needed to give something for the unused result 0.
31665 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31666 DAG.getUNDEF(VT), NewChain);
31667 }
31668 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
31669 SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
31670 assert(!N->hasAnyUseOfValue(0));
31671 // NOTE: The getUNDEF is needed to give something for the unused result 0.
31672 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31673 DAG.getUNDEF(VT), NewChain);
31674 }
31676 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
31677 // RAUW the chain, but don't worry about the result, as it's unused.
31678 assert(!N->hasAnyUseOfValue(0));
31679 // NOTE: The getUNDEF is needed to give something for the unused result 0.
31680 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31681 DAG.getUNDEF(VT), LockOp.getValue(1));
31682 }
31684 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
31685 const X86Subtarget &Subtarget) {
31686 auto *Node = cast<AtomicSDNode>(Op.getNode());
31687 SDLoc dl(Node);
31688 EVT VT = Node->getMemoryVT();
31690 bool IsSeqCst =
31691 Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent;
31692 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
31694 // If this store is not sequentially consistent and the type is legal
31695 // we can just keep it.
31696 if (!IsSeqCst && IsTypeLegal)
31697 return Op;
31699 if (VT == MVT::i64 && !IsTypeLegal) {
31700 // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
31701 // is enabled.
31702 bool NoImplicitFloatOps =
31703 DAG.getMachineFunction().getFunction().hasFnAttribute(
31704 Attribute::NoImplicitFloat);
31705 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
31706 SDValue Chain;
31707 if (Subtarget.hasSSE1()) {
31708 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
31709 Node->getOperand(2));
31710 MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
31711 SclToVec = DAG.getBitcast(StVT, SclToVec);
31712 SDVTList Tys = DAG.getVTList(MVT::Other);
31713 SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
31714 Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
31715 MVT::i64, Node->getMemOperand());
31716 } else if (Subtarget.hasX87()) {
31717 // First load this into an 80-bit X87 register using a stack temporary.
31718 // This will put the whole integer into the significand.
31719 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
31720 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
31721 MachinePointerInfo MPI =
31722 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
31723 Chain =
31724 DAG.getStore(Node->getChain(), dl, Node->getOperand(2), StackPtr,
31725 MPI, MaybeAlign(), MachineMemOperand::MOStore);
31726 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
31727 SDValue LdOps[] = {Chain, StackPtr};
31728 SDValue Value =
31729 DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
31730 /*Align*/ None, MachineMemOperand::MOLoad);
31731 Chain = Value.getValue(1);
31733 // Now use an FIST to do the atomic store.
31734 SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
31735 Chain =
31736 DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
31737 StoreOps, MVT::i64, Node->getMemOperand());
31738 }
31740 if (Chain) {
31741 // If this is a sequentially consistent store, also emit an appropriate
31742 // barrier.
31743 if (IsSeqCst)
31744 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
31746 return Chain;
31747 }
31748 }
31749 }
31751 // Convert seq_cst store -> xchg
31752 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
31753 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
31754 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
31755 Node->getMemoryVT(),
31756 Node->getOperand(0),
31757 Node->getOperand(1), Node->getOperand(2),
31758 Node->getMemOperand());
31759 return Swap.getValue(1);
31760 }
31762 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
31763 SDNode *N = Op.getNode();
31764 MVT VT = N->getSimpleValueType(0);
31765 unsigned Opc = Op.getOpcode();
31767 // Let legalize expand this if it isn't a legal type yet.
31768 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
31769 return SDValue();
31771 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
31772 SDLoc DL(N);
31774 // Set the carry flag.
31775 SDValue Carry = Op.getOperand(2);
31776 EVT CarryVT = Carry.getValueType();
31777 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
31778 Carry, DAG.getAllOnesConstant(DL, CarryVT));
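// Adding all-ones (-1) to the incoming carry sets CF exactly when the carry
// operand is nonzero, re-materializing it in EFLAGS for the ADC/SBB below.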
31780 bool IsAdd = Opc == ISD::ADDCARRY || Opc == ISD::SADDO_CARRY;
31781 SDValue Sum = DAG.getNode(IsAdd ? X86ISD::ADC : X86ISD::SBB, DL, VTs,
31782 Op.getOperand(0), Op.getOperand(1),
31783 Carry.getValue(1));
31785 bool IsSigned = Opc == ISD::SADDO_CARRY || Opc == ISD::SSUBO_CARRY;
31786 SDValue SetCC = getSETCC(IsSigned ? X86::COND_O : X86::COND_B,
31787 Sum.getValue(1), DL, DAG);
31788 if (N->getValueType(1) == MVT::i1)
31789 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
31791 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
31792 }
31794 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
31795 SelectionDAG &DAG) {
31796 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
31798 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
31799 // which returns the values as { float, float } (in XMM0) or
31800 // { double, double } (which is returned in XMM0, XMM1).
31801 SDLoc dl(Op);
31802 SDValue Arg = Op.getOperand(0);
31803 EVT ArgVT = Arg.getValueType();
31804 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
31806 TargetLowering::ArgListTy Args;
31807 TargetLowering::ArgListEntry Entry;
31809 Entry.Node = Arg;
31810 Entry.Ty = ArgTy;
31811 Entry.IsSExt = false;
31812 Entry.IsZExt = false;
31813 Args.push_back(Entry);
31815 bool isF64 = ArgVT == MVT::f64;
31816 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
31817 // the small struct {f32, f32} is returned in (eax, edx). For f64,
31818 // the results are returned via SRet in memory.
31819 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31820 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
31821 const char *LibcallName = TLI.getLibcallName(LC);
31822 SDValue Callee =
31823 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
31825 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
31826 : (Type *)FixedVectorType::get(ArgTy, 4);
31828 TargetLowering::CallLoweringInfo CLI(DAG);
31829 CLI.setDebugLoc(dl)
31830 .setChain(DAG.getEntryNode())
31831 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
31833 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
31835 if (isF64)
31836 // Returned in xmm0 and xmm1.
31837 return CallResult.first;
31839 // Returned in bits 0:31 and 32:63 of xmm0.
31840 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
31841 CallResult.first, DAG.getIntPtrConstant(0, dl));
31842 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
31843 CallResult.first, DAG.getIntPtrConstant(1, dl));
31844 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
31845 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
31846 }
31848 /// Widen a vector input to a vector of NVT. The
31849 /// input vector must have the same element type as NVT.
31850 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
31851 bool FillWithZeroes = false) {
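// Strategy: constant build vectors are rebuilt with explicit fill elements;
// any other input is widened by inserting it as a subvector at index 0 of a
// zero (or undef) vector of the wider type.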
31852 // Check if InOp already has the right width.
31853 MVT InVT = InOp.getSimpleValueType();
31854 if (InVT == NVT)
31855 return InOp;
31857 if (InOp.isUndef())
31858 return DAG.getUNDEF(NVT);
31860 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
31861 "input and widen element type must match");
31863 unsigned InNumElts = InVT.getVectorNumElements();
31864 unsigned WidenNumElts = NVT.getVectorNumElements();
31865 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
31866 "Unexpected request for vector widening");
31869 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
31870 InOp.getNumOperands() == 2) {
31871 SDValue N1 = InOp.getOperand(1);
31872 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
31873 N1.isUndef()) {
31874 InOp = InOp.getOperand(0);
31875 InVT = InOp.getSimpleValueType();
31876 InNumElts = InVT.getVectorNumElements();
31877 }
31878 }
31879 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
31880 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
31881 SmallVector<SDValue, 16> Ops;
31882 for (unsigned i = 0; i < InNumElts; ++i)
31883 Ops.push_back(InOp.getOperand(i));
31885 EVT EltVT = InOp.getOperand(0).getValueType();
31887 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
31888 DAG.getUNDEF(EltVT);
31889 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
31890 Ops.push_back(FillVal);
31891 return DAG.getBuildVector(NVT, dl, Ops);
31892 }
31893 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
31894 DAG.getUNDEF(NVT);
31895 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
31896 InOp, DAG.getIntPtrConstant(0, dl));
31897 }
31899 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
31900 SelectionDAG &DAG) {
31901 assert(Subtarget.hasAVX512() &&
31902 "MGATHER/MSCATTER are supported on AVX-512 arch only");
31904 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
31905 SDValue Src = N->getValue();
31906 MVT VT = Src.getSimpleValueType();
31907 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
31909 SDLoc dl(Op);
31910 SDValue Scale = N->getScale();
31911 SDValue Index = N->getIndex();
31912 SDValue Mask = N->getMask();
31913 SDValue Chain = N->getChain();
31914 SDValue BasePtr = N->getBasePtr();
31916 if (VT == MVT::v2f32 || VT == MVT::v2i32) {
31917 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
31918 // If the index is v2i64 and we have VLX we can use xmm for data and index.
31919 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
31920 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31921 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
31922 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
31923 SDVTList VTs = DAG.getVTList(MVT::Other);
31924 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
31925 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
31926 N->getMemoryVT(), N->getMemOperand());
31931 MVT IndexVT = Index.getSimpleValueType();
31933 // If the index is v2i32, we're being called by type legalization and we
31934 // should just let the default handling take care of it.
31935 if (IndexVT == MVT::v2i32)
31936 return SDValue();
31938 // If we don't have VLX and neither the passthru nor the index is 512-bits, we
31939 // need to widen until one is.
31940 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
31941 !Index.getSimpleValueType().is512BitVector()) {
31942 // Determine how much we need to widen by to get a 512-bit type.
31943 unsigned Factor = std::min(512/VT.getSizeInBits(),
31944 512/IndexVT.getSizeInBits());
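// E.g. a 256-bit v8i32 source with a 256-bit v8i32 index gives Factor = 2,
// so both operands are widened to 16 elements and the scatter uses 512-bit
// types.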
31945 unsigned NumElts = VT.getVectorNumElements() * Factor;
31947 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
31948 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
31949 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
31951 Src = ExtendToType(Src, VT, DAG);
31952 Index = ExtendToType(Index, IndexVT, DAG);
31953 Mask = ExtendToType(Mask, MaskVT, DAG, true);
31954 }
31956 SDVTList VTs = DAG.getVTList(MVT::Other);
31957 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
31958 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
31959 N->getMemoryVT(), N->getMemOperand());
31962 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
31963 SelectionDAG &DAG) {
31965 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
31966 MVT VT = Op.getSimpleValueType();
31967 MVT ScalarVT = VT.getScalarType();
31968 SDValue Mask = N->getMask();
31969 MVT MaskVT = Mask.getSimpleValueType();
31970 SDValue PassThru = N->getPassThru();
31971 SDLoc dl(Op);
31973 // Handle AVX masked loads which don't support passthru other than 0.
31974 if (MaskVT.getVectorElementType() != MVT::i1) {
31975 // We also allow undef in the isel pattern.
31976 if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
31977 return Op;
31979 SDValue NewLoad = DAG.getMaskedLoad(
31980 VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
31981 getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
31982 N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
31983 N->isExpandingLoad());
31985 SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
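// The AVX masked-load instruction zeroes all masked-off lanes, so a separate
// VSELECT is required to blend the caller's PassThru value back in.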
31986 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
31987 }
31989 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
31990 "Expanding masked load is supported on AVX-512 target only!");
31992 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
31993 "Expanding masked load is supported for 32 and 64-bit types only!");
31995 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31996 "Cannot lower masked load op.");
31998 assert((ScalarVT.getSizeInBits() >= 32 ||
31999 (Subtarget.hasBWI() &&
32000 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
32001 "Unsupported masked load op.");
32003 // This operation is legal for targets with VLX, but without
32004 // VLX the vector should be widened to 512 bits
32005 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
32006 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
32007 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
32009 // Mask element has to be i1.
32010 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
32011 "Unexpected mask type");
32013 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
32015 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
32016 SDValue NewLoad = DAG.getMaskedLoad(
32017 WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
32018 PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
32019 N->getExtensionType(), N->isExpandingLoad());
32022 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
32023 DAG.getIntPtrConstant(0, dl));
32024 SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
32025 return DAG.getMergeValues(RetOps, dl);
32026 }
32028 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
32029 SelectionDAG &DAG) {
32030 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
32031 SDValue DataToStore = N->getValue();
32032 MVT VT = DataToStore.getSimpleValueType();
32033 MVT ScalarVT = VT.getScalarType();
32034 SDValue Mask = N->getMask();
32035 SDLoc dl(Op);
32037 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
32038 "Expanding masked load is supported on AVX-512 target only!");
32040 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
32041 "Expanding masked load is supported for 32 and 64-bit types only!");
32043 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
32044 "Cannot lower masked store op.");
32046 assert((ScalarVT.getSizeInBits() >= 32 ||
32047 (Subtarget.hasBWI() &&
32048 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
32049 "Unsupported masked store op.");
32051 // This operation is legal for targets with VLX, but without
32052 // VLX the vector should be widened to 512 bits
32053 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
32054 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
32056 // Mask element has to be i1.
32057 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
32058 "Unexpected mask type");
32060 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
32062 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
32063 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
32064 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
32065 N->getOffset(), Mask, N->getMemoryVT(),
32066 N->getMemOperand(), N->getAddressingMode(),
32067 N->isTruncatingStore(), N->isCompressingStore());
32070 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
32071 SelectionDAG &DAG) {
32072 assert(Subtarget.hasAVX2() &&
32073 "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
32075 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
32076 SDLoc dl(Op);
32077 MVT VT = Op.getSimpleValueType();
32078 SDValue Index = N->getIndex();
32079 SDValue Mask = N->getMask();
32080 SDValue PassThru = N->getPassThru();
32081 MVT IndexVT = Index.getSimpleValueType();
32083 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
32085 // If the index is v2i32, we're being called by type legalization.
32086 if (IndexVT == MVT::v2i32)
32087 return SDValue();
32089 // If we don't have VLX and neither the passthru nor the index is 512-bits, we
32090 // need to widen until one is.
32091 MVT OrigVT = VT;
32092 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
32093 !IndexVT.is512BitVector()) {
32094 // Determine how much we need to widen by to get a 512-bit type.
32095 unsigned Factor = std::min(512/VT.getSizeInBits(),
32096 512/IndexVT.getSizeInBits());
32098 unsigned NumElts = VT.getVectorNumElements() * Factor;
32100 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
32101 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
32102 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
32104 PassThru = ExtendToType(PassThru, VT, DAG);
32105 Index = ExtendToType(Index, IndexVT, DAG);
32106 Mask = ExtendToType(Mask, MaskVT, DAG, true);
32107 }
32109 // Break dependency on the data register.
32110 if (PassThru.isUndef())
32111 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
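// With an undef passthru the gather's destination register would keep a
// false dependency on whatever value it happened to hold; zeroing it first
// breaks that dependency.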
32113 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
32114 N->getScale() };
32115 SDValue NewGather = DAG.getMemIntrinsicNode(
32116 X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
32117 N->getMemOperand());
32118 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
32119 NewGather, DAG.getIntPtrConstant(0, dl));
32120 return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
32121 }
32123 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
32124 SDLoc dl(Op);
32125 SDValue Src = Op.getOperand(0);
32126 MVT DstVT = Op.getSimpleValueType();
32128 AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
32129 unsigned SrcAS = N->getSrcAddressSpace();
32131 assert(SrcAS != N->getDestAddressSpace() &&
32132 "addrspacecast must be between different address spaces");
32134 if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
32135 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
32136 } else if (DstVT == MVT::i64) {
32137 Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
32138 } else if (DstVT == MVT::i32) {
32139 Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
32140 } else {
32141 report_fatal_error("Bad address space in addrspacecast");
32142 }
32143 return Op;
32144 }
32146 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
32147 SelectionDAG &DAG) const {
32148 // TODO: Eventually, the lowering of these nodes should be informed by or
32149 // deferred to the GC strategy for the function in which they appear. For
32150 // now, however, they must be lowered to something. Since they are logically
32151 // no-ops in the case of a null GC strategy (or a GC strategy which does not
32152 // require special handling for these nodes), lower them as literal NOOPs for
32153 // the time being.
32154 SmallVector<SDValue, 2> Ops;
32155 Ops.push_back(Op.getOperand(0));
32156 if (Op->getGluedNode())
32157 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
32159 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
32160 return SDValue(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
32161 }
32163 // Custom split CVTPS2PH with wide types.
32164 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
32165 SDLoc dl(Op);
32166 EVT VT = Op.getValueType();
32167 SDValue Lo, Hi;
32168 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
32169 EVT LoVT, HiVT;
32170 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32171 SDValue RC = Op.getOperand(1);
32172 Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
32173 Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
32174 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32175 }
32177 /// Provide custom lowering hooks for some operations.
32178 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
32179 switch (Op.getOpcode()) {
32180 default: llvm_unreachable("Should not custom lower this!");
32181 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
32182 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
32183 return LowerCMP_SWAP(Op, Subtarget, DAG);
32184 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
32185 case ISD::ATOMIC_LOAD_ADD:
32186 case ISD::ATOMIC_LOAD_SUB:
32187 case ISD::ATOMIC_LOAD_OR:
32188 case ISD::ATOMIC_LOAD_XOR:
32189 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
32190 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
32191 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
32192 case ISD::PARITY: return LowerPARITY(Op, Subtarget, DAG);
32193 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
32194 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
32195 case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
32196 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
32197 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
32198 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
32199 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
32200 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
32201 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
32202 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
32203 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
32204 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
32205 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
32206 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
32207 case ISD::SHL_PARTS:
32208 case ISD::SRA_PARTS:
32209 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
32210 case ISD::FSHL:
32211 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
32212 case ISD::STRICT_SINT_TO_FP:
32213 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
32214 case ISD::STRICT_UINT_TO_FP:
32215 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
32216 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
32217 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
32218 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
32219 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
32220 case ISD::ZERO_EXTEND_VECTOR_INREG:
32221 case ISD::SIGN_EXTEND_VECTOR_INREG:
32222 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
32223 case ISD::FP_TO_SINT:
32224 case ISD::STRICT_FP_TO_SINT:
32225 case ISD::FP_TO_UINT:
32226 case ISD::STRICT_FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
32227 case ISD::FP_TO_SINT_SAT:
32228 case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG);
32229 case ISD::FP_EXTEND:
32230 case ISD::STRICT_FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
32231 case ISD::FP_ROUND:
32232 case ISD::STRICT_FP_ROUND: return LowerFP_ROUND(Op, DAG);
32233 case ISD::FP16_TO_FP:
32234 case ISD::STRICT_FP16_TO_FP: return LowerFP16_TO_FP(Op, DAG);
32235 case ISD::FP_TO_FP16:
32236 case ISD::STRICT_FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
32237 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
32238 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
32239 case ISD::FADD:
32240 case ISD::FSUB: return lowerFaddFsub(Op, DAG);
32241 case ISD::FROUND: return LowerFROUND(Op, DAG);
32242 case ISD::FABS:
32243 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
32244 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
32245 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
32246 case ISD::LRINT:
32247 case ISD::LLRINT: return LowerLRINT_LLRINT(Op, DAG);
32248 case ISD::SETCC:
32249 case ISD::STRICT_FSETCC:
32250 case ISD::STRICT_FSETCCS: return LowerSETCC(Op, DAG);
32251 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
32252 case ISD::SELECT: return LowerSELECT(Op, DAG);
32253 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
32254 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
32255 case ISD::VASTART: return LowerVASTART(Op, DAG);
32256 case ISD::VAARG: return LowerVAARG(Op, DAG);
32257 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
32258 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
32259 case ISD::INTRINSIC_VOID:
32260 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
32261 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
32262 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
32263 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
32264 case ISD::FRAME_TO_ARGS_OFFSET:
32265 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
32266 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
32267 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
32268 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
32269 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
32270 case ISD::EH_SJLJ_SETUP_DISPATCH:
32271 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
32272 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
32273 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
32274 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
32275 case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG);
32276 case ISD::CTLZ:
32277 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
32278 case ISD::CTTZ:
32279 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
32280 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
32281 case ISD::MULHS:
32282 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
32283 case ISD::ROTL:
32284 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
32285 case ISD::SRA:
32286 case ISD::SRL:
32287 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
32288 case ISD::SADDO:
32289 case ISD::UADDO:
32290 case ISD::SSUBO:
32291 case ISD::USUBO: return LowerXALUO(Op, DAG);
32292 case ISD::SMULO:
32293 case ISD::UMULO: return LowerMULO(Op, Subtarget, DAG);
32294 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
32295 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
32296 case ISD::SADDO_CARRY:
32297 case ISD::SSUBO_CARRY:
32298 case ISD::ADDCARRY:
32299 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
32300 case ISD::ADD:
32301 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
32302 case ISD::UADDSAT:
32303 case ISD::SADDSAT:
32304 case ISD::USUBSAT:
32305 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
32306 case ISD::SMAX:
32307 case ISD::SMIN:
32308 case ISD::UMAX:
32309 case ISD::UMIN: return LowerMINMAX(Op, Subtarget, DAG);
32310 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
32311 case ISD::AVGCEILU: return LowerAVG(Op, Subtarget, DAG);
32312 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
32313 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
32314 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
32315 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
32316 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
32317 case ISD::GC_TRANSITION_START:
32318 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION(Op, DAG);
32319 case ISD::ADDRSPACECAST: return LowerADDRSPACECAST(Op, DAG);
32320 case X86ISD::CVTPS2PH: return LowerCVTPS2PH(Op, DAG);
32321 }
32322 }
32324 /// Replace a node with an illegal result type with a new node built out of
32325 /// custom code.
32326 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
32327 SmallVectorImpl<SDValue>&Results,
32328 SelectionDAG &DAG) const {
32329 SDLoc dl(N);
32330 switch (N->getOpcode()) {
32331 default:
32332 #ifndef NDEBUG
32333 dbgs() << "ReplaceNodeResults: ";
32334 N->dump(&DAG);
32335 #endif
32336 llvm_unreachable("Do not know how to custom type legalize this operation!");
32337 case X86ISD::CVTPH2PS: {
32338 EVT VT = N->getValueType(0);
32339 SDValue Lo, Hi;
32340 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
32341 EVT LoVT, HiVT;
32342 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32343 Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
32344 Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
32345 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32346 Results.push_back(Res);
32347 return;
32348 }
32349 case X86ISD::STRICT_CVTPH2PS: {
32350 EVT VT = N->getValueType(0);
32351 SDValue Lo, Hi;
32352 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
32353 EVT LoVT, HiVT;
32354 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32355 Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
32356 {N->getOperand(0), Lo});
32357 Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
32358 {N->getOperand(0), Hi});
32359 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32360 Lo.getValue(1), Hi.getValue(1));
32361 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32362 Results.push_back(Res);
32363 Results.push_back(Chain);
32364 return;
32365 }
32366 case X86ISD::CVTPS2PH:
32367 Results.push_back(LowerCVTPS2PH(SDValue(N, 0), DAG));
32368 return;
32369 case ISD::CTPOP: {
32370 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
32371 // Use a v2i64 if possible.
32372 bool NoImplicitFloatOps =
32373 DAG.getMachineFunction().getFunction().hasFnAttribute(
32374 Attribute::NoImplicitFloat);
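// i64 CTPOP is being custom type-legalized here (32-bit target), so reuse
// the vector CTPOP lowering on a v2i64 rather than splitting the value into
// two scalar popcounts, unless the function forbids implicit vector use.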
32375 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
32376 SDValue Wide =
32377 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
32378 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
32379 // Bit count should fit in 32-bits, extract it as that and then zero
32380 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
32381 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
32382 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
32383 DAG.getIntPtrConstant(0, dl));
32384 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
32385 Results.push_back(Wide);
32386 }
32387 return;
32388 }
32389 case ISD::MUL: {
32390 EVT VT = N->getValueType(0);
32391 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32392 VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
32393 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
32394 // elements are needed.
32395 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
32396 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
32397 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
32398 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
32399 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32400 unsigned NumConcats = 16 / VT.getVectorNumElements();
32401 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
32402 ConcatOps[0] = Res;
32403 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
32404 Results.push_back(Res);
32405 return;
32406 }
32407 case ISD::SMULO:
32408 case ISD::UMULO: {
32409 EVT VT = N->getValueType(0);
32410 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32411 VT == MVT::v2i32 && "Unexpected VT!");
32412 bool IsSigned = N->getOpcode() == ISD::SMULO;
32413 unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
32414 SDValue Op0 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(0));
32415 SDValue Op1 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(1));
32416 SDValue Res = DAG.getNode(ISD::MUL, dl, MVT::v2i64, Op0, Op1);
32417 // Extract the high 32 bits from each result using PSHUFD.
32418 // TODO: Could use SRL+TRUNCATE but that doesn't become a PSHUFD.
32419 SDValue Hi = DAG.getBitcast(MVT::v4i32, Res);
32420 Hi = DAG.getVectorShuffle(MVT::v4i32, dl, Hi, Hi, {1, 3, -1, -1});
32421 Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Hi,
32422 DAG.getIntPtrConstant(0, dl));
32424 // Truncate the low bits of the result. This will become PSHUFD.
32425 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32427 SDValue HiCmp;
32428 if (IsSigned) {
32429 // SMULO overflows if the high bits don't match the sign of the low.
32430 HiCmp = DAG.getNode(ISD::SRA, dl, VT, Res, DAG.getConstant(31, dl, VT));
32431 } else {
32432 // UMULO overflows if the high bits are non-zero.
32433 HiCmp = DAG.getConstant(0, dl, VT);
32434 }
32435 SDValue Ovf = DAG.getSetCC(dl, N->getValueType(1), Hi, HiCmp, ISD::SETNE);
32437 // Widen the result by padding with undef.
32438 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
32439 DAG.getUNDEF(VT));
32440 Results.push_back(Res);
32441 Results.push_back(Ovf);
32442 return;
32443 }
32444 case X86ISD::VPMADDWD: {
32445 // Legalize types for X86ISD::VPMADDWD by widening.
32446 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32448 EVT VT = N->getValueType(0);
32449 EVT InVT = N->getOperand(0).getValueType();
32450 assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
32451 "Expected a VT that divides into 128 bits.");
32452 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32453 "Unexpected type action!");
32454 unsigned NumConcat = 128 / InVT.getSizeInBits();
32456 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
32457 InVT.getVectorElementType(),
32458 NumConcat * InVT.getVectorNumElements());
32459 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
32460 VT.getVectorElementType(),
32461 NumConcat * VT.getVectorNumElements());
32463 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
32464 Ops[0] = N->getOperand(0);
32465 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
32466 Ops[0] = N->getOperand(1);
32467 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
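// Only the low InVT lanes of the widened operands carry real data; the
// undef padding is harmless because only the corresponding low lanes of the
// wide VPMADDWD result are consumed after type legalization.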
32469 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
32470 Results.push_back(Res);
32471 return;
32472 }
32473 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
32474 case X86ISD::FMINC:
32475 case X86ISD::FMIN:
32476 case X86ISD::FMAXC:
32477 case X86ISD::FMAX: {
32478 EVT VT = N->getValueType(0);
32479 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
32480 SDValue UNDEF = DAG.getUNDEF(VT);
32481 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
32482 N->getOperand(0), UNDEF);
32483 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
32484 N->getOperand(1), UNDEF);
32485 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
32486 return;
32487 }
32488 case ISD::SDIV:
32489 case ISD::UDIV:
32490 case ISD::SREM:
32491 case ISD::UREM: {
32492 EVT VT = N->getValueType(0);
32493 if (VT.isVector()) {
32494 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32495 "Unexpected type action!");
32496 // If this RHS is a constant splat vector we can widen this and let
32497 // division/remainder by constant optimize it.
32498 // TODO: Can we do something for non-splat?
32499 APInt SplatVal;
32500 if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
32501 unsigned NumConcats = 128 / VT.getSizeInBits();
32502 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
32503 Ops0[0] = N->getOperand(0);
32504 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
32505 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
32506 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
32507 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
32508 Results.push_back(Res);
32509 }
32510 return;
32511 }
32513 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
32514 Results.push_back(V);
32515 return;
32516 }
32517 case ISD::TRUNCATE: {
32518 MVT VT = N->getSimpleValueType(0);
32519 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
32520 return;
32522 // The generic legalizer will try to widen the input type to the same
32523 // number of elements as the widened result type. But this isn't always
32524 // the best thing so do some custom legalization to avoid some cases.
32525 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
32526 SDValue In = N->getOperand(0);
32527 EVT InVT = In.getValueType();
32529 unsigned InBits = InVT.getSizeInBits();
32530 if (128 % InBits == 0) {
32531 // 128 bit and smaller inputs should avoid truncate all together and
32532 // just use a build_vector that will become a shuffle.
32533 // TODO: Widen and use a shuffle directly?
32534 MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
32535 EVT EltVT = VT.getVectorElementType();
32536 unsigned WidenNumElts = WidenVT.getVectorNumElements();
32537 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
32538 // Use the original element count so we don't do more scalar opts than
32539 // necessary.
32540 unsigned MinElts = VT.getVectorNumElements();
32541 for (unsigned i=0; i < MinElts; ++i) {
32542 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
32543 DAG.getIntPtrConstant(i, dl));
32544 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
32545 }
32546 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
32547 return;
32548 }
32549 // With AVX512 there are some cases that can use a target specific
32550 // truncate node to go from 256/512 to less than 128 with zeros in the
32551 // upper elements of the 128 bit result.
32552 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
32553 // We can use VTRUNC directly for 256 bits with VLX, or for any 512-bit input.
32554 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
32555 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
32556 return;
32557 }
32558 // There's one case we can widen to 512 bits and use VTRUNC.
32559 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
32560 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
32561 DAG.getUNDEF(MVT::v4i64));
32562 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
32563 return;
32564 }
32565 }
32566 if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
32567 getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
32568 isTypeLegal(MVT::v4i64)) {
32569 // Input needs to be split and output needs to be widened. Let's use two
32570 // VTRUNCs, and shuffle their results together into the wider type.
32571 SDValue Lo, Hi;
32572 std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
32574 Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
32575 Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
32576 SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
32577 { 0, 1, 2, 3, 16, 17, 18, 19,
32578 -1, -1, -1, -1, -1, -1, -1, -1 });
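// Each half-VTRUNC leaves its four valid bytes in lanes 0-3 of a v16i8, so
// mask elements 0-3 pick Lo's bytes and 16-19 pick Hi's, concatenating the
// two halves into the low 8 bytes of the widened result.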
32579 Results.push_back(Res);
32580 return;
32581 }
32583 return;
32584 }
32585 case ISD::ANY_EXTEND:
32586 // Right now, only MVT::v8i8 has Custom action for an illegal type.
32587 // It's intended to custom handle the input type.
32588 assert(N->getValueType(0) == MVT::v8i8 &&
32589 "Do not know how to legalize this Node");
32591 case ISD::SIGN_EXTEND:
32592 case ISD::ZERO_EXTEND: {
32593 EVT VT = N->getValueType(0);
32594 SDValue In = N->getOperand(0);
32595 EVT InVT = In.getValueType();
32596 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
32597 (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
32598 assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
32599 "Unexpected type action!");
32600 assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
32601 // Custom split this so we can extend i8/i16->i32 invec. This is better
32602 // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
32603 // sra. Then extending from i32 to i64 using pcmpgt. By custom splitting
32604 // we allow the sra from the extend to i32 to be shared by the split.
32605 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
32607 // Fill a vector with sign bits for each element.
32608 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
32609 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
32611 // Create an unpackl and unpackh to interleave the sign bits then bitcast
32612 // to v2i64.
32613 SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
32614 {0, 4, 1, 5});
32615 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
32616 SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
32617 {2, 6, 3, 7});
32618 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
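// Interleaving each i32 element with its sign mask produces {value, sign}
// pairs in lane order, which read back as sign-extended i64 elements after
// the bitcasts to v2i64.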
32620 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32621 Results.push_back(Res);
32622 return;
32623 }
32625 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
32626 if (!InVT.is128BitVector()) {
32627 // Not a 128 bit vector, but maybe type legalization will promote
32628 // it to 128 bits.
32629 if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
32630 return;
32631 InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
32632 if (!InVT.is128BitVector())
32633 return;
32635 // Promote the input to 128 bits. Type legalization will turn this into
32636 // zext_inreg/sext_inreg.
32637 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
32638 }
32640 // Perform custom splitting instead of the two stage extend we would get
32641 // by type legalization.
32642 EVT LoVT, HiVT;
32643 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
32644 assert(isTypeLegal(LoVT) && "Split VT not legal?");
32646 SDValue Lo = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, LoVT, In, DAG);
32648 // We need to shift the input over by half the number of elements.
32649 unsigned NumElts = InVT.getVectorNumElements();
32650 unsigned HalfNumElts = NumElts / 2;
32651 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
32652 for (unsigned i = 0; i != HalfNumElts; ++i)
32653 ShufMask[i] = i + HalfNumElts;
32655 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
32656 Hi = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, HiVT, Hi, DAG);
32658 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32659 Results.push_back(Res);
32660 }
32661 return;
32662 }
32663 case ISD::FP_TO_SINT:
32664 case ISD::STRICT_FP_TO_SINT:
32665 case ISD::FP_TO_UINT:
32666 case ISD::STRICT_FP_TO_UINT: {
32667 bool IsStrict = N->isStrictFPOpcode();
32668 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
32669 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
32670 EVT VT = N->getValueType(0);
32671 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32672 EVT SrcVT = Src.getValueType();
32674 if (VT.isVector() && Subtarget.hasFP16() &&
32675 SrcVT.getVectorElementType() == MVT::f16) {
32676 EVT EleVT = VT.getVectorElementType();
32677 EVT ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
32679 if (SrcVT != MVT::v8f16) {
32680 SDValue Tmp =
32681 IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
32682 SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
32683 Ops[0] = Src;
32684 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
32685 }
32687 SDValue Res, Chain;
32688 if (IsStrict) {
32689 unsigned Opc =
32690 IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32691 Res =
32692 DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
32693 Chain = Res.getValue(1);
32694 } else {
32695 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32696 Res = DAG.getNode(Opc, dl, ResVT, Src);
32697 }
32699 // TODO: Need to add exception check code for strict FP.
32700 if (EleVT.getSizeInBits() < 16) {
32701 MVT TmpVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8);
32702 Res = DAG.getNode(ISD::TRUNCATE, dl, TmpVT, Res);
32704 // Now widen to 128 bits.
32705 unsigned NumConcats = 128 / TmpVT.getSizeInBits();
32706 MVT ConcatVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8 * NumConcats);
32707 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(TmpVT));
32708 ConcatOps[0] = Res;
32709 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
32710 }
32712 Results.push_back(Res);
32713 if (IsStrict)
32714 Results.push_back(Chain);
32715 return;
32716 }
32719 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
32720 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32721 "Unexpected type action!");
32723 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
32724 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
32725 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
32726 VT.getVectorNumElements());
32727 SDValue Res;
32728 SDValue Chain;
32729 if (IsStrict) {
32730 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
32731 {N->getOperand(0), Src});
32732 Chain = Res.getValue(1);
32733 } else
32734 Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
32736 // Preserve what we know about the size of the original result. If the
32737 // result is v2i32, we have to manually widen the assert.
32738 if (PromoteVT == MVT::v2i32)
32739 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
32740 DAG.getUNDEF(MVT::v2i32));
32742 Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext, dl,
32743 Res.getValueType(), Res,
32744 DAG.getValueType(VT.getVectorElementType()));
32746 if (PromoteVT == MVT::v2i32)
32747 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
32748 DAG.getIntPtrConstant(0, dl));
32750 // Truncate back to the original width.
32751 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32753 // Now widen to 128 bits.
32754 unsigned NumConcats = 128 / VT.getSizeInBits();
32755 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
32756 VT.getVectorNumElements() * NumConcats);
32757 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
32758 ConcatOps[0] = Res;
32759 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
32760 Results.push_back(Res);
32761 if (IsStrict)
32762 Results.push_back(Chain);
32763 return;
32764 }
32767 if (VT == MVT::v2i32) {
32768 assert((!IsStrict || IsSigned || Subtarget.hasAVX512()) &&
32769 "Strict unsigned conversion requires AVX512");
32770 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32771 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32772 "Unexpected type action!");
32773 if (Src.getValueType() == MVT::v2f64) {
32774 if (!IsSigned && !Subtarget.hasAVX512()) {
32775 SDValue Res =
32776 expandFP_TO_UINT_SSE(MVT::v4i32, Src, dl, DAG, Subtarget);
32777 Results.push_back(Res);
32778 return;
32779 }
32781 unsigned Opc;
32782 if (IsStrict)
32783 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32784 else
32785 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32787 // If we have VLX we can emit a target specific FP_TO_UINT node.
32788 if (!IsSigned && !Subtarget.hasVLX()) {
32789 // Otherwise we can defer to the generic legalizer which will widen
32790 // the input as well. This will be further widened during op
32791 // legalization to v8i32<-v8f64.
32792 // For strict nodes we'll need to widen ourselves.
32793 // FIXME: Fix the type legalizer to safely widen strict nodes?
32794 if (!IsStrict)
32795 return;
32796 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
32797 DAG.getConstantFP(0.0, dl, MVT::v2f64));
32798 Opc = N->getOpcode();
32803 Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
32804 {N->getOperand(0), Src});
32805 Chain = Res.getValue(1);
32806 } else
32807 Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
32809 Results.push_back(Res);
32810 if (IsStrict)
32811 Results.push_back(Chain);
32812 return;
32813 }
32815 // Custom widen strict v2f32->v2i32 by padding with zeros.
32816 // FIXME: Should generic type legalizer do this?
32817 if (Src.getValueType() == MVT::v2f32 && IsStrict) {
32818 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
32819 DAG.getConstantFP(0.0, dl, MVT::v2f32));
32820 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
32821 {N->getOperand(0), Src});
32822 Results.push_back(Res);
32823 Results.push_back(Res.getValue(1));
32824 return;
32825 }
32827 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
32828 // so early out here.
32829 return;
32830 }
32832 assert(!VT.isVector() && "Vectors should have been handled above!");
32834 if ((Subtarget.hasDQI() && VT == MVT::i64 &&
32835 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) ||
32836 (Subtarget.hasFP16() && SrcVT == MVT::f16)) {
32837 assert(!Subtarget.is64Bit() && "i64 should be legal");
32838 unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
32839 // If we use a 128-bit result we might need to use a target specific node.
32840 unsigned SrcElts =
32841 std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
32842 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
32843 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
32844 unsigned Opc = N->getOpcode();
32845 if (NumElts != SrcElts) {
32846 if (IsStrict)
32847 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32848 else
32849 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32850 }
32852 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
32853 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
32854 DAG.getConstantFP(0.0, dl, VecInVT), Src,
32855 ZeroIdx);
32856 SDValue Chain;
32857 if (IsStrict) {
32858 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
32859 Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
32860 Chain = Res.getValue(1);
32861 } else
32862 Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
32863 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
32864 Results.push_back(Res);
32865 if (IsStrict)
32866 Results.push_back(Chain);
32867 return;
32868 }
32870 if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
32871 SDValue Chain;
32872 SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
32873 Results.push_back(V);
32874 if (IsStrict)
32875 Results.push_back(Chain);
32876 return;
32877 }
32879 SDValue Chain;
32880 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
32881 Results.push_back(V);
32882 if (IsStrict)
32883 Results.push_back(Chain);
32884 }
32885 return;
32886 }
32887 case ISD::LRINT:
32888 case ISD::LLRINT: {
32889 if (SDValue V = LRINT_LLRINTHelper(N, DAG))
32890 Results.push_back(V);
32891 return;
32892 }
32894 case ISD::SINT_TO_FP:
32895 case ISD::STRICT_SINT_TO_FP:
32896 case ISD::UINT_TO_FP:
32897 case ISD::STRICT_UINT_TO_FP: {
32898 bool IsStrict = N->isStrictFPOpcode();
32899 bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
32900 N->getOpcode() == ISD::STRICT_SINT_TO_FP;
32901 EVT VT = N->getValueType(0);
32902 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32903 if (VT.getVectorElementType() == MVT::f16 && Subtarget.hasFP16() &&
32904 Subtarget.hasVLX()) {
32905 if (Src.getValueType().getVectorElementType() == MVT::i16)
32906 return;
32908 if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2i32)
32909 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
32910 IsStrict ? DAG.getConstant(0, dl, MVT::v2i32)
32911 : DAG.getUNDEF(MVT::v2i32));
32912 if (IsStrict) {
32913 unsigned Opc =
32914 IsSigned ? X86ISD::STRICT_CVTSI2P : X86ISD::STRICT_CVTUI2P;
32915 SDValue Res = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
32916 {N->getOperand(0), Src});
32917 Results.push_back(Res);
32918 Results.push_back(Res.getValue(1));
32919 } else {
32920 unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
32921 Results.push_back(DAG.getNode(Opc, dl, MVT::v8f16, Src));
32922 }
32923 return;
32924 }
32925 if (VT != MVT::v2f32)
32926 return;
32927 EVT SrcVT = Src.getValueType();
32928 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
32929 if (IsStrict) {
32930 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
32931 : X86ISD::STRICT_CVTUI2P;
32932 SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
32933 {N->getOperand(0), Src});
32934 Results.push_back(Res);
32935 Results.push_back(Res.getValue(1));
32936 } else {
32937 unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
32938 Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
32939 }
32940 return;
32941 }
32942 if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
32943 Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
32944 SDValue Zero = DAG.getConstant(0, dl, SrcVT);
32945 SDValue One = DAG.getConstant(1, dl, SrcVT);
32946 SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
32947 DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
32948 DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
32949 SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
32950 SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
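// Inputs with the sign bit set cannot be converted directly as signed i64,
// so they are halved first (shift right by one, OR-ing the shifted-out bit
// back in to keep rounding correct); the converted result is then doubled
// again by the FADD below.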
32951 SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
32952 for (int i = 0; i != 2; ++i) {
32953 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
32954 SignSrc, DAG.getIntPtrConstant(i, dl));
32955 if (IsStrict)
32956 SignCvts[i] =
32957 DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
32958 {N->getOperand(0), Elt});
32960 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
32961 }
32962 SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
32963 SDValue Slow, Chain;
32964 if (IsStrict) {
32965 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32966 SignCvts[0].getValue(1), SignCvts[1].getValue(1));
32967 Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
32968 {Chain, SignCvt, SignCvt});
32969 Chain = Slow.getValue(1);
32970 } else {
32971 Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
32972 }
32973 IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
32974 IsNeg =
32975 DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
32976 SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
32977 Results.push_back(Cvt);
32978 if (IsStrict)
32979 Results.push_back(Chain);
32980 return;
32981 }
32983 if (SrcVT != MVT::v2i32)
32984 return;
32986 if (IsSigned || Subtarget.hasAVX512()) {
32987 if (!IsStrict)
32988 return;
32990 // Custom widen strict v2i32->v2f32 to avoid scalarization.
32991 // FIXME: Should generic type legalizer do this?
32992 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
32993 DAG.getConstant(0, dl, MVT::v2i32));
32994 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
32995 {N->getOperand(0), Src});
32996 Results.push_back(Res);
32997 Results.push_back(Res.getValue(1));
32998 return;
32999 }
33001 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
33002 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
33003 SDValue VBias =
33004 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
33005 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
33006 DAG.getBitcast(MVT::v2i64, VBias));
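// Classic uint32->double trick: OR-ing the zero-extended value into the
// mantissa of 2^52 (0x4330000000000000) yields exactly 2^52 + x, so
// subtracting the same bias recovers x as a double with no rounding error.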
33007 Or = DAG.getBitcast(MVT::v2f64, Or);
33008 if (IsStrict) {
33009 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
33010 {N->getOperand(0), Or, VBias});
33011 SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
33012 {MVT::v4f32, MVT::Other},
33013 {Sub.getValue(1), Sub});
33014 Results.push_back(Res);
33015 Results.push_back(Res.getValue(1));
33016 } else {
33017 // TODO: Are there any fast-math-flags to propagate here?
33018 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
33019 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
33020 }
33021 return;
33022 }
33023 case ISD::STRICT_FP_ROUND:
33024 case ISD::FP_ROUND: {
33025 bool IsStrict = N->isStrictFPOpcode();
33026 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
33027 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
33028 SDValue Rnd = N->getOperand(IsStrict ? 2 : 1);
33029 EVT SrcVT = Src.getValueType();
33030 EVT VT = N->getValueType(0);
33031 SDValue V;
33032 if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2f32) {
33033 SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f32)
33034 : DAG.getUNDEF(MVT::v2f32);
33035 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src, Ext);
33036 }
33037 if (!Subtarget.hasFP16() && VT.getVectorElementType() == MVT::f16) {
33038 assert(Subtarget.hasF16C() && "Cannot widen f16 without F16C");
33039 if (SrcVT.getVectorElementType() != MVT::f32)
33040 return;
33042 if (IsStrict)
33043 V = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
33044 {Chain, Src, Rnd});
33045 else
33046 V = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Src, Rnd);
33048 Results.push_back(DAG.getBitcast(MVT::v8f16, V));
33049 if (IsStrict)
33050 Results.push_back(V.getValue(1));
33051 return;
33052 }
33053 if (!isTypeLegal(Src.getValueType()))
33054 return;
33055 EVT NewVT = VT.getVectorElementType() == MVT::f16 ? MVT::v8f16 : MVT::v4f32;
33056 if (IsStrict)
33057 V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {NewVT, MVT::Other},
33058 {N->getOperand(0), Src});
33059 else
33060 V = DAG.getNode(X86ISD::VFPROUND, dl, NewVT, Src);
33061 Results.push_back(V);
33062 if (IsStrict)
33063 Results.push_back(V.getValue(1));
33064 return;
33065 }
33066 case ISD::FP_EXTEND:
33067 case ISD::STRICT_FP_EXTEND: {
33068 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
33069 // No other ValueType for FP_EXTEND should reach this point.
33070 assert(N->getValueType(0) == MVT::v2f32 &&
33071 "Do not know how to legalize this Node");
33072 if (!Subtarget.hasFP16() || !Subtarget.hasVLX())
33073 return;
33074 bool IsStrict = N->isStrictFPOpcode();
33075 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
33076 SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f16)
33077 : DAG.getUNDEF(MVT::v2f16);
33078 SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f16, Src, Ext);
33079 if (IsStrict)
33080 V = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::v4f32, MVT::Other},
33081 {N->getOperand(0), V});
33083 V = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, V);
33084 Results.push_back(V);
33085 if (IsStrict)
33086 Results.push_back(V.getValue(1));
33087 return;
33088 }
33089 case ISD::INTRINSIC_W_CHAIN: {
33090 unsigned IntNo = N->getConstantOperandVal(1);
33091 switch (IntNo) {
33092 default : llvm_unreachable("Do not know how to custom type "
33093 "legalize this intrinsic operation!");
33094 case Intrinsic::x86_rdtsc:
33095 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
33096 Results);
33097 case Intrinsic::x86_rdtscp:
33098 return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
33099 Results);
33100 case Intrinsic::x86_rdpmc:
33101 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
33102 Results);
33103 return;
33104 case Intrinsic::x86_rdpru:
33105 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPRU, X86::ECX, Subtarget,
33106 Results);
33107 return;
33108 case Intrinsic::x86_xgetbv:
33109 expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
33110 Results);
33111 return;
33112 }
33113 }
33114 case ISD::READCYCLECOUNTER: {
33115 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
33116 }
33117 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
33118 EVT T = N->getValueType(0);
33119 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
33120 bool Regs64bit = T == MVT::i128;
33121 assert((!Regs64bit || Subtarget.canUseCMPXCHG16B()) &&
33122 "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
33123 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
33124 SDValue cpInL, cpInH;
33125 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
33126 DAG.getConstant(0, dl, HalfT));
33127 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
33128 DAG.getConstant(1, dl, HalfT));
33129 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
33130 Regs64bit ? X86::RAX : X86::EAX,
33131 cpInL, SDValue());
33132 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
33133 Regs64bit ? X86::RDX : X86::EDX,
33134 cpInH, cpInL.getValue(1));
33135 SDValue swapInL, swapInH;
33136 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
33137 DAG.getConstant(0, dl, HalfT));
33138 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
33139 DAG.getConstant(1, dl, HalfT));
33140 swapInH =
33141 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
33142 swapInH, cpInH.getValue(1));
33144 // In 64-bit mode we might need the base pointer in RBX, but we can't know
33145 // until later. So we keep the RBX input in a vreg and use a custom
33146 // inserter.
33147 // Since RBX will be a reserved register the register allocator will not
33148 // make sure its value will be properly saved and restored around this
33149 // live-range.
33150 SDValue Result;
33151 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
33152 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
33153 if (Regs64bit) {
33154 SDValue Ops[] = {swapInH.getValue(0), N->getOperand(1), swapInL,
33155 swapInH.getValue(1)};
33156 Result =
33157 DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG16_DAG, dl, Tys, Ops, T, MMO);
33158 } else {
33159 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, X86::EBX, swapInL,
33160 swapInH.getValue(1));
33161 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
33162 swapInL.getValue(1)};
33163 Result =
33164 DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, T, MMO);
33165 }
33167 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
33168 Regs64bit ? X86::RAX : X86::EAX,
33169 HalfT, Result.getValue(1));
33170 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
33171 Regs64bit ? X86::RDX : X86::EDX,
33172 HalfT, cpOutL.getValue(2));
33173 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
33175 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
33176 MVT::i32, cpOutH.getValue(2));
33177 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
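// CMPXCHG8B/16B reports success by setting ZF and leaves the current memory
// value in EDX:EAX (or RDX:RAX), which the copies from RAX/RDX above
// capture for the BUILD_PAIR result below.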
33178 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
33180 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
33181 Results.push_back(Success);
33182 Results.push_back(EFLAGS.getValue(1));
33183 return;
33184 }
33185 case ISD::ATOMIC_LOAD: {
33186 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
33187 bool NoImplicitFloatOps =
33188 DAG.getMachineFunction().getFunction().hasFnAttribute(
33189 Attribute::NoImplicitFloat);
33190 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
33191 auto *Node = cast<AtomicSDNode>(N);
33192 if (Subtarget.hasSSE1()) {
33193 // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
33194 // Then extract the lower 64-bits.
33195 MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
33196 SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
33197 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
33198 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
33199 MVT::i64, Node->getMemOperand());
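// VZEXT_LOAD selects to a single 64-bit load (MOVQ, or XORPS+MOVLPS on
// SSE1) and carries over the atomic node's memory operand, which is how the
// 64-bit atomicity of the original load is preserved on 32-bit targets.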
33200 if (Subtarget.hasSSE2()) {
33201 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
33202 DAG.getIntPtrConstant(0, dl));
33203 Results.push_back(Res);
33204 Results.push_back(Ld.getValue(1));
33205 return;
33206 }
33207 // We use an alternative sequence for SSE1 that extracts as v2f32 and
33208 // then casts to i64. This avoids a 128-bit stack temporary being
33209 // created by type legalization if we were to cast v4f32->v2i64.
33210 SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
33211 DAG.getIntPtrConstant(0, dl));
33212 Res = DAG.getBitcast(MVT::i64, Res);
33213 Results.push_back(Res);
33214 Results.push_back(Ld.getValue(1));
33215 return;
33216 }
33217 if (Subtarget.hasX87()) {
33218 // First load this into an 80-bit X87 register. This will put the whole
33219 // integer into the significand.
33220 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
33221 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
33222 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
33223 dl, Tys, Ops, MVT::i64,
33224 Node->getMemOperand());
33225 SDValue Chain = Result.getValue(1);
33227 // Now store the X87 register to a stack temporary and convert to i64.
33228 // This store is not atomic and doesn't need to be.
33229 // FIXME: We don't need a stack temporary if the result of the load
33230 // is already being stored. We could just directly store there.
33231 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
33232 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
33233 MachinePointerInfo MPI =
33234 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
33235 SDValue StoreOps[] = { Chain, Result, StackPtr };
33236 Chain = DAG.getMemIntrinsicNode(
33237 X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
33238 MPI, None /*Align*/, MachineMemOperand::MOStore);
33240 // Finally load the value back from the stack temporary and return it.
33241 // This load is not atomic and doesn't need to be.
33242 // This load will be further type legalized.
33243 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
33244 Results.push_back(Result);
33245 Results.push_back(Result.getValue(1));
33246 return;
33247 }
33248 }
33249 // TODO: Use MOVLPS when SSE1 is available?
33250 // Delegate to generic TypeLegalization. Situations we can really handle
33251 // should have already been dealt with by AtomicExpandPass.cpp.
33252 break;
33253 }
33254 case ISD::ATOMIC_SWAP:
33255 case ISD::ATOMIC_LOAD_ADD:
33256 case ISD::ATOMIC_LOAD_SUB:
33257 case ISD::ATOMIC_LOAD_AND:
33258 case ISD::ATOMIC_LOAD_OR:
33259 case ISD::ATOMIC_LOAD_XOR:
33260 case ISD::ATOMIC_LOAD_NAND:
33261 case ISD::ATOMIC_LOAD_MIN:
33262 case ISD::ATOMIC_LOAD_MAX:
33263 case ISD::ATOMIC_LOAD_UMIN:
33264 case ISD::ATOMIC_LOAD_UMAX:
33265 // Delegate to generic TypeLegalization. Situations we can really handle
33266 // should have already been dealt with by AtomicExpandPass.cpp.
33267 break;
33269 case ISD::BITCAST: {
33270 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
33271 EVT DstVT = N->getValueType(0);
33272 EVT SrcVT = N->getOperand(0).getValueType();
33274 // If this is a bitcast from a v64i1 k-register to a i64 on a 32-bit target
33275 // we can split using the k-register rather than memory.
33276 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
33277 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
33278 SDValue Lo, Hi;
33279 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
33280 Lo = DAG.getBitcast(MVT::i32, Lo);
33281 Hi = DAG.getBitcast(MVT::i32, Hi);
33282 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
33283 Results.push_back(Res);
33284 return;
33285 }
33287 if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
33288 // FIXME: Use v4f32 for SSE1?
33289 assert(Subtarget.hasSSE2() && "Requires SSE2");
33290 assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
33291 "Unexpected type action!");
33292 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
33293 SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
33294 N->getOperand(0));
33295 Res = DAG.getBitcast(WideVT, Res);
33296 Results.push_back(Res);
33297 return;
33298 }
33300 return;
33301 }
33302 case ISD::MGATHER: {
33303 EVT VT = N->getValueType(0);
33304 if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
33305 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
33306 auto *Gather = cast<MaskedGatherSDNode>(N);
33307 SDValue Index = Gather->getIndex();
33308 if (Index.getValueType() != MVT::v2i64)
33309 return;
33310 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33311 "Unexpected type action!");
33312 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
33313 SDValue Mask = Gather->getMask();
33314 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
33315 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
33316 Gather->getPassThru(),
33318 if (!Subtarget.hasVLX()) {
33319 // We need to widen the mask, but the instruction will only use 2
33320 // of its elements. So we can use undef.
33321 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
33322 DAG.getUNDEF(MVT::v2i1));
33323 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
33324 }
33325 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
33326 Gather->getBasePtr(), Index, Gather->getScale() };
33327 SDValue Res = DAG.getMemIntrinsicNode(
33328 X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
33329 Gather->getMemoryVT(), Gather->getMemOperand());
33330 Results.push_back(Res);
      Results.push_back(Res.getValue(1));
      return;
    }
    return;
  }
  case ISD::LOAD: {
    // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
33338 // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids a int->fp
33339 // cast since type legalization will try to use an i64 load.
33340 MVT VT = N->getSimpleValueType(0);
33341 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
33342 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33343 "Unexpected type action!");
    if (!ISD::isNON_EXTLoad(N))
      return;
33346 auto *Ld = cast<LoadSDNode>(N);
33347 if (Subtarget.hasSSE2()) {
33348 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
33349 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
33350 Ld->getPointerInfo(), Ld->getOriginalAlign(),
33351 Ld->getMemOperand()->getFlags());
33352 SDValue Chain = Res.getValue(1);
33353 MVT VecVT = MVT::getVectorVT(LdVT, 2);
33354 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
33355 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
33356 Res = DAG.getBitcast(WideVT, Res);
33357 Results.push_back(Res);
      Results.push_back(Chain);
      return;
    }
    assert(Subtarget.hasSSE1() && "Expected SSE");
33362 SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
33363 SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
33364 SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
33365 MVT::i64, Ld->getMemOperand());
33366 Results.push_back(Res);
    Results.push_back(Res.getValue(1));
    return;
  }
  case ISD::ADDRSPACECAST: {
33371 SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
    Results.push_back(V);
    return;
  }
  case ISD::BITREVERSE:
33376 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
33377 assert(Subtarget.hasXOP() && "Expected XOP");
33378 // We can use VPPERM by copying to a vector register and back. We'll need
33379 // to move the scalar in two i32 pieces.
    Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
    return;
  }
}
33385 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
33386 switch ((X86ISD::NodeType)Opcode) {
33387 case X86ISD::FIRST_NUMBER: break;
33388 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
33389 NODE_NAME_CASE(BSF)
33390 NODE_NAME_CASE(BSR)
33391 NODE_NAME_CASE(FSHL)
33392 NODE_NAME_CASE(FSHR)
33393 NODE_NAME_CASE(FAND)
33394 NODE_NAME_CASE(FANDN)
33395 NODE_NAME_CASE(FOR)
33396 NODE_NAME_CASE(FXOR)
33397 NODE_NAME_CASE(FILD)
33398 NODE_NAME_CASE(FIST)
33399 NODE_NAME_CASE(FP_TO_INT_IN_MEM)
33400 NODE_NAME_CASE(FLD)
33401 NODE_NAME_CASE(FST)
33402 NODE_NAME_CASE(CALL)
  NODE_NAME_CASE(CALL_RVMARKER)
  NODE_NAME_CASE(BT)
  NODE_NAME_CASE(CMP)
33406 NODE_NAME_CASE(FCMP)
33407 NODE_NAME_CASE(STRICT_FCMP)
33408 NODE_NAME_CASE(STRICT_FCMPS)
33409 NODE_NAME_CASE(COMI)
33410 NODE_NAME_CASE(UCOMI)
33411 NODE_NAME_CASE(CMPM)
33412 NODE_NAME_CASE(CMPMM)
33413 NODE_NAME_CASE(STRICT_CMPM)
33414 NODE_NAME_CASE(CMPMM_SAE)
33415 NODE_NAME_CASE(SETCC)
33416 NODE_NAME_CASE(SETCC_CARRY)
33417 NODE_NAME_CASE(FSETCC)
33418 NODE_NAME_CASE(FSETCCM)
33419 NODE_NAME_CASE(FSETCCM_SAE)
33420 NODE_NAME_CASE(CMOV)
33421 NODE_NAME_CASE(BRCOND)
33422 NODE_NAME_CASE(RET_FLAG)
33423 NODE_NAME_CASE(IRET)
33424 NODE_NAME_CASE(REP_STOS)
33425 NODE_NAME_CASE(REP_MOVS)
33426 NODE_NAME_CASE(GlobalBaseReg)
33427 NODE_NAME_CASE(Wrapper)
33428 NODE_NAME_CASE(WrapperRIP)
33429 NODE_NAME_CASE(MOVQ2DQ)
33430 NODE_NAME_CASE(MOVDQ2Q)
33431 NODE_NAME_CASE(MMX_MOVD2W)
33432 NODE_NAME_CASE(MMX_MOVW2D)
33433 NODE_NAME_CASE(PEXTRB)
33434 NODE_NAME_CASE(PEXTRW)
33435 NODE_NAME_CASE(INSERTPS)
33436 NODE_NAME_CASE(PINSRB)
33437 NODE_NAME_CASE(PINSRW)
33438 NODE_NAME_CASE(PSHUFB)
33439 NODE_NAME_CASE(ANDNP)
33440 NODE_NAME_CASE(BLENDI)
33441 NODE_NAME_CASE(BLENDV)
33442 NODE_NAME_CASE(HADD)
33443 NODE_NAME_CASE(HSUB)
33444 NODE_NAME_CASE(FHADD)
33445 NODE_NAME_CASE(FHSUB)
33446 NODE_NAME_CASE(CONFLICT)
33447 NODE_NAME_CASE(FMAX)
33448 NODE_NAME_CASE(FMAXS)
33449 NODE_NAME_CASE(FMAX_SAE)
33450 NODE_NAME_CASE(FMAXS_SAE)
33451 NODE_NAME_CASE(FMIN)
33452 NODE_NAME_CASE(FMINS)
33453 NODE_NAME_CASE(FMIN_SAE)
33454 NODE_NAME_CASE(FMINS_SAE)
33455 NODE_NAME_CASE(FMAXC)
33456 NODE_NAME_CASE(FMINC)
33457 NODE_NAME_CASE(FRSQRT)
33458 NODE_NAME_CASE(FRCP)
33459 NODE_NAME_CASE(EXTRQI)
33460 NODE_NAME_CASE(INSERTQI)
33461 NODE_NAME_CASE(TLSADDR)
33462 NODE_NAME_CASE(TLSBASEADDR)
33463 NODE_NAME_CASE(TLSCALL)
33464 NODE_NAME_CASE(EH_SJLJ_SETJMP)
33465 NODE_NAME_CASE(EH_SJLJ_LONGJMP)
33466 NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
33467 NODE_NAME_CASE(EH_RETURN)
33468 NODE_NAME_CASE(TC_RETURN)
33469 NODE_NAME_CASE(FNSTCW16m)
33470 NODE_NAME_CASE(FLDCW16m)
33471 NODE_NAME_CASE(LCMPXCHG_DAG)
33472 NODE_NAME_CASE(LCMPXCHG8_DAG)
33473 NODE_NAME_CASE(LCMPXCHG16_DAG)
33474 NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
33475 NODE_NAME_CASE(LADD)
33476 NODE_NAME_CASE(LSUB)
33477 NODE_NAME_CASE(LOR)
33478 NODE_NAME_CASE(LXOR)
33479 NODE_NAME_CASE(LAND)
33480 NODE_NAME_CASE(LBTS)
33481 NODE_NAME_CASE(LBTC)
33482 NODE_NAME_CASE(LBTR)
33483 NODE_NAME_CASE(VZEXT_MOVL)
33484 NODE_NAME_CASE(VZEXT_LOAD)
33485 NODE_NAME_CASE(VEXTRACT_STORE)
33486 NODE_NAME_CASE(VTRUNC)
33487 NODE_NAME_CASE(VTRUNCS)
33488 NODE_NAME_CASE(VTRUNCUS)
33489 NODE_NAME_CASE(VMTRUNC)
33490 NODE_NAME_CASE(VMTRUNCS)
33491 NODE_NAME_CASE(VMTRUNCUS)
33492 NODE_NAME_CASE(VTRUNCSTORES)
33493 NODE_NAME_CASE(VTRUNCSTOREUS)
33494 NODE_NAME_CASE(VMTRUNCSTORES)
33495 NODE_NAME_CASE(VMTRUNCSTOREUS)
33496 NODE_NAME_CASE(VFPEXT)
33497 NODE_NAME_CASE(STRICT_VFPEXT)
33498 NODE_NAME_CASE(VFPEXT_SAE)
33499 NODE_NAME_CASE(VFPEXTS)
33500 NODE_NAME_CASE(VFPEXTS_SAE)
33501 NODE_NAME_CASE(VFPROUND)
33502 NODE_NAME_CASE(STRICT_VFPROUND)
33503 NODE_NAME_CASE(VMFPROUND)
33504 NODE_NAME_CASE(VFPROUND_RND)
33505 NODE_NAME_CASE(VFPROUNDS)
33506 NODE_NAME_CASE(VFPROUNDS_RND)
33507 NODE_NAME_CASE(VSHLDQ)
33508 NODE_NAME_CASE(VSRLDQ)
33509 NODE_NAME_CASE(VSHL)
33510 NODE_NAME_CASE(VSRL)
33511 NODE_NAME_CASE(VSRA)
33512 NODE_NAME_CASE(VSHLI)
33513 NODE_NAME_CASE(VSRLI)
33514 NODE_NAME_CASE(VSRAI)
33515 NODE_NAME_CASE(VSHLV)
33516 NODE_NAME_CASE(VSRLV)
33517 NODE_NAME_CASE(VSRAV)
33518 NODE_NAME_CASE(VROTLI)
33519 NODE_NAME_CASE(VROTRI)
33520 NODE_NAME_CASE(VPPERM)
33521 NODE_NAME_CASE(CMPP)
33522 NODE_NAME_CASE(STRICT_CMPP)
33523 NODE_NAME_CASE(PCMPEQ)
33524 NODE_NAME_CASE(PCMPGT)
33525 NODE_NAME_CASE(PHMINPOS)
33526 NODE_NAME_CASE(ADD)
33527 NODE_NAME_CASE(SUB)
33528 NODE_NAME_CASE(ADC)
33529 NODE_NAME_CASE(SBB)
33530 NODE_NAME_CASE(SMUL)
  NODE_NAME_CASE(UMUL)
  NODE_NAME_CASE(OR)
  NODE_NAME_CASE(XOR)
33534 NODE_NAME_CASE(AND)
33535 NODE_NAME_CASE(BEXTR)
33536 NODE_NAME_CASE(BEXTRI)
33537 NODE_NAME_CASE(BZHI)
33538 NODE_NAME_CASE(PDEP)
33539 NODE_NAME_CASE(PEXT)
33540 NODE_NAME_CASE(MUL_IMM)
33541 NODE_NAME_CASE(MOVMSK)
33542 NODE_NAME_CASE(PTEST)
33543 NODE_NAME_CASE(TESTP)
33544 NODE_NAME_CASE(KORTEST)
33545 NODE_NAME_CASE(KTEST)
33546 NODE_NAME_CASE(KADD)
33547 NODE_NAME_CASE(KSHIFTL)
33548 NODE_NAME_CASE(KSHIFTR)
33549 NODE_NAME_CASE(PACKSS)
33550 NODE_NAME_CASE(PACKUS)
33551 NODE_NAME_CASE(PALIGNR)
33552 NODE_NAME_CASE(VALIGN)
33553 NODE_NAME_CASE(VSHLD)
33554 NODE_NAME_CASE(VSHRD)
33555 NODE_NAME_CASE(VSHLDV)
33556 NODE_NAME_CASE(VSHRDV)
33557 NODE_NAME_CASE(PSHUFD)
33558 NODE_NAME_CASE(PSHUFHW)
33559 NODE_NAME_CASE(PSHUFLW)
33560 NODE_NAME_CASE(SHUFP)
33561 NODE_NAME_CASE(SHUF128)
33562 NODE_NAME_CASE(MOVLHPS)
33563 NODE_NAME_CASE(MOVHLPS)
33564 NODE_NAME_CASE(MOVDDUP)
33565 NODE_NAME_CASE(MOVSHDUP)
33566 NODE_NAME_CASE(MOVSLDUP)
33567 NODE_NAME_CASE(MOVSD)
33568 NODE_NAME_CASE(MOVSS)
33569 NODE_NAME_CASE(MOVSH)
33570 NODE_NAME_CASE(UNPCKL)
33571 NODE_NAME_CASE(UNPCKH)
33572 NODE_NAME_CASE(VBROADCAST)
33573 NODE_NAME_CASE(VBROADCAST_LOAD)
33574 NODE_NAME_CASE(VBROADCASTM)
33575 NODE_NAME_CASE(SUBV_BROADCAST_LOAD)
33576 NODE_NAME_CASE(VPERMILPV)
33577 NODE_NAME_CASE(VPERMILPI)
33578 NODE_NAME_CASE(VPERM2X128)
33579 NODE_NAME_CASE(VPERMV)
33580 NODE_NAME_CASE(VPERMV3)
33581 NODE_NAME_CASE(VPERMI)
33582 NODE_NAME_CASE(VPTERNLOG)
33583 NODE_NAME_CASE(VFIXUPIMM)
33584 NODE_NAME_CASE(VFIXUPIMM_SAE)
33585 NODE_NAME_CASE(VFIXUPIMMS)
33586 NODE_NAME_CASE(VFIXUPIMMS_SAE)
33587 NODE_NAME_CASE(VRANGE)
33588 NODE_NAME_CASE(VRANGE_SAE)
33589 NODE_NAME_CASE(VRANGES)
33590 NODE_NAME_CASE(VRANGES_SAE)
33591 NODE_NAME_CASE(PMULUDQ)
33592 NODE_NAME_CASE(PMULDQ)
33593 NODE_NAME_CASE(PSADBW)
33594 NODE_NAME_CASE(DBPSADBW)
33595 NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
33596 NODE_NAME_CASE(VAARG_64)
33597 NODE_NAME_CASE(VAARG_X32)
33598 NODE_NAME_CASE(DYN_ALLOCA)
33599 NODE_NAME_CASE(MEMBARRIER)
33600 NODE_NAME_CASE(MFENCE)
33601 NODE_NAME_CASE(SEG_ALLOCA)
33602 NODE_NAME_CASE(PROBED_ALLOCA)
33603 NODE_NAME_CASE(RDRAND)
33604 NODE_NAME_CASE(RDSEED)
33605 NODE_NAME_CASE(RDPKRU)
33606 NODE_NAME_CASE(WRPKRU)
33607 NODE_NAME_CASE(VPMADDUBSW)
33608 NODE_NAME_CASE(VPMADDWD)
33609 NODE_NAME_CASE(VPSHA)
33610 NODE_NAME_CASE(VPSHL)
33611 NODE_NAME_CASE(VPCOM)
33612 NODE_NAME_CASE(VPCOMU)
33613 NODE_NAME_CASE(VPERMIL2)
33614 NODE_NAME_CASE(FMSUB)
33615 NODE_NAME_CASE(STRICT_FMSUB)
33616 NODE_NAME_CASE(FNMADD)
33617 NODE_NAME_CASE(STRICT_FNMADD)
33618 NODE_NAME_CASE(FNMSUB)
33619 NODE_NAME_CASE(STRICT_FNMSUB)
33620 NODE_NAME_CASE(FMADDSUB)
33621 NODE_NAME_CASE(FMSUBADD)
33622 NODE_NAME_CASE(FMADD_RND)
33623 NODE_NAME_CASE(FNMADD_RND)
33624 NODE_NAME_CASE(FMSUB_RND)
33625 NODE_NAME_CASE(FNMSUB_RND)
33626 NODE_NAME_CASE(FMADDSUB_RND)
33627 NODE_NAME_CASE(FMSUBADD_RND)
33628 NODE_NAME_CASE(VFMADDC)
33629 NODE_NAME_CASE(VFMADDC_RND)
33630 NODE_NAME_CASE(VFCMADDC)
33631 NODE_NAME_CASE(VFCMADDC_RND)
33632 NODE_NAME_CASE(VFMULC)
33633 NODE_NAME_CASE(VFMULC_RND)
33634 NODE_NAME_CASE(VFCMULC)
33635 NODE_NAME_CASE(VFCMULC_RND)
33636 NODE_NAME_CASE(VFMULCSH)
33637 NODE_NAME_CASE(VFMULCSH_RND)
33638 NODE_NAME_CASE(VFCMULCSH)
33639 NODE_NAME_CASE(VFCMULCSH_RND)
33640 NODE_NAME_CASE(VFMADDCSH)
33641 NODE_NAME_CASE(VFMADDCSH_RND)
33642 NODE_NAME_CASE(VFCMADDCSH)
33643 NODE_NAME_CASE(VFCMADDCSH_RND)
33644 NODE_NAME_CASE(VPMADD52H)
33645 NODE_NAME_CASE(VPMADD52L)
33646 NODE_NAME_CASE(VRNDSCALE)
33647 NODE_NAME_CASE(STRICT_VRNDSCALE)
33648 NODE_NAME_CASE(VRNDSCALE_SAE)
33649 NODE_NAME_CASE(VRNDSCALES)
33650 NODE_NAME_CASE(VRNDSCALES_SAE)
33651 NODE_NAME_CASE(VREDUCE)
33652 NODE_NAME_CASE(VREDUCE_SAE)
33653 NODE_NAME_CASE(VREDUCES)
33654 NODE_NAME_CASE(VREDUCES_SAE)
33655 NODE_NAME_CASE(VGETMANT)
33656 NODE_NAME_CASE(VGETMANT_SAE)
33657 NODE_NAME_CASE(VGETMANTS)
33658 NODE_NAME_CASE(VGETMANTS_SAE)
33659 NODE_NAME_CASE(PCMPESTR)
33660 NODE_NAME_CASE(PCMPISTR)
33661 NODE_NAME_CASE(XTEST)
33662 NODE_NAME_CASE(COMPRESS)
33663 NODE_NAME_CASE(EXPAND)
33664 NODE_NAME_CASE(SELECTS)
33665 NODE_NAME_CASE(ADDSUB)
33666 NODE_NAME_CASE(RCP14)
33667 NODE_NAME_CASE(RCP14S)
33668 NODE_NAME_CASE(RCP28)
33669 NODE_NAME_CASE(RCP28_SAE)
33670 NODE_NAME_CASE(RCP28S)
33671 NODE_NAME_CASE(RCP28S_SAE)
33672 NODE_NAME_CASE(EXP2)
33673 NODE_NAME_CASE(EXP2_SAE)
33674 NODE_NAME_CASE(RSQRT14)
33675 NODE_NAME_CASE(RSQRT14S)
33676 NODE_NAME_CASE(RSQRT28)
33677 NODE_NAME_CASE(RSQRT28_SAE)
33678 NODE_NAME_CASE(RSQRT28S)
33679 NODE_NAME_CASE(RSQRT28S_SAE)
33680 NODE_NAME_CASE(FADD_RND)
33681 NODE_NAME_CASE(FADDS)
33682 NODE_NAME_CASE(FADDS_RND)
33683 NODE_NAME_CASE(FSUB_RND)
33684 NODE_NAME_CASE(FSUBS)
33685 NODE_NAME_CASE(FSUBS_RND)
33686 NODE_NAME_CASE(FMUL_RND)
33687 NODE_NAME_CASE(FMULS)
33688 NODE_NAME_CASE(FMULS_RND)
33689 NODE_NAME_CASE(FDIV_RND)
33690 NODE_NAME_CASE(FDIVS)
33691 NODE_NAME_CASE(FDIVS_RND)
33692 NODE_NAME_CASE(FSQRT_RND)
33693 NODE_NAME_CASE(FSQRTS)
33694 NODE_NAME_CASE(FSQRTS_RND)
33695 NODE_NAME_CASE(FGETEXP)
33696 NODE_NAME_CASE(FGETEXP_SAE)
33697 NODE_NAME_CASE(FGETEXPS)
33698 NODE_NAME_CASE(FGETEXPS_SAE)
33699 NODE_NAME_CASE(SCALEF)
33700 NODE_NAME_CASE(SCALEF_RND)
33701 NODE_NAME_CASE(SCALEFS)
33702 NODE_NAME_CASE(SCALEFS_RND)
33703 NODE_NAME_CASE(MULHRS)
33704 NODE_NAME_CASE(SINT_TO_FP_RND)
33705 NODE_NAME_CASE(UINT_TO_FP_RND)
33706 NODE_NAME_CASE(CVTTP2SI)
33707 NODE_NAME_CASE(CVTTP2UI)
33708 NODE_NAME_CASE(STRICT_CVTTP2SI)
33709 NODE_NAME_CASE(STRICT_CVTTP2UI)
33710 NODE_NAME_CASE(MCVTTP2SI)
33711 NODE_NAME_CASE(MCVTTP2UI)
33712 NODE_NAME_CASE(CVTTP2SI_SAE)
33713 NODE_NAME_CASE(CVTTP2UI_SAE)
33714 NODE_NAME_CASE(CVTTS2SI)
33715 NODE_NAME_CASE(CVTTS2UI)
33716 NODE_NAME_CASE(CVTTS2SI_SAE)
33717 NODE_NAME_CASE(CVTTS2UI_SAE)
33718 NODE_NAME_CASE(CVTSI2P)
33719 NODE_NAME_CASE(CVTUI2P)
33720 NODE_NAME_CASE(STRICT_CVTSI2P)
33721 NODE_NAME_CASE(STRICT_CVTUI2P)
33722 NODE_NAME_CASE(MCVTSI2P)
33723 NODE_NAME_CASE(MCVTUI2P)
33724 NODE_NAME_CASE(VFPCLASS)
33725 NODE_NAME_CASE(VFPCLASSS)
33726 NODE_NAME_CASE(MULTISHIFT)
33727 NODE_NAME_CASE(SCALAR_SINT_TO_FP)
33728 NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
33729 NODE_NAME_CASE(SCALAR_UINT_TO_FP)
33730 NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
33731 NODE_NAME_CASE(CVTPS2PH)
33732 NODE_NAME_CASE(STRICT_CVTPS2PH)
33733 NODE_NAME_CASE(MCVTPS2PH)
33734 NODE_NAME_CASE(CVTPH2PS)
33735 NODE_NAME_CASE(STRICT_CVTPH2PS)
33736 NODE_NAME_CASE(CVTPH2PS_SAE)
33737 NODE_NAME_CASE(CVTP2SI)
33738 NODE_NAME_CASE(CVTP2UI)
33739 NODE_NAME_CASE(MCVTP2SI)
33740 NODE_NAME_CASE(MCVTP2UI)
33741 NODE_NAME_CASE(CVTP2SI_RND)
33742 NODE_NAME_CASE(CVTP2UI_RND)
33743 NODE_NAME_CASE(CVTS2SI)
33744 NODE_NAME_CASE(CVTS2UI)
33745 NODE_NAME_CASE(CVTS2SI_RND)
33746 NODE_NAME_CASE(CVTS2UI_RND)
33747 NODE_NAME_CASE(CVTNE2PS2BF16)
33748 NODE_NAME_CASE(CVTNEPS2BF16)
33749 NODE_NAME_CASE(MCVTNEPS2BF16)
33750 NODE_NAME_CASE(DPBF16PS)
33751 NODE_NAME_CASE(LWPINS)
33752 NODE_NAME_CASE(MGATHER)
33753 NODE_NAME_CASE(MSCATTER)
33754 NODE_NAME_CASE(VPDPBUSD)
33755 NODE_NAME_CASE(VPDPBUSDS)
33756 NODE_NAME_CASE(VPDPWSSD)
33757 NODE_NAME_CASE(VPDPWSSDS)
33758 NODE_NAME_CASE(VPSHUFBITQMB)
33759 NODE_NAME_CASE(GF2P8MULB)
33760 NODE_NAME_CASE(GF2P8AFFINEQB)
33761 NODE_NAME_CASE(GF2P8AFFINEINVQB)
33762 NODE_NAME_CASE(NT_CALL)
33763 NODE_NAME_CASE(NT_BRIND)
33764 NODE_NAME_CASE(UMWAIT)
33765 NODE_NAME_CASE(TPAUSE)
33766 NODE_NAME_CASE(ENQCMD)
33767 NODE_NAME_CASE(ENQCMDS)
33768 NODE_NAME_CASE(VP2INTERSECT)
33769 NODE_NAME_CASE(AESENC128KL)
33770 NODE_NAME_CASE(AESDEC128KL)
33771 NODE_NAME_CASE(AESENC256KL)
33772 NODE_NAME_CASE(AESDEC256KL)
33773 NODE_NAME_CASE(AESENCWIDE128KL)
33774 NODE_NAME_CASE(AESDECWIDE128KL)
33775 NODE_NAME_CASE(AESENCWIDE256KL)
33776 NODE_NAME_CASE(AESDECWIDE256KL)
  NODE_NAME_CASE(TESTUI)
  }
  return nullptr;
#undef NODE_NAME_CASE
}
33783 /// Return true if the addressing mode represented by AM is legal for this
33784 /// target, for a load/store of the specified type.
33785 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
33786 const AddrMode &AM, Type *Ty,
33788 Instruction *I) const {
33789 // X86 supports extremely general addressing modes.
33790 CodeModel::Model M = getTargetMachine().getCodeModel();
33792 // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
33799 // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;
33803 // If BaseGV requires a register for the PIC base, we cannot also have a
33804 // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;
  }

  // If lower 4G is not available, then we must use rip-relative addressing.
33809 if ((M != CodeModel::Small || isPositionIndependent()) &&
      Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
    return false;
  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg. Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default: // Other stuff never works.
    return false;
  }

  return true;
}
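// Illustrative note: scales 3, 5 and 9 are only reachable as base + index*2/4/8
// with the same register in both slots, e.g. "lea (%rax,%rax,2), %rcx" computes
// 3*%rax, which is why they are rejected above once a base register is present.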
33837 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
33838 unsigned Bits = Ty->getScalarSizeInBits();
33840 // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
33841 // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
33842 if (Subtarget.hasXOP() &&
      (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
    return false;
33846 // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
33847 // shifts just as cheap as scalar ones.
  if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
    return false;
33851 // AVX512BW has shifts such as vpsllvw.
  if (Subtarget.hasBWI() && Bits == 16)
    return false;
33855 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
  // fully general vector.
  return true;
}
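// Illustrative note: a uniform (splat) but non-constant amount, e.g.
//   %amt = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
//   %r   = shl <4 x i32> %x, %amt
// selects to PSLLD with the amount held in an XMM register on any SSE2 target,
// whereas a truly per-element amount needs AVX2's VPSLLVD or a scalarized
// expansion.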
bool X86TargetLowering::isBinOp(unsigned Opcode) const {
  switch (Opcode) {
  // These are non-commutative binops.
33863 // TODO: Add more X86ISD opcodes once we have test coverage.
33864 case X86ISD::ANDNP:
  case X86ISD::PCMPGT:
  case X86ISD::FMAX:
  case X86ISD::FMIN:
  case X86ISD::FANDN:
33869 case X86ISD::VPSHA:
33870 case X86ISD::VPSHL:
33871 case X86ISD::VSHLV:
33872 case X86ISD::VSRLV:
  case X86ISD::VSRAV:
    return true;
  }

  return TargetLoweringBase::isBinOp(Opcode);
}
bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
  switch (Opcode) {
  // TODO: Add more X86ISD opcodes once we have test coverage.
33883 case X86ISD::PCMPEQ:
33884 case X86ISD::PMULDQ:
33885 case X86ISD::PMULUDQ:
33886 case X86ISD::FMAXC:
  case X86ISD::FMINC:
    return true;
  }

  return TargetLoweringBase::isCommutativeBinOp(Opcode);
}
33897 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
33900 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
33901 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}
33905 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;
33912 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
33914 // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}
33919 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}
33923 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
33924 // Can also use sub to handle negated immediates.
  return isInt<32>(Imm);
}
33928 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}
33932 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
    return false;
33935 unsigned NumBits1 = VT1.getSizeInBits();
33936 unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}
33940 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
33941 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
}
33945 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
33946 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
}
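// Illustrative note: "movl %ecx, %eax" already clears bits 63:32 of %rax, so a
// (zext i32 -> i64) of a value that lives in a 32-bit register costs nothing.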
33950 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
33951 EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;
33958 if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;
  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // X86 has 8, 16, and 32-bit zero-extending loads.
    return true;
  }

  return false;
}
33974 bool X86TargetLowering::shouldSinkOperands(Instruction *I,
33975 SmallVectorImpl<Use *> &Ops) const {
33976 using namespace llvm::PatternMatch;
  FixedVectorType *VTy = dyn_cast<FixedVectorType>(I->getType());
  if (!VTy)
    return false;
33982 if (I->getOpcode() == Instruction::Mul &&
33983 VTy->getElementType()->isIntegerTy(64)) {
33984 for (auto &Op : I->operands()) {
33985 // Make sure we are not already sinking this operand
      if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
        continue;
33989 // Look for PMULDQ pattern where the input is a sext_inreg from vXi32 or
33990 // the PMULUDQ pattern where the input is a zext_inreg from vXi32.
33991 if (Subtarget.hasSSE41() &&
33992 match(Op.get(), m_AShr(m_Shl(m_Value(), m_SpecificInt(32)),
33993 m_SpecificInt(32)))) {
33994 Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
33995 Ops.push_back(&Op);
      } else if (Subtarget.hasSSE2() &&
                 match(Op.get(),
                       m_And(m_Value(), m_SpecificInt(UINT64_C(0xffffffff))))) {
        Ops.push_back(&Op);
      }
    }

    return !Ops.empty();
  }
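  // Illustrative note (hypothetical IR): for
  //   %m = mul <2 x i64> (and %x, 4294967295), (and %y, 4294967295)
  // sinking the 'and's next to the mul lets ISel match a single PMULUDQ in the
  // using block instead of a full 64-bit multiply sequence.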
34006 // A uniform shift amount in a vector shift or funnel shift may be much
34007 // cheaper than a generic variable vector shift, so make that pattern visible
34008 // to SDAG by sinking the shuffle instruction next to the shift.
34009 int ShiftAmountOpNum = -1;
  if (I->isShift())
    ShiftAmountOpNum = 1;
34012 else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
34013 if (II->getIntrinsicID() == Intrinsic::fshl ||
34014 II->getIntrinsicID() == Intrinsic::fshr)
      ShiftAmountOpNum = 2;
  }

  if (ShiftAmountOpNum == -1)
    return false;
34021 auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
34022 if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
34023 isVectorShiftByScalarCheap(I->getType())) {
    Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
    return true;
  }

  return false;
}
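// Illustrative note: for a funnel shift such as
//   call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %splat)
// sinking the splat shuffle next to the intrinsic lets ISel see the uniform
// amount and pick the cheap scalar-amount form instead of a variable shift.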
34031 bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
  if (!Subtarget.is64Bit())
    return false;
  return TargetLowering::shouldConvertPhiType(From, To);
}
34037 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
    return false;
34041 EVT SrcVT = ExtVal.getOperand(0).getValueType();
34043 // There is no extending load for vXi1.
  if (SrcVT.getScalarType() == MVT::i1)
    return false;

  return true;
}
bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  if (!Subtarget.hasAnyFMA())
    return false;
34055 VT = VT.getScalarType();
  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasFP16();
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}
34073 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
34074 // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
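// Illustrative note: "addw $0x1234, %ax" needs a 0x66 operand-size prefix and,
// with the 16-bit immediate, forms a length-changing prefix that some decoders
// penalize, so keeping the i32 form is usually the better choice.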
bool X86TargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
                                                             EVT VT) const {
  // TODO: This is too general. There are cases where pre-AVX512 codegen would
34081 // benefit. The transform may also be profitable for scalar code.
  if (!Subtarget.hasAVX512())
    return false;
  if (!Subtarget.hasVLX() && !VT.is512BitVector())
    return false;
  if (!VT.isVector())
    return false;

  return true;
}
34092 /// Targets can use this to indicate that they only support *some*
34093 /// VECTOR_SHUFFLE operations, those with specific masks.
34094 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
34095 /// are assumed to be legal.
34096 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
  if (!VT.isSimple())
    return false;
34100 // Not for i1 vectors
  if (VT.getSimpleVT().getScalarType() == MVT::i1)
    return false;
34104 // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSimpleVT().getSizeInBits() == 64)
    return false;
34108 // We only care that the types being shuffled are legal. The lowering can
34109 // handle any possible shuffle mask that results.
  return isTypeLegal(VT.getSimpleVT());
}
bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
                                               EVT VT) const {
  // Don't convert an 'and' into a shuffle that we don't directly support.
34116 // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
34117 if (!Subtarget.hasAVX2())
    if (VT == MVT::v32i8 || VT == MVT::v16i16)
      return false;
34121 // Just delegate to the generic legality, clear masks aren't special.
34122 return isShuffleMaskLegal(Mask, VT);
34125 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
34126 // If the subtarget is using thunks, we need to not generate jump tables.
  if (Subtarget.useIndirectThunkBranches())
    return false;
34130 // Otherwise, fallback on the generic logic.
  return TargetLowering::areJTsAllowed(Fn);
}
34134 MVT X86TargetLowering::getPreferredSwitchConditionType(LLVMContext &Context,
34135 EVT ConditionVT) const {
34136 // Avoid 8 and 16 bit types because they increase the chance for unnecessary
34137 // zero-extensions.
  if (ConditionVT.getSizeInBits() < 32)
    return MVT::i32;
  return TargetLoweringBase::getPreferredSwitchConditionType(Context,
                                                             ConditionVT);
}
34144 //===----------------------------------------------------------------------===//
34145 // X86 Scheduler Hooks
34146 //===----------------------------------------------------------------------===//
34148 // Returns true if EFLAG is consumed after this iterator in the rest of the
34149 // basic block or any successors of the basic block.
34150 static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
34151 MachineBasicBlock *BB) {
34152 // Scan forward through BB for a use/def of EFLAGS.
34153 for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
    if (mi.readsRegister(X86::EFLAGS))
      return true;
    // If we found a def, we can stop searching.
    if (mi.definesRegister(X86::EFLAGS))
      return false;
  }

  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
  for (MachineBasicBlock *Succ : BB->successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}
34170 /// Utility function to emit xbegin specifying the start of an RTM region.
34171 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
34172 const TargetInstrInfo *TII) {
34173 const DebugLoc &DL = MI.getDebugLoc();
34175 const BasicBlock *BB = MBB->getBasicBlock();
34176 MachineFunction::iterator I = ++MBB->getIterator();
  // For the v = xbegin(), we generate
  //
  // thisMBB:
  //  xbegin sinkMBB
  //
  // mainMBB:
  //  s0 = -1
  //
  // fallMBB:
  //  eax = # XABORT_DEF
  //  s1 = eax
  //
  // sinkMBB:
  //  v = phi(s0/mainBB, s1/fallBB)
34193 MachineBasicBlock *thisMBB = MBB;
34194 MachineFunction *MF = MBB->getParent();
34195 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
34196 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
34197 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
34198 MF->insert(I, mainMBB);
34199 MF->insert(I, fallMBB);
34200 MF->insert(I, sinkMBB);
34202 if (isEFLAGSLiveAfter(MI, MBB)) {
34203 mainMBB->addLiveIn(X86::EFLAGS);
34204 fallMBB->addLiveIn(X86::EFLAGS);
34205 sinkMBB->addLiveIn(X86::EFLAGS);
34208 // Transfer the remainder of BB and its successor edges to sinkMBB.
34209 sinkMBB->splice(sinkMBB->begin(), MBB,
34210 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
34211 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
34213 MachineRegisterInfo &MRI = MF->getRegInfo();
34214 Register DstReg = MI.getOperand(0).getReg();
34215 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
34216 Register mainDstReg = MRI.createVirtualRegister(RC);
34217 Register fallDstReg = MRI.createVirtualRegister(RC);
  // thisMBB:
  //  xbegin fallMBB
  //  # fallthrough to mainMBB
34222 // # abortion to fallMBB
34223 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
34224 thisMBB->addSuccessor(mainMBB);
34225 thisMBB->addSuccessor(fallMBB);
  // mainMBB:
  //  mainDstReg := -1
34229 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
34230 BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
34231 mainMBB->addSuccessor(sinkMBB);
  // fallMBB:
  //  ; pseudo instruction to model hardware's definition from XABORT
34235 // EAX := XABORT_DEF
34236 // fallDstReg := EAX
34237 BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
34238 BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
34240 fallMBB->addSuccessor(sinkMBB);
  // sinkMBB:
  //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
34244 BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
34245 .addReg(mainDstReg).addMBB(mainMBB)
34246 .addReg(fallDstReg).addMBB(fallMBB);
  MI.eraseFromParent();
  return sinkMBB;
}
34252 MachineBasicBlock *
34253 X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
34254 MachineBasicBlock *MBB) const {
34255 // Emit va_arg instruction on X86-64.
34257 // Operands to this pseudo-instruction:
34258 // 0 ) Output : destination address (reg)
34259 // 1-5) Input : va_list address (addr, i64mem)
34260 // 6 ) ArgSize : Size (in bytes) of vararg type
34261 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
34262 // 8 ) Align : Alignment of type
34263 // 9 ) EFLAGS (implicit-def)
34265 assert(MI.getNumOperands() == 10 && "VAARG should have 10 operands!");
34266 static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
34268 Register DestReg = MI.getOperand(0).getReg();
34269 MachineOperand &Base = MI.getOperand(1);
34270 MachineOperand &Scale = MI.getOperand(2);
34271 MachineOperand &Index = MI.getOperand(3);
34272 MachineOperand &Disp = MI.getOperand(4);
34273 MachineOperand &Segment = MI.getOperand(5);
34274 unsigned ArgSize = MI.getOperand(6).getImm();
34275 unsigned ArgMode = MI.getOperand(7).getImm();
34276 Align Alignment = Align(MI.getOperand(8).getImm());
34278 MachineFunction *MF = MBB->getParent();
34280 // Memory Reference
34281 assert(MI.hasOneMemOperand() && "Expected VAARG to have one memoperand");
34283 MachineMemOperand *OldMMO = MI.memoperands().front();
34285 // Clone the MMO into two separate MMOs for loading and storing
34286 MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
34287 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
34288 MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
34289 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
34291 // Machine Information
34292 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34293 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
34294 const TargetRegisterClass *AddrRegClass =
34295 getRegClassFor(getPointerTy(MBB->getParent()->getDataLayout()));
34296 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
34297 const DebugLoc &DL = MI.getDebugLoc();
  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
34305 // sizeof(va_list) = 24
34306 // alignment(va_list) = 8
34308 unsigned TotalNumIntRegs = 6;
34309 unsigned TotalNumXMMRegs = 8;
34310 bool UseGPOffset = (ArgMode == 1);
34311 bool UseFPOffset = (ArgMode == 2);
34312 unsigned MaxOffset = TotalNumIntRegs * 8 +
34313 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
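  // Illustrative numbers: with 6 integer registers and 8 XMM registers,
  // MaxOffset is 48 when pulling from gp_offset and 48 + 128 = 176 when
  // pulling from fp_offset.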
34315 /* Align ArgSize to a multiple of 8 */
34316 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
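  // e.g. ArgSize = 12 gives (12 + 7) & ~7 = 16.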
34317 bool NeedsAlign = (Alignment > 8);
34319 MachineBasicBlock *thisMBB = MBB;
34320 MachineBasicBlock *overflowMBB;
34321 MachineBasicBlock *offsetMBB;
34322 MachineBasicBlock *endMBB;
34324 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
34325 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
34326 unsigned OffsetReg = 0;
34328 if (!UseGPOffset && !UseFPOffset) {
34329 // If we only pull from the overflow region, we don't create a branch.
34330 // We don't need to alter control flow.
34331 OffsetDestReg = 0; // unused
34332 OverflowDestReg = DestReg;
34334 offsetMBB = nullptr;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
34338 // First emit code to check if gp_offset (or fp_offset) is below the bound.
34339 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
34340 // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //       thisMBB
    //       /     \
    // offsetMBB  overflowMBB
    //       \     /
    //        endMBB
    //
34350 // Registers for the PHI in endMBB
34351 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
34352 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
34354 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
34355 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34356 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34357 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34359 MachineFunction::iterator MBBIter = ++MBB->getIterator();
34361 // Insert the new basic blocks
34362 MF->insert(MBBIter, offsetMBB);
34363 MF->insert(MBBIter, overflowMBB);
34364 MF->insert(MBBIter, endMBB);
34366 // Transfer the remainder of MBB and its successor edges to endMBB.
34367 endMBB->splice(endMBB->begin(), thisMBB,
34368 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
34369 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
34371 // Make offsetMBB and overflowMBB successors of thisMBB
34372 thisMBB->addSuccessor(offsetMBB);
34373 thisMBB->addSuccessor(overflowMBB);
34375 // endMBB is a successor of both offsetMBB and overflowMBB
34376 offsetMBB->addSuccessor(endMBB);
34377 overflowMBB->addSuccessor(endMBB);
34379 // Load the offset value into a register
34380 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
        .add(Segment)
        .setMemRefs(LoadOnlyMMO);
34389 // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
        .addReg(OffsetReg)
        .addImm(MaxOffset + 8 - ArgSizeA8);
34394 // Branch to "overflowMBB" if offset >= max
34395 // Fall through to "offsetMBB" otherwise
34396 BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
        .addMBB(overflowMBB).addImm(X86::COND_AE);
  }

  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);
34404 // Read the reg_save_area address.
    Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(
        offsetMBB, DL,
        TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
        RegSaveReg)
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, Subtarget.isTarget64BitLP64() ? 16 : 12)
        .add(Segment)
        .setMemRefs(LoadOnlyMMO);
34417 if (Subtarget.isTarget64BitLP64()) {
34418 // Zero-extend the offset
34419 Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
      BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
          .addImm(0)
          .addReg(OffsetReg)
          .addImm(X86::sub_32bit);
34425 // Add the offset to the reg_save_area to get the final address.
34426 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
34427 .addReg(OffsetReg64)
34428 .addReg(RegSaveReg);
34430 // Add the offset to the reg_save_area to get the final address.
      BuildMI(offsetMBB, DL, TII->get(X86::ADD32rr), OffsetDestReg)
          .addReg(OffsetReg)
          .addReg(RegSaveReg);
    }
34436 // Compute the offset for the next argument
34437 Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
        .addReg(OffsetReg)
        .addImm(UseFPOffset ? 16 : 8);
34442 // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
        .add(Segment)
        .addReg(NextOffsetReg)
        .setMemRefs(StoreOnlyMMO);
    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
        .addMBB(endMBB);
  }
34458 // Emit code to use overflow area
34461 // Load the overflow_area address into a register.
34462 Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL,
          TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
          OverflowAddrReg)
      .add(Base)
      .add(Scale)
      .add(Index)
      .addDisp(Disp, 8)
      .add(Segment)
      .setMemRefs(LoadOnlyMMO);
34473 // If we need to align it, do so. Otherwise, just copy the address
34474 // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    Register TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(
        overflowMBB, DL,
        TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
        TmpReg)
        .addReg(OverflowAddrReg)
        .addImm(Alignment.value() - 1);

    BuildMI(
        overflowMBB, DL,
        TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
        OverflowDestReg)
        .addReg(TmpReg)
        .addImm(~(uint64_t)(Alignment.value() - 1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
        .addReg(OverflowAddrReg);
  }
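  // Worked example: addr = 0x1004 with align = 16 gives
  // (0x1004 + 15) & ~15 = 0x1010.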
34498 // Compute the next overflow address after this argument.
34499 // (the overflow address should be kept 8-byte aligned)
  Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(
      overflowMBB, DL,
      TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
      NextAddrReg)
      .addReg(OverflowDestReg)
      .addImm(ArgSizeA8);
34508 // Store the new overflow address.
  BuildMI(overflowMBB, DL,
          TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
      .add(Base)
      .add(Scale)
      .add(Index)
      .addDisp(Disp, 8)
      .add(Segment)
      .addReg(NextAddrReg)
      .setMemRefs(StoreOnlyMMO);
  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
        .addReg(OffsetDestReg).addMBB(offsetMBB)
        .addReg(OverflowDestReg).addMBB(overflowMBB);
  }
34527 // Erase the pseudo instruction
  MI.eraseFromParent();

  return endMBB;
}
34533 // The EFLAGS operand of SelectItr might be missing a kill marker
34534 // because there were multiple uses of EFLAGS, and ISel didn't know
34535 // which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
34538 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
34539 MachineBasicBlock* BB,
34540 const TargetRegisterInfo* TRI) {
  if (isEFLAGSLiveAfter(SelectItr, BB))
    return false;
34544 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
34545 // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}
34550 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
34551 // together with other CMOV pseudo-opcodes into a single basic-block with
34552 // conditional jump around it.
34553 static bool isCMOVPseudo(MachineInstr &MI) {
34554 switch (MI.getOpcode()) {
34555 case X86::CMOV_FR16:
34556 case X86::CMOV_FR16X:
34557 case X86::CMOV_FR32:
34558 case X86::CMOV_FR32X:
34559 case X86::CMOV_FR64:
34560 case X86::CMOV_FR64X:
34561 case X86::CMOV_GR8:
34562 case X86::CMOV_GR16:
34563 case X86::CMOV_GR32:
34564 case X86::CMOV_RFP32:
34565 case X86::CMOV_RFP64:
34566 case X86::CMOV_RFP80:
34567 case X86::CMOV_VR64:
34568 case X86::CMOV_VR128:
34569 case X86::CMOV_VR128X:
34570 case X86::CMOV_VR256:
34571 case X86::CMOV_VR256X:
34572 case X86::CMOV_VR512:
34573 case X86::CMOV_VK1:
34574 case X86::CMOV_VK2:
34575 case X86::CMOV_VK4:
34576 case X86::CMOV_VK8:
34577 case X86::CMOV_VK16:
34578 case X86::CMOV_VK32:
  case X86::CMOV_VK64:
    return true;

  default:
    return false;
  }
}
34587 // Helper function, which inserts PHI functions into SinkMBB:
34588 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
34589 // where %FalseValue(i) and %TrueValue(i) are taken from the consequent CMOVs
34590 // in [MIItBegin, MIItEnd) range. It returns the last MachineInstrBuilder for
34591 // the last PHI function inserted.
34592 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
34593 MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
34594 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
34595 MachineBasicBlock *SinkMBB) {
34596 MachineFunction *MF = TrueMBB->getParent();
34597 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
34598 const DebugLoc &DL = MIItBegin->getDebugLoc();
34600 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
34601 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34603 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
34605 // As we are creating the PHIs, we have to be careful if there is more than
34606 // one. Later CMOVs may reference the results of earlier CMOVs, but later
34607 // PHIs have to reference the individual true/false inputs from earlier PHIs.
34608 // That also means that PHI construction must work forward from earlier to
34609 // later, and that the code must maintain a mapping from earlier PHI's
34610 // destination registers, and the registers that went into the PHI.
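  // Illustrative note: given
  //   %a = CMOV %t, %f, cc
  //   %b = CMOV %a, %g, cc
  // the PHI emitted for %b must take %t (not %a) on the true edge, so the
  // table records %a -> (%t, %f) once the PHI for %a has been created.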
34611 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
34612 MachineInstrBuilder MIB;
34614 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
34615 Register DestReg = MIIt->getOperand(0).getReg();
34616 Register Op1Reg = MIIt->getOperand(1).getReg();
34617 Register Op2Reg = MIIt->getOperand(2).getReg();
34619 // If this CMOV we are generating is the opposite condition from
34620 // the jump we generated, then we have to swap the operands for the
34621 // PHI that is going to be generated.
34622 if (MIIt->getOperand(3).getImm() == OppCC)
34623 std::swap(Op1Reg, Op2Reg);
34625 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
34626 Op1Reg = RegRewriteTable[Op1Reg].first;
34628 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
34629 Op2Reg = RegRewriteTable[Op2Reg].second;
    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
              .addReg(Op1Reg)
              .addMBB(FalseMBB)
              .addReg(Op2Reg)
              .addMBB(TrueMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  return MIB;
}
34644 // Lower cascaded selects in form of (SecondCmov (FirstCMOV F, T, cc1), T, cc2).
34645 MachineBasicBlock *
34646 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
34647 MachineInstr &SecondCascadedCMOV,
34648 MachineBasicBlock *ThisMBB) const {
34649 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34650 const DebugLoc &DL = FirstCMOV.getDebugLoc();
34652 // We lower cascaded CMOVs such as
34654 // (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
34656 // to two successive branches.
34658 // Without this, we would add a PHI between the two jumps, which ends up
34659 // creating a few copies all around. For instance, for
34661 // (sitofp (zext (fcmp une)))
34663 // we would generate:
  //     ucomiss %xmm1, %xmm0
  //     movss <1.0f>, %xmm0
  //     movaps %xmm0, %xmm1
  //     jne .LBB5_2
  //     xorps %xmm1, %xmm1
  // .LBB5_2:
  //     jp .LBB5_4
  //     movaps %xmm1, %xmm0
  // .LBB5_4:
  //     retq
  //
  // because this custom-inserter would have generated:
  //
  //   A
  //   | \
  //   |  B
  //   | /
  //   C
  //   | \
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // B: empty
  // C: Z = PHI [X, A], [Y, B]
  // D: empty
  // E: PHI [X, C], [Z, D]
  //
  // If we lower both CMOVs in a single step, we can instead generate:
  //
  //   A
  //   | \
  //   |  C
  //   | /|
  //   |/ |
  //   |  |
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // D: empty
  // E: PHI [X, A], [X, C], [Y, D]
  //
  // Which, in our sitofp/fcmp example, gives us something like:
  //
  //     ucomiss %xmm1, %xmm0
  //     movss <1.0f>, %xmm0
  //     jne .LBB5_4
  //     xorps %xmm0, %xmm0
  // .LBB5_4:
  //     retq
34721 // We lower cascaded CMOV into two successive branches to the same block.
34722 // EFLAGS is used by both, so mark it as live in the second.
34723 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
34724 MachineFunction *F = ThisMBB->getParent();
34725 MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
34726 MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
34727 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
34729 MachineFunction::iterator It = ++ThisMBB->getIterator();
34730 F->insert(It, FirstInsertedMBB);
34731 F->insert(It, SecondInsertedMBB);
34732 F->insert(It, SinkMBB);
34734 // For a cascaded CMOV, we lower it to two successive branches to
34735 // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
34736 // the FirstInsertedMBB.
34737 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
34739 // If the EFLAGS register isn't dead in the terminator, then claim that it's
34740 // live into the sink and copy blocks.
34741 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
34742 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
34743 !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
34744 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
34745 SinkMBB->addLiveIn(X86::EFLAGS);
34748 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
34749 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
34750 std::next(MachineBasicBlock::iterator(FirstCMOV)),
34752 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
34754 // Fallthrough block for ThisMBB.
34755 ThisMBB->addSuccessor(FirstInsertedMBB);
34756 // The true block target of the first branch is always SinkMBB.
34757 ThisMBB->addSuccessor(SinkMBB);
34758 // Fallthrough block for FirstInsertedMBB.
34759 FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
34760 // The true block for the branch of FirstInsertedMBB.
34761 FirstInsertedMBB->addSuccessor(SinkMBB);
34762 // This is fallthrough.
34763 SecondInsertedMBB->addSuccessor(SinkMBB);
34765 // Create the conditional branch instructions.
34766 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
34767 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
34769 X86::CondCode SecondCC =
34770 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
34771 BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
34774 // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
34775 Register DestReg = SecondCascadedCMOV.getOperand(0).getReg();
34776 Register Op1Reg = FirstCMOV.getOperand(1).getReg();
34777 Register Op2Reg = FirstCMOV.getOperand(2).getReg();
34778 MachineInstrBuilder MIB =
      BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
          .addReg(Op1Reg)
          .addMBB(SecondInsertedMBB)
          .addReg(Op2Reg)
          .addMBB(ThisMBB);
34785 // The second SecondInsertedMBB provides the same incoming value as the
34786 // FirstInsertedMBB (the True operand of the SELECT_CC/CMOV nodes).
34787 MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
34789 // Now remove the CMOVs.
34790 FirstCMOV.eraseFromParent();
  SecondCascadedCMOV.eraseFromParent();

  return SinkMBB;
}
34796 MachineBasicBlock *
34797 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
34798 MachineBasicBlock *ThisMBB) const {
34799 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34800 const DebugLoc &DL = MI.getDebugLoc();
34802 // To "insert" a SELECT_CC instruction, we actually have to insert the
34803 // diamond control-flow pattern. The incoming instruction knows the
34804 // destination vreg to set, the condition code register to branch on, the
34805 // true/false values to select between and a branch opcode to use.
  //  ThisMBB:
  //   cmpTY ccX, r1, r2
  //   bCC SinkMBB
  //   fallthrough --> FalseMBB
34814 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
34815 // as described above, by inserting a BB, and then making a PHI at the join
34816 // point to select the true and false operands of the CMOV in the PHI.
  // The code also handles two different cases of multiple CMOV opcodes
  // in a row.
  //
  // Case 1:
  // In this case, there are multiple CMOVs in a row, all which are based on
34823 // the same condition setting (or the exact opposite condition setting).
34824 // In this case we can lower all the CMOVs using a single inserted BB, and
34825 // then make a number of PHIs at the join point to model the CMOVs. The only
34826 // trickiness here, is that in a case like:
34828 // t2 = CMOV cond1 t1, f1
34829 // t3 = CMOV cond1 t2, f2
34831 // when rewriting this into PHIs, we have to perform some renaming on the
34832 // temps since you cannot have a PHI operand refer to a PHI result earlier
34833 // in the same block. The "simple" but wrong lowering would be:
34835 // t2 = PHI t1(BB1), f1(BB2)
34836 // t3 = PHI t2(BB1), f2(BB2)
34838 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
34839 // renaming is to note that on the path through BB1, t2 is really just a
34840 // copy of t1, and do that renaming, properly generating:
34842 // t2 = PHI t1(BB1), f1(BB2)
34843 // t3 = PHI t1(BB1), f2(BB2)
  // Case 2:
  // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
34847 // function - EmitLoweredCascadedSelect.
34849 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
34850 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34851 MachineInstr *LastCMOV = &MI;
34852 MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
34854 // Check for case 1, where there are multiple CMOVs with the same condition
34855 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
34856 // number of jumps the most.
34858 if (isCMOVPseudo(MI)) {
34859 // See if we have a string of CMOVS with the same condition. Skip over
34860 // intervening debug insts.
34861 while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
34862 (NextMIIt->getOperand(3).getImm() == CC ||
34863 NextMIIt->getOperand(3).getImm() == OppCC)) {
34864 LastCMOV = &*NextMIIt;
      NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
    }
  }
34869 // This checks for case 2, but only do this if we didn't already find
34870 // case 1, as indicated by LastCMOV == MI.
34871 if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
34872 NextMIIt->getOpcode() == MI.getOpcode() &&
34873 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
34874 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
34875 NextMIIt->getOperand(1).isKill()) {
    return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
  }
34879 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
34880 MachineFunction *F = ThisMBB->getParent();
34881 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
34882 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
34884 MachineFunction::iterator It = ++ThisMBB->getIterator();
34885 F->insert(It, FalseMBB);
34886 F->insert(It, SinkMBB);
34888 // If the EFLAGS register isn't dead in the terminator, then claim that it's
34889 // live into the sink and copy blocks.
34890 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
34891 if (!LastCMOV->killsRegister(X86::EFLAGS) &&
34892 !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
34893 FalseMBB->addLiveIn(X86::EFLAGS);
34894 SinkMBB->addLiveIn(X86::EFLAGS);
34897 // Transfer any debug instructions inside the CMOV sequence to the sunk block.
34898 auto DbgRange = llvm::make_range(MachineBasicBlock::iterator(MI),
34899 MachineBasicBlock::iterator(LastCMOV));
34900 for (MachineInstr &MI : llvm::make_early_inc_range(DbgRange))
34901 if (MI.isDebugInstr())
34902 SinkMBB->push_back(MI.removeFromParent());
34904 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
34905 SinkMBB->splice(SinkMBB->end(), ThisMBB,
34906 std::next(MachineBasicBlock::iterator(LastCMOV)),
34908 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
34910 // Fallthrough block for ThisMBB.
34911 ThisMBB->addSuccessor(FalseMBB);
34912 // The true block target of the first (or only) branch is always a SinkMBB.
34913 ThisMBB->addSuccessor(SinkMBB);
34914 // Fallthrough block for FalseMBB.
34915 FalseMBB->addSuccessor(SinkMBB);
34917 // Create the conditional branch instruction.
34918 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
34921 // %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
34923 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
34924 MachineBasicBlock::iterator MIItEnd =
34925 std::next(MachineBasicBlock::iterator(LastCMOV));
34926 createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
34928 // Now remove the CMOV(s).
  ThisMBB->erase(MIItBegin, MIItEnd);

  return SinkMBB;
}
static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}
34946 MachineBasicBlock *
34947 X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
34948 MachineBasicBlock *MBB) const {
34949 MachineFunction *MF = MBB->getParent();
34950 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34951 const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
34952 const DebugLoc &DL = MI.getDebugLoc();
34953 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
34955 const unsigned ProbeSize = getStackProbeSize(*MF);
34957 MachineRegisterInfo &MRI = MF->getRegInfo();
34958 MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34959 MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34960 MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34962 MachineFunction::iterator MBBIter = ++MBB->getIterator();
34963 MF->insert(MBBIter, testMBB);
34964 MF->insert(MBBIter, blockMBB);
34965 MF->insert(MBBIter, tailMBB);
34967 Register sizeVReg = MI.getOperand(1).getReg();
34969 Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
34971 Register TmpStackPtr = MRI.createVirtualRegister(
34972 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
34973 Register FinalStackPtr = MRI.createVirtualRegister(
34974 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
34976 BuildMI(*MBB, {MI}, DL, TII->get(TargetOpcode::COPY), TmpStackPtr)
34977 .addReg(physSPReg);
34979 const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
34980 BuildMI(*MBB, {MI}, DL, TII->get(Opc), FinalStackPtr)
      .addReg(TmpStackPtr)
      .addReg(sizeVReg);
34987 BuildMI(testMBB, DL,
34988 TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
34989 .addReg(FinalStackPtr)
34990 .addReg(physSPReg);
  BuildMI(testMBB, DL, TII->get(X86::JCC_1))
      .addMBB(tailMBB)
      .addImm(X86::COND_GE);
34995 testMBB->addSuccessor(blockMBB);
34996 testMBB->addSuccessor(tailMBB);
34998 // Touch the block then extend it. This is done on the opposite side of
34999 // static probe where we allocate then touch, to avoid the need of probing the
35000 // tail of the static alloca. Possible scenarios are:
35002 // + ---- <- ------------ <- ------------- <- ------------ +
35004 // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
35006 // + <- ----------- <- ------------ <- ----------- <- ------------ +
35008 // The property we want to enforce is to never have more than [page alloc] between two probes.
35010 const unsigned XORMIOpc =
35011 TFI.Uses64BitFramePtr ? X86::XOR64mi8 : X86::XOR32mi8;
  addRegOffset(BuildMI(blockMBB, DL, TII->get(XORMIOpc)), physSPReg, false, 0)
      .addImm(0);
35015 BuildMI(blockMBB, DL,
          TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr, ProbeSize)), physSPReg)
      .addReg(physSPReg)
      .addImm(ProbeSize);
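  // Each round trip through blockMBB therefore touches the page at the current
  // stack pointer (the XOR above) and then moves it down by ProbeSize
  // (typically the 4 KiB page size), so no probe is ever more than one page
  // away from the previous one.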
35021 BuildMI(blockMBB, DL, TII->get(X86::JMP_1)).addMBB(testMBB);
35022 blockMBB->addSuccessor(testMBB);
35024 // Replace original instruction by the expected stack ptr
35025 BuildMI(tailMBB, DL, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
35026 .addReg(FinalStackPtr);
35028 tailMBB->splice(tailMBB->end(), MBB,
35029 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
35030 tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
35031 MBB->addSuccessor(testMBB);
35033 // Delete the original pseudo instruction.
  MI.eraseFromParent();

  return tailMBB;
}
35040 MachineBasicBlock *
35041 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
35042 MachineBasicBlock *BB) const {
35043 MachineFunction *MF = BB->getParent();
35044 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35045 const DebugLoc &DL = MI.getDebugLoc();
35046 const BasicBlock *LLVM_BB = BB->getBasicBlock();
35048 assert(MF->shouldSplitStack());
35050 const bool Is64Bit = Subtarget.is64Bit();
35051 const bool IsLP64 = Subtarget.isTarget64BitLP64();
35053 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
35054 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
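  // These are the segment-relative locations where the split-stack runtime
  // (libgcc/gold -fsplit-stack) keeps the current stacklet boundary: %fs:0x70
  // on LP64, %fs:0x40 on x32 and %gs:0x30 on 32-bit targets (an assumption
  // about the usual glibc TCB layout).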
  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
35072 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35073 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35074 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35076 MachineRegisterInfo &MRI = MF->getRegInfo();
35077 const TargetRegisterClass *AddrRegClass =
35078 getRegClassFor(getPointerTy(MF->getDataLayout()));
35080 Register mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
35081 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
35082 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
35083 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
           sizeVReg = MI.getOperand(1).getReg(),
           physSPReg =
               IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
35088 MachineFunction::iterator MBBIter = ++BB->getIterator();
35090 MF->insert(MBBIter, bumpMBB);
35091 MF->insert(MBBIter, mallocMBB);
35092 MF->insert(MBBIter, continueMBB);
35094 continueMBB->splice(continueMBB->begin(), BB,
35095 std::next(MachineBasicBlock::iterator(MI)), BB->end());
35096 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
35098 // Add code to the main basic block to check if the stack limit has been hit,
35099 // and if so, jump to mallocMBB otherwise to bumpMBB.
35100 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
35101 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
35102 .addReg(tmpSPVReg).addReg(sizeVReg);
35103 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
35104 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
35105 .addReg(SPLimitVReg);
35106 BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
35108 // bumpMBB simply decreases the stack pointer, since we know the current
35109 // stacklet has enough space.
35110 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
35111 .addReg(SPLimitVReg);
35112 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
35113 .addReg(SPLimitVReg);
35114 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
35116 // Calls into a routine in libgcc to allocate more space from the heap.
35117 const uint32_t *RegMask =
35118 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
        .addReg(sizeVReg);
35122 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
35123 .addExternalSymbol("__morestack_allocate_stack_space")
35124 .addRegMask(RegMask)
35125 .addReg(X86::RDI, RegState::Implicit)
35126 .addReg(X86::RAX, RegState::ImplicitDefine);
35127 } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
        .addReg(sizeVReg);
35130 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
35131 .addExternalSymbol("__morestack_allocate_stack_space")
35132 .addRegMask(RegMask)
35133 .addReg(X86::EDI, RegState::Implicit)
35134 .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
        .addImm(16);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
35139 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
35140 .addExternalSymbol("__morestack_allocate_stack_space")
35141 .addRegMask(RegMask)
35142 .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
        .addImm(16);
35149 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
35150 .addReg(IsLP64 ? X86::RAX : X86::EAX);
35151 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
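  // A sketch of the LP64 runtime-allocation path built above, assuming the
  // libgcc helper is available; the pointer to the new block comes back in
  // %rax and reaches the alloca's users through the PHI in continueMBB:
  //
  //   movq  %size, %rdi
  //   callq __morestack_allocate_stack_space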
35153 // Set up the CFG correctly.
35154 BB->addSuccessor(bumpMBB);
35155 BB->addSuccessor(mallocMBB);
35156 mallocMBB->addSuccessor(continueMBB);
35157 bumpMBB->addSuccessor(continueMBB);
35159 // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI.getOperand(0).getReg())
      .addReg(mallocPtrVReg)
      .addMBB(mallocMBB)
      .addReg(bumpSPPtrVReg)
      .addMBB(bumpMBB);
35167 // Delete the original pseudo instruction.
35168 MI.eraseFromParent();
  return continueMBB;
}
35174 MachineBasicBlock *
35175 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
35176 MachineBasicBlock *BB) const {
35177 MachineFunction *MF = BB->getParent();
35178 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
35179 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
35180 const DebugLoc &DL = MI.getDebugLoc();
35182 assert(!isAsynchronousEHPersonality(
35183 classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
35184 "SEH does not use catchret!");
35186 // Only 32-bit EH needs to worry about manually restoring stack pointers.
  if (!Subtarget.is32Bit())
    return BB;
35190 // C++ EH creates a new target block to hold the restore code, and wires up
35191 // the new block to the return destination with a normal JMP_4.
35192 MachineBasicBlock *RestoreMBB =
35193 MF->CreateMachineBasicBlock(BB->getBasicBlock());
35194 assert(BB->succ_size() == 1);
35195 MF->insert(std::next(BB->getIterator()), RestoreMBB);
35196 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
35197 BB->addSuccessor(RestoreMBB);
35198 MI.getOperand(0).setMBB(RestoreMBB);
35200 // Marking this as an EH pad but not a funclet entry block causes PEI to
35201 // restore stack pointers in the block.
35202 RestoreMBB->setIsEHPad(true);
35204 auto RestoreMBBI = RestoreMBB->begin();
  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
  return BB;
}
35209 MachineBasicBlock *
35210 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
35211 MachineBasicBlock *BB) const {
  // So, here we replace TLSADDR with the sequence:
  // adjust_stackdown -> TLSADDR -> adjust_stackup.
  // We need this because TLSADDR is lowered into a call inside MC, so without
  // the two markers shrink-wrapping may push the prologue/epilogue past them.
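  // Conceptually, the rewrite performed below is (a sketch, using the generic
  // call-frame pseudo names rather than their target spellings):
  //
  //   ADJCALLSTACKDOWN 0, 0, 0   // CALLSEQ_START marker
  //   TLSADDR ...                // expanded into a real call later, in MC
  //   ADJCALLSTACKUP 0, 0        // CALLSEQ_END marker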
35217 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
35218 const DebugLoc &DL = MI.getDebugLoc();
35219 MachineFunction &MF = *BB->getParent();
35221 // Emit CALLSEQ_START right before the instruction.
35222 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
35223 MachineInstrBuilder CallseqStart =
35224 BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
35225 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
35227 // Emit CALLSEQ_END right after the instruction.
35228 // We don't call erase from parent because we want to keep the
35229 // original instruction around.
35230 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
35231 MachineInstrBuilder CallseqEnd =
35232 BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
  BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);

  return BB;
}
35238 MachineBasicBlock *
35239 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
35240 MachineBasicBlock *BB) const {
35241 // This is pretty easy. We're taking the value that we received from
35242 // our load from the relocation, sticking it in either RDI (x86-64)
35243 // or EAX and doing an indirect call. The return value will then
35244 // be in the normal return register.
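  // For x86-64 Darwin this amounts to a sequence along these lines (an
  // illustrative sketch, not emitted verbatim):
  //
  //   movq _var@TLVP(%rip), %rdi   # load pointer to the TLV descriptor
  //   callq *(%rdi)                # call the descriptor's getter
  //   # the variable's address comes back in %rax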
35245 MachineFunction *F = BB->getParent();
35246 const X86InstrInfo *TII = Subtarget.getInstrInfo();
35247 const DebugLoc &DL = MI.getDebugLoc();
35249 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
35250 assert(MI.getOperand(3).isGlobal() && "This should be a global");
35252 // Get a register mask for the lowered call.
35253 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
35254 // proper register mask.
35255 const uint32_t *RegMask =
35256 Subtarget.is64Bit() ?
35257 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
35258 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
35259 if (Subtarget.is64Bit()) {
35260 MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
            .addReg(X86::RIP)
            .addImm(1)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
35269 addDirectMem(MIB, X86::RDI);
35270 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
35271 } else if (!isPositionIndependent()) {
35272 MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
            .addReg(0)
            .addImm(1)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
35281 addDirectMem(MIB, X86::EAX);
35282 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
            .addReg(TII->getGlobalBaseReg(F))
            .addImm(1)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
35292 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
35293 addDirectMem(MIB, X86::EAX);
35294 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
  switch (RPOpc) {
35303 case X86::INDIRECT_THUNK_CALL32:
35304 return X86::CALLpcrel32;
35305 case X86::INDIRECT_THUNK_CALL64:
35306 return X86::CALL64pcrel32;
35307 case X86::INDIRECT_THUNK_TCRETURN32:
35308 return X86::TCRETURNdi;
35309 case X86::INDIRECT_THUNK_TCRETURN64:
35310 return X86::TCRETURNdi64;
  }
  llvm_unreachable("not indirect thunk opcode");
}
static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
                                          unsigned Reg) {
35317 if (Subtarget.useRetpolineExternalThunk()) {
35318 // When using an external thunk for retpolines, we pick names that match the
35319 // names GCC happens to use as well. This helps simplify the implementation
35320 // of the thunks for kernels where they have no easy ability to create
35321 // aliases and are doing non-trivial configuration of the thunk's body. For
35322 // example, the Linux kernel will do boot-time hot patching of the thunk
35323 // bodies and cannot easily export aliases of these to loaded modules.
35325 // Note that at any point in the future, we may need to change the semantics
35326 // of how we implement retpolines and at that time will likely change the
35327 // name of the called thunk. Essentially, there is no hard guarantee that
  // LLVM will generate calls to specific thunks; we merely make a best-effort
35329 // attempt to help out kernels and other systems where duplicating the
35330 // thunks is costly.
    switch (Reg) {
    case X86::EAX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_eax";
    case X86::ECX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_ecx";
    case X86::EDX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_edx";
    case X86::EDI:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_edi";
    case X86::R11:
      assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
      return "__x86_indirect_thunk_r11";
    }
    llvm_unreachable("unexpected reg for external indirect thunk");
  }
35351 if (Subtarget.useRetpolineIndirectCalls() ||
35352 Subtarget.useRetpolineIndirectBranches()) {
35353 // When targeting an internal COMDAT thunk use an LLVM-specific name.
    switch (Reg) {
    case X86::EAX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__llvm_retpoline_eax";
    case X86::ECX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__llvm_retpoline_ecx";
    case X86::EDX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__llvm_retpoline_edx";
    case X86::EDI:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__llvm_retpoline_edi";
    case X86::R11:
      assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
      return "__llvm_retpoline_r11";
    }
    llvm_unreachable("unexpected reg for retpoline");
  }
35374 if (Subtarget.useLVIControlFlowIntegrity()) {
35375 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
    return "__llvm_lvi_thunk_r11";
  }
  llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
}
35381 MachineBasicBlock *
35382 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
35383 MachineBasicBlock *BB) const {
35384 // Copy the virtual register into the R11 physical register and
35385 // call the retpoline thunk.
35386 const DebugLoc &DL = MI.getDebugLoc();
35387 const X86InstrInfo *TII = Subtarget.getInstrInfo();
35388 Register CalleeVReg = MI.getOperand(0).getReg();
35389 unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
35391 // Find an available scratch register to hold the callee. On 64-bit, we can
35392 // just use R11, but we scan for uses anyway to ensure we don't generate
35393 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
35394 // already a register use operand to the call to hold the callee. If none
35395 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
35396 // register and ESI is the base pointer to realigned stack frames with VLAs.
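  // For example, an indirect call through a vreg on 32-bit typically becomes
  // (an illustrative sketch):
  //
  //   movl  %vreg, %eax
  //   calll __llvm_retpoline_eax   # the thunk performs the indirect branch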
35397 SmallVector<unsigned, 3> AvailableRegs;
35398 if (Subtarget.is64Bit())
    AvailableRegs.push_back(X86::R11);
  else
    AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
35403 // Zero out any registers that are already used.
  for (const auto &MO : MI.operands()) {
    if (MO.isReg() && MO.isUse())
      for (unsigned &Reg : AvailableRegs)
        if (Reg == MO.getReg())
          Reg = 0;
  }
35411 // Choose the first remaining non-zero available register.
35412 unsigned AvailableReg = 0;
  for (unsigned MaybeReg : AvailableRegs) {
    if (MaybeReg) {
      AvailableReg = MaybeReg;
      break;
    }
  }
  if (!AvailableReg)
    report_fatal_error("calling convention incompatible with retpoline, no "
                       "available registers");
35423 const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
35425 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
35426 .addReg(CalleeVReg);
35427 MI.getOperand(0).ChangeToES(Symbol);
35428 MI.setDesc(TII->get(Opc));
35429 MachineInstrBuilder(*BB->getParent(), &MI)
      .addReg(AvailableReg, RegState::Implicit | RegState::Kill);

  return BB;
}
/// SetJmp implies future control flow change upon calling the corresponding
/// function.
/// Instead of using the 'return' instruction, the long jump fixes the stack and
35437 /// performs an indirect branch. To do so it uses the registers that were stored
35438 /// in the jump buffer (when calling SetJmp).
35439 /// In case the shadow stack is enabled we need to fix it as well, because some
35440 /// return addresses will be skipped.
35441 /// The function will save the SSP for future fixing in the function
35442 /// emitLongJmpShadowStackFix.
35443 /// \sa emitLongJmpShadowStackFix
35444 /// \param [in] MI The temporary Machine Instruction for the builtin.
35445 /// \param [in] MBB The Machine Basic Block that will be modified.
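/// As used by the setjmp/longjmp lowerings below, the jump buffer is laid out
/// in pointer-sized slots roughly as follows (a sketch; the exact contents are
/// defined by the code that fills the buffer in):
///   buf[0] = frame pointer
///   buf[1] = resume address (label of the restore block)
///   buf[2] = stack pointer
///   buf[3] = shadow stack pointer (only with cf-protection-return)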
35446 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
35447 MachineBasicBlock *MBB) const {
35448 const DebugLoc &DL = MI.getDebugLoc();
35449 MachineFunction *MF = MBB->getParent();
35450 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35451 MachineRegisterInfo &MRI = MF->getRegInfo();
35452 MachineInstrBuilder MIB;
35454 // Memory Reference.
35455 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35456 MI.memoperands_end());
35458 // Initialize a register with zero.
35459 MVT PVT = getPointerTy(MF->getDataLayout());
35460 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35461 Register ZReg = MRI.createVirtualRegister(PtrRC);
35462 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
  BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
      .addDef(ZReg)
      .addReg(ZReg, RegState::Undef)
      .addReg(ZReg, RegState::Undef);
35468 // Read the current SSP Register value to the zeroed register.
35469 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
35470 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35471 BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
35473 // Write the SSP register value to offset 3 in input memory buffer.
35474 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35475 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
35476 const int64_t SSPOffset = 3 * PVT.getStoreSize();
35477 const unsigned MemOpndSlot = 1;
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
    else
      MIB.add(MI.getOperand(MemOpndSlot + i));
  }
35484 MIB.addReg(SSPCopyReg);
  MIB.setMemRefs(MMOs);
}
35488 MachineBasicBlock *
35489 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
35490 MachineBasicBlock *MBB) const {
35491 const DebugLoc &DL = MI.getDebugLoc();
35492 MachineFunction *MF = MBB->getParent();
35493 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35494 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
35495 MachineRegisterInfo &MRI = MF->getRegInfo();
35497 const BasicBlock *BB = MBB->getBasicBlock();
35498 MachineFunction::iterator I = ++MBB->getIterator();
35500 // Memory Reference
35501 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35502 MI.memoperands_end());
  unsigned DstReg;
  unsigned MemOpndSlot = 0;
35507 unsigned CurOp = 0;
35509 DstReg = MI.getOperand(CurOp++).getReg();
35510 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
35511 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
35513 Register mainDstReg = MRI.createVirtualRegister(RC);
35514 Register restoreDstReg = MRI.createVirtualRegister(RC);
35516 MemOpndSlot = CurOp;
35518 MVT PVT = getPointerTy(MF->getDataLayout());
35519 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
35520 "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1
35538 MachineBasicBlock *thisMBB = MBB;
35539 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
35540 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35541 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
35542 MF->insert(I, mainMBB);
35543 MF->insert(I, sinkMBB);
35544 MF->push_back(restoreMBB);
35545 restoreMBB->setHasAddressTaken();
35547 MachineInstrBuilder MIB;
35549 // Transfer the remainder of BB and its successor edges to sinkMBB.
35550 sinkMBB->splice(sinkMBB->begin(), MBB,
35551 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
35552 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35555 unsigned PtrStoreOpc = 0;
35556 unsigned LabelReg = 0;
35557 const int64_t LabelOffset = 1 * PVT.getStoreSize();
35558 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
35559 !isPositionIndependent();
35561 // Prepare IP either in reg or imm.
35562 if (!UseImmLabel) {
35563 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35564 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35565 LabelReg = MRI.createVirtualRegister(PtrRC);
35566 if (Subtarget.is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
                .addReg(X86::RIP)
                .addImm(1)
                .addReg(0)
                .addMBB(restoreMBB)
                .addReg(0);
    } else {
35574 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
                .addReg(XII->getGlobalBaseReg(MF))
                .addImm(1)
                .addReg(0)
                .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
                .addReg(0);
    }
  } else
35583 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
35585 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
35586 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35587 if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.add(MI.getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
35596 MIB.setMemRefs(MMOs);
35598 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    emitSetJmpShadowStackFix(MI, thisMBB);
  }

  // Setup
35603 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
35604 .addMBB(restoreMBB);
35606 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
35607 MIB.addRegMask(RegInfo->getNoPreservedMask());
35608 thisMBB->addSuccessor(mainMBB);
35609 thisMBB->addSuccessor(restoreMBB);
  // mainMBB:
  //  v_main = 0
  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
35614 mainMBB->addSuccessor(sinkMBB);
  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
35618 TII->get(X86::PHI), DstReg)
35619 .addReg(mainDstReg).addMBB(mainMBB)
35620 .addReg(restoreDstReg).addMBB(restoreMBB);
  // restoreMBB:
  if (RegInfo->hasBasePointer(*MF)) {
35624 const bool Uses64BitFramePtr =
35625 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
35626 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
35627 X86FI->setRestoreBasePointer(MF);
35628 Register FramePtr = RegInfo->getFrameRegister(*MF);
35629 Register BasePtr = RegInfo->getBaseRegister();
35630 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
35631 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
35632 FramePtr, true, X86FI->getRestoreBasePointerOffset())
        .setMIFlag(MachineInstr::FrameSetup);
  }
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
35636 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
35637 restoreMBB->addSuccessor(sinkMBB);
  MI.eraseFromParent();

  return sinkMBB;
}
35643 /// Fix the shadow stack using the previously saved SSP pointer.
35644 /// \sa emitSetJmpShadowStackFix
35645 /// \param [in] MI The temporary Machine Instruction for the builtin.
35646 /// \param [in] MBB The Machine Basic Block that will be modified.
35647 /// \return The sink MBB that will perform the future indirect branch.
35648 MachineBasicBlock *
35649 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
35650 MachineBasicBlock *MBB) const {
35651 const DebugLoc &DL = MI.getDebugLoc();
35652 MachineFunction *MF = MBB->getParent();
35653 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35654 MachineRegisterInfo &MRI = MF->getRegInfo();
35656 // Memory Reference
35657 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35658 MI.memoperands_end());
35660 MVT PVT = getPointerTy(MF->getDataLayout());
35661 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  // checkSspMBB:
  //         xor vreg1, vreg1
  //         rdssp vreg1
  //         test vreg1, vreg1
  //         je sinkMBB   # Jump if Shadow Stack is not supported
  // fallMBB:
  //         mov buf+24/12(%rip), vreg2
  //         sub vreg1, vreg2
  //         jbe sinkMBB  # No need to fix the Shadow Stack
  // fixShadowMBB:
  //         shr 3/2, vreg2
  //         incssp vreg2  # fix the SSP according to the lower 8 bits
  //         shr 8, vreg2
  //         je sinkMBB
  // fixShadowLoopPrepareMBB:
  //         shl vreg2
  //         mov 128, vreg3
  // fixShadowLoopMBB:
  //         incssp vreg3
  //         dec vreg2
  //         jne fixShadowLoopMBB # Iterate until you finish fixing
  //                              # the Shadow Stack
  // sinkMBB:
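  // Worked example (hypothetical numbers, x86-64): if the saved SSP is 0x1230
  // bytes above the current SSP, the delta in 8-byte slots is
  // 0x1230 >> 3 = 0x246. The first incssp consumes the low 8 bits (0x46
  // slots); the remainder, 0x246 >> 8 = 2, is doubled to 4 by the single
  // shift left, and the loop then executes incssp with 128 four times
  // (4 * 128 = 0x200 slots), for 0x246 slots in total.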
35687 MachineFunction::iterator I = ++MBB->getIterator();
35688 const BasicBlock *BB = MBB->getBasicBlock();
35690 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
35691 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
35692 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
35693 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
35694 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
35695 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35696 MF->insert(I, checkSspMBB);
35697 MF->insert(I, fallMBB);
35698 MF->insert(I, fixShadowMBB);
35699 MF->insert(I, fixShadowLoopPrepareMBB);
35700 MF->insert(I, fixShadowLoopMBB);
35701 MF->insert(I, sinkMBB);
35703 // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
                  MBB->end());
35706 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35708 MBB->addSuccessor(checkSspMBB);
35710 // Initialize a register with zero.
35711 Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
35712 BuildMI(checkSspMBB, DL, TII->get(X86::MOV32r0), ZReg);
35714 if (PVT == MVT::i64) {
35715 Register TmpZReg = MRI.createVirtualRegister(PtrRC);
    BuildMI(checkSspMBB, DL, TII->get(X86::SUBREG_TO_REG), TmpZReg)
        .addImm(0)
        .addReg(ZReg)
        .addImm(X86::sub_32bit);
    ZReg = TmpZReg;
  }
35723 // Read the current SSP Register value to the zeroed register.
35724 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
35725 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35726 BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
  // Check whether the result of the SSP register is zero and jump directly
  // to the sink.
35730 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
35731 BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
35732 .addReg(SSPCopyReg)
35733 .addReg(SSPCopyReg);
35734 BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
35735 checkSspMBB->addSuccessor(sinkMBB);
35736 checkSspMBB->addSuccessor(fallMBB);
35738 // Reload the previously saved SSP register value.
35739 Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
35740 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
35741 const int64_t SPPOffset = 3 * PVT.getStoreSize();
35742 MachineInstrBuilder MIB =
35743 BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
35744 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35745 const MachineOperand &MO = MI.getOperand(i);
35746 if (i == X86::AddrDisp)
35747 MIB.addDisp(MO, SPPOffset);
35748 else if (MO.isReg()) // Don't add the whole operand, we don't want to
35749 // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
35754 MIB.setMemRefs(MMOs);
35756 // Subtract the current SSP from the previous SSP.
35757 Register SspSubReg = MRI.createVirtualRegister(PtrRC);
35758 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
35759 BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
35760 .addReg(PrevSSPReg)
35761 .addReg(SSPCopyReg);
35763 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
35764 BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
35765 fallMBB->addSuccessor(sinkMBB);
35766 fallMBB->addSuccessor(fixShadowMBB);
35768 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
35769 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
35770 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
35771 Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
      .addReg(SspSubReg)
      .addImm(Offset);
35776 // Increase SSP when looking only on the lower 8 bits of the delta.
35777 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
35778 BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
35780 // Reset the lower 8 bits.
35781 Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
35782 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
      .addReg(SspFirstShrReg)
      .addImm(8);
35786 // Jump if the result of the shift is zero.
35787 BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
35788 fixShadowMBB->addSuccessor(sinkMBB);
35789 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
35791 // Do a single shift left.
35792 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
35793 Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
35794 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
35795 .addReg(SspSecondShrReg);
35797 // Save the value 128 to a register (will be used next with incssp).
35798 Register Value128InReg = MRI.createVirtualRegister(PtrRC);
35799 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
      .addImm(128);
35802 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
35804 // Since incssp only looks at the lower 8 bits, we might need to do several
35805 // iterations of incssp until we finish fixing the shadow stack.
35806 Register DecReg = MRI.createVirtualRegister(PtrRC);
35807 Register CounterReg = MRI.createVirtualRegister(PtrRC);
35808 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
35809 .addReg(SspAfterShlReg)
      .addMBB(fixShadowLoopPrepareMBB)
      .addReg(DecReg)
35812 .addMBB(fixShadowLoopMBB);
35814 // Every iteration we increase the SSP by 128.
35815 BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
35817 // Every iteration we decrement the counter by 1.
35818 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
35819 BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
35821 // Jump if the counter is not zero yet.
35822 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
35823 fixShadowLoopMBB->addSuccessor(sinkMBB);
  fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);

  return sinkMBB;
}
35829 MachineBasicBlock *
35830 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
35831 MachineBasicBlock *MBB) const {
35832 const DebugLoc &DL = MI.getDebugLoc();
35833 MachineFunction *MF = MBB->getParent();
35834 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35835 MachineRegisterInfo &MRI = MF->getRegInfo();
35837 // Memory Reference
35838 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35839 MI.memoperands_end());
35841 MVT PVT = getPointerTy(MF->getDataLayout());
35842 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
35843 "Invalid Pointer Size!");
35845 const TargetRegisterClass *RC =
35846 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
35847 Register Tmp = MRI.createVirtualRegister(RC);
35848 // Since FP is only updated here but NOT referenced, it's treated as GPR.
35849 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
35850 Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
35851 Register SP = RegInfo->getStackRegister();
35853 MachineInstrBuilder MIB;
35855 const int64_t LabelOffset = 1 * PVT.getStoreSize();
35856 const int64_t SPOffset = 2 * PVT.getStoreSize();
35858 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
35859 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
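  // The expansion below amounts to the following (64-bit sketch, with buf
  // supplied as a memory operand):
  //
  //   movq 0(buf), %rbp     # restore frame pointer
  //   movq 8(buf), %tmp     # load the resume address
  //   movq 16(buf), %rsp    # restore stack pointer
  //   jmpq *%tmp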
35861 MachineBasicBlock *thisMBB = MBB;
35863 // When CET and shadow stack is enabled, we need to fix the Shadow Stack.
35864 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
  }

  // Reload FP
35869 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
35870 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35871 const MachineOperand &MO = MI.getOperand(i);
35872 if (MO.isReg()) // Don't add the whole operand, we don't want to
35873 // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
35878 MIB.setMemRefs(MMOs);
  // Reload IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
35882 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35883 const MachineOperand &MO = MI.getOperand(i);
35884 if (i == X86::AddrDisp)
35885 MIB.addDisp(MO, LabelOffset);
35886 else if (MO.isReg()) // Don't add the whole operand, we don't want to
35887 // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
35892 MIB.setMemRefs(MMOs);
  // Reload SP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
35896 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35897 if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(i), SPOffset);
    else
35900 MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
                                 // the last instruction of the expansion.
  }
35903 MIB.setMemRefs(MMOs);
  // Jump
  BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
  MI.eraseFromParent();

  return thisMBB;
}
35912 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
35913 MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
35916 const DebugLoc &DL = MI.getDebugLoc();
35917 MachineFunction *MF = MBB->getParent();
35918 MachineRegisterInfo *MRI = &MF->getRegInfo();
35919 const X86InstrInfo *TII = Subtarget.getInstrInfo();
35921 MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  unsigned Op = 0;
  unsigned VR = 0;

  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     !isPositionIndependent();

  if (UseImmLabel) {
    Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  } else {
    const TargetRegisterClass *TRC =
35934 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
35935 VR = MRI->createVirtualRegister(TRC);
35936 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35938 if (Subtarget.is64Bit())
      BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
          .addReg(X86::RIP)
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB)
          .addReg(0);
    else
      BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
          .addReg(0) /* TII->getGlobalBaseReg(MF) */
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
          .addReg(0);
  }
35954 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
35955 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
  if (UseImmLabel)
    MIB.addMBB(DispatchBB);
  else
    MIB.addReg(VR);
}
35962 MachineBasicBlock *
35963 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
35964 MachineBasicBlock *BB) const {
35965 const DebugLoc &DL = MI.getDebugLoc();
35966 MachineFunction *MF = BB->getParent();
35967 MachineRegisterInfo *MRI = &MF->getRegInfo();
35968 const X86InstrInfo *TII = Subtarget.getInstrInfo();
35969 int FI = MF->getFrameInfo().getFunctionContextIndex();
35971 // Get a mapping of the call site numbers to all of the landing pads they're
35972 // associated with.
35973 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
35974 unsigned MaxCSNum = 0;
35975 for (auto &MBB : *MF) {
    if (!MBB.isEHPad())
      continue;
35979 MCSymbol *Sym = nullptr;
35980 for (const auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
35984 assert(MI.isEHLabel() && "expected EH_LABEL");
      Sym = MI.getOperand(0).getMCSymbol();
      break;
    }
    if (!MF->hasCallSiteLandingPad(Sym))
      continue;
35992 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
35993 CallSiteNumToLPad[CSI].push_back(&MBB);
      MaxCSNum = std::max(MaxCSNum, CSI);
    }
  }
35998 // Get an ordered list of the machine basic blocks for the jump table.
35999 std::vector<MachineBasicBlock *> LPadList;
36000 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
36001 LPadList.reserve(CallSiteNumToLPad.size());
36003 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
36004 for (auto &LP : CallSiteNumToLPad[CSI]) {
36005 LPadList.push_back(LP);
      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
    }
  }
36010 assert(!LPadList.empty() &&
36011 "No landing pad destinations for the dispatch jump table!");
36013 // Create the MBBs for the dispatch code.
36015 // Shove the dispatch's address into the return slot in the function context.
36016 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
36017 DispatchBB->setIsEHPad(true);
36019 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
36020 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
36021 DispatchBB->addSuccessor(TrapBB);
36023 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
36024 DispatchBB->addSuccessor(DispContBB);
36027 MF->push_back(DispatchBB);
36028 MF->push_back(DispContBB);
36029 MF->push_back(TrapBB);
  // Insert code into the entry block that creates and registers the function
  // context.
36033 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
36035 // Create the jump table and associated information
36036 unsigned JTE = getJumpTableEncoding();
36037 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
36038 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
36040 const X86RegisterInfo &RI = TII->getRegisterInfo();
36041 // Add a register mask with no preserved registers. This results in all
36042 // registers being marked as clobbered.
36043 if (RI.hasBasePointer(*MF)) {
36044 const bool FPIs64Bit =
36045 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
36046 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
36047 MFI->setRestoreBasePointer(MF);
36049 Register FP = RI.getFrameRegister(*MF);
36050 Register BP = RI.getBaseRegister();
36051 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
36052 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
36053 MFI->getRestoreBasePointerOffset())
        .addRegMask(RI.getNoPreservedMask());
  } else {
    BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
        .addRegMask(RI.getNoPreservedMask());
  }
36060 // IReg is used as an index in a memory operand and therefore can't be SP
36061 Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
36062 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
36063 Subtarget.is64Bit() ? 8 : 4);
  BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
      .addReg(IReg)
      .addImm(LPadList.size());
36067 BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
36069 if (Subtarget.is64Bit()) {
36070 Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
36071 Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
36073 // leaq .LJTI0_0(%rip), BReg
    BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
        .addReg(X86::RIP)
        .addImm(1)
        .addReg(0)
        .addJumpTableIndex(MJTI)
        .addReg(0);
36080 // movzx IReg64, IReg
    BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
        .addImm(0)
        .addReg(IReg)
        .addImm(X86::sub_32bit);

    switch (JTE) {
36087 case MachineJumpTableInfo::EK_BlockAddress:
36088 // jmpq *(BReg,IReg64,8)
      BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
          .addReg(BReg)
          .addImm(8)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
      break;
36096 case MachineJumpTableInfo::EK_LabelDifference32: {
36097 Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
36098 Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
36099 Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
36101 // movl (BReg,IReg64,4), OReg
      BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
          .addReg(BReg)
          .addImm(4)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
36108 // movsx OReg64, OReg
36109 BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
36110 // addq BReg, OReg64, TReg
      BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
          .addReg(OReg64)
          .addReg(BReg);
      // jmpq *TReg
      BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
      break;
    }
    default:
      llvm_unreachable("Unexpected jump table encoding");
    }
  } else {
36122 // jmpl *.LJTI0_0(,IReg,4)
    BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
        .addReg(0)
        .addImm(4)
        .addReg(IReg)
        .addJumpTableIndex(MJTI)
        .addReg(0);
  }
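  // Net effect (64-bit EK_BlockAddress sketch; register names illustrative):
  // load the call-site index that the personality stored in the function
  // context, range-check it, then jump through the jump table:
  //
  //   movl  <FI+offset>, %IReg
  //   cmpl  $NumLPads, %IReg
  //   jae   TrapBB
  //   leaq  .LJTI0_0(%rip), %BReg
  //   jmpq  *(%BReg,%IReg64,8)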
36131 // Add the jump table entries as successors to the MBB.
36132 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
36133 for (auto &LP : LPadList)
36134 if (SeenMBBs.insert(LP).second)
36135 DispContBB->addSuccessor(LP);
36137 // N.B. the order the invoke BBs are processed in doesn't matter here.
36138 SmallVector<MachineBasicBlock *, 64> MBBLPads;
36139 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
36140 for (MachineBasicBlock *MBB : InvokeBBs) {
36141 // Remove the landing pad successor from the invoke block and replace it
36142 // with the new dispatch block.
36143 // Keep a copy of Successors since it's modified inside the loop.
    SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
                                                   MBB->succ_rend());
36146 // FIXME: Avoid quadratic complexity.
36147 for (auto MBBS : Successors) {
36148 if (MBBS->isEHPad()) {
36149 MBB->removeSuccessor(MBBS);
        MBBLPads.push_back(MBBS);
      }
    }
36154 MBB->addSuccessor(DispatchBB);
36156 // Find the invoke call and mark all of the callee-saved registers as
36157 // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (auto &II : reverse(*MBB)) {
      if (!II.isCall())
        continue;

      DenseMap<unsigned, bool> DefRegs;
      for (auto &MOp : II.operands())
        if (MOp.isReg())
          DefRegs[MOp.getReg()] = true;
36169 MachineInstrBuilder MIB(*MF, &II);
36170 for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
        unsigned Reg = SavedRegs[RegIdx];
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }
36180 // Mark all former landing pads as non-landing pads. The dispatch is the only
36181 // landing pad now.
36182 for (auto &LP : MBBLPads)
36183 LP->setIsEHPad(false);
36185 // The instruction is gone now.
  MI.eraseFromParent();

  return BB;
}
36190 MachineBasicBlock *
36191 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
36192 MachineBasicBlock *BB) const {
36193 MachineFunction *MF = BB->getParent();
36194 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36195 const DebugLoc &DL = MI.getDebugLoc();
36197 auto TMMImmToTMMReg = [](unsigned Imm) {
36198 assert (Imm < 8 && "Illegal tmm index");
    return X86::TMM0 + Imm;
  };
36201 switch (MI.getOpcode()) {
36202 default: llvm_unreachable("Unexpected instr type to insert");
36203 case X86::TLS_addr32:
36204 case X86::TLS_addr64:
36205 case X86::TLS_addrX32:
36206 case X86::TLS_base_addr32:
36207 case X86::TLS_base_addr64:
36208 case X86::TLS_base_addrX32:
36209 return EmitLoweredTLSAddr(MI, BB);
36210 case X86::INDIRECT_THUNK_CALL32:
36211 case X86::INDIRECT_THUNK_CALL64:
36212 case X86::INDIRECT_THUNK_TCRETURN32:
36213 case X86::INDIRECT_THUNK_TCRETURN64:
36214 return EmitLoweredIndirectThunk(MI, BB);
36215 case X86::CATCHRET:
36216 return EmitLoweredCatchRet(MI, BB);
36217 case X86::SEG_ALLOCA_32:
36218 case X86::SEG_ALLOCA_64:
36219 return EmitLoweredSegAlloca(MI, BB);
36220 case X86::PROBED_ALLOCA_32:
36221 case X86::PROBED_ALLOCA_64:
36222 return EmitLoweredProbedAlloca(MI, BB);
36223 case X86::TLSCall_32:
36224 case X86::TLSCall_64:
36225 return EmitLoweredTLSCall(MI, BB);
36226 case X86::CMOV_FR16:
36227 case X86::CMOV_FR16X:
36228 case X86::CMOV_FR32:
36229 case X86::CMOV_FR32X:
36230 case X86::CMOV_FR64:
36231 case X86::CMOV_FR64X:
36232 case X86::CMOV_GR8:
36233 case X86::CMOV_GR16:
36234 case X86::CMOV_GR32:
36235 case X86::CMOV_RFP32:
36236 case X86::CMOV_RFP64:
36237 case X86::CMOV_RFP80:
36238 case X86::CMOV_VR64:
36239 case X86::CMOV_VR128:
36240 case X86::CMOV_VR128X:
36241 case X86::CMOV_VR256:
36242 case X86::CMOV_VR256X:
36243 case X86::CMOV_VR512:
36244 case X86::CMOV_VK1:
36245 case X86::CMOV_VK2:
36246 case X86::CMOV_VK4:
36247 case X86::CMOV_VK8:
36248 case X86::CMOV_VK16:
36249 case X86::CMOV_VK32:
36250 case X86::CMOV_VK64:
36251 return EmitLoweredSelect(MI, BB);
36253 case X86::RDFLAGS32:
36254 case X86::RDFLAGS64: {
    unsigned PushF =
        MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
36257 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
36258 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
36259 // Permit reads of the EFLAGS and DF registers without them being defined.
36260 // This intrinsic exists to read external processor state in flags, such as
36261 // the trap flag, interrupt flag, and direction flag, none of which are
36262 // modeled by the backend.
36263 assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
36264 "Unexpected register in operand!");
36265 Push->getOperand(2).setIsUndef();
36266 assert(Push->getOperand(3).getReg() == X86::DF &&
36267 "Unexpected register in operand!");
36268 Push->getOperand(3).setIsUndef();
36269 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
36275 case X86::WRFLAGS32:
36276 case X86::WRFLAGS64: {
    unsigned Push =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
    unsigned PopF =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
36281 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
36282 BuildMI(*BB, MI, DL, TII->get(PopF));
    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
36288 case X86::FP32_TO_INT16_IN_MEM:
36289 case X86::FP32_TO_INT32_IN_MEM:
36290 case X86::FP32_TO_INT64_IN_MEM:
36291 case X86::FP64_TO_INT16_IN_MEM:
36292 case X86::FP64_TO_INT32_IN_MEM:
36293 case X86::FP64_TO_INT64_IN_MEM:
36294 case X86::FP80_TO_INT16_IN_MEM:
36295 case X86::FP80_TO_INT32_IN_MEM:
36296 case X86::FP80_TO_INT64_IN_MEM: {
36297 // Change the floating point control register to use "round towards zero"
36298 // mode when truncating to an integer value.
36299 int OrigCWFrameIdx =
36300 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36301 addFrameReference(BuildMI(*BB, MI, DL,
36302 TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
36304 // Load the old value of the control word...
36305 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
                      OrigCWFrameIdx);
36309 // OR 0b11 into bit 10 and 11. 0b11 is the encoding for round toward zero.
36310 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36311 BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
36312 .addReg(OldCW, RegState::Kill).addImm(0xC00);
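    // For example (values illustrative): the x87 default control word 0x037F
    // becomes 0x0F7F after OR-ing in 0xC00, setting RC (bits 10-11) to 0b11,
    // i.e. round-toward-zero, while leaving precision and exception masks
    // untouched.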
36314 // Extract to 16 bits.
    Register NewCW16 =
        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36317 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
36318 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36320 // Prepare memory for FLDCW.
36321 int NewCWFrameIdx =
36322 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
                      NewCWFrameIdx)
        .addReg(NewCW16, RegState::Kill);
36327 // Reload the modified control word now...
36328 addFrameReference(BuildMI(*BB, MI, DL,
36329 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
    // Get the X86 opcode to use.
    unsigned Opc;
36333 switch (MI.getOpcode()) {
36334 default: llvm_unreachable("illegal opcode!");
36335 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
36336 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
36337 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
36338 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
36339 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
36340 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
36341 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
36342 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }
36346 X86AddressMode AM = getAddressFromInstr(&MI, 0);
36347 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
36348 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
36350 // Reload the original control word now.
36351 addFrameReference(BuildMI(*BB, MI, DL,
36352 TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  case X86::XBEGIN:
    return emitXBegin(MI, BB, Subtarget.getInstrInfo());
36362 case X86::VAARG_64:
36363 case X86::VAARG_X32:
36364 return EmitVAARGWithCustomInserter(MI, BB);
36366 case X86::EH_SjLj_SetJmp32:
36367 case X86::EH_SjLj_SetJmp64:
36368 return emitEHSjLjSetJmp(MI, BB);
36370 case X86::EH_SjLj_LongJmp32:
36371 case X86::EH_SjLj_LongJmp64:
36372 return emitEHSjLjLongJmp(MI, BB);
36374 case X86::Int_eh_sjlj_setup_dispatch:
36375 return EmitSjLjDispatchBlock(MI, BB);
36377 case TargetOpcode::STATEPOINT:
36378 // As an implementation detail, STATEPOINT shares the STACKMAP format at
36379 // this point in the process. We diverge later.
36380 return emitPatchPoint(MI, BB);
36382 case TargetOpcode::STACKMAP:
36383 case TargetOpcode::PATCHPOINT:
36384 return emitPatchPoint(MI, BB);
36386 case TargetOpcode::PATCHABLE_EVENT_CALL:
  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
    return BB;
36390 case X86::LCMPXCHG8B: {
36391 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    // In addition to the four E[ABCD] registers implied by the encoding,
    // CMPXCHG8B requires a memory operand. If the current architecture happens
    // to be i686 and the current function needs a base pointer - which is ESI
    // for i686 - the register allocator would not be able to allocate
    // registers for an address of the form X(%reg, %reg, Y): there would never
    // be enough unreserved registers during regalloc (without the need for a
    // base ptr the only option would be X(%edi, %esi, Y)).
    // We give the register allocator a hand by precomputing the address in a
    // new vreg using LEA.
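    // Sketch of the rewrite (operands illustrative):
    //
    //   leal disp(%base,%index,scale), %vreg
    //   lock cmpxchg8b (%vreg)     # memory operand no longer needs an index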
36402 // If it is not i686 or there is no base pointer - nothing to do here.
    if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
      return BB;
    // Even though this code does not necessarily need the base pointer to be
    // ESI, we check for that. The reason: if this assert fails, something has
    // changed in the compiler's base pointer handling, which most probably
    // has to be addressed somehow here as well.
36410 assert(TRI->getBaseRegister() == X86::ESI &&
36411 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
36412 "base pointer in mind");
36414 MachineRegisterInfo &MRI = MF->getRegInfo();
36415 MVT SPTy = getPointerTy(MF->getDataLayout());
36416 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
36417 Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
36419 X86AddressMode AM = getAddressFromInstr(&MI, 0);
36420 // Regalloc does not need any help when the memory operand of CMPXCHG8B
36421 // does not use index register.
    if (AM.IndexReg == X86::NoRegister)
      return BB;
36425 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
36426 // four operand definitions that are E[ABCD] registers. We skip them and
36427 // then insert the LEA.
36428 MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
36429 while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
36430 RMBBI->definesRegister(X86::EBX) ||
36431 RMBBI->definesRegister(X86::ECX) ||
                                   RMBBI->definesRegister(X86::EDX))) {
      ++RMBBI;
    }
    MachineBasicBlock::iterator MBBI(RMBBI);
    addFullAddress(
        BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
    setDirectAddressInInstr(&MI, 0, computedAddrVReg);

    return BB;
  }
36443 case X86::LCMPXCHG16B_NO_RBX: {
36444 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36445 Register BasePtr = TRI->getBaseRegister();
36446 if (TRI->hasBasePointer(*MF) &&
36447 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
36448 if (!BB->isLiveIn(BasePtr))
36449 BB->addLiveIn(BasePtr);
      // Save RBX into a virtual register.
      Register SaveRBX =
          MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
      BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), SaveRBX)
          .addReg(X86::RBX);
      Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36456 MachineInstrBuilder MIB =
36457 BuildMI(*BB, MI, DL, TII->get(X86::LCMPXCHG16B_SAVE_RBX), Dst);
36458 for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36459 MIB.add(MI.getOperand(Idx));
36460 MIB.add(MI.getOperand(X86::AddrNumOperands));
      MIB.addReg(SaveRBX);
    } else {
36463 // Simple case, just copy the virtual register to RBX.
36464 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::RBX)
36465 .add(MI.getOperand(X86::AddrNumOperands));
36466 MachineInstrBuilder MIB =
36467 BuildMI(*BB, MI, DL, TII->get(X86::LCMPXCHG16B));
36468 for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
        MIB.add(MI.getOperand(Idx));
    }
    MI.eraseFromParent();
    return BB;
  }
36474 case X86::MWAITX: {
36475 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36476 Register BasePtr = TRI->getBaseRegister();
36477 bool IsRBX = (BasePtr == X86::RBX || BasePtr == X86::EBX);
    // If there is no need to save the base pointer, we generate MWAITXrrr;
    // otherwise we generate the pseudo MWAITX_SAVE_RBX.
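    // The pseudo's operands are copied into the fixed registers MWAITX
    // expects: operand 0 into ECX, operand 1 into EAX, and operand 2 (the
    // timeout) into EBX. A sketch of the simple, no-conflict expansion:
    //
    //   movl %op0, %ecx
    //   movl %op1, %eax
    //   movl %op2, %ebx
    //   mwaitx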
36480 if (!IsRBX || !TRI->hasBasePointer(*MF)) {
36481 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::ECX)
36482 .addReg(MI.getOperand(0).getReg());
36483 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EAX)
36484 .addReg(MI.getOperand(1).getReg());
36485 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EBX)
36486 .addReg(MI.getOperand(2).getReg());
36487 BuildMI(*BB, MI, DL, TII->get(X86::MWAITXrrr));
      MI.eraseFromParent();
    } else {
      if (!BB->isLiveIn(BasePtr)) {
        BB->addLiveIn(BasePtr);
      }
36493 // Parameters can be copied into ECX and EAX but not EBX yet.
36494 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::ECX)
36495 .addReg(MI.getOperand(0).getReg());
36496 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EAX)
36497 .addReg(MI.getOperand(1).getReg());
36498 assert(Subtarget.is64Bit() && "Expected 64-bit mode!");
36499 // Save RBX into a virtual register.
      Register SaveRBX =
          MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
      BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), SaveRBX)
          .addReg(X86::RBX);
36504 // Generate mwaitx pseudo.
36505 Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36506 BuildMI(*BB, MI, DL, TII->get(X86::MWAITX_SAVE_RBX))
36507 .addDef(Dst) // Destination tied in with SaveRBX.
36508 .addReg(MI.getOperand(2).getReg()) // input value of EBX.
36509 .addUse(SaveRBX); // Save of base pointer.
      MI.eraseFromParent();
    }
    return BB;
  }
36514 case TargetOpcode::PREALLOCATED_SETUP: {
36515 assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
36516 auto MFI = MF->getInfo<X86MachineFunctionInfo>();
36517 MFI->setHasPreallocatedCall(true);
36518 int64_t PreallocatedId = MI.getOperand(0).getImm();
36519 size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
36520 assert(StackAdjustment != 0 && "0 stack adjustment");
36521 LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
36522 << StackAdjustment << "\n");
    BuildMI(*BB, MI, DL, TII->get(X86::SUB32ri), X86::ESP)
        .addReg(X86::ESP)
        .addImm(StackAdjustment);
    MI.eraseFromParent();
    return BB;
  }
36529 case TargetOpcode::PREALLOCATED_ARG: {
36530 assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
36531 int64_t PreallocatedId = MI.getOperand(1).getImm();
36532 int64_t ArgIdx = MI.getOperand(2).getImm();
36533 auto MFI = MF->getInfo<X86MachineFunctionInfo>();
36534 size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
36535 LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
36536 << ", arg offset " << ArgOffset << "\n");
36537 // stack pointer + offset
    addRegOffset(
        BuildMI(*BB, MI, DL, TII->get(X86::LEA32r), MI.getOperand(0).getReg()),
        X86::ESP, false, ArgOffset);
    MI.eraseFromParent();
    return BB;
  }
36544 case X86::PTDPBSSD:
36545 case X86::PTDPBSUD:
36546 case X86::PTDPBUSD:
36547 case X86::PTDPBUUD:
36548 case X86::PTDPBF16PS: {
    unsigned Opc;
    switch (MI.getOpcode()) {
36551 default: llvm_unreachable("illegal opcode!");
36552 case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
36553 case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
36554 case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
36555 case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
    case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
    }
36559 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
36560 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
36561 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
36562 MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
36563 MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
36568 case X86::PTILEZERO: {
36569 unsigned Imm = MI.getOperand(0).getImm();
36570 BuildMI(*BB, MI, DL, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
36574 case X86::PTILELOADD:
36575 case X86::PTILELOADDT1:
36576 case X86::PTILESTORED: {
    unsigned Opc;
    switch (MI.getOpcode()) {
36579 default: llvm_unreachable("illegal opcode!");
36580 case X86::PTILELOADD: Opc = X86::TILELOADD; break;
36581 case X86::PTILELOADDT1: Opc = X86::TILELOADDT1; break;
    case X86::PTILESTORED: Opc = X86::TILESTORED; break;
    }
36585 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
36586 unsigned CurOp = 0;
36587 if (Opc != X86::TILESTORED)
      MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
                 RegState::Define);
36591 MIB.add(MI.getOperand(CurOp++)); // base
36592 MIB.add(MI.getOperand(CurOp++)); // scale
36593 MIB.add(MI.getOperand(CurOp++)); // index -- stride
36594 MIB.add(MI.getOperand(CurOp++)); // displacement
36595 MIB.add(MI.getOperand(CurOp++)); // segment
36597 if (Opc == X86::TILESTORED)
      MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
                 RegState::Undef);

    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
  }
}
36607 //===----------------------------------------------------------------------===//
36608 // X86 Optimization Hooks
36609 //===----------------------------------------------------------------------===//
bool X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
36613 const APInt &DemandedBits,
36614 const APInt &DemandedElts,
36615 TargetLoweringOpt &TLO) const {
36616 EVT VT = Op.getValueType();
36617 unsigned Opcode = Op.getOpcode();
36618 unsigned EltSize = VT.getScalarSizeInBits();
36620 if (VT.isVector()) {
36621 // If the constant is only all signbits in the active bits, then we should
    // extend it to the entire constant to allow it to act as a boolean
    // constant vector.
36624 auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
      if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
        return false;
36627 for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
        if (!DemandedElts[i] || V.getOperand(i).isUndef())
          continue;
36630 const APInt &Val = V.getConstantOperandAPInt(i);
36631 if (Val.getBitWidth() > Val.getNumSignBits() &&
            Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
          return true;
      }
      return false;
    };
36637 // For vectors - if we have a constant, then try to sign extend.
36638 // TODO: Handle AND/ANDN cases.
36639 unsigned ActiveBits = DemandedBits.getActiveBits();
36640 if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
36641 (Opcode == ISD::OR || Opcode == ISD::XOR) &&
36642 NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
36643 EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
36644 EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
36645 VT.getVectorNumElements());
      SDValue NewC =
          TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
36648 Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
      SDValue NewOp =
          TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }
    return false;
  }
36656 // Only optimize Ands to prevent shrinking a constant that could be
36657 // matched by movzx.
  if (Opcode != ISD::AND)
    return false;
36661 // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;
36666 const APInt &Mask = C->getAPIntValue();
36668 // Clear all non-demanded bits initially.
36669 APInt ShrunkMask = Mask & DemandedBits;
36671 // Find the width of the shrunk mask.
36672 unsigned Width = ShrunkMask.getActiveBits();
  // If the mask is all 0s there's nothing to do here.
  if (Width == 0)
    return false;
36678 // Find the next power of 2 width, rounding up to a byte.
36679 Width = PowerOf2Ceil(std::max(Width, 8U));
36680 // Truncate the width to size to handle illegal types.
36681 Width = std::min(Width, EltSize);
36683 // Calculate a possible zero extend mask for this constant.
36684 APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
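  // Worked example (illustrative): with EltSize = 32, Mask = 0x00FF00FF and
  // DemandedBits = 0xFF, ShrunkMask = 0xFF, so Width = 8 and
  // ZeroExtendMask = 0xFF - an AND that a MOVZX can match.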
36686 // If we aren't changing the mask, just return true to keep it and prevent
36687 // the caller from optimizing.
  if (ZeroExtendMask == Mask)
    return true;
36691 // Make sure the new mask can be represented by a combination of mask bits
36692 // and non-demanded bits.
  if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
    return false;
  // Replace the constant with the zero extend mask.
  SDLoc DL(Op);
  SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
36699 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
36700 return TLO.CombineTo(Op, NewOp);
36701 }
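// Worked example (illustrative, not part of the original source): for
// (and i32 %x, 0x1FFF) where only the low 8 bits are demanded,
// ShrunkMask == 0xFF, Width rounds up to 8, and the constant is replaced by
// the zero-extend mask 0xFF, which a movzx-style pattern can match instead
// of materializing 0x1FFF.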
36703 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
36704 KnownBits &Known,
36705 const APInt &DemandedElts,
36706 const SelectionDAG &DAG,
36707 unsigned Depth) const {
36708 unsigned BitWidth = Known.getBitWidth();
36709 unsigned NumElts = DemandedElts.getBitWidth();
36710 unsigned Opc = Op.getOpcode();
36711 EVT VT = Op.getValueType();
36712 assert((Opc >= ISD::BUILTIN_OP_END ||
36713 Opc == ISD::INTRINSIC_WO_CHAIN ||
36714 Opc == ISD::INTRINSIC_W_CHAIN ||
36715 Opc == ISD::INTRINSIC_VOID) &&
36716 "Should use MaskedValueIsZero if you don't know whether Op"
36717 " is a target node!");
36719 Known.resetAll();
36720 switch (Opc) {
36721 default: break;
36722 case X86ISD::SETCC:
36723 Known.Zero.setBitsFrom(1);
36724 break;
36725 case X86ISD::MOVMSK: {
36726 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
36727 Known.Zero.setBitsFrom(NumLoBits);
36728 break;
36729 }
36730 case X86ISD::PEXTRB:
36731 case X86ISD::PEXTRW: {
36732 SDValue Src = Op.getOperand(0);
36733 EVT SrcVT = Src.getValueType();
36734 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
36735 Op.getConstantOperandVal(1));
36736 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
36737 Known = Known.anyextOrTrunc(BitWidth);
36738 Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
36739 break;
36740 }
36741 case X86ISD::VSRAI:
36742 case X86ISD::VSHLI:
36743 case X86ISD::VSRLI: {
36744 unsigned ShAmt = Op.getConstantOperandVal(1);
36745 if (ShAmt >= VT.getScalarSizeInBits()) {
36746 // Out of range logical bit shifts are guaranteed to be zero.
36747 // Out of range arithmetic bit shifts splat the sign bit.
36748 if (Opc != X86ISD::VSRAI) {
36749 Known.setAllZero();
36750 break;
36751 }
36753 ShAmt = VT.getScalarSizeInBits() - 1;
36754 }
36756 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36757 if (Opc == X86ISD::VSHLI) {
36758 Known.Zero <<= ShAmt;
36759 Known.One <<= ShAmt;
36760 // Low bits are known zero.
36761 Known.Zero.setLowBits(ShAmt);
36762 } else if (Opc == X86ISD::VSRLI) {
36763 Known.Zero.lshrInPlace(ShAmt);
36764 Known.One.lshrInPlace(ShAmt);
36765 // High bits are known zero.
36766 Known.Zero.setHighBits(ShAmt);
36767 } else {
36768 Known.Zero.ashrInPlace(ShAmt);
36769 Known.One.ashrInPlace(ShAmt);
36770 }
36771 break;
36772 }
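// Worked example (illustrative, not part of the original source):
// (X86ISD::VSRLI v4i32 %x, 40) shifts by more than the 32-bit element width,
// so every lane is known zero, whereas (X86ISD::VSRAI v4i32 %x, 40) is
// clamped to 31 and every result bit equals the sign bit of its source lane.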
36773 case X86ISD::PACKUS: {
36774 // PACKUS is just a truncation if the upper half is zero.
36775 APInt DemandedLHS, DemandedRHS;
36776 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
36778 Known.One = APInt::getAllOnes(BitWidth * 2);
36779 Known.Zero = APInt::getAllOnes(BitWidth * 2);
36781 KnownBits Known2;
36782 if (!!DemandedLHS) {
36783 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
36784 Known = KnownBits::commonBits(Known, Known2);
36785 }
36786 if (!!DemandedRHS) {
36787 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
36788 Known = KnownBits::commonBits(Known, Known2);
36789 }
36791 if (Known.countMinLeadingZeros() < BitWidth)
36792 Known.resetAll();
36793 Known = Known.trunc(BitWidth);
36794 break;
36795 }
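// Worked example (illustrative, not part of the original source):
// (X86ISD::PACKUS v8i16 %a, v8i16 %b) behaves as a v16i8 truncation only
// when bits [15:8] of every demanded i16 lane are known zero; otherwise the
// unsigned saturation may clamp values, so the known bits are conservatively
// reset before truncating to 8 bits.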
36796 case X86ISD::VBROADCAST: {
36797 SDValue Src = Op.getOperand(0);
36798 if (!Src.getSimpleValueType().isVector()) {
36799 Known = DAG.computeKnownBits(Src, Depth + 1);
36800 return;
36801 }
36802 break;
36803 }
36804 case X86ISD::AND: {
36805 if (Op.getResNo() == 0) {
36806 KnownBits Known2;
36807 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36808 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36809 Known &= Known2;
36810 }
36811 break;
36812 }
36813 case X86ISD::ANDNP: {
36814 KnownBits Known2;
36815 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36816 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36818 // ANDNP = (~X & Y);
36819 Known.One &= Known2.Zero;
36820 Known.Zero |= Known2.One;
36821 break;
36822 }
36823 case X86ISD::FOR: {
36824 KnownBits Known2;
36825 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36826 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36827 Known |= Known2;
36828 break;
36829 }
36831 case X86ISD::PSADBW: {
36832 assert(VT.getScalarType() == MVT::i64 &&
36833 Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
36834 "Unexpected PSADBW types");
36836 // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
36837 Known.Zero.setBitsFrom(16);
36838 break;
36839 }
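// Worked example (illustrative, not part of the original source): each i64
// lane of PSADBW sums eight absolute byte differences, so its maximum value
// is 8 * 255 = 2040 < 2^16, which is why bits [63:16] of every result lane
// are known zero.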
36840 case X86ISD::PMULUDQ: {
36841 KnownBits Known2;
36842 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36843 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36845 Known = Known.trunc(BitWidth / 2).zext(BitWidth);
36846 Known2 = Known2.trunc(BitWidth / 2).zext(BitWidth);
36847 Known = KnownBits::mul(Known, Known2);
36848 break;
36849 }
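// Worked example (illustrative, not part of the original source): PMULUDQ
// multiplies the even 32-bit lanes as unsigned values into 64-bit lanes,
// e.g. <3, X, 5, X> * <4, X, 7, X> gives <12, 35>; the trunc+zext pair above
// models the implicit zero-extension of each 32-bit input before the
// 64-bit multiply.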
36850 case X86ISD::CMOV: {
36851 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
36852 // If we don't know any bits, early out.
36853 if (Known.isUnknown())
36854 break;
36855 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
36857 // Only known if known in both the LHS and RHS.
36858 Known = KnownBits::commonBits(Known, Known2);
36859 break;
36860 }
36861 case X86ISD::BEXTR:
36862 case X86ISD::BEXTRI: {
36863 SDValue Op0 = Op.getOperand(0);
36864 SDValue Op1 = Op.getOperand(1);
36866 if (auto* Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
36867 unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
36868 unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
36870 // If the length is 0, the result is 0.
36871 if (Length == 0) {
36872 Known.setAllZero();
36873 break;
36874 }
36876 if ((Shift + Length) <= BitWidth) {
36877 Known = DAG.computeKnownBits(Op0, Depth + 1);
36878 Known = Known.extractBits(Length, Shift);
36879 Known = Known.zextOrTrunc(BitWidth);
36880 }
36881 }
36882 break;
36883 }
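// Worked example (illustrative, not part of the original source): a BEXTR
// control of 0x0504 encodes Shift = 4 (bits 7:0) and Length = 5 (bits 15:8),
// so the result is (Src >> 4) & 0x1F and the known bits of Src[8:4] carry
// over into result bits [4:0].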
36884 case X86ISD::PDEP: {
36885 KnownBits Known2;
36886 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36887 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36888 // Zeros are retained from the mask operand. But not ones.
36889 Known.One.clearAllBits();
36890 // The result will have at least as many trailing zeros as the non-mask
36891 // operand since bits can only map to the same or higher bit position.
36892 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
36893 break;
36894 }
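// Worked example (illustrative, not part of the original source):
// PDEP(%src, 0xFF00) deposits the low source bits into bit positions 8..15,
// so the zeros of the mask stay zero in the result, and if %src has N
// trailing zeros the result does too, since source bits only move to the
// same or higher positions.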
36895 case X86ISD::PEXT: {
36896 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36897 // The result has as many leading zeros as the number of zeroes in the mask.
36898 unsigned Count = Known.Zero.countPopulation();
36899 Known.Zero = APInt::getHighBitsSet(BitWidth, Count);
36900 Known.One.clearAllBits();
36901 break;
36902 }
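// Worked example (illustrative, not part of the original source): PEXT with
// a 32-bit mask known to be 0x000000F0 packs at most 4 bits into the low end
// of the result; the mask has 28 known-zero bits, so the high 28 bits of the
// result are known zero.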
36903 case X86ISD::VTRUNC:
36904 case X86ISD::VTRUNCS:
36905 case X86ISD::VTRUNCUS:
36906 case X86ISD::CVTSI2P:
36907 case X86ISD::CVTUI2P:
36908 case X86ISD::CVTP2SI:
36909 case X86ISD::CVTP2UI:
36910 case X86ISD::MCVTP2SI:
36911 case X86ISD::MCVTP2UI:
36912 case X86ISD::CVTTP2SI:
36913 case X86ISD::CVTTP2UI:
36914 case X86ISD::MCVTTP2SI:
36915 case X86ISD::MCVTTP2UI:
36916 case X86ISD::MCVTSI2P:
36917 case X86ISD::MCVTUI2P:
36918 case X86ISD::VFPROUND:
36919 case X86ISD::VMFPROUND:
36920 case X86ISD::CVTPS2PH:
36921 case X86ISD::MCVTPS2PH: {
36922 // Truncations/Conversions - upper elements are known zero.
36923 EVT SrcVT = Op.getOperand(0).getValueType();
36924 if (SrcVT.isVector()) {
36925 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36926 if (NumElts > NumSrcElts &&
36927 DemandedElts.countTrailingZeros() >= NumSrcElts)
36928 Known.setAllZero();
36929 }
36930 break;
36931 }
36932 case X86ISD::STRICT_CVTTP2SI:
36933 case X86ISD::STRICT_CVTTP2UI:
36934 case X86ISD::STRICT_CVTSI2P:
36935 case X86ISD::STRICT_CVTUI2P:
36936 case X86ISD::STRICT_VFPROUND:
36937 case X86ISD::STRICT_CVTPS2PH: {
36938 // Strict Conversions - upper elements are known zero.
36939 EVT SrcVT = Op.getOperand(1).getValueType();
36940 if (SrcVT.isVector()) {
36941 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36942 if (NumElts > NumSrcElts &&
36943 DemandedElts.countTrailingZeros() >= NumSrcElts)
36944 Known.setAllZero();
36945 }
36946 break;
36947 }
36948 case X86ISD::MOVQ2DQ: {
36949 // Move from MMX to XMM. Upper half of XMM should be 0.
36950 if (DemandedElts.countTrailingZeros() >= (NumElts / 2))
36951 Known.setAllZero();
36952 break;
36953 }
36954 case X86ISD::VBROADCAST_LOAD: {
36955 APInt UndefElts;
36956 SmallVector<APInt, 16> EltBits;
36957 if (getTargetConstantBitsFromNode(Op, BitWidth, UndefElts, EltBits,
36958 /*AllowWholeUndefs*/ false,
36959 /*AllowPartialUndefs*/ false)) {
36960 Known.Zero.setAllBits();
36961 Known.One.setAllBits();
36962 for (unsigned I = 0; I != NumElts; ++I) {
36963 if (!DemandedElts[I])
36964 continue;
36965 if (UndefElts[I]) {
36966 Known.resetAll();
36967 break;
36968 }
36969 KnownBits Known2 = KnownBits::makeConstant(EltBits[I]);
36970 Known = KnownBits::commonBits(Known, Known2);
36971 }
36972 return;
36973 }
36974 break;
36975 }
36976 }
36978 // Handle target shuffles.
36979 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
36980 if (isTargetShuffle(Opc)) {
36981 SmallVector<int, 64> Mask;
36982 SmallVector<SDValue, 2> Ops;
36983 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
36984 unsigned NumOps = Ops.size();
36985 unsigned NumElts = VT.getVectorNumElements();
36986 if (Mask.size() == NumElts) {
36987 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
36988 Known.Zero.setAllBits(); Known.One.setAllBits();
36989 for (unsigned i = 0; i != NumElts; ++i) {
36990 if (!DemandedElts[i])
36991 continue;
36992 int M = Mask[i];
36993 if (M == SM_SentinelUndef) {
36994 // For UNDEF elements, we don't know anything about the common state
36995 // of the shuffle result.
36996 Known.resetAll();
36997 break;
36998 }
36999 if (M == SM_SentinelZero) {
37000 Known.One.clearAllBits();
37001 continue;
37002 }
37003 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
37004 "Shuffle index out of range");
37006 unsigned OpIdx = (unsigned)M / NumElts;
37007 unsigned EltIdx = (unsigned)M % NumElts;
37008 if (Ops[OpIdx].getValueType() != VT) {
37009 // TODO - handle target shuffle ops with different value types.
37010 Known.resetAll();
37011 break;
37012 }
37013 DemandedOps[OpIdx].setBit(EltIdx);
37014 }
37015 // Known bits are the values that are shared by every demanded element.
37016 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
37017 if (!DemandedOps[i])
37018 continue;
37019 KnownBits Known2 =
37020 DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
37021 Known = KnownBits::commonBits(Known, Known2);
37022 }
37023 }
37024 }
37025 }
37026 }
37028 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
37029 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
37030 unsigned Depth) const {
37031 EVT VT = Op.getValueType();
37032 unsigned VTBits = VT.getScalarSizeInBits();
37033 unsigned Opcode = Op.getOpcode();
37034 switch (Opcode) {
37035 case X86ISD::SETCC_CARRY:
37036 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
37037 return VTBits;
37039 case X86ISD::VTRUNC: {
37040 SDValue Src = Op.getOperand(0);
37041 MVT SrcVT = Src.getSimpleValueType();
37042 unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
37043 assert(VTBits < NumSrcBits && "Illegal truncation input type");
37044 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
37045 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
37046 if (Tmp > (NumSrcBits - VTBits))
37047 return Tmp - (NumSrcBits - VTBits);
37048 return 1;
37049 }
37051 case X86ISD::PACKSS: {
37052 // PACKSS is just a truncation if the sign bits extend to the packed size.
37053 APInt DemandedLHS, DemandedRHS;
37054 getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
37055 DemandedRHS);
37057 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
37058 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
37059 if (!!DemandedLHS)
37060 Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
37061 if (!!DemandedRHS)
37062 Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
37063 unsigned Tmp = std::min(Tmp0, Tmp1);
37064 if (Tmp > (SrcBits - VTBits))
37065 return Tmp - (SrcBits - VTBits);
37066 return 1;
37067 }
37069 case X86ISD::VBROADCAST: {
37070 SDValue Src = Op.getOperand(0);
37071 if (!Src.getSimpleValueType().isVector())
37072 return DAG.ComputeNumSignBits(Src, Depth + 1);
37073 break;
37074 }
37076 case X86ISD::VSHLI: {
37077 SDValue Src = Op.getOperand(0);
37078 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
37079 if (ShiftVal.uge(VTBits))
37080 return VTBits; // Shifted all bits out --> zero.
37081 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
37082 if (ShiftVal.uge(Tmp))
37083 return 1; // Shifted all sign bits out --> unknown.
37084 return Tmp - ShiftVal.getZExtValue();
37085 }
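// Worked example (illustrative, not part of the original source): if
// %x:v8i16 has 9 known sign bits per lane, (X86ISD::VSHLI %x, 4) still has
// 9 - 4 = 5, while shifting by 9 or more would shift out every copy of the
// sign bit and leave only the default answer of 1.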
37087 case X86ISD::VSRAI: {
37088 SDValue Src = Op.getOperand(0);
37089 APInt ShiftVal = Op.getConstantOperandAPInt(1);
37090 if (ShiftVal.uge(VTBits - 1))
37091 return VTBits; // Sign splat.
37092 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
37093 ShiftVal += Tmp;
37094 return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
37095 }
37097 case X86ISD::FSETCC:
37098 // cmpss/cmpsd return zero/all-bits result values in the bottom element.
37099 if (VT == MVT::f32 || VT == MVT::f64 ||
37100 ((VT == MVT::v4f32 || VT == MVT::v2f64) && DemandedElts == 1))
37101 return VTBits;
37102 break;
37104 case X86ISD::PCMPGT:
37105 case X86ISD::PCMPEQ:
37106 case X86ISD::CMPP:
37107 case X86ISD::VPCOM:
37108 case X86ISD::VPCOMU:
37109 // Vector compares return zero/all-bits result values.
37110 return VTBits;
37112 case X86ISD::ANDNP: {
37113 unsigned Tmp0 =
37114 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
37115 if (Tmp0 == 1) return 1; // Early out.
37116 unsigned Tmp1 =
37117 DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
37118 return std::min(Tmp0, Tmp1);
37119 }
37121 case X86ISD::CMOV: {
37122 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
37123 if (Tmp0 == 1) return 1; // Early out.
37124 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
37125 return std::min(Tmp0, Tmp1);
37126 }
37127 }
37129 // Handle target shuffles.
37130 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
37131 if (isTargetShuffle(Opcode)) {
37132 SmallVector<int, 64> Mask;
37133 SmallVector<SDValue, 2> Ops;
37134 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
37135 unsigned NumOps = Ops.size();
37136 unsigned NumElts = VT.getVectorNumElements();
37137 if (Mask.size() == NumElts) {
37138 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
37139 for (unsigned i = 0; i != NumElts; ++i) {
37140 if (!DemandedElts[i])
37141 continue;
37142 int M = Mask[i];
37143 if (M == SM_SentinelUndef) {
37144 // For UNDEF elements, we don't know anything about the common state
37145 // of the shuffle result.
37146 return 1;
37147 } else if (M == SM_SentinelZero) {
37148 // Zero = all sign bits.
37149 continue;
37150 }
37151 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
37152 "Shuffle index out of range");
37154 unsigned OpIdx = (unsigned)M / NumElts;
37155 unsigned EltIdx = (unsigned)M % NumElts;
37156 if (Ops[OpIdx].getValueType() != VT) {
37157 // TODO - handle target shuffle ops with different value types.
37158 return 1;
37159 }
37160 DemandedOps[OpIdx].setBit(EltIdx);
37161 }
37162 unsigned Tmp0 = VTBits;
37163 for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
37164 if (!DemandedOps[i])
37165 continue;
37166 unsigned Tmp1 =
37167 DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
37168 Tmp0 = std::min(Tmp0, Tmp1);
37169 }
37170 return Tmp0;
37171 }
37172 }
37173 }
37175 // Fallback case.
37176 return 1;
37177 }
37179 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
37180 if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
37181 return N->getOperand(0);
37182 return N;
37183 }
37185 // Helper to look for a normal load that can be narrowed into a vzload with the
37186 // specified VT and memory VT. Returns SDValue() on failure.
37187 static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
37188 SelectionDAG &DAG) {
37189 // Can't if the load is volatile or atomic.
37190 if (!LN->isSimple())
37191 return SDValue();
37193 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
37194 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
37195 return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
37196 LN->getPointerInfo(), LN->getOriginalAlign(),
37197 LN->getMemOperand()->getFlags());
37198 }
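// Illustrative use (hypothetical types, not from the original source):
// narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG) turns a wider simple
// load whose value is only needed in the low i64 lane into a VZEXT_LOAD that
// loads 64 bits and zeros the upper lane, reusing the original chain,
// pointer info and alignment.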
37200 // Attempt to match a combined shuffle mask against supported unary shuffle
37201 // instructions.
37202 // TODO: Investigate sharing more of this with shuffle lowering.
37203 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
37204 bool AllowFloatDomain, bool AllowIntDomain,
37205 SDValue V1, const SelectionDAG &DAG,
37206 const X86Subtarget &Subtarget, unsigned &Shuffle,
37207 MVT &SrcVT, MVT &DstVT) {
37208 unsigned NumMaskElts = Mask.size();
37209 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
37211 // Match against a VZEXT_MOVL vXi32 and vXi16 zero-extending instruction.
37212 if (Mask[0] == 0 &&
37213 (MaskEltSize == 32 || (MaskEltSize == 16 && Subtarget.hasFP16()))) {
37214 if ((isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) ||
37215 (V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
37216 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1))) {
37217 Shuffle = X86ISD::VZEXT_MOVL;
37218 if (MaskEltSize == 16)
37219 SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
37220 else
37221 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
37222 return true;
37223 }
37224 }
37226 // Match against a ANY/ZERO_EXTEND_VECTOR_INREG instruction.
37227 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
37228 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
37229 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
37230 unsigned MaxScale = 64 / MaskEltSize;
37231 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
37232 bool MatchAny = true;
37233 bool MatchZero = true;
37234 unsigned NumDstElts = NumMaskElts / Scale;
37235 for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
37236 if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
37237 MatchAny = MatchZero = false;
37238 break;
37239 }
37240 MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
37241 MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
37242 }
37243 if (MatchAny || MatchZero) {
37244 assert(MatchZero && "Failed to match zext but matched aext?");
37245 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
37246 MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
37247 MVT::getIntegerVT(MaskEltSize);
37248 SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
37250 Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
37251 if (SrcVT.getVectorNumElements() != NumDstElts)
37252 Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
37254 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
37255 DstVT = MVT::getVectorVT(DstVT, NumDstElts);
37256 return true;
37257 }
37258 }
37259 }
37261 // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
37262 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2()) ||
37263 (MaskEltSize == 16 && Subtarget.hasFP16())) &&
37264 isUndefOrEqual(Mask[0], 0) &&
37265 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
37266 Shuffle = X86ISD::VZEXT_MOVL;
37267 if (MaskEltSize == 16)
37268 SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
37269 else
37270 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
37271 return true;
37272 }
37274 // Check if we have SSE3 which will let us use MOVDDUP etc. The
37275 // instructions are no slower than UNPCKLPD but have the option to
37276 // fold the input operand into even an unaligned memory load.
37277 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
37278 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG, V1)) {
37279 Shuffle = X86ISD::MOVDDUP;
37280 SrcVT = DstVT = MVT::v2f64;
37281 return true;
37282 }
37283 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
37284 Shuffle = X86ISD::MOVSLDUP;
37285 SrcVT = DstVT = MVT::v4f32;
37286 return true;
37287 }
37288 if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, DAG, V1)) {
37289 Shuffle = X86ISD::MOVSHDUP;
37290 SrcVT = DstVT = MVT::v4f32;
37291 return true;
37292 }
37293 }
37295 if (MaskVT.is256BitVector() && AllowFloatDomain) {
37296 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
37297 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
37298 Shuffle = X86ISD::MOVDDUP;
37299 SrcVT = DstVT = MVT::v4f64;
37300 return true;
37301 }
37302 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
37303 V1)) {
37304 Shuffle = X86ISD::MOVSLDUP;
37305 SrcVT = DstVT = MVT::v8f32;
37306 return true;
37307 }
37308 if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, DAG,
37309 V1)) {
37310 Shuffle = X86ISD::MOVSHDUP;
37311 SrcVT = DstVT = MVT::v8f32;
37312 return true;
37313 }
37314 }
37316 if (MaskVT.is512BitVector() && AllowFloatDomain) {
37317 assert(Subtarget.hasAVX512() &&
37318 "AVX512 required for 512-bit vector shuffles");
37319 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
37320 V1)) {
37321 Shuffle = X86ISD::MOVDDUP;
37322 SrcVT = DstVT = MVT::v8f64;
37323 return true;
37324 }
37325 if (isTargetShuffleEquivalent(
37326 MaskVT, Mask,
37327 {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, DAG, V1)) {
37328 Shuffle = X86ISD::MOVSLDUP;
37329 SrcVT = DstVT = MVT::v16f32;
37330 return true;
37331 }
37332 if (isTargetShuffleEquivalent(
37333 MaskVT, Mask,
37334 {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, DAG, V1)) {
37335 Shuffle = X86ISD::MOVSHDUP;
37336 SrcVT = DstVT = MVT::v16f32;
37337 return true;
37338 }
37339 }
37341 return false;
37342 }
37344 // Attempt to match a combined shuffle mask against supported unary immediate
37345 // permute instructions.
37346 // TODO: Investigate sharing more of this with shuffle lowering.
37347 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
37348 const APInt &Zeroable,
37349 bool AllowFloatDomain, bool AllowIntDomain,
37350 const SelectionDAG &DAG,
37351 const X86Subtarget &Subtarget,
37352 unsigned &Shuffle, MVT &ShuffleVT,
37353 unsigned &PermuteImm) {
37354 unsigned NumMaskElts = Mask.size();
37355 unsigned InputSizeInBits = MaskVT.getSizeInBits();
37356 unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
37357 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
37358 bool ContainsZeros = isAnyZero(Mask);
37360 // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
37361 if (!ContainsZeros && MaskScalarSizeInBits == 64) {
37362 // Check for lane crossing permutes.
37363 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
37364 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
37365 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
37366 Shuffle = X86ISD::VPERMI;
37367 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
37368 PermuteImm = getV4X86ShuffleImm(Mask);
37369 return true;
37370 }
37371 if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
37372 SmallVector<int, 4> RepeatedMask;
37373 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
37374 Shuffle = X86ISD::VPERMI;
37375 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
37376 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
37377 return true;
37378 }
37379 }
37380 } else if (AllowFloatDomain && Subtarget.hasAVX()) {
37381 // VPERMILPD can permute with a non-repeating shuffle.
37382 Shuffle = X86ISD::VPERMILPI;
37383 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
37384 PermuteImm = 0;
37385 for (int i = 0, e = Mask.size(); i != e; ++i) {
37386 int M = Mask[i];
37387 if (M == SM_SentinelUndef)
37388 continue;
37389 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
37390 PermuteImm |= (M & 1) << i;
37391 }
37392 return true;
37393 }
37394 }
37396 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
37397 // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
37398 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
37399 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
37400 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
37401 SmallVector<int, 4> RepeatedMask;
37402 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
37403 // Narrow the repeated mask to create 32-bit element permutes.
37404 SmallVector<int, 4> WordMask = RepeatedMask;
37405 if (MaskScalarSizeInBits == 64)
37406 narrowShuffleMaskElts(2, RepeatedMask, WordMask);
37408 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
37409 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
37410 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
37411 PermuteImm = getV4X86ShuffleImm(WordMask);
37412 return true;
37413 }
37414 }
37416 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
37417 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16 &&
37418 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37419 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37420 (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
37421 SmallVector<int, 4> RepeatedMask;
37422 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
37423 ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
37424 ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
37426 // PSHUFLW: permute lower 4 elements only.
37427 if (isUndefOrInRange(LoMask, 0, 4) &&
37428 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
37429 Shuffle = X86ISD::PSHUFLW;
37430 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
37431 PermuteImm = getV4X86ShuffleImm(LoMask);
37432 return true;
37433 }
37435 // PSHUFHW: permute upper 4 elements only.
37436 if (isUndefOrInRange(HiMask, 4, 8) &&
37437 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
37438 // Offset the HiMask so that we can create the shuffle immediate.
37439 int OffsetHiMask[4];
37440 for (int i = 0; i != 4; ++i)
37441 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
37443 Shuffle = X86ISD::PSHUFHW;
37444 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
37445 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
37446 return true;
37447 }
37448 }
37449 }
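// Worked example (illustrative, not part of the original source): the
// repeated v8i16 mask <0,1,2,3,7,4,5,6> leaves the low half sequential, so
// it matches PSHUFHW with OffsetHiMask = {3,0,1,2} and
// PermuteImm = getV4X86ShuffleImm({3,0,1,2}) = 0x93.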
37451 // Attempt to match against byte/bit shifts.
37452 if (AllowIntDomain &&
37453 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37454 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37455 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37456 int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
37457 Mask, 0, Zeroable, Subtarget);
37458 if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
37459 32 <= ShuffleVT.getScalarSizeInBits())) {
37460 PermuteImm = (unsigned)ShiftAmt;
37461 return true;
37462 }
37463 }
37465 // Attempt to match against bit rotates.
37466 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
37467 ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
37468 Subtarget.hasAVX512())) {
37469 int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
37470 Subtarget, Mask);
37471 if (0 < RotateAmt) {
37472 Shuffle = X86ISD::VROTLI;
37473 PermuteImm = (unsigned)RotateAmt;
37474 return true;
37475 }
37476 }
37478 return false;
37479 }
37481 // Attempt to match a combined unary shuffle mask against supported binary
37482 // shuffle instructions.
37483 // TODO: Investigate sharing more of this with shuffle lowering.
37484 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
37485 bool AllowFloatDomain, bool AllowIntDomain,
37486 SDValue &V1, SDValue &V2, const SDLoc &DL,
37487 SelectionDAG &DAG, const X86Subtarget &Subtarget,
37488 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
37489 bool IsUnary) {
37490 unsigned NumMaskElts = Mask.size();
37491 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
37492 unsigned SizeInBits = MaskVT.getSizeInBits();
37494 if (MaskVT.is128BitVector()) {
37495 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG) &&
37496 AllowFloatDomain) {
37497 V2 = V1;
37498 V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
37499 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
37500 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
37501 return true;
37502 }
37503 if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}, DAG) &&
37504 AllowFloatDomain) {
37505 V2 = V1;
37506 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
37507 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
37508 return true;
37509 }
37510 if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}, DAG) &&
37511 Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) {
37513 Shuffle = X86ISD::MOVSD;
37514 SrcVT = DstVT = MVT::v2f64;
37515 return true;
37516 }
37517 if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG) &&
37518 (AllowFloatDomain || !Subtarget.hasSSE41())) {
37519 Shuffle = X86ISD::MOVSS;
37520 SrcVT = DstVT = MVT::v4f32;
37521 return true;
37522 }
37523 if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7},
37524 DAG) &&
37525 Subtarget.hasFP16()) {
37526 Shuffle = X86ISD::MOVSH;
37527 SrcVT = DstVT = MVT::v8f16;
37528 return true;
37529 }
37530 }
37532 // Attempt to match against either an unary or binary PACKSS/PACKUS shuffle.
37533 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
37534 ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
37535 ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
37536 if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
37537 Subtarget)) {
37538 DstVT = MaskVT;
37539 return true;
37540 }
37541 }
37543 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
37544 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
37545 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37546 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
37547 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37548 (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
37549 if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
37550 Subtarget)) {
37551 SrcVT = DstVT = MaskVT;
37552 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
37553 SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
37554 return true;
37555 }
37556 }
37558 // Attempt to match against a OR if we're performing a blend shuffle and the
37559 // non-blended source element is zero in each case.
37560 // TODO: Handle cases where V1/V2 sizes doesn't match SizeInBits.
37561 if (SizeInBits == V1.getValueSizeInBits() &&
37562 SizeInBits == V2.getValueSizeInBits() &&
37563 (EltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
37564 (EltSizeInBits % V2.getScalarValueSizeInBits()) == 0) {
37565 bool IsBlend = true;
37566 unsigned NumV1Elts = V1.getValueType().getVectorNumElements();
37567 unsigned NumV2Elts = V2.getValueType().getVectorNumElements();
37568 unsigned Scale1 = NumV1Elts / NumMaskElts;
37569 unsigned Scale2 = NumV2Elts / NumMaskElts;
37570 APInt DemandedZeroV1 = APInt::getZero(NumV1Elts);
37571 APInt DemandedZeroV2 = APInt::getZero(NumV2Elts);
37572 for (unsigned i = 0; i != NumMaskElts; ++i) {
37573 int M = Mask[i];
37574 if (M == SM_SentinelUndef)
37575 continue;
37576 if (M == SM_SentinelZero) {
37577 DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
37578 DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
37579 continue;
37580 }
37581 if (M == (int)i) {
37582 DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
37583 continue;
37584 }
37585 if (M == (int)(i + NumMaskElts)) {
37586 DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
37587 continue;
37588 }
37589 IsBlend = false;
37590 break;
37591 }
37592 if (IsBlend) {
37593 if (DAG.MaskedVectorIsZero(V1, DemandedZeroV1) &&
37594 DAG.MaskedVectorIsZero(V2, DemandedZeroV2)) {
37595 Shuffle = ISD::OR;
37596 SrcVT = DstVT = MaskVT.changeTypeToInteger();
37597 return true;
37598 }
37599 if (NumV1Elts == NumV2Elts && NumV1Elts == NumMaskElts) {
37600 // FIXME: handle mismatched sizes?
37601 // TODO: investigate if `ISD::OR` handling in
37602 // `TargetLowering::SimplifyDemandedVectorElts` can be improved instead.
37603 auto computeKnownBitsElementWise = [&DAG](SDValue V) {
37604 unsigned NumElts = V.getValueType().getVectorNumElements();
37605 KnownBits Known(NumElts);
37606 for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
37607 APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
37608 KnownBits PeepholeKnown = DAG.computeKnownBits(V, Mask);
37609 if (PeepholeKnown.isZero())
37610 Known.Zero.setBit(EltIdx);
37611 if (PeepholeKnown.isAllOnes())
37612 Known.One.setBit(EltIdx);
37613 }
37614 return Known;
37615 };
37617 KnownBits V1Known = computeKnownBitsElementWise(V1);
37618 KnownBits V2Known = computeKnownBitsElementWise(V2);
37620 for (unsigned i = 0; i != NumMaskElts && IsBlend; ++i) {
37621 int M = Mask[i];
37622 if (M == SM_SentinelUndef)
37623 continue;
37624 if (M == SM_SentinelZero) {
37625 IsBlend &= V1Known.Zero[i] && V2Known.Zero[i];
37626 continue;
37627 }
37628 if (M == (int)i) {
37629 IsBlend &= V2Known.Zero[i] || V1Known.One[i];
37630 continue;
37631 }
37632 if (M == (int)(i + NumMaskElts)) {
37633 IsBlend &= V1Known.Zero[i] || V2Known.One[i];
37634 continue;
37635 }
37636 llvm_unreachable("will not get here.");
37637 }
37638 if (IsBlend) {
37639 Shuffle = ISD::OR;
37640 SrcVT = DstVT = MaskVT.changeTypeToInteger();
37641 return true;
37642 }
37643 }
37644 }
37646 return false;
37647 }
37650 static bool matchBinaryPermuteShuffle(
37651 MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
37652 bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
37653 const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
37654 unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
37655 unsigned NumMaskElts = Mask.size();
37656 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
37658 // Attempt to match against VALIGND/VALIGNQ rotate.
37659 if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
37660 ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
37661 (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
37662 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37663 if (!isAnyZero(Mask)) {
37664 int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
37665 if (0 < Rotation) {
37666 Shuffle = X86ISD::VALIGN;
37667 if (EltSizeInBits == 64)
37668 ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
37670 ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
37671 PermuteImm = Rotation;
37672 return true;
37673 }
37674 }
37675 }
37677 // Attempt to match against PALIGNR byte rotate.
37678 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
37679 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37680 (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
37681 int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
37682 if (0 < ByteRotation) {
37683 Shuffle = X86ISD::PALIGNR;
37684 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
37685 PermuteImm = ByteRotation;
37686 return true;
37687 }
37688 }
37690 // Attempt to combine to X86ISD::BLENDI.
37691 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
37692 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
37693 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
37694 uint64_t BlendMask = 0;
37695 bool ForceV1Zero = false, ForceV2Zero = false;
37696 SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
37697 if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
37698 ForceV2Zero, BlendMask)) {
37699 if (MaskVT == MVT::v16i16) {
37700 // We can only use v16i16 PBLENDW if the lanes are repeated.
37701 SmallVector<int, 8> RepeatedMask;
37702 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
37703 RepeatedMask)) {
37704 assert(RepeatedMask.size() == 8 &&
37705 "Repeated mask size doesn't match!");
37706 PermuteImm = 0;
37707 for (int i = 0; i < 8; ++i)
37708 if (RepeatedMask[i] >= 8)
37709 PermuteImm |= 1 << i;
37710 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37711 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37712 Shuffle = X86ISD::BLENDI;
37713 ShuffleVT = MaskVT;
37714 return true;
37715 }
37716 } else {
37717 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37718 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37719 PermuteImm = (unsigned)BlendMask;
37720 Shuffle = X86ISD::BLENDI;
37721 ShuffleVT = MaskVT;
37722 return true;
37723 }
37724 }
37725 }
37727 // Attempt to combine to INSERTPS, but only if it has elements that need to
37728 // be set to zero.
37729 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
37730 MaskVT.is128BitVector() && isAnyZero(Mask) &&
37731 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
37732 Shuffle = X86ISD::INSERTPS;
37733 ShuffleVT = MVT::v4f32;
37734 return true;
37735 }
37737 // Attempt to combine to SHUFPD.
37738 if (AllowFloatDomain && EltSizeInBits == 64 &&
37739 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37740 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
37741 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37742 bool ForceV1Zero = false, ForceV2Zero = false;
37743 if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
37744 PermuteImm, Mask, Zeroable)) {
37745 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37746 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37747 Shuffle = X86ISD::SHUFP;
37748 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
37749 return true;
37750 }
37751 }
37753 // Attempt to combine to SHUFPS.
37754 if (AllowFloatDomain && EltSizeInBits == 32 &&
37755 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
37756 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
37757 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37758 SmallVector<int, 4> RepeatedMask;
37759 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
37760 // Match each half of the repeated mask, to determine if its just
37761 // referencing one of the vectors, is zeroable or entirely undef.
37762 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
37763 int M0 = RepeatedMask[Offset];
37764 int M1 = RepeatedMask[Offset + 1];
37766 if (isUndefInRange(RepeatedMask, Offset, 2)) {
37767 return DAG.getUNDEF(MaskVT);
37768 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
37769 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
37770 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
37771 return getZeroVector(MaskVT, Subtarget, DAG, DL);
37772 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
37773 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
37774 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
37775 return V1;
37776 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
37777 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
37778 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
37779 return V2;
37780 }
37782 return SDValue();
37783 };
37785 int ShufMask[4] = {-1, -1, -1, -1};
37786 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
37787 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
37789 if (Lo && Hi) {
37790 V1 = Lo;
37791 V2 = Hi;
37792 Shuffle = X86ISD::SHUFP;
37793 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
37794 PermuteImm = getV4X86ShuffleImm(ShufMask);
37795 return true;
37796 }
37797 }
37798 }
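// Worked example (illustrative, not part of the original source): the v4f32
// mask <0,1,4,5> takes its low half from V1 and its high half from V2, so
// MatchHalf yields ShufMask = {0,1,0,1} and PermuteImm = 0x44.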
37800 // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
37801 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
37802 MaskVT.is128BitVector() &&
37803 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
37804 Shuffle = X86ISD::INSERTPS;
37805 ShuffleVT = MVT::v4f32;
37806 return true;
37807 }
37809 return false;
37810 }
37812 static SDValue combineX86ShuffleChainWithExtract(
37813 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
37814 bool HasVariableMask, bool AllowVariableCrossLaneMask,
37815 bool AllowVariablePerLaneMask, SelectionDAG &DAG,
37816 const X86Subtarget &Subtarget);
37818 /// Combine an arbitrary chain of shuffles into a single instruction if
37819 /// possible.
37820 ///
37821 /// This is the leaf of the recursive combine below. When we have found some
37822 /// chain of single-use x86 shuffle instructions and accumulated the combined
37823 /// shuffle mask represented by them, this will try to pattern match that mask
37824 /// into either a single instruction if there is a special purpose instruction
37825 /// for this operation, or into a PSHUFB instruction which is a fully general
37826 /// instruction but should only be used to replace chains over a certain depth.
37827 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
37828 ArrayRef<int> BaseMask, int Depth,
37829 bool HasVariableMask,
37830 bool AllowVariableCrossLaneMask,
37831 bool AllowVariablePerLaneMask,
37832 SelectionDAG &DAG,
37833 const X86Subtarget &Subtarget) {
37834 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
37835 assert((Inputs.size() == 1 || Inputs.size() == 2) &&
37836 "Unexpected number of shuffle inputs!");
37838 SDLoc DL(Root);
37839 MVT RootVT = Root.getSimpleValueType();
37840 unsigned RootSizeInBits = RootVT.getSizeInBits();
37841 unsigned NumRootElts = RootVT.getVectorNumElements();
37843 // Canonicalize shuffle input op to the requested type.
37844 auto CanonicalizeShuffleInput = [&](MVT VT, SDValue Op) {
37845 if (VT.getSizeInBits() > Op.getValueSizeInBits())
37846 Op = widenSubVector(Op, false, Subtarget, DAG, DL, VT.getSizeInBits());
37847 else if (VT.getSizeInBits() < Op.getValueSizeInBits())
37848 Op = extractSubVector(Op, 0, DAG, DL, VT.getSizeInBits());
37849 return DAG.getBitcast(VT, Op);
37850 };
37852 // Find the inputs that enter the chain. Note that multiple uses are OK
37853 // here, we're not going to remove the operands we find.
37854 bool UnaryShuffle = (Inputs.size() == 1);
37855 SDValue V1 = peekThroughBitcasts(Inputs[0]);
37856 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
37857 : peekThroughBitcasts(Inputs[1]));
37859 MVT VT1 = V1.getSimpleValueType();
37860 MVT VT2 = V2.getSimpleValueType();
37861 assert((RootSizeInBits % VT1.getSizeInBits()) == 0 &&
37862 (RootSizeInBits % VT2.getSizeInBits()) == 0 && "Vector size mismatch");
37864 SDValue Res;
37866 unsigned NumBaseMaskElts = BaseMask.size();
37867 if (NumBaseMaskElts == 1) {
37868 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
37869 return CanonicalizeShuffleInput(RootVT, V1);
37870 }
37872 bool OptForSize = DAG.shouldOptForSize();
37873 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
37874 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
37875 (RootVT.isFloatingPoint() && Depth >= 1) ||
37876 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
37878 // Don't combine if we are a AVX512/EVEX target and the mask element size
37879 // is different from the root element size - this would prevent writemasks
37880 // from being reused.
37881 bool IsMaskedShuffle = false;
37882 if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
37883 if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
37884 Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
37885 IsMaskedShuffle = true;
37886 }
37887 }
37889 // If we are shuffling a splat (and not introducing zeros) then we can just
37890 // use it directly. This works for smaller elements as well as they already
37891 // repeat across each mask element.
37892 if (UnaryShuffle && !isAnyZero(BaseMask) &&
37893 V1.getValueSizeInBits() >= RootSizeInBits &&
37894 (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
37895 DAG.isSplatValue(V1, /*AllowUndefs*/ false)) {
37896 return CanonicalizeShuffleInput(RootVT, V1);
37897 }
37899 SmallVector<int, 64> Mask(BaseMask.begin(), BaseMask.end());
37901 // See if the shuffle is a hidden identity shuffle - repeated args in HOPs
37902 // etc. can be simplified.
37903 if (VT1 == VT2 && VT1.getSizeInBits() == RootSizeInBits && VT1.isVector()) {
37904 SmallVector<int> ScaledMask, IdentityMask;
37905 unsigned NumElts = VT1.getVectorNumElements();
37906 if (Mask.size() <= NumElts &&
37907 scaleShuffleElements(Mask, NumElts, ScaledMask)) {
37908 for (unsigned i = 0; i != NumElts; ++i)
37909 IdentityMask.push_back(i);
37910 if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, DAG, V1,
37911 V2))
37912 return CanonicalizeShuffleInput(RootVT, V1);
37913 }
37914 }
37916 // Handle 128/256-bit lane shuffles of 512-bit vectors.
37917 if (RootVT.is512BitVector() &&
37918 (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
37919 // If the upper subvectors are zeroable, then an extract+insert is more
37920 // optimal than using X86ISD::SHUF128. The insertion is free, even if it has
37921 // to zero the upper subvectors.
37922 if (isUndefOrZeroInRange(Mask, 1, NumBaseMaskElts - 1)) {
37923 if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37924 return SDValue(); // Nothing to do!
37925 assert(isInRange(Mask[0], 0, NumBaseMaskElts) &&
37926 "Unexpected lane shuffle");
37927 Res = CanonicalizeShuffleInput(RootVT, V1);
37928 unsigned SubIdx = Mask[0] * (NumRootElts / NumBaseMaskElts);
37929 bool UseZero = isAnyZero(Mask);
37930 Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
37931 return widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
37932 }
37934 // Narrow shuffle mask to v4x128.
37935 SmallVector<int, 4> ScaledMask;
37936 assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
37937 narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, Mask, ScaledMask);
37939 // Try to lower to vshuf64x2/vshuf32x4.
37940 auto MatchSHUF128 = [&](MVT ShuffleVT, const SDLoc &DL,
37941 ArrayRef<int> ScaledMask, SDValue V1, SDValue V2,
37942 SelectionDAG &DAG) {
37943 unsigned PermMask = 0;
37944 // Insure elements came from the same Op.
37945 SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
37946 for (int i = 0; i < 4; ++i) {
37947 assert(ScaledMask[i] >= -1 && "Illegal shuffle sentinel value");
37948 if (ScaledMask[i] < 0)
37949 continue;
37951 SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
37952 unsigned OpIndex = i / 2;
37953 if (Ops[OpIndex].isUndef())
37954 Ops[OpIndex] = Op;
37955 else if (Ops[OpIndex] != Op)
37956 return SDValue();
37958 // Convert the 128-bit shuffle mask selection values into 128-bit
37959 // selection bits defined by a vshuf64x2 instruction's immediate control
37960 // byte.
37961 PermMask |= (ScaledMask[i] % 4) << (i * 2);
37962 }
37964 return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
37965 CanonicalizeShuffleInput(ShuffleVT, Ops[0]),
37966 CanonicalizeShuffleInput(ShuffleVT, Ops[1]),
37967 DAG.getTargetConstant(PermMask, DL, MVT::i8));
37968 };
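// Worked example (illustrative, not part of the original source):
// ScaledMask <0, 1, 4, 5> takes 128-bit lanes 0-1 from V1 and lanes 0-1 of
// V2, so Ops = {V1, V2} and
// PermMask = (0 << 0) | (1 << 2) | (0 << 4) | (1 << 6) = 0x44.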
37970 // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
37971 // doesn't work because our mask is for 128 bits and we don't have an MVT
37972 // to match that size.
37973 bool PreferPERMQ = UnaryShuffle && isUndefOrInRange(ScaledMask[0], 0, 2) &&
37974 isUndefOrInRange(ScaledMask[1], 0, 2) &&
37975 isUndefOrInRange(ScaledMask[2], 2, 4) &&
37976 isUndefOrInRange(ScaledMask[3], 2, 4) &&
37977 (ScaledMask[0] < 0 || ScaledMask[2] < 0 ||
37978 ScaledMask[0] == (ScaledMask[2] % 2)) &&
37979 (ScaledMask[1] < 0 || ScaledMask[3] < 0 ||
37980 ScaledMask[1] == (ScaledMask[3] % 2));
37982 if (!isAnyZero(ScaledMask) && !PreferPERMQ) {
37983 if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
37984 return SDValue(); // Nothing to do!
37985 MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
37986 if (SDValue V = MatchSHUF128(ShuffleVT, DL, ScaledMask, V1, V2, DAG))
37987 return DAG.getBitcast(RootVT, V);
37988 }
37989 }
37991 // Handle 128-bit lane shuffles of 256-bit vectors.
37992 if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
37993 // If the upper half is zeroable, then an extract+insert is more optimal
37994 // than using X86ISD::VPERM2X128. The insertion is free, even if it has to
37995 // zero the upper half.
37996 if (isUndefOrZero(Mask[1])) {
37997 if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37998 return SDValue(); // Nothing to do!
37999 assert(isInRange(Mask[0], 0, 2) && "Unexpected lane shuffle");
38000 Res = CanonicalizeShuffleInput(RootVT, V1);
38001 Res = extract128BitVector(Res, Mask[0] * (NumRootElts / 2), DAG, DL);
38002 return widenSubVector(Res, Mask[1] == SM_SentinelZero, Subtarget, DAG, DL,
38003 RootSizeInBits);
38004 }
38006 // If we're inserting the low subvector, an insert-subvector 'concat'
38007 // pattern is quicker than VPERM2X128.
38008 // TODO: Add AVX2 support instead of VPERMQ/VPERMPD.
38009 if (BaseMask[0] == 0 && (BaseMask[1] == 0 || BaseMask[1] == 2) &&
38010 !Subtarget.hasAVX2()) {
38011 if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
38012 return SDValue(); // Nothing to do!
38013 SDValue Lo = CanonicalizeShuffleInput(RootVT, V1);
38014 SDValue Hi = CanonicalizeShuffleInput(RootVT, BaseMask[1] == 0 ? V1 : V2);
38015 Hi = extractSubVector(Hi, 0, DAG, DL, 128);
38016 return insertSubVector(Lo, Hi, NumRootElts / 2, DAG, DL, 128);
38017 }
38019 if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
38020 return SDValue(); // Nothing to do!
38022 // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
38023 // we need to use the zeroing feature.
38024 // Prefer blends for sequential shuffles unless we are optimizing for size.
38025 if (UnaryShuffle &&
38026 !(Subtarget.hasAVX2() && isUndefOrInRange(Mask, 0, 2)) &&
38027 (OptForSize || !isSequentialOrUndefOrZeroInRange(Mask, 0, 2, 0))) {
38028 unsigned PermMask = 0;
38029 PermMask |= ((Mask[0] < 0 ? 0x8 : (Mask[0] & 1)) << 0);
38030 PermMask |= ((Mask[1] < 0 ? 0x8 : (Mask[1] & 1)) << 4);
38031 return DAG.getNode(
38032 X86ISD::VPERM2X128, DL, RootVT, CanonicalizeShuffleInput(RootVT, V1),
38033 DAG.getUNDEF(RootVT), DAG.getTargetConstant(PermMask, DL, MVT::i8));
38034 }
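// Worked example (illustrative, not part of the original source): for
// Mask = <1, SM_SentinelZero> this builds
// PermMask = (1 << 0) | (0x8 << 4) = 0x81; the low nibble selects the high
// half of V1 and the 0x8 bit of the high nibble zeroes the upper half.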
38036 if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
38037 return SDValue(); // Nothing to do!
38039 // TODO - handle AVX512VL cases with X86ISD::SHUF128.
38040 if (!UnaryShuffle && !IsMaskedShuffle) {
38041 assert(llvm::all_of(Mask, [](int M) { return 0 <= M && M < 4; }) &&
38042 "Unexpected shuffle sentinel value");
38043 // Prefer blends to X86ISD::VPERM2X128.
38044 if (!((Mask[0] == 0 && Mask[1] == 3) || (Mask[0] == 2 && Mask[1] == 1))) {
38045 unsigned PermMask = 0;
38046 PermMask |= ((Mask[0] & 3) << 0);
38047 PermMask |= ((Mask[1] & 3) << 4);
38048 SDValue LHS = isInRange(Mask[0], 0, 2) ? V1 : V2;
38049 SDValue RHS = isInRange(Mask[1], 0, 2) ? V1 : V2;
38050 return DAG.getNode(X86ISD::VPERM2X128, DL, RootVT,
38051 CanonicalizeShuffleInput(RootVT, LHS),
38052 CanonicalizeShuffleInput(RootVT, RHS),
38053 DAG.getTargetConstant(PermMask, DL, MVT::i8));
38054 }
38055 }
38056 }
38058 // For masks that have been widened to 128-bit elements or more,
38059 // narrow back down to 64-bit elements.
38060 if (BaseMaskEltSizeInBits > 64) {
38061 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
38062 int MaskScale = BaseMaskEltSizeInBits / 64;
38063 SmallVector<int, 64> ScaledMask;
38064 narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
38065 Mask = std::move(ScaledMask);
38066 }
38068 // For masked shuffles, we're trying to match the root width for better
38069 // writemask folding, attempt to scale the mask.
38070 // TODO - variable shuffles might need this to be widened again.
38071 if (IsMaskedShuffle && NumRootElts > Mask.size()) {
38072 assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
38073 int MaskScale = NumRootElts / Mask.size();
38074 SmallVector<int, 64> ScaledMask;
38075 narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
38076 Mask = std::move(ScaledMask);
38077 }
38079 unsigned NumMaskElts = Mask.size();
38080 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
38082 // Determine the effective mask value type.
38083 FloatDomain &= (32 <= MaskEltSizeInBits);
38084 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
38085 : MVT::getIntegerVT(MaskEltSizeInBits);
38086 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
38088 // Only allow legal mask types.
38089 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
38090 return SDValue();
38092 // Attempt to match the mask against known shuffle patterns.
38093 MVT ShuffleSrcVT, ShuffleVT;
38094 unsigned Shuffle, PermuteImm;
38096 // Which shuffle domains are permitted?
38097 // Permit domain crossing at higher combine depths.
38098 // TODO: Should we indicate which domain is preferred if both are allowed?
38099 bool AllowFloatDomain = FloatDomain || (Depth >= 3);
38100 bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
38101 (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
38103 // Determine zeroable mask elements.
38104 APInt KnownUndef, KnownZero;
38105 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
38106 APInt Zeroable = KnownUndef | KnownZero;
38108 if (UnaryShuffle) {
38109 // Attempt to match against broadcast-from-vector.
38110 // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
38111 if ((Subtarget.hasAVX2() ||
38112 (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
38113 (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
38114 if (isUndefOrEqual(Mask, 0)) {
38115 if (V1.getValueType() == MaskVT &&
38116 V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38117 X86::mayFoldLoad(V1.getOperand(0), Subtarget)) {
38118 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
38119 return SDValue(); // Nothing to do!
38120 Res = V1.getOperand(0);
38121 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
38122 return DAG.getBitcast(RootVT, Res);
38123 }
38124 if (Subtarget.hasAVX2()) {
38125 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
38126 return SDValue(); // Nothing to do!
38127 Res = CanonicalizeShuffleInput(MaskVT, V1);
38128 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
38129 return DAG.getBitcast(RootVT, Res);
38130 }
38131 }
38132 }
38134 if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, V1,
38135 DAG, Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
38136 (!IsMaskedShuffle ||
38137 (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38138 if (Depth == 0 && Root.getOpcode() == Shuffle)
38139 return SDValue(); // Nothing to do!
38140 Res = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38141 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
38142 return DAG.getBitcast(RootVT, Res);
38143 }
38145 if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
38146 AllowIntDomain, DAG, Subtarget, Shuffle, ShuffleVT,
38147 PermuteImm) &&
38148 (!IsMaskedShuffle ||
38149 (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38150 if (Depth == 0 && Root.getOpcode() == Shuffle)
38151 return SDValue(); // Nothing to do!
38152 Res = CanonicalizeShuffleInput(ShuffleVT, V1);
38153 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
38154 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38155 return DAG.getBitcast(RootVT, Res);
38156 }
38157 }
38159 // Attempt to combine to INSERTPS, but only if the inserted element has come
38160 // from a scalar.
38161 // TODO: Handle other insertions here as well?
38162 if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
38163 Subtarget.hasSSE41() &&
38164 !isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG)) {
38165 if (MaskEltSizeInBits == 32) {
38166 SDValue SrcV1 = V1, SrcV2 = V2;
38167 if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask,
38168 DAG) &&
38169 SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
38170 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
38171 return SDValue(); // Nothing to do!
38172 Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
38173 CanonicalizeShuffleInput(MVT::v4f32, SrcV1),
38174 CanonicalizeShuffleInput(MVT::v4f32, SrcV2),
38175 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38176 return DAG.getBitcast(RootVT, Res);
38177 }
38178 }
38179 if (MaskEltSizeInBits == 64 &&
38180 isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}, DAG) &&
38181 V2.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38182 V2.getScalarValueSizeInBits() <= 32) {
38183 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
38184 return SDValue(); // Nothing to do!
38185 PermuteImm = (/*DstIdx*/ 2 << 4) | (/*SrcIdx*/ 0 << 0);
38186 Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
38187 CanonicalizeShuffleInput(MVT::v4f32, V1),
38188 CanonicalizeShuffleInput(MVT::v4f32, V2),
38189 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38190 return DAG.getBitcast(RootVT, Res);
38191 }
38192 }
38194 SDValue NewV1 = V1; // Save operands in case early exit happens.
38195 SDValue NewV2 = V2;
38196 if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
38197 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
38198 ShuffleVT, UnaryShuffle) &&
38199 (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38200 if (Depth == 0 && Root.getOpcode() == Shuffle)
38201 return SDValue(); // Nothing to do!
38202 NewV1 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV1);
38203 NewV2 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV2);
38204 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
38205 return DAG.getBitcast(RootVT, Res);
38206 }
38208 NewV1 = V1; // Save operands in case early exit happens.
38209 NewV2 = V2;
38210 if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
38211 AllowIntDomain, NewV1, NewV2, DL, DAG,
38212 Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
38213 (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38214 if (Depth == 0 && Root.getOpcode() == Shuffle)
38215 return SDValue(); // Nothing to do!
38216 NewV1 = CanonicalizeShuffleInput(ShuffleVT, NewV1);
38217 NewV2 = CanonicalizeShuffleInput(ShuffleVT, NewV2);
38218 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
38219 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38220 return DAG.getBitcast(RootVT, Res);
38221 }
38223 // Typically from here on, we need an integer version of MaskVT.
38224 MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
38225 IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
38227 // Annoyingly, SSE4A instructions don't map into the above match helpers.
38228 if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
38229 uint64_t BitLen, BitIdx;
38230 if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
38231 Zeroable)) {
38232 if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
38233 return SDValue(); // Nothing to do!
38234 V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
38235 Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
38236 DAG.getTargetConstant(BitLen, DL, MVT::i8),
38237 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
38238 return DAG.getBitcast(RootVT, Res);
38239 }
38241 if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
38242 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
38243 return SDValue(); // Nothing to do!
38244 V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
38245 V2 = CanonicalizeShuffleInput(IntMaskVT, V2);
38246 Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
38247 DAG.getTargetConstant(BitLen, DL, MVT::i8),
38248 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
38249 return DAG.getBitcast(RootVT, Res);
38250 }
38251 }
38253 // Match shuffle against TRUNCATE patterns.
38254 if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
38255 // Match against a VTRUNC instruction, accounting for src/dst sizes.
38256 if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
38257 Subtarget)) {
38258 bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
38259 ShuffleSrcVT.getVectorNumElements();
38260 unsigned Opc =
38261 IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
38262 if (Depth == 0 && Root.getOpcode() == Opc)
38263 return SDValue(); // Nothing to do!
38264 V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38265 Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
38266 if (ShuffleVT.getSizeInBits() < RootSizeInBits)
38267 Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
38268 return DAG.getBitcast(RootVT, Res);
38269 }
38271 // Do we need a more general binary truncation pattern?
38272 if (RootSizeInBits < 512 &&
38273 ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
38274 (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
38275 (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
38276 isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
38277 // Bail if this was already a truncation or PACK node.
38278 // We sometimes fail to match PACK if we demand known undef elements.
38279 if (Depth == 0 && (Root.getOpcode() == ISD::TRUNCATE ||
38280 Root.getOpcode() == X86ISD::PACKSS ||
38281 Root.getOpcode() == X86ISD::PACKUS))
38282 return SDValue(); // Nothing to do!
38283 ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
38284 ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
38285 V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38286 V2 = CanonicalizeShuffleInput(ShuffleSrcVT, V2);
38287 ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
38288 ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
38289 Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
38290 Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
38291 return DAG.getBitcast(RootVT, Res);
}
}
38295 // Don't try to re-form single instruction chains under any circumstances now
38296 // that we've done encoding canonicalization for them.
if (Depth < 1)
return SDValue();
38300 // Depth threshold above which we can efficiently use variable mask shuffles.
38301 int VariableCrossLaneShuffleDepth =
38302 Subtarget.hasFastVariableCrossLaneShuffle() ? 1 : 2;
38303 int VariablePerLaneShuffleDepth =
38304 Subtarget.hasFastVariablePerLaneShuffle() ? 1 : 2;
38305 AllowVariableCrossLaneMask &=
38306 (Depth >= VariableCrossLaneShuffleDepth) || HasVariableMask;
38307 AllowVariablePerLaneMask &=
38308 (Depth >= VariablePerLaneShuffleDepth) || HasVariableMask;
38309 // VPERMI2W/VPERMI2B are 3 uops on Skylake and Icelake so we require a
38310 // higher depth before combining them.
38311 bool AllowBWIVPERMV3 =
38312 (Depth >= (VariableCrossLaneShuffleDepth + 2) || HasVariableMask);
38314 bool MaskContainsZeros = isAnyZero(Mask);
38316 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
38317 // If we have a single input lane-crossing shuffle then lower to VPERMV.
38318 if (UnaryShuffle && AllowVariableCrossLaneMask && !MaskContainsZeros) {
38319 if (Subtarget.hasAVX2() &&
38320 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) {
38321 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
38322 Res = CanonicalizeShuffleInput(MaskVT, V1);
38323 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
38324 return DAG.getBitcast(RootVT, Res);
}
38326 // AVX512 variants (non-VLX will pad to 512-bit shuffles).
38327 if ((Subtarget.hasAVX512() &&
38328 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38329 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
38330 (Subtarget.hasBWI() &&
38331 (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38332 (Subtarget.hasVBMI() &&
38333 (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8))) {
38334 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38335 V2 = DAG.getUNDEF(MaskVT);
38336 Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38337 return DAG.getBitcast(RootVT, Res);
}
}
38341 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
38342 // vector as the second source (non-VLX will pad to 512-bit shuffles).
38343 if (UnaryShuffle && AllowVariableCrossLaneMask &&
38344 ((Subtarget.hasAVX512() &&
38345 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38346 MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
38347 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32 ||
38348 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
38349 (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38350 (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38351 (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38352 (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
38353 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
38354 for (unsigned i = 0; i != NumMaskElts; ++i)
38355 if (Mask[i] == SM_SentinelZero)
38356 Mask[i] = NumMaskElts + i;
38357 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38358 V2 = getZeroVector(MaskVT, Subtarget, DAG, DL);
38359 Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38360 return DAG.getBitcast(RootVT, Res);
}
38363 // If that failed and either input is extracted then try to combine as a
38364 // shuffle with the larger type.
38365 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
38366 Inputs, Root, BaseMask, Depth, HasVariableMask,
38367 AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG,
Subtarget))
38369 return WideShuffle;
38371 // If we have a dual input lane-crossing shuffle then lower to VPERMV3
38372 // (non-VLX will pad to 512-bit shuffles).
38373 if (AllowVariableCrossLaneMask && !MaskContainsZeros &&
38374 ((Subtarget.hasAVX512() &&
38375 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38376 MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
38377 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32 ||
38378 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
38379 (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38380 (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38381 (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38382 (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
38383 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38384 V2 = CanonicalizeShuffleInput(MaskVT, V2);
38385 Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38386 return DAG.getBitcast(RootVT, Res);
}
}
38391 // See if we can combine a single input shuffle with zeros to a bit-mask,
38392 // which is much simpler than any shuffle.
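// Editorial sketch (not from the original source): for a v4i32 mask
// <0,1,zero,3> the loop below builds the constant
// <0xFFFFFFFF,0xFFFFFFFF,0,0xFFFFFFFF>, so the whole shuffle reduces to a
// single AND of V1 with that bit-mask.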
38393 if (UnaryShuffle && MaskContainsZeros && AllowVariablePerLaneMask &&
38394 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
38395 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
38396 APInt Zero = APInt::getZero(MaskEltSizeInBits);
38397 APInt AllOnes = APInt::getAllOnes(MaskEltSizeInBits);
38398 APInt UndefElts(NumMaskElts, 0);
38399 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
38399 for (unsigned i = 0; i != NumMaskElts; ++i) {
int M = Mask[i];
38402 if (M == SM_SentinelUndef) {
38403 UndefElts.setBit(i);
continue;
}
38406 if (M == SM_SentinelZero)
continue;
38408 EltBits[i] = AllOnes;
}
38410 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
38411 Res = CanonicalizeShuffleInput(MaskVT, V1);
38412 unsigned AndOpcode =
38413 MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
38414 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
38415 return DAG.getBitcast(RootVT, Res);
}
38418 // If we have a single input shuffle with different shuffle patterns in the
38419 // 128-bit lanes, use the variable mask form of VPERMILPS.
38420 // TODO: Combine other mask types at higher depths.
38421 if (UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
38422 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
38423 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
38424 SmallVector<SDValue, 16> VPermIdx;
38425 for (int M : Mask) {
SDValue Idx =
38427 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
38428 VPermIdx.push_back(Idx);
}
38430 SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
38431 Res = CanonicalizeShuffleInput(MaskVT, V1);
38432 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
38433 return DAG.getBitcast(RootVT, Res);
}
38436 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
38437 // to VPERMIL2PD/VPERMIL2PS.
38438 if (AllowVariablePerLaneMask && Subtarget.hasXOP() &&
38439 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
38440 MaskVT == MVT::v8f32)) {
38441 // VPERMIL2 Operation.
38442 // Bits[3] - Match Bit.
38443 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
38444 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
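// Worked example (editorial note, assuming v4f32 / VPERMIL2PS): mask
// element M = 5 (second element of V2) encodes as (5 % 4) + (5 / 4) * 4 = 5,
// i.e. bit 2 selects the second source and bits[1:0] pick the lane element;
// SM_SentinelZero pushes selector 8, which zeroes the element given
// M2ZImm == 2.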
38445 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
38446 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
38447 SmallVector<int, 8> VPerm2Idx;
38448 unsigned M2ZImm = 0;
38449 for (int M : Mask) {
38450 if (M == SM_SentinelUndef) {
38451 VPerm2Idx.push_back(-1);
continue;
}
38454 if (M == SM_SentinelZero) {
M2ZImm = 2;
38456 VPerm2Idx.push_back(8);
continue;
}
38459 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
38460 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
38461 VPerm2Idx.push_back(Index);
}
38463 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38464 V2 = CanonicalizeShuffleInput(MaskVT, V2);
38465 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
38466 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
38467 DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
38468 return DAG.getBitcast(RootVT, Res);
}
38471 // If we have 3 or more shuffle instructions or a chain involving a variable
38472 // mask, we can replace them with a single PSHUFB instruction profitably.
38473 // Intel's manuals suggest only using PSHUFB if doing so replaces 5
38474 // instructions, but in practice PSHUFB tends to be *very* fast so we're
38475 // more aggressive.
38476 if (UnaryShuffle && AllowVariablePerLaneMask &&
38477 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
38478 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
38479 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
38480 SmallVector<SDValue, 16> PSHUFBMask;
38481 int NumBytes = RootVT.getSizeInBits() / 8;
38482 int Ratio = NumBytes / NumMaskElts;
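// Editorial sketch: Ratio is how many mask bytes each (possibly wider)
// mask element expands to. E.g. a v4i32 mask over a 16-byte vector gives
// Ratio = 4, so mask element M covers bytes 4*M .. 4*M+3, matching the
// "Ratio * M + i % Ratio" expansion below.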
38483 for (int i = 0; i < NumBytes; ++i) {
38484 int M = Mask[i / Ratio];
38485 if (M == SM_SentinelUndef) {
38486 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}
38489 if (M == SM_SentinelZero) {
38490 PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
continue;
}
38493 M = Ratio * M + i % Ratio;
38494 assert((M / 16) == (i / 16) && "Lane crossing detected");
38495 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
38497 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
38498 Res = CanonicalizeShuffleInput(ByteVT, V1);
38499 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
38500 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
38501 return DAG.getBitcast(RootVT, Res);
}
38504 // With XOP, if we have a 128-bit binary input shuffle we can always combine
38505 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
38506 // slower than PSHUFB on targets that support both.
38507 if (AllowVariablePerLaneMask && RootVT.is128BitVector() &&
38508 Subtarget.hasXOP()) {
38509 // VPPERM Mask Operation
38510 // Bits[4:0] - Byte Index (0 - 31)
38511 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
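// Editorial example: byte indices 0-15 select from V1 and 16-31 from V2,
// so a mask byte of 0x12 picks byte 2 of V2, while 0x80 (permute op 4)
// writes a zero byte, matching the SM_SentinelZero handling below.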
38512 SmallVector<SDValue, 16> VPPERMMask;
int NumBytes = 16;
38514 int Ratio = NumBytes / NumMaskElts;
38515 for (int i = 0; i < NumBytes; ++i) {
38516 int M = Mask[i / Ratio];
38517 if (M == SM_SentinelUndef) {
38518 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}
38521 if (M == SM_SentinelZero) {
38522 VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
continue;
}
38525 M = Ratio * M + i % Ratio;
38526 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
38528 MVT ByteVT = MVT::v16i8;
38529 V1 = CanonicalizeShuffleInput(ByteVT, V1);
38530 V2 = CanonicalizeShuffleInput(ByteVT, V2);
38531 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
38532 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
38533 return DAG.getBitcast(RootVT, Res);
}
38536 // If that failed and either input is extracted then try to combine as a
38537 // shuffle with the larger type.
38538 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
38539 Inputs, Root, BaseMask, Depth, HasVariableMask,
38540 AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG, Subtarget))
38541 return WideShuffle;
38543 // If we have a dual input shuffle then lower to VPERMV3
38544 // (non-VLX will pad to 512-bit shuffles).
38545 if (!UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
38546 ((Subtarget.hasAVX512() &&
38547 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v8f64 ||
38548 MaskVT == MVT::v2i64 || MaskVT == MVT::v4i64 || MaskVT == MVT::v8i64 ||
38549 MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 || MaskVT == MVT::v8f32 ||
38550 MaskVT == MVT::v8i32 || MaskVT == MVT::v16f32 ||
38551 MaskVT == MVT::v16i32)) ||
38552 (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38553 (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16 ||
38554 MaskVT == MVT::v32i16)) ||
38555 (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38556 (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8 ||
38557 MaskVT == MVT::v64i8)))) {
38558 V1 = CanonicalizeShuffleInput(MaskVT, V1);
38559 V2 = CanonicalizeShuffleInput(MaskVT, V2);
38560 Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38561 return DAG.getBitcast(RootVT, Res);
}
38564 // Failed to find any combines.
return SDValue();
}
38568 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
38569 // instruction if possible.
38571 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
38572 // type size to attempt to combine:
38573 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
// -->
38575 // extract_subvector(shuffle(x,y,m2),0)
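// Editorial sketch of the transform: given two v4f32 subvectors pulled out
// of wider v8f32 sources, e.g.
//   shuffle(extract_subvector(x, 4), extract_subvector(y, 4), <0,1,4,5>)
// the mask is rewritten against the full v8f32 inputs (here to
// <4,5,12,13,u,u,u,u>) and the combine is re-tried at the wider width, with
// the original narrow result taken as subvector 0 of the wide shuffle.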
38576 static SDValue combineX86ShuffleChainWithExtract(
38577 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
38578 bool HasVariableMask, bool AllowVariableCrossLaneMask,
38579 bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38580 const X86Subtarget &Subtarget) {
38581 unsigned NumMaskElts = BaseMask.size();
38582 unsigned NumInputs = Inputs.size();
38583 if (NumInputs == 0)
return SDValue();
38586 EVT RootVT = Root.getValueType();
38587 unsigned RootSizeInBits = RootVT.getSizeInBits();
38588 assert((RootSizeInBits % NumMaskElts) == 0 && "Unexpected root shuffle mask");
38590 // Bail if we have any smaller inputs.
38591 if (llvm::any_of(Inputs, [RootSizeInBits](SDValue Input) {
38592 return Input.getValueSizeInBits() < RootSizeInBits;
}))
return SDValue();
38596 SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
38597 SmallVector<unsigned, 4> Offsets(NumInputs, 0);
38599 // Peek through subvectors.
38600 // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
38601 unsigned WideSizeInBits = RootSizeInBits;
38602 for (unsigned i = 0; i != NumInputs; ++i) {
38603 SDValue &Src = WideInputs[i];
38604 unsigned &Offset = Offsets[i];
38605 Src = peekThroughBitcasts(Src);
38606 EVT BaseVT = Src.getValueType();
38607 while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
38608 Offset += Src.getConstantOperandVal(1);
38609 Src = Src.getOperand(0);
}
38611 WideSizeInBits = std::max(WideSizeInBits,
38612 (unsigned)Src.getValueSizeInBits());
38613 assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
38614 "Unexpected subvector extraction");
38615 Offset /= BaseVT.getVectorNumElements();
38616 Offset *= NumMaskElts;
}
38619 // Bail if we're always extracting from the lowest subvectors,
38620 // combineX86ShuffleChain should match this for the current width.
38621 if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
return SDValue();
38624 unsigned Scale = WideSizeInBits / RootSizeInBits;
38625 assert((WideSizeInBits % RootSizeInBits) == 0 &&
38626 "Unexpected subvector extraction");
38628 // If the src vector types aren't the same, see if we can extend
38629 // them to match each other.
38630 // TODO: Support different scalar types?
38631 EVT WideSVT = WideInputs[0].getValueType().getScalarType();
38632 if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
38633 return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
38634 Op.getValueType().getScalarType() != WideSVT;
}))
return SDValue();
38638 // Create new mask for larger type.
38639 for (unsigned i = 1; i != NumInputs; ++i)
38640 Offsets[i] += i * Scale * NumMaskElts;
38642 SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
38643 for (int &M : WideMask) {
if (M < 0)
continue;
38646 M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
}
38648 WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
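// Editorial worked example: with a 4-element base mask, Scale = 2 and
// Offsets = {4, 12}, mask element 5 (input 1, element 1) becomes
// (5 % 4) + Offsets[5 / 4] = 1 + 12 = 13, and the mask is then padded with
// four trailing undefs to the 8-element wide width.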
38650 // Remove unused/repeated shuffle source ops.
38651 resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
38652 assert(!WideInputs.empty() && "Shuffle with no inputs detected");
38654 if (WideInputs.size() > 2)
return SDValue();
38657 // Increase depth for every upper subvector we've peeked through.
38658 Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
38660 // Attempt to combine wider chain.
38661 // TODO: Can we use a better Root?
38662 SDValue WideRoot = WideInputs.front().getValueSizeInBits() >
38663 WideInputs.back().getValueSizeInBits()
38664 ? WideInputs.front()
38665 : WideInputs.back();
38666 if (SDValue WideShuffle =
38667 combineX86ShuffleChain(WideInputs, WideRoot, WideMask, Depth,
38668 HasVariableMask, AllowVariableCrossLaneMask,
38669 AllowVariablePerLaneMask, DAG, Subtarget)) {
WideShuffle =
38671 extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
38672 return DAG.getBitcast(RootVT, WideShuffle);
}
return SDValue();
}
38677 // Canonicalize the combined shuffle mask chain with horizontal ops.
38678 // NOTE: This may update the Ops and Mask.
38679 static SDValue canonicalizeShuffleMaskWithHorizOp(
38680 MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
38681 unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
38682 const X86Subtarget &Subtarget) {
38683 if (Mask.empty() || Ops.empty())
return SDValue();
38686 SmallVector<SDValue> BC;
38687 for (SDValue Op : Ops)
38688 BC.push_back(peekThroughBitcasts(Op));
38690 // All ops must be the same horizop + type.
38691 SDValue BC0 = BC[0];
38692 EVT VT0 = BC0.getValueType();
38693 unsigned Opcode0 = BC0.getOpcode();
38694 if (VT0.getSizeInBits() != RootSizeInBits || llvm::any_of(BC, [&](SDValue V) {
38695 return V.getOpcode() != Opcode0 || V.getValueType() != VT0;
}))
return SDValue();
38699 bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
38700 Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
38701 bool isPack = (Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS);
38702 if (!isHoriz && !isPack)
return SDValue();
38705 // Do all ops have a single use?
38706 bool OneUseOps = llvm::all_of(Ops, [](SDValue Op) {
38707 return Op.hasOneUse() &&
38708 peekThroughBitcasts(Op) == peekThroughOneUseBitcasts(Op);
});
38711 int NumElts = VT0.getVectorNumElements();
38712 int NumLanes = VT0.getSizeInBits() / 128;
38713 int NumEltsPerLane = NumElts / NumLanes;
38714 int NumHalfEltsPerLane = NumEltsPerLane / 2;
38715 MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
38716 unsigned EltSizeInBits = RootSizeInBits / Mask.size();
38718 if (NumEltsPerLane >= 4 &&
38719 (isPack || shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget))) {
38720 SmallVector<int> LaneMask, ScaledMask;
38721 if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, LaneMask) &&
38722 scaleShuffleElements(LaneMask, 4, ScaledMask)) {
38723 // See if we can remove the shuffle by resorting the HOP chain so that
38724 // the HOP args are pre-shuffled.
38725 // TODO: Generalize to any sized/depth chain.
38726 // TODO: Add support for PACKSS/PACKUS.
38728 // Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
38729 auto GetHOpSrc = [&](int M) {
38730 if (M == SM_SentinelUndef)
38731 return DAG.getUNDEF(VT0);
38732 if (M == SM_SentinelZero)
38733 return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
38734 SDValue Src0 = BC[M / 4];
38735 SDValue Src1 = Src0.getOperand((M % 4) >= 2);
38736 if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
38737 return Src1.getOperand(M % 2);
return SDValue();
};
38740 SDValue M0 = GetHOpSrc(ScaledMask[0]);
38741 SDValue M1 = GetHOpSrc(ScaledMask[1]);
38742 SDValue M2 = GetHOpSrc(ScaledMask[2]);
38743 SDValue M3 = GetHOpSrc(ScaledMask[3]);
38744 if (M0 && M1 && M2 && M3) {
38745 SDValue LHS = DAG.getNode(Opcode0, DL, SrcVT, M0, M1);
38746 SDValue RHS = DAG.getNode(Opcode0, DL, SrcVT, M2, M3);
38747 return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
}
38750 // shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
38751 if (Ops.size() >= 2) {
SDValue LHS, RHS;
38753 auto GetHOpSrc = [&](int M, int &OutM) {
38754 // TODO: Support SM_SentinelZero
if (M < 0)
38756 return M == SM_SentinelUndef;
38757 SDValue Src = BC[M / 4].getOperand((M % 4) >= 2);
38758 if (!LHS || LHS == Src) {
LHS = Src;
OutM = (M % 2);
return true;
}
38763 if (!RHS || RHS == Src) {
RHS = Src;
38765 OutM = (M % 2) + 2;
return true;
}
return false;
};
38770 int PostMask[4] = {-1, -1, -1, -1};
38771 if (GetHOpSrc(ScaledMask[0], PostMask[0]) &&
38772 GetHOpSrc(ScaledMask[1], PostMask[1]) &&
38773 GetHOpSrc(ScaledMask[2], PostMask[2]) &&
38774 GetHOpSrc(ScaledMask[3], PostMask[3])) {
38775 LHS = DAG.getBitcast(SrcVT, LHS);
38776 RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
38777 SDValue Res = DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
38778 // Use SHUFPS for the permute so this will work on SSE3 targets,
38779 // shuffle combining and domain handling will simplify this later on.
38780 MVT ShuffleVT = MVT::getVectorVT(MVT::f32, RootSizeInBits / 32);
38781 Res = DAG.getBitcast(ShuffleVT, Res);
38782 return DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
38783 getV4X86ShuffleImm8ForMask(PostMask, DL, DAG));
}
}
}
38789 if (2 < Ops.size())
return SDValue();
38792 SDValue BC1 = BC[BC.size() - 1];
38793 if (Mask.size() == VT0.getVectorNumElements()) {
38794 // Canonicalize binary shuffles of horizontal ops that use the
38795 // same sources to a unary shuffle.
38796 // TODO: Try to perform this fold even if the shuffle remains.
38797 if (Ops.size() == 2) {
38798 auto ContainsOps = [](SDValue HOp, SDValue Op) {
38799 return Op == HOp.getOperand(0) || Op == HOp.getOperand(1);
38801 // Commute if all BC0's ops are contained in BC1.
38802 if (ContainsOps(BC1, BC0.getOperand(0)) &&
38803 ContainsOps(BC1, BC0.getOperand(1))) {
38804 ShuffleVectorSDNode::commuteMask(Mask);
38805 std::swap(Ops[0], Ops[1]);
38806 std::swap(BC0, BC1);
}
38809 // If BC1 can be represented by BC0, then convert to unary shuffle.
38810 if (ContainsOps(BC0, BC1.getOperand(0)) &&
38811 ContainsOps(BC0, BC1.getOperand(1))) {
38812 for (int &M : Mask) {
38813 if (M < NumElts) // BC0 element or UNDEF/Zero sentinel.
continue;
38815 int SubLane = ((M % NumEltsPerLane) >= NumHalfEltsPerLane) ? 1 : 0;
38816 M -= NumElts + (SubLane * NumHalfEltsPerLane);
38817 if (BC1.getOperand(SubLane) != BC0.getOperand(0))
38818 M += NumHalfEltsPerLane;
}
}
}
38823 // Canonicalize unary horizontal ops to only refer to lower halves.
38824 for (int i = 0; i != NumElts; ++i) {
int &M = Mask[i];
38826 if (isUndefOrZero(M))
continue;
38828 if (M < NumElts && BC0.getOperand(0) == BC0.getOperand(1) &&
38829 (M % NumEltsPerLane) >= NumHalfEltsPerLane)
38830 M -= NumHalfEltsPerLane;
38831 if (NumElts <= M && BC1.getOperand(0) == BC1.getOperand(1) &&
38832 (M % NumEltsPerLane) >= NumHalfEltsPerLane)
38833 M -= NumHalfEltsPerLane;
}
}
38837 // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
38838 // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
38839 // represents the LHS/RHS inputs for the lower/upper halves.
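// Editorial sketch: e.g. shuffle(HADD(X,Y), HADD(Z,W)) with a repeating
// v2X64 mask that scales to WideMask128 = {0, 2} selects the first operand
// (X) of the first HADD and the first operand (Z) of the second, so the
// pair folds to the single node HADD(X, Z).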
38840 SmallVector<int, 16> TargetMask128, WideMask128;
38841 if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, TargetMask128) &&
38842 scaleShuffleElements(TargetMask128, 2, WideMask128)) {
38843 assert(isUndefOrZeroOrInRange(WideMask128, 0, 4) && "Illegal shuffle");
38844 bool SingleOp = (Ops.size() == 1);
38845 if (isPack || OneUseOps ||
38846 shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
38847 SDValue Lo = isInRange(WideMask128[0], 0, 2) ? BC0 : BC1;
38848 SDValue Hi = isInRange(WideMask128[1], 0, 2) ? BC0 : BC1;
38849 Lo = Lo.getOperand(WideMask128[0] & 1);
38850 Hi = Hi.getOperand(WideMask128[1] & 1);
if (SingleOp) {
38852 SDValue Undef = DAG.getUNDEF(SrcVT);
38853 SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
38854 Lo = (WideMask128[0] == SM_SentinelZero ? Zero : Lo);
38855 Hi = (WideMask128[1] == SM_SentinelZero ? Zero : Hi);
38856 Lo = (WideMask128[0] == SM_SentinelUndef ? Undef : Lo);
38857 Hi = (WideMask128[1] == SM_SentinelUndef ? Undef : Hi);
}
38859 return DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
}
}
return SDValue();
}
38866 // Attempt to constant fold all of the constant source ops.
38867 // Returns true if the entire shuffle is folded to a constant.
38868 // TODO: Extend this to merge multiple constant Ops and update the mask.
38869 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
38870 ArrayRef<int> Mask, SDValue Root,
38871 bool HasVariableMask,
SelectionDAG &DAG,
38873 const X86Subtarget &Subtarget) {
38874 MVT VT = Root.getSimpleValueType();
38876 unsigned SizeInBits = VT.getSizeInBits();
38877 unsigned NumMaskElts = Mask.size();
38878 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
38879 unsigned NumOps = Ops.size();
38881 // Extract constant bits from each source op.
38882 bool OneUseConstantOp = false;
38883 SmallVector<APInt, 16> UndefEltsOps(NumOps);
38884 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
38885 for (unsigned i = 0; i != NumOps; ++i) {
38886 SDValue SrcOp = Ops[i];
38887 OneUseConstantOp |= SrcOp.hasOneUse();
38888 if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
RawBitsOps[i]))
return SDValue();
}
38893 // If we're optimizing for size, only fold if at least one of the constants is
38894 // only used once or the combined shuffle has included a variable mask
38895 // shuffle; this avoids constant pool bloat.
38896 bool IsOptimizingSize = DAG.shouldOptForSize();
38897 if (IsOptimizingSize && !OneUseConstantOp && !HasVariableMask)
return SDValue();
38900 // Shuffle the constant bits according to the mask.
SDLoc DL(Root);
38902 APInt UndefElts(NumMaskElts, 0);
38903 APInt ZeroElts(NumMaskElts, 0);
38904 APInt ConstantElts(NumMaskElts, 0);
38905 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
38906 APInt::getZero(MaskSizeInBits));
38907 for (unsigned i = 0; i != NumMaskElts; ++i) {
int M = Mask[i];
38909 if (M == SM_SentinelUndef) {
38910 UndefElts.setBit(i);
continue;
38912 } else if (M == SM_SentinelZero) {
38913 ZeroElts.setBit(i);
continue;
}
38916 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
38918 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
38919 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
38921 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
38922 if (SrcUndefElts[SrcMaskIdx]) {
38923 UndefElts.setBit(i);
continue;
}
38927 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
38928 APInt &Bits = SrcEltBits[SrcMaskIdx];
if (Bits == 0) {
38930 ZeroElts.setBit(i);
continue;
}
38934 ConstantElts.setBit(i);
38935 ConstantBitData[i] = Bits;
}
38937 assert((UndefElts | ZeroElts | ConstantElts).isAllOnes());
38939 // Attempt to create a zero vector.
38940 if ((UndefElts | ZeroElts).isAllOnes())
38941 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
38943 // Create the constant data.
MVT MaskSVT;
38945 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
38946 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
else
38948 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
38950 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
38951 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
return SDValue();
38954 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
38955 return DAG.getBitcast(VT, CstOp);
}
namespace llvm {
namespace X86 {
enum CombineShuffleDepth : unsigned {
38961 MaxShuffleCombineDepth = 8
};
} // namespace X86
38964 } // namespace llvm
38966 /// Fully generic combining of x86 shuffle instructions.
38968 /// This should be the last combine run over the x86 shuffle instructions. Once
38969 /// they have been fully optimized, this will recursively consider all chains
38970 /// of single-use shuffle instructions, build a generic model of the cumulative
38971 /// shuffle operation, and check for simpler instructions which implement this
38972 /// operation. We use this primarily for two purposes:
38974 /// 1) Collapse generic shuffles to specialized single instructions when
38975 /// equivalent. In most cases, this is just an encoding size win, but
38976 /// sometimes we will collapse multiple generic shuffles into a single
38977 /// special-purpose shuffle.
38978 /// 2) Look for sequences of shuffle instructions with 3 or more total
38979 /// instructions, and replace them with the slightly more expensive SSSE3
38980 /// PSHUFB instruction if available. We do this as the last combining step
38981 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
38982 /// a suitable short sequence of other instructions. The PSHUFB will either
38983 /// use a register or have to read from memory and so is slightly (but only
38984 /// slightly) more expensive than the other shuffle instructions.
38986 /// Because this is inherently a quadratic operation (for each shuffle in
38987 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
38988 /// This should never be an issue in practice as the shuffle lowering doesn't
38989 /// produce sequences of more than 8 instructions.
38991 /// FIXME: We will currently miss some cases where the redundant shuffling
38992 /// would simplify under the threshold for PSHUFB formation because of
38993 /// combine-ordering. To fix this, we should do the redundant instruction
38994 /// combining in this recursive walk.
38995 static SDValue combineX86ShufflesRecursively(
38996 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
38997 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
38998 unsigned MaxDepth, bool HasVariableMask, bool AllowVariableCrossLaneMask,
38999 bool AllowVariablePerLaneMask, SelectionDAG &DAG,
39000 const X86Subtarget &Subtarget) {
39001 assert(RootMask.size() > 0 &&
39002 (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
39003 "Illegal shuffle root mask");
39004 MVT RootVT = Root.getSimpleValueType();
39005 assert(RootVT.isVector() && "Shuffles operate on vector types!");
39006 unsigned RootSizeInBits = RootVT.getSizeInBits();
39008 // Bound the depth of our recursive combine because this is ultimately
39009 // quadratic in nature.
39010 if (Depth >= MaxDepth)
return SDValue();
39013 // Directly rip through bitcasts to find the underlying operand.
39014 SDValue Op = SrcOps[SrcOpIndex];
39015 Op = peekThroughOneUseBitcasts(Op);
39017 EVT VT = Op.getValueType();
39018 if (!VT.isVector() || !VT.isSimple())
39019 return SDValue(); // Bail if we hit a non-simple non-vector.
39021 // FIXME: Just bail on f16 for now.
39022 if (VT.getVectorElementType() == MVT::f16)
return SDValue();
39025 assert((RootSizeInBits % VT.getSizeInBits()) == 0 &&
39026 "Can only combine shuffles upto size of the root op.");
39028 // Create a demanded elts mask from the referenced elements of Op.
39029 APInt OpDemandedElts = APInt::getZero(RootMask.size());
39030 for (int M : RootMask) {
39031 int BaseIdx = RootMask.size() * SrcOpIndex;
39032 if (isInRange(M, BaseIdx, BaseIdx + RootMask.size()))
39033 OpDemandedElts.setBit(M - BaseIdx);
}
39035 if (RootSizeInBits != VT.getSizeInBits()) {
39036 // Op is smaller than Root - extract the demanded elts for the subvector.
39037 unsigned Scale = RootSizeInBits / VT.getSizeInBits();
39038 unsigned NumOpMaskElts = RootMask.size() / Scale;
39039 assert((RootMask.size() % Scale) == 0 && "Root mask size mismatch");
39040 assert(OpDemandedElts
39041 .extractBits(RootMask.size() - NumOpMaskElts, NumOpMaskElts)
.isZero() &&
39043 "Out of range elements referenced in root mask");
39044 OpDemandedElts = OpDemandedElts.extractBits(NumOpMaskElts, 0);
}
OpDemandedElts =
39047 APIntOps::ScaleBitMask(OpDemandedElts, VT.getVectorNumElements());
39049 // Extract target shuffle mask and resolve sentinels and inputs.
39050 SmallVector<int, 64> OpMask;
39051 SmallVector<SDValue, 2> OpInputs;
39052 APInt OpUndef, OpZero;
39053 bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
39054 if (getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
39055 OpZero, DAG, Depth, false)) {
39056 // Shuffle inputs must not be larger than the shuffle result.
39057 // TODO: Relax this for single input faux shuffles (e.g. trunc).
39058 if (llvm::any_of(OpInputs, [VT](SDValue OpInput) {
39059 return OpInput.getValueSizeInBits() > VT.getSizeInBits();
}))
return SDValue();
39062 } else if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
39063 (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
39064 !isNullConstant(Op.getOperand(1))) {
39065 SDValue SrcVec = Op.getOperand(0);
39066 int ExtractIdx = Op.getConstantOperandVal(1);
39067 unsigned NumElts = VT.getVectorNumElements();
39068 OpInputs.assign({SrcVec});
39069 OpMask.assign(NumElts, SM_SentinelUndef);
39070 std::iota(OpMask.begin(), OpMask.end(), ExtractIdx);
39071 OpZero = OpUndef = APInt::getNullValue(NumElts);
} else {
return SDValue();
}
39076 // If the shuffle result was smaller than the root, we need to adjust the
39077 // mask indices and pad the mask with undefs.
39078 if (RootSizeInBits > VT.getSizeInBits()) {
39079 unsigned NumSubVecs = RootSizeInBits / VT.getSizeInBits();
39080 unsigned OpMaskSize = OpMask.size();
39081 if (OpInputs.size() > 1) {
39082 unsigned PaddedMaskSize = NumSubVecs * OpMaskSize;
39083 for (int &M : OpMask) {
if (M < 0)
continue;
39086 int EltIdx = M % OpMaskSize;
39087 int OpIdx = M / OpMaskSize;
39088 M = (PaddedMaskSize * OpIdx) + EltIdx;
}
}
39091 OpZero = OpZero.zext(NumSubVecs * OpMaskSize);
39092 OpUndef = OpUndef.zext(NumSubVecs * OpMaskSize);
39093 OpMask.append((NumSubVecs - 1) * OpMaskSize, SM_SentinelUndef);
}
39096 SmallVector<int, 64> Mask;
39097 SmallVector<SDValue, 16> Ops;
39099 // We don't need to merge masks if the root is empty.
39100 bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
if (EmptyRoot) {
39102 // Only resolve zeros if it will remove an input, otherwise we might end
39103 // up in an infinite loop.
39104 bool ResolveKnownZeros = true;
39105 if (!OpZero.isZero()) {
39106 APInt UsedInputs = APInt::getZero(OpInputs.size());
39107 for (int i = 0, e = OpMask.size(); i != e; ++i) {
int M = OpMask[i];
39109 if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
continue;
39111 UsedInputs.setBit(M / OpMask.size());
39112 if (UsedInputs.isAllOnes()) {
39113 ResolveKnownZeros = false;
break;
}
}
39118 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
39119 ResolveKnownZeros);
Mask = OpMask;
39122 Ops.append(OpInputs.begin(), OpInputs.end());
} else {
39124 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
39126 // Add the inputs to the Ops list, avoiding duplicates.
39127 Ops.append(SrcOps.begin(), SrcOps.end());
39129 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
39130 // Attempt to find an existing match.
39131 SDValue InputBC = peekThroughBitcasts(Input);
39132 for (int i = 0, e = Ops.size(); i < e; ++i)
39133 if (InputBC == peekThroughBitcasts(Ops[i]))
return i;
39135 // Match failed - should we replace an existing Op?
39136 if (InsertionPoint >= 0) {
39137 Ops[InsertionPoint] = Input;
39138 return InsertionPoint;
39140 // Add to the end of the Ops list.
39141 Ops.push_back(Input);
39142 return Ops.size() - 1;
};
39145 SmallVector<int, 2> OpInputIdx;
39146 for (SDValue OpInput : OpInputs)
39147 OpInputIdx.push_back(
39148 AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
39150 assert(((RootMask.size() > OpMask.size() &&
39151 RootMask.size() % OpMask.size() == 0) ||
39152 (OpMask.size() > RootMask.size() &&
39153 OpMask.size() % RootMask.size() == 0) ||
39154 OpMask.size() == RootMask.size()) &&
39155 "The smaller number of elements must divide the larger.");
39157 // This function can be performance-critical, so we rely on the power-of-2
39158 // knowledge that we have about the mask sizes to replace div/rem ops with
39159 // bit-masks and shifts.
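// Editorial example: if OpMask.size() == 8 then OpMaskSizeLog2 == 3, so
// M / 8 becomes M >> 3 and M % 8 becomes M & 7; the same trick is applied
// to the Root/Op ratios below.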
39160 assert(isPowerOf2_32(RootMask.size()) &&
39161 "Non-power-of-2 shuffle mask sizes");
39162 assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
39163 unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
39164 unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
39166 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
39167 unsigned RootRatio =
39168 std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
39169 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
39170 assert((RootRatio == 1 || OpRatio == 1) &&
39171 "Must not have a ratio for both incoming and op masks!");
39173 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
39174 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
39175 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
39176 unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
39177 unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
39179 Mask.resize(MaskWidth, SM_SentinelUndef);
39181 // Merge this shuffle operation's mask into our accumulated mask. Note that
39182 // this shuffle's mask will be the first applied to the input, followed by
39183 // the root mask to get us all the way to the root value arrangement. The
39184 // reason for this order is that we are recursing up the operation chain.
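// Editorial worked example: merging a 2-element root mask <1,0> with a
// 4-element OpMask <2,0,1,3>: MaskWidth = 4 and RootRatio = 2, so output
// element 0 scales through RootMask[0] = 1 to root index 2 and then maps
// via OpMask[2] = 1, producing the merged mask <1,3,2,0>.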
39185 for (unsigned i = 0; i < MaskWidth; ++i) {
39186 unsigned RootIdx = i >> RootRatioLog2;
39187 if (RootMask[RootIdx] < 0) {
39188 // This is a zero or undef lane, we're done.
39189 Mask[i] = RootMask[RootIdx];
continue;
}
39193 unsigned RootMaskedIdx =
RootRatio == 1
39195 ? RootMask[RootIdx]
39196 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
39198 // Just insert the scaled root mask value if it references an input other
39199 // than the SrcOp we're currently inserting.
39200 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
39201 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
39202 Mask[i] = RootMaskedIdx;
continue;
}
39206 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
39207 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
39208 if (OpMask[OpIdx] < 0) {
39209 // The incoming lanes are zero or undef, it doesn't matter which ones we
// are using.
39211 Mask[i] = OpMask[OpIdx];
continue;
}
39215 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
39216 unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
39217 : (OpMask[OpIdx] << OpRatioLog2) +
39218 (RootMaskedIdx & (OpRatio - 1));
39220 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
39221 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
39222 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
39223 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
39225 Mask[i] = OpMaskedIdx;
}
}
39229 // Remove unused/repeated shuffle source ops.
39230 resolveTargetShuffleInputsAndMask(Ops, Mask);
39232 // Handle the all undef/zero/ones cases early.
39233 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
39234 return DAG.getUNDEF(RootVT);
39235 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
39236 return getZeroVector(RootVT, Subtarget, DAG, SDLoc(Root));
39237 if (Ops.size() == 1 && ISD::isBuildVectorAllOnes(Ops[0].getNode()) &&
39238 none_of(Mask, [](int M) { return M == SM_SentinelZero; }))
39239 return getOnesVector(RootVT, DAG, SDLoc(Root));
39241 assert(!Ops.empty() && "Shuffle with no inputs detected");
39242 HasVariableMask |= IsOpVariableMask;
39244 // Update the list of shuffle nodes that have been combined so far.
39245 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
SrcNodes.end());
39247 CombinedNodes.push_back(Op.getNode());
39249 // See if we can recurse into each shuffle source op (if it's a target
39250 // shuffle). The source op should only be generally combined if it either has
39251 // a single use (i.e. current Op) or all its users have already been combined;
39252 // if not, then we can still combine but should prevent generation of variable
39253 // shuffles to avoid constant pool bloat.
39254 // Don't recurse if we already have more source ops than we can combine in
39255 // the remaining recursion depth.
39256 if (Ops.size() < (MaxDepth - Depth)) {
39257 for (int i = 0, e = Ops.size(); i < e; ++i) {
39258 // For empty roots, we need to resolve zeroable elements before combining
39259 // them with other shuffles.
39260 SmallVector<int, 64> ResolvedMask = Mask;
if (EmptyRoot)
39262 resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
39263 bool AllowCrossLaneVar = false;
39264 bool AllowPerLaneVar = false;
39265 if (Ops[i].getNode()->hasOneUse() ||
39266 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode())) {
39267 AllowCrossLaneVar = AllowVariableCrossLaneMask;
39268 AllowPerLaneVar = AllowVariablePerLaneMask;
39270 if (SDValue Res = combineX86ShufflesRecursively(
39271 Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1, MaxDepth,
39272 HasVariableMask, AllowCrossLaneVar, AllowPerLaneVar, DAG,
Subtarget))
return Res;
}
}
39278 // Attempt to constant fold all of the constant source ops.
39279 if (SDValue Cst = combineX86ShufflesConstants(
39280 Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
return Cst;
39283 // If constant fold failed and we only have constants - then we have
39284 // multiple uses by a single non-variable shuffle - just bail.
39285 if (Depth == 0 && llvm::all_of(Ops, [&](SDValue Op) {
APInt UndefElts;
39287 SmallVector<APInt> RawBits;
39288 unsigned EltSizeInBits = RootSizeInBits / Mask.size();
39289 return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
RawBits);
}))
return SDValue();
39295 // Canonicalize the combined shuffle mask chain with horizontal ops.
39296 // NOTE: This will update the Ops and Mask.
39297 if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
39298 Ops, Mask, RootSizeInBits, SDLoc(Root), DAG, Subtarget))
39299 return DAG.getBitcast(RootVT, HOp);
39301 // Try to refine our inputs given our knowledge of target shuffle mask.
39302 for (auto I : enumerate(Ops)) {
39303 int OpIdx = I.index();
39304 SDValue &Op = I.value();
39306 // What range of shuffle mask element values results in picking from Op?
39307 int Lo = OpIdx * Mask.size();
39308 int Hi = Lo + Mask.size();
39310 // Which elements of Op do we demand, given the mask's granularity?
39311 APInt OpDemandedElts(Mask.size(), 0);
39312 for (int MaskElt : Mask) {
39313 if (isInRange(MaskElt, Lo, Hi)) { // Picks from Op?
39314 int OpEltIdx = MaskElt - Lo;
39315 OpDemandedElts.setBit(OpEltIdx);
}
}
39319 // Is the shuffle result smaller than the root?
39320 if (Op.getValueSizeInBits() < RootSizeInBits) {
39321 // We padded the mask with undefs. But we now need to undo that.
39322 unsigned NumExpectedVectorElts = Mask.size();
39323 unsigned EltSizeInBits = RootSizeInBits / NumExpectedVectorElts;
39324 unsigned NumOpVectorElts = Op.getValueSizeInBits() / EltSizeInBits;
39325 assert(!OpDemandedElts.extractBits(
39326 NumExpectedVectorElts - NumOpVectorElts, NumOpVectorElts) &&
39327 "Demanding the virtual undef widening padding?");
39328 OpDemandedElts = OpDemandedElts.trunc(NumOpVectorElts); // NUW
}
39331 // The Op itself may be of different VT, so we need to scale the mask.
39332 unsigned NumOpElts = Op.getValueType().getVectorNumElements();
39333 APInt OpScaledDemandedElts = APIntOps::ScaleBitMask(OpDemandedElts, NumOpElts);
39335 // Can this operand be simplified any further, given its demanded elements?
39336 if (SDValue NewOp =
39337 DAG.getTargetLoweringInfo().SimplifyMultipleUseDemandedVectorElts(
39338 Op, OpScaledDemandedElts, DAG))
Op = NewOp;
}
39341 // FIXME: should we rerun resolveTargetShuffleInputsAndMask() now?
39343 // Widen any subvector shuffle inputs we've collected.
39344 // TODO: Remove this to avoid generating temporary nodes, we should only
39345 // widen once combineX86ShuffleChain has found a match.
39346 if (any_of(Ops, [RootSizeInBits](SDValue Op) {
39347 return Op.getValueSizeInBits() < RootSizeInBits;
})) {
39349 for (SDValue &Op : Ops)
39350 if (Op.getValueSizeInBits() < RootSizeInBits)
39351 Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op),
RootSizeInBits);
39353 // Reresolve - we might have repeated subvector sources.
39354 resolveTargetShuffleInputsAndMask(Ops, Mask);
}
39357 // We can only combine unary and binary shuffle mask cases.
39358 if (Ops.size() <= 2) {
39359 // Minor canonicalization of the accumulated shuffle mask to make it easier
39360 // to match below. All this does is detect masks with sequential pairs of
39361 // elements, and shrink them to the half-width mask. It does this in a loop
39362 // so it will reduce the size of the mask to the minimal width mask which
39363 // performs an equivalent shuffle.
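// Editorial example: a v8i16-style mask <0,1,2,3,8,9,10,11> widens to the
// v4i32 mask <0,1,4,5> and then to the v2i64 mask <0,2>, at which point no
// further pairing is possible.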
39364 while (Mask.size() > 1) {
39365 SmallVector<int, 64> WidenedMask;
39366 if (!canWidenShuffleElements(Mask, WidenedMask))
break;
39368 Mask = std::move(WidenedMask);
}
39371 // Canonicalization of binary shuffle masks to improve pattern matching by
39372 // commuting the inputs.
39373 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
39374 ShuffleVectorSDNode::commuteMask(Mask);
39375 std::swap(Ops[0], Ops[1]);
}
39378 // Finally, try to combine into a single shuffle instruction.
39379 return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
39380 AllowVariableCrossLaneMask,
39381 AllowVariablePerLaneMask, DAG, Subtarget);
}
39384 // If that failed and any input is extracted then try to combine as a
39385 // shuffle with the larger type.
39386 return combineX86ShuffleChainWithExtract(
39387 Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
39388 AllowVariablePerLaneMask, DAG, Subtarget);
}
39391 /// Helper entry wrapper to combineX86ShufflesRecursively.
39392 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
39393 const X86Subtarget &Subtarget) {
39394 return combineX86ShufflesRecursively(
39395 {Op}, 0, Op, {0}, {}, /*Depth*/ 0, X86::MaxShuffleCombineDepth,
39396 /*HasVarMask*/ false,
39397 /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, DAG,
Subtarget);
}
39401 /// Get the PSHUF-style mask from PSHUF node.
39403 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
39404 /// PSHUF-style masks that can be reused with such instructions.
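/// Editorial example: a PSHUFD with immediate 0x1B yields the v4 mask
/// <3,2,1,0>, and a PSHUFHW's mask is returned rebased to the <0,3> range so
/// it can be fed straight back into getV4X86ShuffleImm8ForMask.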
39405 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
39406 MVT VT = N.getSimpleValueType();
39407 SmallVector<int, 4> Mask;
39408 SmallVector<SDValue, 2> Ops;
bool HaveMask =
39410 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask);
(void)HaveMask;
assert(HaveMask);
39414 // If we have more than 128-bits, only the low 128-bits of shuffle mask
39415 // matter. Check that the upper masks are repeats and remove them.
39416 if (VT.getSizeInBits() > 128) {
39417 int LaneElts = 128 / VT.getScalarSizeInBits();
#ifndef NDEBUG
39419 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
39420 for (int j = 0; j < LaneElts; ++j)
39421 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
39422 "Mask doesn't repeat in high 128-bit lanes!");
39424 Mask.resize(LaneElts);
39427 switch (N.getOpcode()) {
39428 case X86ISD::PSHUFD:
return Mask;
39430 case X86ISD::PSHUFLW:
Mask.resize(4);
return Mask;
39433 case X86ISD::PSHUFHW:
39434 Mask.erase(Mask.begin(), Mask.begin() + 4);
39435 for (int &M : Mask)
M -= 4;
return Mask;
default:
39439 llvm_unreachable("No valid shuffle instruction found!");
}
}
39443 /// Search for a combinable shuffle across a chain ending in pshufd.
39445 /// We walk up the chain and look for a combinable shuffle, skipping over
39446 /// shuffles that we could hoist this shuffle's transformation past without
39447 /// altering anything.
static SDValue
39449 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
39450 SelectionDAG &DAG) {
39451 assert(N.getOpcode() == X86ISD::PSHUFD &&
39452 "Called with something other than an x86 128-bit half shuffle!");
39455 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
39456 // of the shuffles in the chain so that we can form a fresh chain to replace
// this one.
39458 SmallVector<SDValue, 8> Chain;
39459 SDValue V = N.getOperand(0);
39460 for (; V.hasOneUse(); V = V.getOperand(0)) {
39461 switch (V.getOpcode()) {
default:
39463 return SDValue(); // Nothing combined!
case ISD::BITCAST:
39466 // Skip bitcasts as we always know the type for the target specific
// instructions.
continue;
39470 case X86ISD::PSHUFD:
39471 // Found another dword shuffle.
break;
39474 case X86ISD::PSHUFLW:
39475 // Check that the low words (being shuffled) are the identity in the
39476 // dword shuffle, and the high words are self-contained.
39477 if (Mask[0] != 0 || Mask[1] != 1 ||
39478 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
return SDValue();
39481 Chain.push_back(V);
continue;
39484 case X86ISD::PSHUFHW:
39485 // Check that the high words (being shuffled) are the identity in the
39486 // dword shuffle, and the low words are self-contained.
39487 if (Mask[2] != 2 || Mask[3] != 3 ||
39488 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
return SDValue();
39491 Chain.push_back(V);
continue;
39494 case X86ISD::UNPCKL:
39495 case X86ISD::UNPCKH:
39496 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
39497 // shuffle into a preceding word shuffle.
39498 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
39499 V.getSimpleValueType().getVectorElementType() != MVT::i16)
return SDValue();
39502 // Search for a half-shuffle which we can combine with.
39503 unsigned CombineOp =
39504 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
39505 if (V.getOperand(0) != V.getOperand(1) ||
39506 !V->isOnlyUserOf(V.getOperand(0).getNode()))
return SDValue();
39508 Chain.push_back(V);
39509 V = V.getOperand(0);
do {
39511 switch (V.getOpcode()) {
default:
39513 return SDValue(); // Nothing to combine.
39515 case X86ISD::PSHUFLW:
39516 case X86ISD::PSHUFHW:
39517 if (V.getOpcode() == CombineOp)
break;
39520 Chain.push_back(V);
LLVM_FALLTHROUGH;
case ISD::BITCAST:
39524 V = V.getOperand(0);
continue;
}
break;
39528 } while (V.hasOneUse());
39531 // Break out of the loop if we break out of the switch.
break;
}
39535 if (!V.hasOneUse())
39536 // We fell out of the loop without finding a viable combining instruction.
return SDValue();
39539 // Merge this node's mask and our incoming mask.
39540 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
39541 for (int &M : Mask)
M = VMask[M];
39543 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
39544 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
39546 // Rebuild the chain around this new shuffle.
39547 while (!Chain.empty()) {
39548 SDValue W = Chain.pop_back_val();
39550 if (V.getValueType() != W.getOperand(0).getValueType())
39551 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
39553 switch (W.getOpcode()) {
default:
39555 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
39557 case X86ISD::UNPCKL:
39558 case X86ISD::UNPCKH:
39559 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
break;
39562 case X86ISD::PSHUFD:
39563 case X86ISD::PSHUFLW:
39564 case X86ISD::PSHUFHW:
39565 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
break;
}
}
39569 if (V.getValueType() != N.getValueType())
39570 V = DAG.getBitcast(N.getValueType(), V);
39572 // Return the new chain to replace N.
return V;
}
39576 // Attempt to commute shufps LHS loads:
39577 // permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
39578 static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
39579 SelectionDAG &DAG) {
39580 // TODO: Add vXf64 support.
39581 if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
return SDValue();
39584 // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
39585 auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
39586 if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
return SDValue();
39588 SDValue N0 = V.getOperand(0);
39589 SDValue N1 = V.getOperand(1);
39590 unsigned Imm = V.getConstantOperandVal(2);
39591 const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
39592 if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
39593 X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
return SDValue();
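// Editorial note: SHUFP's low immediate nibble indexes the first operand
// and the high nibble the second, so commuting the operands just swaps the
// nibbles, e.g. 0xB1 -> 0x1B.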
39595 Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
39596 return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
39597 DAG.getTargetConstant(Imm, DL, MVT::i8));
};
39600 switch (N.getOpcode()) {
39601 case X86ISD::VPERMILPI:
39602 if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
39603 unsigned Imm = N.getConstantOperandVal(1);
39604 return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
39605 DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
}
break;
39608 case X86ISD::SHUFP: {
39609 SDValue N0 = N.getOperand(0);
39610 SDValue N1 = N.getOperand(1);
39611 unsigned Imm = N.getConstantOperandVal(2);
if (N0 == N1) {
39613 if (SDValue NewSHUFP = commuteSHUFP(N, N0))
39614 return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
39615 DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
39616 } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
39617 return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
39618 DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
39619 } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
39620 return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
39621 DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
}
break;
}
}
return SDValue();
}
39630 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
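// Editorial sketch: e.g. PSHUFD(AND(X, C)) becomes AND(PSHUFD(X), PSHUFD(C))
// when the constant C can be shuffled for free, exposing the constant
// operand to further folds.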
39631 static SDValue canonicalizeShuffleWithBinOps(SDValue N, SelectionDAG &DAG,
const SDLoc &DL) {
39633 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39634 EVT ShuffleVT = N.getValueType();
39636 auto IsMergeableWithShuffle = [&DAG](SDValue Op, bool FoldLoad = false) {
39637 // AllZeros/AllOnes constants are freely shuffled and will peek through
39638 // bitcasts. Other constant build vectors do not peek through bitcasts. Only
39639 // merge with target shuffles if it has one use so shuffle combining is
39640 // likely to kick in. Shuffles of splats are expected to be removed.
39641 return ISD::isBuildVectorAllOnes(Op.getNode()) ||
39642 ISD::isBuildVectorAllZeros(Op.getNode()) ||
39643 ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
39644 ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()) ||
39645 (isTargetShuffle(Op.getOpcode()) && Op->hasOneUse()) ||
39646 (FoldLoad && isShuffleFoldableLoad(Op)) ||
39647 DAG.isSplatValue(Op, /*AllowUndefs*/ false);
};
39649 auto IsSafeToMoveShuffle = [ShuffleVT](SDValue Op, unsigned BinOp) {
39650 // Ensure we only shuffle whole vector src elements, unless it's a logical
39651 // binop, where we can more aggressively move shuffles from dst to src.
39652 return BinOp == ISD::AND || BinOp == ISD::OR || BinOp == ISD::XOR ||
39653 BinOp == X86ISD::ANDNP ||
39654 (Op.getScalarValueSizeInBits() <= ShuffleVT.getScalarSizeInBits());
};
39657 unsigned Opc = N.getOpcode();
switch (Opc) {
39659 // Unary and Unary+Permute Shuffles.
39660 case X86ISD::PSHUFB: {
39661 // Don't merge PSHUFB if it contains zero'd elements.
39662 SmallVector<int> Mask;
39663 SmallVector<SDValue> Ops;
39664 if (!getTargetShuffleMask(N.getNode(), ShuffleVT.getSimpleVT(), false, Ops,
Mask))
break;
if (isAnyZero(Mask))
break;
LLVM_FALLTHROUGH;
}
39669 case X86ISD::VBROADCAST:
39670 case X86ISD::MOVDDUP:
39671 case X86ISD::PSHUFD:
39672 case X86ISD::PSHUFHW:
39673 case X86ISD::PSHUFLW:
39674 case X86ISD::VPERMI:
39675 case X86ISD::VPERMILPI: {
39676 if (N.getOperand(0).getValueType() == ShuffleVT &&
39677 N->isOnlyUserOf(N.getOperand(0).getNode())) {
39678 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
39679 unsigned SrcOpcode = N0.getOpcode();
39680 if (TLI.isBinOp(SrcOpcode) && IsSafeToMoveShuffle(N0, SrcOpcode)) {
39681 SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39682 SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
39683 if (IsMergeableWithShuffle(Op00, Opc != X86ISD::PSHUFB) ||
39684 IsMergeableWithShuffle(Op01, Opc != X86ISD::PSHUFB)) {
SDValue LHS, RHS;
39686 Op00 = DAG.getBitcast(ShuffleVT, Op00);
39687 Op01 = DAG.getBitcast(ShuffleVT, Op01);
39688 if (N.getNumOperands() == 2) {
39689 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1));
39690 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, N.getOperand(1));
} else {
39692 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00);
39693 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01);
}
39695 EVT OpVT = N0.getValueType();
39696 return DAG.getBitcast(ShuffleVT,
39697 DAG.getNode(SrcOpcode, DL, OpVT,
39698 DAG.getBitcast(OpVT, LHS),
39699 DAG.getBitcast(OpVT, RHS)));
}
}
}
break;
}
39705 // Binary and Binary+Permute Shuffles.
39706 case X86ISD::INSERTPS: {
39707 // Don't merge INSERTPS if it contains zero'd elements.
39708 unsigned InsertPSMask = N.getConstantOperandVal(2);
39709 unsigned ZeroMask = InsertPSMask & 0xF;
if (ZeroMask != 0)
break;
LLVM_FALLTHROUGH;
}
39714 case X86ISD::MOVSD:
39715 case X86ISD::MOVSS:
39716 case X86ISD::BLENDI:
39717 case X86ISD::SHUFP:
39718 case X86ISD::UNPCKH:
39719 case X86ISD::UNPCKL: {
39720 if (N->isOnlyUserOf(N.getOperand(0).getNode()) &&
39721 N->isOnlyUserOf(N.getOperand(1).getNode())) {
39722 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
39723 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
39724 unsigned SrcOpcode = N0.getOpcode();
39725 if (TLI.isBinOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
39726 IsSafeToMoveShuffle(N0, SrcOpcode) &&
39727 IsSafeToMoveShuffle(N1, SrcOpcode)) {
39728 SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39729 SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
39730 SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
39731 SDValue Op11 = peekThroughOneUseBitcasts(N1.getOperand(1));
39732 // Ensure the total number of shuffles doesn't increase by folding this
39733 // shuffle through to the source ops.
39734 if (((IsMergeableWithShuffle(Op00) && IsMergeableWithShuffle(Op10)) ||
39735 (IsMergeableWithShuffle(Op01) && IsMergeableWithShuffle(Op11))) ||
39736 ((IsMergeableWithShuffle(Op00) || IsMergeableWithShuffle(Op10)) &&
39737 (IsMergeableWithShuffle(Op01) || IsMergeableWithShuffle(Op11)))) {
SDValue LHS, RHS;
39739 Op00 = DAG.getBitcast(ShuffleVT, Op00);
39740 Op10 = DAG.getBitcast(ShuffleVT, Op10);
39741 Op01 = DAG.getBitcast(ShuffleVT, Op01);
39742 Op11 = DAG.getBitcast(ShuffleVT, Op11);
39743 if (N.getNumOperands() == 3) {
39744 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
39745 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11, N.getOperand(2));
} else {
39747 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
39748 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11);
}
39750 EVT OpVT = N0.getValueType();
39751 return DAG.getBitcast(ShuffleVT,
39752 DAG.getNode(SrcOpcode, DL, OpVT,
39753 DAG.getBitcast(OpVT, LHS),
39754 DAG.getBitcast(OpVT, RHS)));
}
}
}
break;
}
}
return SDValue();
}
39764 /// Attempt to fold vpermf128(op(),op()) -> op(vpermf128(),vpermf128()).
39765 static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
SelectionDAG &DAG,
const SDLoc &DL) {
39768 assert(V.getOpcode() == X86ISD::VPERM2X128 && "Unknown lane shuffle");
39770 MVT VT = V.getSimpleValueType();
39771 SDValue Src0 = peekThroughBitcasts(V.getOperand(0));
39772 SDValue Src1 = peekThroughBitcasts(V.getOperand(1));
39773 unsigned SrcOpc0 = Src0.getOpcode();
39774 unsigned SrcOpc1 = Src1.getOpcode();
39775 EVT SrcVT0 = Src0.getValueType();
39776 EVT SrcVT1 = Src1.getValueType();
39778 if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
39782 case X86ISD::MOVDDUP: {
39783 SDValue LHS = Src0.getOperand(0);
39784 SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
39786 DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
39787 Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
39788 return DAG.getBitcast(VT, Res);
39790 case X86ISD::VPERMILPI:
39791 // TODO: Handle v4f64 permutes with different low/high lane masks.
39792 if (SrcVT0 == MVT::v4f64) {
39793 uint64_t Mask = Src0.getConstantOperandVal(1);
39794 if ((Mask & 0x3) != ((Mask >> 2) & 0x3))
39798 case X86ISD::VSHLI:
39799 case X86ISD::VSRLI:
39800 case X86ISD::VSRAI:
39801 case X86ISD::PSHUFD:
39802 if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
39803 SDValue LHS = Src0.getOperand(0);
39804 SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
39805 SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
39807 Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
39808 return DAG.getBitcast(VT, Res);
/// Try to combine x86 target specific shuffles.
static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();
  SmallVector<int, 4> Mask;
  unsigned Opcode = N.getOpcode();

  if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
    return R;

  // Handle specific target shuffles.
  switch (Opcode) {
  case X86ISD::MOVDDUP: {
    SDValue Src = N.getOperand(0);
    // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
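    // Added example: (v2f64 (movddup (v2f64 (load p)))) becomes
    // (v2f64 (movddup (vzload64 p))), so only the 64 bits that MOVDDUP
    // actually reads are loaded; narrowLoadToVZLoad performs the narrowing.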
    if (VT == MVT::v2f64 && Src.hasOneUse() &&
        ISD::isNormalLoad(Src.getNode())) {
      LoadSDNode *LN = cast<LoadSDNode>(Src);
      if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
        SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
        DCI.CombineTo(N.getNode(), Movddup);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N; // Return N so it doesn't get rechecked!
      }
    }

    return SDValue();
  }
  case X86ISD::VBROADCAST: {
    SDValue Src = N.getOperand(0);
    SDValue BC = peekThroughBitcasts(Src);
    EVT SrcVT = Src.getValueType();
    EVT BCVT = BC.getValueType();

    // If broadcasting from another shuffle, attempt to simplify it.
    // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
    if (isTargetShuffle(BC.getOpcode()) &&
        VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
      unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
      SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
                                        SM_SentinelUndef);
      for (unsigned i = 0; i != Scale; ++i)
        DemandedMask[i] = i;
      if (SDValue Res = combineX86ShufflesRecursively(
              {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
              X86::MaxShuffleCombineDepth,
              /*HasVarMask*/ false, /*AllowCrossLaneVarMask*/ true,
              /*AllowPerLaneVarMask*/ true, DAG, Subtarget))
        return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                           DAG.getBitcast(SrcVT, Res));
    }

    // broadcast(bitcast(src)) -> bitcast(broadcast(src))
    // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
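    // Added example: (v4i64 (vbroadcast (i64 (bitcast (f64 x))))) becomes
    // (v4i64 (bitcast (v4f64 (vbroadcast (f64 x))))), keeping the broadcast
    // in the f64 domain, which 32-bit targets can lower without a GPR round
    // trip.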
    if (Src.getOpcode() == ISD::BITCAST &&
        SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
        DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
        FixedVectorType::isValidElementType(
            BCVT.getScalarType().getTypeForEVT(*DAG.getContext()))) {
      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
                                   VT.getVectorNumElements());
      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
    }

    // Reduce broadcast source vector to lowest 128-bits.
    if (SrcVT.getSizeInBits() > 128)
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                         extract128BitVector(Src, 0, DAG, DL));

    // broadcast(scalar_to_vector(x)) -> broadcast(x).
    if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));

    // broadcast(extract_vector_elt(x, 0)) -> broadcast(x).
    if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        isNullConstant(Src.getOperand(1)) &&
        DAG.getTargetLoweringInfo().isTypeLegal(
            Src.getOperand(0).getValueType()))
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));

    // Share broadcast with the longest vector and extract low subvector (free).
    // Ensure the same SDValue from the SDNode use is being used.
    for (SDNode *User : Src->uses())
      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
          Src == User->getOperand(0) &&
          User->getValueSizeInBits(0).getFixedSize() >
              VT.getFixedSizeInBits()) {
        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
                                VT.getSizeInBits());
      }

    // vbroadcast(scalarload X) -> vbroadcast_load X
    // For float loads, extract other uses of the scalar from the broadcast.
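    // Added example: an f32 load with extra scalar users becomes
    //   vbroadcast_load p  plus  extract_vector_elt(vbroadcast_load p, 0)
    // replacing the scalar load, so the memory is only touched once.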
    if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
        ISD::isNormalLoad(Src.getNode())) {
      LoadSDNode *LN = cast<LoadSDNode>(Src);
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue BcastLd =
          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                  LN->getMemoryVT(), LN->getMemOperand());
      // If the load value is used only by N, replace it via CombineTo N.
      bool NoReplaceExtract = Src.hasOneUse();
      DCI.CombineTo(N.getNode(), BcastLd);
      if (NoReplaceExtract) {
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
      } else {
        SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
                                  DAG.getIntPtrConstant(0, DL));
        DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
      }
      return N; // Return N so it doesn't get rechecked!
    }

    // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
    // i16. So shrink it ourselves if we can make a broadcast_load.
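    // Added example: (vbroadcast (i16 (trunc (i32 (load p))))) becomes a
    // direct 16-bit broadcast load, (vbroadcast_load<i16> p), instead of
    // keeping the wider i32 load alive just to feed the truncate.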
    if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
        Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
      assert(Subtarget.hasAVX2() && "Expected AVX2");
      SDValue TruncIn = Src.getOperand(0);

      // If this is a truncate of a non-extending load we can just narrow it to
      // use a broadcast_load.
      if (ISD::isNormalLoad(TruncIn.getNode())) {
        LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
        // Unless it's volatile or atomic.
        if (LN->isSimple()) {
          SDVTList Tys = DAG.getVTList(VT, MVT::Other);
          SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
          SDValue BcastLd = DAG.getMemIntrinsicNode(
              X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
              LN->getPointerInfo(), LN->getOriginalAlign(),
              LN->getMemOperand()->getFlags());
          DCI.CombineTo(N.getNode(), BcastLd);
          DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
          DCI.recursivelyDeleteUnusedNodes(Src.getNode());
          return N; // Return N so it doesn't get rechecked!
        }
      }

      // If this is a truncate of an i16 extload, we can directly replace it.
      if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
          ISD::isEXTLoad(Src.getOperand(0).getNode())) {
        LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
        if (LN->getMemoryVT().getSizeInBits() == 16) {
          SDVTList Tys = DAG.getVTList(VT, MVT::Other);
          SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
          SDValue BcastLd =
              DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                      LN->getMemoryVT(), LN->getMemOperand());
          DCI.CombineTo(N.getNode(), BcastLd);
          DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
          DCI.recursivelyDeleteUnusedNodes(Src.getNode());
          return N; // Return N so it doesn't get rechecked!
        }
      }

      // If this is a truncate of a load that has been shifted right, we can
      // offset the pointer and use a narrower load.
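      // Added example: (i16 (trunc (srl (i32 (load p)), 16))) reads exactly
      // the upper half of the i32, so it can become (vbroadcast_load<i16>
      // (p + 2)) - the shift amount in bits divided by 8 gives the pointer
      // offset in bytes.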
      if (TruncIn.getOpcode() == ISD::SRL &&
          TruncIn.getOperand(0).hasOneUse() &&
          isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
          ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
        LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
        unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
        // Make sure the shift amount and the load size are divisible by 16.
        // Don't do this if the load is volatile or atomic.
        if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
            LN->isSimple()) {
          unsigned Offset = ShiftAmt / 8;
          SDVTList Tys = DAG.getVTList(VT, MVT::Other);
          SDValue Ptr = DAG.getMemBasePlusOffset(LN->getBasePtr(),
                                                 TypeSize::Fixed(Offset), DL);
          SDValue Ops[] = { LN->getChain(), Ptr };
          SDValue BcastLd = DAG.getMemIntrinsicNode(
              X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
              LN->getPointerInfo().getWithOffset(Offset),
              LN->getOriginalAlign(),
              LN->getMemOperand()->getFlags());
          DCI.CombineTo(N.getNode(), BcastLd);
          DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
          DCI.recursivelyDeleteUnusedNodes(Src.getNode());
          return N; // Return N so it doesn't get rechecked!
        }
      }
    }

    // vbroadcast(vzload X) -> vbroadcast_load X
    if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
      MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
      if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
        SDVTList Tys = DAG.getVTList(VT, MVT::Other);
        SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
        SDValue BcastLd =
            DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                    LN->getMemoryVT(), LN->getMemOperand());
        DCI.CombineTo(N.getNode(), BcastLd);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N; // Return N so it doesn't get rechecked!
      }
    }

    // vbroadcast(vector load X) -> vbroadcast_load
    if ((SrcVT == MVT::v2f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v2i64 ||
         SrcVT == MVT::v4i32) &&
        Src.hasOneUse() && ISD::isNormalLoad(Src.getNode())) {
      LoadSDNode *LN = cast<LoadSDNode>(Src);
      // Unless the load is volatile or atomic.
      if (LN->isSimple()) {
        SDVTList Tys = DAG.getVTList(VT, MVT::Other);
        SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
        SDValue BcastLd = DAG.getMemIntrinsicNode(
            X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SrcVT.getScalarType(),
            LN->getPointerInfo(), LN->getOriginalAlign(),
            LN->getMemOperand()->getFlags());
        DCI.CombineTo(N.getNode(), BcastLd);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N; // Return N so it doesn't get rechecked!
      }
    }

    return SDValue();
  }
  case X86ISD::VZEXT_MOVL: {
    SDValue N0 = N.getOperand(0);

    // If this is a vzmovl of a full vector load, replace it with a vzload,
    // unless the load is volatile.
    if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
      auto *LN = cast<LoadSDNode>(N0);
      if (SDValue VZLoad =
              narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
        DCI.CombineTo(N.getNode(), VZLoad);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N;
      }
    }

    // If this is a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the
    // broadcast and can just use a VZEXT_LOAD.
    // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
    if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
      auto *LN = cast<MemSDNode>(N0);
      if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
        SDVTList Tys = DAG.getVTList(VT, MVT::Other);
        SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
        SDValue VZLoad =
            DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
                                    LN->getMemoryVT(), LN->getMemOperand());
        DCI.CombineTo(N.getNode(), VZLoad);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N;
      }
    }

    // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
    // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
    // if the upper bits of the i64 are zero.
    if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
        N0.getOperand(0).hasOneUse() &&
        N0.getOperand(0).getValueType() == MVT::i64) {
      SDValue In = N0.getOperand(0);
      APInt Mask = APInt::getHighBitsSet(64, 32);
      if (DAG.MaskedValueIsZero(In, Mask)) {
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
        MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
        SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
        SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
        return DAG.getBitcast(VT, Movl);
      }
    }

    // Load a scalar integer constant directly to XMM instead of transferring an
    // immediate value from GPR.
    // vzext_movl (scalar_to_vector C) --> load [C,0...]
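    // Added example: (vzext_movl (scalar_to_vector (i32 42))) becomes a load
    // of the constant-pool vector <42, 0, 0, 0>, avoiding a GPR->XMM transfer.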
    if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
        // Create a vector constant - scalar constant followed by zeros.
        EVT ScalarVT = N0.getOperand(0).getValueType();
        Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
        unsigned NumElts = VT.getVectorNumElements();
        Constant *Zero = ConstantInt::getNullValue(ScalarTy);
        SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
        ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());

        // Load the vector constant from constant pool.
        MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
        MachinePointerInfo MPI =
            MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
        Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
        return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
                           MachineMemOperand::MOLoad);
      }
    }

    // Pull subvector inserts into undef through VZEXT_MOVL by making it an
    // insert into a zero vector. This helps get VZEXT_MOVL closer to
    // scalar_to_vectors where 256/512 are canonicalized to an insert and a
    // 128-bit scalar_to_vector. This reduces the number of isel patterns.
    if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
      SDValue V = peekThroughOneUseBitcasts(N0);

      if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
          isNullConstant(V.getOperand(2))) {
        SDValue In = V.getOperand(1);
        MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
                                     In.getValueSizeInBits() /
                                         VT.getScalarSizeInBits());
        In = DAG.getBitcast(SubVT, In);
        SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, SubVT, In);
        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                           getZeroVector(VT, Subtarget, DAG, DL), Movl,
                           V.getOperand(2));
      }
    }

    return SDValue();
  }
  case X86ISD::BLENDI: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);

    // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
    // TODO: Handle MVT::v16i16 repeated blend mask.
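    // Added example of the mask scaling below: a v4i64 blend with mask 0b0101
    // re-expressed on v8i32 needs each mask bit repeated per i32 half, giving
    // 0b00110011 (see scaleVectorShuffleBlendMask).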
    if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
      MVT SrcVT = N0.getOperand(0).getSimpleValueType();
      if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
          SrcVT.getScalarSizeInBits() >= 32) {
        unsigned BlendMask = N.getConstantOperandVal(2);
        unsigned Size = VT.getVectorNumElements();
        unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
        BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
        return DAG.getBitcast(
            VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
                            N1.getOperand(0),
                            DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
      }
    }
    return SDValue();
  }
  case X86ISD::SHUFP: {
    // Fold shufps(shuffle(x),shuffle(y)) -> shufps(x,y).
    // This is a more relaxed shuffle combiner that can ignore oneuse limits.
    // TODO: Support types other than v4f32.
    if (VT == MVT::v4f32) {
      bool Updated = false;
      SmallVector<int> Mask;
      SmallVector<SDValue> Ops;
      if (getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask) &&
          Ops.size() == 2) {
        for (int i = 0; i != 2; ++i) {
          SmallVector<SDValue> SubOps;
          SmallVector<int> SubMask, SubScaledMask;
          SDValue Sub = peekThroughBitcasts(Ops[i]);
          // TODO: Scaling might be easier if we specify the demanded elts.
          if (getTargetShuffleInputs(Sub, SubOps, SubMask, DAG, 0, false) &&
              scaleShuffleElements(SubMask, 4, SubScaledMask) &&
              SubOps.size() == 1 && isUndefOrInRange(SubScaledMask, 0, 4)) {
            int Ofs = i * 2;
            Mask[Ofs + 0] = SubScaledMask[Mask[Ofs + 0] % 4] + (i * 4);
            Mask[Ofs + 1] = SubScaledMask[Mask[Ofs + 1] % 4] + (i * 4);
            Ops[i] = DAG.getBitcast(VT, SubOps[0]);
            Updated = true;
          }
        }
      }
      if (Updated) {
        for (int &M : Mask)
          M %= 4;
        Ops.push_back(getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
        return DAG.getNode(X86ISD::SHUFP, DL, VT, Ops);
      }
    }
    return SDValue();
  }
  case X86ISD::VPERMI: {
    // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
    // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    unsigned EltSizeInBits = VT.getScalarSizeInBits();
    if (N0.getOpcode() == ISD::BITCAST &&
        N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
      SDValue Src = N0.getOperand(0);
      EVT SrcVT = Src.getValueType();
      SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
      return DAG.getBitcast(VT, Res);
    }
    return SDValue();
  }
  case X86ISD::VPERM2X128: {
    // Fold vperm2x128(bitcast(x),bitcast(y),c) -> bitcast(vperm2x128(x,y,c)).
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    if (LHS.getOpcode() == ISD::BITCAST &&
        (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
      EVT SrcVT = LHS.getOperand(0).getValueType();
      if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
        return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
                                              DAG.getBitcast(SrcVT, LHS),
                                              DAG.getBitcast(SrcVT, RHS),
                                              N->getOperand(2)));
      }
    }

    // Fold vperm2x128(op(),op()) -> op(vperm2x128(),vperm2x128()).
    if (SDValue Res = canonicalizeLaneShuffleWithRepeatedOps(N, DAG, DL))
      return Res;

    // Fold vperm2x128 subvector shuffle with an inner concat pattern.
    // vperm2x128(concat(X,Y),concat(Z,W)) --> concat X,Y etc.
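    // Added note on the immediate: each nibble of Imm selects a 128-bit half,
    // 0/1 from the first source and 2/3 from the second, e.g. Imm = 0x20
    // yields concat(lo(LHS), lo(RHS)).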
    auto FindSubVector128 = [&](unsigned Idx) {
      if (Idx > 3)
        return SDValue();
      SDValue Src = peekThroughBitcasts(N.getOperand(Idx < 2 ? 0 : 1));
      SmallVector<SDValue> SubOps;
      if (collectConcatOps(Src.getNode(), SubOps, DAG) && SubOps.size() == 2)
        return SubOps[Idx & 1];
      unsigned NumElts = Src.getValueType().getVectorNumElements();
      if ((Idx & 1) == 1 && Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
          Src.getOperand(1).getValueSizeInBits() == 128 &&
          Src.getConstantOperandAPInt(2) == (NumElts / 2)) {
        return Src.getOperand(1);
      }
      return SDValue();
    };
    unsigned Imm = N.getConstantOperandVal(2);
    if (SDValue SubLo = FindSubVector128(Imm & 0x0F)) {
      if (SDValue SubHi = FindSubVector128((Imm & 0xF0) >> 4)) {
        MVT SubVT = VT.getHalfNumVectorElementsVT();
        SubLo = DAG.getBitcast(SubVT, SubLo);
        SubHi = DAG.getBitcast(SubVT, SubHi);
        return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, SubLo, SubHi);
      }
    }
    return SDValue();
  }
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  case X86ISD::MOVSD:
  case X86ISD::MOVSH:
  case X86ISD::MOVSS: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);

    // Canonicalize scalar FPOps:
    // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
    // If commutable, allow OP(N1[0], N0[0]).
    unsigned Opcode1 = N1.getOpcode();
    if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
        Opcode1 == ISD::FDIV) {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      if (N10 == N0 ||
          (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
        if (N10 != N0)
          std::swap(N10, N11);
        MVT SVT = VT.getVectorElementType();
        SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
        N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
        N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
        SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
        SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
        return DAG.getNode(Opcode, DL, VT, N0, SclVec);
      }
    }

    return SDValue();
  }
  case X86ISD::INSERTPS: {
    assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);
    unsigned InsertPSMask = N.getConstantOperandVal(2);
    unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
    unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
    unsigned ZeroMask = InsertPSMask & 0xF;

    // If we zero out all elements from Op0 then we don't need to reference it.
    if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));

    // If we zero out the element from Op1 then we don't need to reference it.
    if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));

    // Attempt to merge insertps Op1 with an inner target shuffle node.
    SmallVector<int, 8> TargetMask1;
    SmallVector<SDValue, 2> Ops1;
    APInt KnownUndef1, KnownZero1;
    if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
                                     KnownZero1)) {
      if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
        // Zero/UNDEF insertion - zero out element and remove dependency.
        InsertPSMask |= (1u << DstIdx);
        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
      }
      // Update insertps mask srcidx and reference the source input directly.
      int M = TargetMask1[SrcIdx];
      assert(0 <= M && M < 8 && "Shuffle index out of range");
      InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
      Op1 = Ops1[M < 4 ? 0 : 1];
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
    }

    // Attempt to merge insertps Op0 with an inner target shuffle node.
    SmallVector<int, 8> TargetMask0;
    SmallVector<SDValue, 2> Ops0;
    APInt KnownUndef0, KnownZero0;
    if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
                                     KnownZero0)) {
      bool Updated = false;
      bool UseInput00 = false;
      bool UseInput01 = false;
      for (int i = 0; i != 4; ++i) {
        if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
          // No change if element is already zero or the inserted element.
          continue;
        }

        if (KnownUndef0[i] || KnownZero0[i]) {
          // If the target mask is undef/zero then we must zero the element.
          InsertPSMask |= (1u << i);
          Updated = true;
          continue;
        }

        // The input vector element must be inline.
        int M = TargetMask0[i];
        if (M != i && M != (i + 4))
          return SDValue();

        // Determine which inputs of the target shuffle we're using.
        UseInput00 |= (0 <= M && M < 4);
        UseInput01 |= (4 <= M);
      }

      // If we're not using both inputs of the target shuffle then use the
      // referenced input directly.
      if (UseInput00 && !UseInput01) {
        Updated = true;
        Op0 = Ops0[0];
      } else if (!UseInput00 && UseInput01) {
        Updated = true;
        Op0 = Ops0[1];
      }

      if (Updated)
        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
    }

    // If we're inserting an element from a vbroadcast load, fold the
    // load into the X86insertps instruction. We need to convert the scalar
    // load to a vector and clear the source lane of the INSERTPS control.
    if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
      auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
      if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
        SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
                                   MemIntr->getBasePtr(),
                                   MemIntr->getMemOperand());
        SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
                                     DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
                                                 Load),
                                     DAG.getTargetConstant(InsertPSMask & 0x3f,
                                                           DL, MVT::i8));
        DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
        return Insert;
      }
    }

    return SDValue();
  }
  default:
    return SDValue();
  }
  // Nuke no-op shuffles that show up after combining.
  if (isNoopShuffleMask(Mask))
    return N.getOperand(0);

  // Look for simplifications involving one or two shuffle instructions.
  SDValue V = N.getOperand(0);
  switch (N.getOpcode()) {
  default:
    break;
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");

    // See if this reduces to a PSHUFD which is no more expensive and can
    // combine with more operations. Note that it has to at least flip the
    // dwords as otherwise it would have been removed as a no-op.
    if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
      int DMask[] = {0, 1, 2, 3};
      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
      DMask[DOffset + 0] = DOffset + 1;
      DMask[DOffset + 1] = DOffset + 0;
      MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
      V = DAG.getBitcast(DVT, V);
      V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
                      getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
      return DAG.getBitcast(VT, V);
    }

    // Look for shuffle patterns which can be implemented as a single unpack.
    // FIXME: This doesn't handle the location of the PSHUFD generically, and
    // only works when we have a PSHUFD followed by two half-shuffles.
    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
        (V.getOpcode() == X86ISD::PSHUFLW ||
         V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse() && V.getOperand(0).hasOneUse()) {
      SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
      if (D.getOpcode() == X86ISD::PSHUFD) {
        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int WordMask[8];
        for (int i = 0; i < 4; ++i) {
          WordMask[i + NOffset] = Mask[i] + NOffset;
          WordMask[i + VOffset] = VMask[i] + VOffset;
        }
        // Map the word mask through the DWord mask.
        int MappedMask[8];
        for (int i = 0; i < 8; ++i)
          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
        if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
            makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
          // We can replace all three shuffles with an unpack.
          V = DAG.getBitcast(VT, D.getOperand(0));
          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                                : X86ISD::UNPCKH,
                             DL, VT, V, V);
        }
      }
    }

    break;

  case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
      return NewN;

    break;
  }

  return SDValue();
}
/// Checks if the shuffle mask takes subsequent elements
/// alternately from two vectors.
/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {

  int ParitySrc[2] = {-1, -1};
  unsigned Size = Mask.size();
  for (unsigned i = 0; i != Size; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;

    // Make sure we are using the matching element from the input.
    if ((M % Size) != i)
      return false;

    // Make sure we use the same input for all elements of the same parity.
    int Src = M / Size;
    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
      return false;
    ParitySrc[i % 2] = Src;
  }

  // Make sure each input is used.
  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
    return false;

  Op0Even = ParitySrc[0] == 0;
  return true;
}
/// Returns true iff the shuffle node \p N can be replaced with an ADDSUB
/// (SUBADD) operation. If true is returned then the operands of the ADDSUB
/// (SUBADD) operation are written to the parameters \p Opnd0 and \p Opnd1.
///
/// We combine shuffle to ADDSUB(SUBADD) directly on the abstract vector
/// shuffle nodes so it is easier to generically match. We also insert dummy
/// vector shuffle nodes for the operands which explicitly discard the lanes
/// which are unused by this operation to try to propagate the fact that they
/// are unused through the rest of the combiner.
static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
                             bool &IsSubAdd) {

  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
      !VT.getSimpleVT().isFloatingPoint())
    return false;

  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);

  // Make sure we have an FADD and an FSUB.
  if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
      (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
      V1.getOpcode() == V2.getOpcode())
    return false;

  // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return false;

  // Ensure that both operations have the same operands. Note that we can
  // commute the FADD operands.
  SDValue LHS, RHS;
  if (V1.getOpcode() == ISD::FSUB) {
    LHS = V1->getOperand(0); RHS = V1->getOperand(1);
    if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
        (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
      return false;
  } else {
    assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
    LHS = V2->getOperand(0); RHS = V2->getOperand(1);
    if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
        (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
      return false;
  }

  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  bool Op0Even;
  if (!isAddSubOrSubAddMask(Mask, Op0Even))
    return false;

  // It's a subadd if the vector in the even parity is an FADD.
  IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
                     : V2->getOpcode() == ISD::FADD;

  Opnd0 = LHS;
  Opnd1 = RHS;
  return true;
}
/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
static SDValue combineShuffleToFMAddSub(SDNode *N,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
    return SDValue();

  // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue FMAdd = Op0, FMSub = Op1;
  if (FMSub.getOpcode() != X86ISD::FMSUB)
    std::swap(FMAdd, FMSub);

  if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
      FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
      FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
      FMAdd.getOperand(2) != FMSub.getOperand(2))
    return SDValue();

  // Check for correct shuffle mask.
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  bool Op0Even;
  if (!isAddSubOrSubAddMask(Mask, Op0Even))
    return SDValue();

  // FMAddSub takes zeroth operand from FMSub node.
  SDLoc DL(N);
  bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
  unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
  return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
                     FMAdd.getOperand(2));
}
/// Try to combine a shuffle into a target-specific add-sub or
/// mul-add-sub node.
static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
                                                const X86Subtarget &Subtarget,
                                                SelectionDAG &DAG) {
  if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
    return V;

  SDValue Opnd0, Opnd1;
  bool IsSubAdd;
  if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

  // Try to generate X86ISD::FMADDSUB node here.
  SDValue Opnd2;
  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
  }

  if (IsSubAdd)
    return SDValue();

  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
  // the ADDSUB idiom has been successfully recognized. There are no known
  // X86 targets with 512-bit ADDSUB instructions!
  if (VT.is512BitVector())
    return SDValue();

  // Do not generate X86ISD::ADDSUB node for FP16's vector types even though
  // the ADDSUB idiom has been successfully recognized. There are no known
  // X86 targets with FP16 ADDSUB instructions!
  if (VT.getVectorElementType() == MVT::f16)
    return SDValue();

  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}
// We are looking for a shuffle where both sources are concatenated with undef
// and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
// if we can express this as a single-source shuffle, that's preferable.
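// Added example: with v8i32 sources t1/t2 widened as concat(t1,undef) and
// concat(t2,undef), a mask element 8 (lane 0 of the second source) is
// remapped to 8 - 8/2 = 4, i.e. lane 0 of t2 inside the new concat(t1,t2).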
static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
    return SDValue();

  EVT VT = N->getValueType(0);

  // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
  if (!VT.is128BitVector() && !VT.is256BitVector())
    return SDValue();

  if (VT.getVectorElementType() != MVT::i32 &&
      VT.getVectorElementType() != MVT::i64 &&
      VT.getVectorElementType() != MVT::f32 &&
      VT.getVectorElementType() != MVT::f64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Check that both sources are concats with undef.
  if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
      N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
      N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
      !N1.getOperand(1).isUndef())
    return SDValue();

  // Construct the new shuffle mask. Elements from the first source retain their
  // index, but elements from the second source no longer need to skip an undef.
  SmallVector<int, 8> Mask;
  int NumElts = VT.getVectorNumElements();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  for (int Elt : SVOp->getMask())
    Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));

  SDLoc DL(N);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
                               N1.getOperand(0));
  return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
}
/// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
/// low half of each source vector and does not set any high half elements in
/// the destination vector, narrow the shuffle to half its original size.
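/// Added example: a v8f32 shuffle with mask <0,8,1,9,u,u,u,u> only reads the
/// low v4f32 halves and leaves its own upper half undef, so it can run as a
/// v4f32 shuffle <0,4,1,5> whose result is reinserted for free via
/// subregisters.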
static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
  if (!Shuf->getValueType(0).isSimple())
    return SDValue();
  MVT VT = Shuf->getSimpleValueType(0);
  if (!VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  // See if we can ignore all of the high elements of the shuffle.
  ArrayRef<int> Mask = Shuf->getMask();
  if (!isUndefUpperHalf(Mask))
    return SDValue();

  // Check if the shuffle mask accesses only the low half of each input vector
  // (half-index output is 0 or 2).
  int HalfIdx1, HalfIdx2;
  SmallVector<int, 8> HalfMask(Mask.size() / 2);
  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
      (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
    return SDValue();

  // Create a half-width shuffle to replace the unnecessarily wide shuffle.
  // The trick is knowing that all of the insert/extract are actually free
  // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
  // of narrow inputs into a narrow output, and that is always cheaper than
  // the wide shuffle that we started with.
  return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
                               Shuf->getOperand(1), HalfMask, HalfIdx1,
                               HalfIdx2, false, DAG, /*UseConcat*/ true);
}
static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI,
                              const X86Subtarget &Subtarget) {
  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
    if (SDValue V = narrowShuffle(Shuf, DAG))
      return V;

  // If we have legalized the vector types, look for blends of FADD and FSUB
  // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(VT))
    if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
      return AddSub;

  // Attempt to combine into a vector load/broadcast.
  if (SDValue LD = combineToConsecutiveLoads(
          VT, SDValue(N, 0), dl, DAG, Subtarget, /*IsAfterLegalize*/ true))
    return LD;

  // For AVX2, we sometimes want to combine
  // (vector_shuffle <mask> (concat_vectors t1, undef)
  //                        (concat_vectors t2, undef))
  // Into:
  // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
  // Since the latter can be efficiently lowered with VPERMD/VPERMQ
  if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
    return ShufConcat;

  if (isTargetShuffle(N->getOpcode())) {
    SDValue Op(N, 0);
    if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
      return Shuffle;

    // Try recursively combining arbitrary sequences of x86 shuffle
    // instructions into higher-order shuffles. We do this after combining
    // specific PSHUF instruction sequences into their minimal form so that we
    // can evaluate how many specialized shuffle instructions are involved in
    // a particular chain.
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;

    // Simplify source operands based on shuffle mask.
    // TODO - merge this into combineX86ShufflesRecursively.
    APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
    if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, DCI))
      return SDValue(N, 0);

    // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
    // Perform this after other shuffle combines to allow inner shuffles to be
    // combined away first.
    if (SDValue BinOp = canonicalizeShuffleWithBinOps(Op, DAG, dl))
      return BinOp;
  }

  return SDValue();
}
// Simplify variable target shuffle masks based on the demanded elements.
// TODO: Handle DemandedBits in mask indices as well?
bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
    SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
    TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
  // If we're demanding all elements don't bother trying to simplify the mask.
  unsigned NumElts = DemandedElts.getBitWidth();
  if (DemandedElts.isAllOnes())
    return false;

  SDValue Mask = Op.getOperand(MaskIndex);
  if (!Mask.hasOneUse())
    return false;

  // Attempt to generically simplify the variable shuffle mask.
  APInt MaskUndef, MaskZero;
  if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
                                 Depth + 1))
    return true;

  // Attempt to extract+simplify a (constant pool load) shuffle mask.
  // TODO: Support other types from getTargetShuffleMaskIndices?
  SDValue BC = peekThroughOneUseBitcasts(Mask);
  EVT BCVT = BC.getValueType();
  auto *Load = dyn_cast<LoadSDNode>(BC);
  if (!Load)
    return false;

  const Constant *C = getTargetConstantFromNode(Load);
  if (!C)
    return false;

  Type *CTy = C->getType();
  if (!CTy->isVectorTy() ||
      CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
    return false;

  // Handle scaling for i64 elements on 32-bit targets.
  unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
  if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
    return false;
  unsigned Scale = NumCstElts / NumElts;

  // Simplify mask if we have an undemanded element that is not undef.
  bool Simplified = false;
  SmallVector<Constant *, 32> ConstVecOps;
  for (unsigned i = 0; i != NumCstElts; ++i) {
    Constant *Elt = C->getAggregateElement(i);
    if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
      ConstVecOps.push_back(UndefValue::get(Elt->getType()));
      Simplified = true;
      continue;
    }
    ConstVecOps.push_back(Elt);
  }
  if (!Simplified)
    return false;

  // Generate new constant pool entry + legalize immediately for the load.
  SDLoc DL(Op);
  SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
  SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
  SDValue NewMask = TLO.DAG.getLoad(
      BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
      MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
      Load->getAlign());
  return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
}
bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
    TargetLoweringOpt &TLO, unsigned Depth) const {
  int NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  // Handle special case opcodes.
  switch (Opc) {
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ: {
    APInt LHSUndef, LHSZero;
    APInt RHSUndef, RHSZero;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;
    // Multiply by zero.
    KnownZero = LHSZero | RHSZero;
    break;
  }
  case X86ISD::VPMADDWD: {
    APInt LHSUndef, LHSZero;
    APInt RHSUndef, RHSZero;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, 2 * NumElts);

    if (SimplifyDemandedVectorElts(LHS, DemandedSrcElts, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(RHS, DemandedSrcElts, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;

    // TODO: Multiply by zero.

    // If RHS/LHS elements are known zero then we don't need the LHS/RHS
    // equivalent.
    APInt DemandedLHSElts = DemandedSrcElts & ~RHSZero;
    if (SimplifyDemandedVectorElts(LHS, DemandedLHSElts, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    APInt DemandedRHSElts = DemandedSrcElts & ~LHSZero;
    if (SimplifyDemandedVectorElts(RHS, DemandedRHSElts, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::PSADBW: {
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    assert(VT.getScalarType() == MVT::i64 &&
           LHS.getValueType() == RHS.getValueType() &&
           LHS.getValueType().getScalarType() == MVT::i8 &&
           "Unexpected PSADBW types");

    // Aggressively peek through ops to get at the demanded elts.
    if (!DemandedElts.isAllOnes()) {
      unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
      APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
      SDValue NewLHS = SimplifyMultipleUseDemandedVectorElts(
          LHS, DemandedSrcElts, TLO.DAG, Depth + 1);
      SDValue NewRHS = SimplifyMultipleUseDemandedVectorElts(
          RHS, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewLHS || NewRHS) {
        NewLHS = NewLHS ? NewLHS : LHS;
        NewRHS = NewRHS ? NewRHS : RHS;
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
      }
    }
    break;
  }
  case X86ISD::VSHL:
  case X86ISD::VSRL:
  case X86ISD::VSRA: {
    // We only need the bottom 64-bits of the (128-bit) shift amount.
    SDValue Amt = Op.getOperand(1);
    MVT AmtVT = Amt.getSimpleValueType();
    assert(AmtVT.is128BitVector() && "Unexpected value type");

    // If we reuse the shift amount just for sse shift amounts then we know
    // that only the bottom 64-bits are ever used.
    bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
      unsigned UseOpc = Use->getOpcode();
      return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
              UseOpc == X86ISD::VSRA) &&
             Use->getOperand(0) != Amt;
    });

    APInt AmtUndef, AmtZero;
    unsigned NumAmtElts = AmtVT.getVectorNumElements();
    APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
    if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
                                   Depth + 1, AssumeSingleUse))
      return true;
    LLVM_FALLTHROUGH;
  }
  case X86ISD::VSHLI:
  case X86ISD::VSRLI:
  case X86ISD::VSRAI: {
    SDValue Src = Op.getOperand(0);
    APInt SrcUndef;
    if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    // Fold shift(0,x) -> 0
    if (DemandedElts.isSubsetOf(KnownZero))
      return TLO.CombineTo(
          Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));

    // Aggressively peek through ops to get at the demanded elts.
    if (!DemandedElts.isAllOnes())
      if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
              Src, DemandedElts, TLO.DAG, Depth + 1))
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc, Op.getOperand(1)));
    break;
  }
  case X86ISD::VPSHA:
  case X86ISD::VPSHL:
  case X86ISD::VSHLV:
  case X86ISD::VSRLV:
  case X86ISD::VSRAV: {
    APInt LHSUndef, LHSZero;
    APInt RHSUndef, RHSZero;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;

    // Fold shift(0,x) -> 0
    if (DemandedElts.isSubsetOf(LHSZero))
      return TLO.CombineTo(
          Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));

    if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;

    KnownZero = LHSZero;
    break;
  }
  case X86ISD::KSHIFTL: {
    SDValue Src = Op.getOperand(0);
    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
    unsigned ShiftAmt = Amt->getZExtValue();

    if (ShiftAmt == 0)
      return TLO.CombineTo(Op, Src);

    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
    // single shift. We can do this if the bottom bits (which are shifted
    // out) are never demanded.
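    // Added example: kshiftl(kshiftr(x,2),3) with the low 3 lanes undemanded
    // becomes kshiftl(x,1): Diff = 3 - 2 = 1 stays a left shift, while a
    // negative Diff would flip it to a kshiftr.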
    if (Src.getOpcode() == X86ISD::KSHIFTR) {
      if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
        unsigned C1 = Src.getConstantOperandVal(1);
        unsigned NewOpc = X86ISD::KSHIFTL;
        int Diff = ShiftAmt - C1;
        if (Diff < 0) {
          Diff = -Diff;
          NewOpc = X86ISD::KSHIFTR;
        }

        SDLoc dl(Op);
        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
      }
    }

    APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    KnownUndef <<= ShiftAmt;
    KnownZero <<= ShiftAmt;
    KnownZero.setLowBits(ShiftAmt);
    break;
  }
  case X86ISD::KSHIFTR: {
    SDValue Src = Op.getOperand(0);
    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
    unsigned ShiftAmt = Amt->getZExtValue();

    if (ShiftAmt == 0)
      return TLO.CombineTo(Op, Src);

    // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
    // single shift. We can do this if the top bits (which are shifted
    // out) are never demanded.
    if (Src.getOpcode() == X86ISD::KSHIFTL) {
      if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
        unsigned C1 = Src.getConstantOperandVal(1);
        unsigned NewOpc = X86ISD::KSHIFTR;
        int Diff = ShiftAmt - C1;
        if (Diff < 0) {
          Diff = -Diff;
          NewOpc = X86ISD::KSHIFTL;
        }

        SDLoc dl(Op);
        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
      }
    }

    APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    KnownUndef.lshrInPlace(ShiftAmt);
    KnownZero.lshrInPlace(ShiftAmt);
    KnownZero.setHighBits(ShiftAmt);
    break;
  }
  case X86ISD::ANDNP: {
    // ANDNP = (~LHS & RHS);
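    // Added note: a constant operand lets us shrink what the other side must
    // provide, e.g. in andnp(C,X) any lane where C is all-ones forces the
    // result lane to zero, so that lane of X is not demanded.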
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);

    auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
      APInt UndefElts;
      SmallVector<APInt> EltBits;
      int NumElts = VT.getVectorNumElements();
      int EltSizeInBits = VT.getScalarSizeInBits();
      APInt OpBits = APInt::getAllOnes(EltSizeInBits);
      APInt OpElts = DemandedElts;
      if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
                                        EltBits)) {
        OpBits.clearAllBits();
        OpElts.clearAllBits();
        for (int I = 0; I != NumElts; ++I) {
          if (!DemandedElts[I])
            continue;
          if (UndefElts[I]) {
            // We can't assume an undef src element gives an undef dst - the
            // other src might be zero.
            OpBits.setAllBits();
            OpElts.setBit(I);
          } else if ((Invert && !EltBits[I].isAllOnes()) ||
                     (!Invert && !EltBits[I].isZero())) {
            OpBits |= Invert ? ~EltBits[I] : EltBits[I];
            OpElts.setBit(I);
          }
        }
      }
      return std::make_pair(OpBits, OpElts);
    };
    APInt BitsLHS, EltsLHS;
    APInt BitsRHS, EltsRHS;
    std::tie(BitsLHS, EltsLHS) = GetDemandedMasks(RHS);
    std::tie(BitsRHS, EltsRHS) = GetDemandedMasks(LHS, true);

    APInt LHSUndef, LHSZero;
    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(LHS, EltsLHS, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(RHS, EltsRHS, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;

    if (!DemandedElts.isAllOnes()) {
      SDValue NewLHS = SimplifyMultipleUseDemandedBits(LHS, BitsLHS, EltsLHS,
                                                       TLO.DAG, Depth + 1);
      SDValue NewRHS = SimplifyMultipleUseDemandedBits(RHS, BitsRHS, EltsRHS,
                                                       TLO.DAG, Depth + 1);
      if (NewLHS || NewRHS) {
        NewLHS = NewLHS ? NewLHS : LHS;
        NewRHS = NewRHS ? NewRHS : RHS;
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
      }
    }
    break;
  }
  case X86ISD::CVTSI2P:
  case X86ISD::CVTUI2P: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    APInt SrcUndef, SrcZero;
    APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::PACKSS:
  case X86ISD::PACKUS: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);

    APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;

    // TODO - pass on known zero/undef.

    // Aggressively peek through ops to get at the demanded elts.
    // TODO - we should do this for all target/faux shuffles ops.
    if (!DemandedElts.isAllOnes()) {
      SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
                                                            TLO.DAG, Depth + 1);
      SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
                                                            TLO.DAG, Depth + 1);
      if (NewN0 || NewN1) {
        NewN0 = NewN0 ? NewN0 : N0;
        NewN1 = NewN1 ? NewN1 : N1;
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
      }
    }
    break;
  }
  case X86ISD::HADD:
  case X86ISD::HSUB:
  case X86ISD::FHADD:
  case X86ISD::FHSUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);

    APInt DemandedLHS, DemandedRHS;
    getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;

    // TODO - pass on known zero/undef.

    // Aggressively peek through ops to get at the demanded elts.
    // TODO: Handle repeated operands.
    if (N0 != N1 && !DemandedElts.isAllOnes()) {
      SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
                                                            TLO.DAG, Depth + 1);
      SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
                                                            TLO.DAG, Depth + 1);
      if (NewN0 || NewN1) {
        NewN0 = NewN0 ? NewN0 : N0;
        NewN1 = NewN1 ? NewN1 : N1;
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
      }
    }
    break;
  }
  case X86ISD::VTRUNC:
  case X86ISD::VTRUNCS:
  case X86ISD::VTRUNCUS: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    KnownZero = SrcZero.zextOrTrunc(NumElts);
    KnownUndef = SrcUndef.zextOrTrunc(NumElts);
    break;
  }
  case X86ISD::BLENDV: {
    APInt SelUndef, SelZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
                                   SelZero, TLO, Depth + 1))
      return true;

    // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
                                   LHSZero, TLO, Depth + 1))
      return true;

    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
                                   RHSZero, TLO, Depth + 1))
      return true;

    KnownZero = LHSZero & RHSZero;
    KnownUndef = LHSUndef & RHSUndef;
    break;
  }
  case X86ISD::VZEXT_MOVL: {
    // If upper demanded elements are already zero then we have nothing to do.
    SDValue Src = Op.getOperand(0);
    APInt DemandedUpperElts = DemandedElts;
    DemandedUpperElts.clearLowBits(1);
    if (TLO.DAG.MaskedVectorIsZero(Src, DemandedUpperElts, Depth + 1))
      return TLO.CombineTo(Op, Src);
    break;
  }
  case X86ISD::VBROADCAST: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    if (!SrcVT.isVector())
      break;
    // Don't bother broadcasting if we just need the 0'th element.
    if (DemandedElts == 1) {
      if (Src.getValueType() != VT)
        Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
                             SDLoc(Op));
      return TLO.CombineTo(Op, Src);
    }
    APInt SrcUndef, SrcZero;
    APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    // Aggressively peek through src to get at the demanded elt.
    // TODO - we should do this for all target/faux shuffles ops.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
            Src, SrcElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
    break;
  }
  case X86ISD::VPERMV:
    if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
                                                   Depth))
      return true;
    break;
  case X86ISD::PSHUFB:
  case X86ISD::VPERMV3:
  case X86ISD::VPERMILPV:
    if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
                                                   Depth))
      return true;
    break;
  case X86ISD::VPPERM:
  case X86ISD::VPERMIL2:
    if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
                                                   Depth))
      return true;
    break;
  }
41331 // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
41332 // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
41333 // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
41334 if ((VT.is256BitVector() || VT.is512BitVector()) &&
41335 DemandedElts.lshr(NumElts / 2) == 0) {
41336 unsigned SizeInBits = VT.getSizeInBits();
41337 unsigned ExtSizeInBits = SizeInBits / 2;
41339 // See if 512-bit ops only use the bottom 128-bits.
41340 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
41341 ExtSizeInBits = SizeInBits / 4;
41344 // Scalar broadcast.
41345 case X86ISD::VBROADCAST: {
41347 SDValue Src = Op.getOperand(0);
41348 if (Src.getValueSizeInBits() > ExtSizeInBits)
41349 Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
41350 EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41351 ExtSizeInBits / VT.getScalarSizeInBits());
41352 SDValue Bcst = TLO.DAG.getNode(X86ISD::VBROADCAST, DL, BcstVT, Src);
41353 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
41354 TLO.DAG, DL, ExtSizeInBits));
41356 case X86ISD::VBROADCAST_LOAD: {
41358 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
41359 EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41360 ExtSizeInBits / VT.getScalarSizeInBits());
41361 SDVTList Tys = TLO.DAG.getVTList(BcstVT, MVT::Other);
41362 SDValue Ops[] = {MemIntr->getOperand(0), MemIntr->getOperand(1)};
41363 SDValue Bcst = TLO.DAG.getMemIntrinsicNode(
41364 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
41365 MemIntr->getMemOperand());
      TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
                                           Bcst.getValue(1));
      return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
                                               TLO.DAG, DL, ExtSizeInBits));
    }
    // Subvector broadcast.
    case X86ISD::SUBV_BROADCAST_LOAD: {
41373 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
41374 EVT MemVT = MemIntr->getMemoryVT();
      if (ExtSizeInBits == MemVT.getStoreSizeInBits()) {
        SDLoc DL(Op);
        SDValue Ld =
            TLO.DAG.getLoad(MemVT, DL, MemIntr->getChain(),
                            MemIntr->getBasePtr(), MemIntr->getMemOperand());
        TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
                                             Ld.getValue(1));
41382 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Ld, 0,
41383 TLO.DAG, DL, ExtSizeInBits));
      } else if ((ExtSizeInBits % MemVT.getStoreSizeInBits()) == 0) {
        SDLoc DL(Op);
        EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41387 ExtSizeInBits / VT.getScalarSizeInBits());
41388 if (SDValue BcstLd =
41389 getBROADCAST_LOAD(Opc, DL, BcstVT, MemVT, MemIntr, 0, TLO.DAG))
41390 return TLO.CombineTo(Op,
41391 insertSubVector(TLO.DAG.getUNDEF(VT), BcstLd, 0,
                                                   TLO.DAG, DL, ExtSizeInBits));
      }
      break;
    }
    // Byte shifts by immediate.
41397 case X86ISD::VSHLDQ:
41398 case X86ISD::VSRLDQ:
41399 // Shift by uniform.
    case X86ISD::VSHL:
    case X86ISD::VSRL:
    case X86ISD::VSRA:
    // Shift by immediate.
41404 case X86ISD::VSHLI:
41405 case X86ISD::VSRLI:
    case X86ISD::VSRAI: {
      SDLoc DL(Op);
      SDValue Ext0 =
          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue ExtOp =
          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
41417 case X86ISD::VPERMI: {
41418 // Simplify PERMPD/PERMQ to extract_subvector.
41419 // TODO: This should be done in shuffle combining.
41420 if (VT == MVT::v4f64 || VT == MVT::v4i64) {
41421 SmallVector<int, 4> Mask;
41422 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
        if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
          SDLoc DL(Op);
          SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
          SDValue UndefVec = TLO.DAG.getUNDEF(VT);
          SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
          return TLO.CombineTo(Op, Insert);
        }
      }
      break;
    }
    case X86ISD::VPERM2X128: {
      // Simplify VPERM2F128/VPERM2I128 to extract_subvector.
      SDLoc DL(Op);
      unsigned LoMask = Op.getConstantOperandVal(2) & 0xF;
      if (LoMask & 0x8)
        return TLO.CombineTo(
            Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, DL));
41440 unsigned EltIdx = (LoMask & 0x1) * (NumElts / 2);
41441 unsigned SrcIdx = (LoMask & 0x2) >> 1;
      SDValue ExtOp =
          extractSubVector(Op.getOperand(SrcIdx), EltIdx, TLO.DAG, DL, 128);
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
41449 // Zero upper elements.
41450 case X86ISD::VZEXT_MOVL:
41451 // Target unary shuffles by immediate:
41452 case X86ISD::PSHUFD:
41453 case X86ISD::PSHUFLW:
41454 case X86ISD::PSHUFHW:
41455 case X86ISD::VPERMILPI:
41456 // (Non-Lane Crossing) Target Shuffles.
41457 case X86ISD::VPERMILPV:
41458 case X86ISD::VPERMIL2:
41459 case X86ISD::PSHUFB:
41460 case X86ISD::UNPCKL:
41461 case X86ISD::UNPCKH:
41462 case X86ISD::BLENDI:
      // Integer ops.
    case X86ISD::PACKSS:
    case X86ISD::PACKUS:
      // Horizontal Ops.
    case X86ISD::HADD:
    case X86ISD::HSUB:
    case X86ISD::FHADD:
    case X86ISD::FHSUB: {
      SDLoc DL(Op);
41472 SmallVector<SDValue, 4> Ops;
41473 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
41474 SDValue SrcOp = Op.getOperand(i);
41475 EVT SrcVT = SrcOp.getValueType();
41476 assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
41477 "Unsupported vector size");
        Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
                                                          ExtSizeInBits)
                                       : SrcOp);
      }

      MVT ExtVT = VT.getSimpleVT();
41483 ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
41484 ExtSizeInBits / ExtVT.getScalarSizeInBits());
41485 SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
41486 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
    }
  }
41494 // For splats, unless we *only* demand the 0'th element,
41495 // stop attempts at simplification here, we aren't going to improve things,
41496 // this is better than any potential shuffle.
  if (!DemandedElts.isOne() && TLO.DAG.isSplatValue(Op, /*AllowUndefs*/false))
    return false;

  // Get target/faux shuffle mask.
41501 APInt OpUndef, OpZero;
41502 SmallVector<int, 64> OpMask;
41503 SmallVector<SDValue, 2> OpInputs;
41504 if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
                              OpZero, TLO.DAG, Depth, false))
    return false;

  // Shuffle inputs must be the same size as the result.
41509 if (OpMask.size() != (unsigned)NumElts ||
41510 llvm::any_of(OpInputs, [VT](SDValue V) {
41511 return VT.getSizeInBits() != V.getValueSizeInBits() ||
               !V.getValueType().isVector();
      }))
    return false;

  KnownZero = OpZero;
41517 KnownUndef = OpUndef;
41519 // Check if shuffle mask can be simplified to undef/zero/identity.
41520 int NumSrcs = OpInputs.size();
41521 for (int i = 0; i != NumElts; ++i)
41522 if (!DemandedElts[i])
41523 OpMask[i] = SM_SentinelUndef;
41525 if (isUndefInRange(OpMask, 0, NumElts)) {
41526 KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }
  if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
41530 KnownZero.setAllBits();
41531 return TLO.CombineTo(
        Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
  }
  for (int Src = 0; Src != NumSrcs; ++Src)
41535 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
41536 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
41538 // Attempt to simplify inputs.
41539 for (int Src = 0; Src != NumSrcs; ++Src) {
41540 // TODO: Support inputs of different types.
    if (OpInputs[Src].getValueType() != VT)
      continue;

    int Lo = Src * NumElts;
41545 APInt SrcElts = APInt::getZero(NumElts);
41546 for (int i = 0; i != NumElts; ++i)
41547 if (DemandedElts[i]) {
41548 int M = OpMask[i] - Lo;
        if (0 <= M && M < NumElts)
          SrcElts.setBit(M);
      }

    // TODO - Propagate input undef/zero elts.
41554 APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
                                   TLO, Depth + 1))
      return true;
  }

  // If we don't demand all elements, then attempt to combine to a simpler
  // shuffle.
41562 // We need to convert the depth to something combineX86ShufflesRecursively
41563 // can handle - so pretend its Depth == 0 again, and reduce the max depth
41564 // to match. This prevents combineX86ShuffleChain from returning a
  // combined shuffle that's the same as the original root, causing an
  // infinite loop.
  if (!DemandedElts.isAllOnes()) {
41568 assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
41570 SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
41571 for (int i = 0; i != NumElts; ++i)
41572 if (DemandedElts[i])
41573 DemandedMask[i] = i;
41575 SDValue NewShuffle = combineX86ShufflesRecursively(
41576 {Op}, 0, Op, DemandedMask, {}, 0, X86::MaxShuffleCombineDepth - Depth,
41577 /*HasVarMask*/ false,
        /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, TLO.DAG,
        Subtarget);
    if (NewShuffle)
      return TLO.CombineTo(Op, NewShuffle);
  }

  return false;
}

bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
41588 SDValue Op, const APInt &OriginalDemandedBits,
41589 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
41590 unsigned Depth) const {
41591 EVT VT = Op.getValueType();
41592 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case X86ISD::VTRUNC: {
    KnownBits KnownOp;
    SDValue Src = Op.getOperand(0);
41598 MVT SrcVT = Src.getSimpleValueType();
41600 // Simplify the input, using demanded bit information.
41601 APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
    APInt DemandedElts = OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
    if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO,
                             Depth + 1))
      return true;
    break;
  }
  case X86ISD::PMULDQ:
41608 case X86ISD::PMULUDQ: {
41609 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
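    // e.g. (PMULUDQ X, Y) computes (X[i] & 0xFFFFFFFF) * (Y[i] & 0xFFFFFFFF)
    // per i64 element (PMULDQ sign-extends the low halves instead), so bits
    // 32..63 of each source element never influence the result.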
    KnownBits KnownOp;
    SDValue LHS = Op.getOperand(0);
41612 SDValue RHS = Op.getOperand(1);
41614 // Don't mask bits on 32-bit AVX512 targets which might lose a broadcast.
41615 // FIXME: Can we bound this better?
41616 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
41617 APInt DemandedMaskLHS = APInt::getAllOnes(64);
41618 APInt DemandedMaskRHS = APInt::getAllOnes(64);
41620 bool Is32BitAVX512 = !Subtarget.is64Bit() && Subtarget.hasAVX512();
41621 if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(LHS))
41622 DemandedMaskLHS = DemandedMask;
41623 if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(RHS))
41624 DemandedMaskRHS = DemandedMask;
41626 if (SimplifyDemandedBits(LHS, DemandedMaskLHS, OriginalDemandedElts,
                             KnownOp, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedBits(RHS, DemandedMaskRHS, OriginalDemandedElts,
                             KnownOp, TLO, Depth + 1))
      return true;
41633 // Aggressively peek through ops to get at the demanded low bits.
41634 SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
41635 LHS, DemandedMaskLHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
41636 SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
41637 RHS, DemandedMaskRHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
41638 if (DemandedLHS || DemandedRHS) {
41639 DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
41640 DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
41641 return TLO.CombineTo(
          Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
    }
    break;
  }
  case X86ISD::VSHLI: {
41647 SDValue Op0 = Op.getOperand(0);
41649 unsigned ShAmt = Op.getConstantOperandVal(1);
    if (ShAmt >= BitWidth)
      break;

    APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
41655 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
41656 // single shift. We can do this if the bottom bits (which are shifted
41657 // out) are never demanded.
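    // e.g. with ShAmt == 4 and only bits 4 and above demanded,
    // ((X >>u 2) << 4) produces the same demanded bits as (X << 2).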
41658 if (Op0.getOpcode() == X86ISD::VSRLI &&
41659 OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
41660 unsigned Shift2Amt = Op0.getConstantOperandVal(1);
41661 if (Shift2Amt < BitWidth) {
        int Diff = ShAmt - Shift2Amt;
        if (Diff == 0)
          return TLO.CombineTo(Op, Op0.getOperand(0));
41666 unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
41667 SDValue NewShift = TLO.DAG.getNode(
41668 NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
41669 TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
          return TLO.CombineTo(Op, NewShift);
      }
    }

    // If we are only demanding sign bits then we can use the shift source directly.
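    // e.g. if X has 5 sign bits then (X << 3) still has 2 sign bits, so when
    // only the sign bit is demanded it already equals X's sign bit.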
41675 unsigned NumSignBits =
41676 TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
41677 unsigned UpperDemandedBits =
41678 BitWidth - OriginalDemandedBits.countTrailingZeros();
41679 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
41680 return TLO.CombineTo(Op, Op0);
    if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
                             TLO, Depth + 1))
      return true;

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41687 Known.Zero <<= ShAmt;
41688 Known.One <<= ShAmt;
41690 // Low bits known zero.
    Known.Zero.setLowBits(ShAmt);
    break;
  }
  case X86ISD::VSRLI: {
41695 unsigned ShAmt = Op.getConstantOperandVal(1);
    if (ShAmt >= BitWidth)
      break;

    APInt DemandedMask = OriginalDemandedBits << ShAmt;
41701 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
                             OriginalDemandedElts, Known, TLO, Depth + 1))
      return true;

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41706 Known.Zero.lshrInPlace(ShAmt);
41707 Known.One.lshrInPlace(ShAmt);
41709 // High bits known zero.
    Known.Zero.setHighBits(ShAmt);
    break;
  }
  case X86ISD::VSRAI: {
41714 SDValue Op0 = Op.getOperand(0);
41715 SDValue Op1 = Op.getOperand(1);
41717 unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
    if (ShAmt >= BitWidth)
      break;

    APInt DemandedMask = OriginalDemandedBits << ShAmt;
41723 // If we just want the sign bit then we don't need to shift it.
41724 if (OriginalDemandedBits.isSignMask())
41725 return TLO.CombineTo(Op, Op0);
41727 // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
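    // e.g. for v4i32, ((X << 8) >>s 8) sign-extends from bit 23; if X already
    // has more than 8 sign bits then bit 23 equals X's sign bit and the shift
    // pair changes nothing.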
41728 if (Op0.getOpcode() == X86ISD::VSHLI &&
41729 Op.getOperand(1) == Op0.getOperand(1)) {
41730 SDValue Op00 = Op0.getOperand(0);
41731 unsigned NumSignBits =
41732 TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
41733 if (ShAmt < NumSignBits)
        return TLO.CombineTo(Op, Op00);
    }

    // If any of the demanded bits are produced by the sign extension, we also
41738 // demand the input sign bit.
41739 if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
41740 DemandedMask.setSignBit();
    if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
                             TLO, Depth + 1))
      return true;

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41747 Known.Zero.lshrInPlace(ShAmt);
41748 Known.One.lshrInPlace(ShAmt);
41750 // If the input sign bit is known to be zero, or if none of the top bits
41751 // are demanded, turn this into an unsigned shift right.
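    // e.g. (X >>s 4) with X known non-negative shifts in zeros, which is
    // exactly (X >>u 4).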
41752 if (Known.Zero[BitWidth - ShAmt - 1] ||
41753 OriginalDemandedBits.countLeadingZeros() >= ShAmt)
41754 return TLO.CombineTo(
41755 Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
41757 // High bits are known one.
41758 if (Known.One[BitWidth - ShAmt - 1])
      Known.One.setHighBits(ShAmt);
    break;
  }
  case X86ISD::BLENDV: {
41763 SDValue Sel = Op.getOperand(0);
41764 SDValue LHS = Op.getOperand(1);
41765 SDValue RHS = Op.getOperand(2);
41767 APInt SignMask = APInt::getSignMask(BitWidth);
41768 SDValue NewSel = SimplifyMultipleUseDemandedBits(
41769 Sel, SignMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
41770 SDValue NewLHS = SimplifyMultipleUseDemandedBits(
41771 LHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
41772 SDValue NewRHS = SimplifyMultipleUseDemandedBits(
41773 RHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
41775 if (NewSel || NewLHS || NewRHS) {
41776 NewSel = NewSel ? NewSel : Sel;
41777 NewLHS = NewLHS ? NewLHS : LHS;
41778 NewRHS = NewRHS ? NewRHS : RHS;
41779 return TLO.CombineTo(Op, TLO.DAG.getNode(X86ISD::BLENDV, SDLoc(Op), VT,
                                               NewSel, NewLHS, NewRHS));
    }
    break;
  }
  case X86ISD::PEXTRB:
41785 case X86ISD::PEXTRW: {
41786 SDValue Vec = Op.getOperand(0);
41787 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
41788 MVT VecVT = Vec.getSimpleValueType();
41789 unsigned NumVecElts = VecVT.getVectorNumElements();
41791 if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
41792 unsigned Idx = CIdx->getZExtValue();
41793 unsigned VecBitWidth = VecVT.getScalarSizeInBits();
41795 // If we demand no bits from the vector then we must have demanded
      // bits from the implicit zext - simplify to zero.
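      // e.g. PEXTRB reads an i8 element but returns it zero-extended to the
      // scalar result type, so any demanded bit at position 8 or above is
      // known to be zero.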
41797 APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
41798 if (DemandedVecBits == 0)
41799 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
41801 APInt KnownUndef, KnownZero;
41802 APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
41803 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                     KnownZero, TLO, Depth + 1))
        return true;

      KnownBits KnownVec;
41808 if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
                               KnownVec, TLO, Depth + 1))
        return true;

      if (SDValue V = SimplifyMultipleUseDemandedBits(
41813 Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
41814 return TLO.CombineTo(
41815 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
      Known = KnownVec.zext(BitWidth);
      return false;
    }
    break;
  }
  case X86ISD::PINSRB:
41823 case X86ISD::PINSRW: {
41824 SDValue Vec = Op.getOperand(0);
41825 SDValue Scl = Op.getOperand(1);
41826 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
41827 MVT VecVT = Vec.getSimpleValueType();
41829 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
41830 unsigned Idx = CIdx->getZExtValue();
41831 if (!OriginalDemandedElts[Idx])
41832 return TLO.CombineTo(Op, Vec);
41834 KnownBits KnownVec;
41835 APInt DemandedVecElts(OriginalDemandedElts);
41836 DemandedVecElts.clearBit(Idx);
41837 if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
                               KnownVec, TLO, Depth + 1))
        return true;

      KnownBits KnownScl;
41842 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
41843 APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
      if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
        return true;

      KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
      Known = KnownBits::commonBits(KnownVec, KnownScl);
      return false;
    }
    break;
  }
  case X86ISD::PACKSS:
41854 // PACKSS saturates to MIN/MAX integer values. So if we just want the
    // sign bit then we can just ask for the source operands' sign bits.
41856 // TODO - add known bits handling.
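    // e.g. PACKSSWB saturates each i16 to [-128, 127]; saturation preserves
    // the sign, so each i8 result has the same sign bit as its source i16.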
41857 if (OriginalDemandedBits.isSignMask()) {
41858 APInt DemandedLHS, DemandedRHS;
41859 getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
41861 KnownBits KnownLHS, KnownRHS;
41862 APInt SignMask = APInt::getSignMask(BitWidth * 2);
41863 if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
                               KnownLHS, TLO, Depth + 1))
        return true;
      if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
                               KnownRHS, TLO, Depth + 1))
        return true;
41870 // Attempt to avoid multi-use ops if we don't need anything from them.
41871 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
41872 Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
41873 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
41874 Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
41875 if (DemandedOp0 || DemandedOp1) {
41876 SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
41877 SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
      }
    }

    // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
    break;
  case X86ISD::VBROADCAST: {
41884 SDValue Src = Op.getOperand(0);
41885 MVT SrcVT = Src.getSimpleValueType();
41886 APInt DemandedElts = APInt::getOneBitSet(
41887 SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1, 0);
    if (SimplifyDemandedBits(Src, OriginalDemandedBits, DemandedElts, Known,
                             TLO, Depth + 1))
      return true;

    // If we don't need the upper bits, attempt to narrow the broadcast source.
41892 // Don't attempt this on AVX512 as it might affect broadcast folding.
41893 // TODO: Should we attempt this for i32/i16 splats? They tend to be slower.
41894 if ((BitWidth == 64) && SrcVT.isScalarInteger() && !Subtarget.hasAVX512() &&
41895 OriginalDemandedBits.countLeadingZeros() >= (BitWidth / 2) &&
41896 Src->hasOneUse()) {
41897 MVT NewSrcVT = MVT::getIntegerVT(BitWidth / 2);
      SDValue NewSrc =
          TLO.DAG.getNode(ISD::TRUNCATE, SDLoc(Src), NewSrcVT, Src);
      MVT NewVT = MVT::getVectorVT(NewSrcVT, VT.getVectorNumElements() * 2);
      SDValue NewBcst =
          TLO.DAG.getNode(X86ISD::VBROADCAST, SDLoc(Op), NewVT, NewSrc);
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, NewBcst));
    }
    break;
  }
  case X86ISD::PCMPGT:
41908 // icmp sgt(0, R) == ashr(R, BitWidth-1).
41909 // iff we only need the sign bit then we can use R directly.
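    // e.g. (PCMPGT 0, R) is all-ones exactly when R is negative, so its sign
    // bit always equals R's sign bit.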
41910 if (OriginalDemandedBits.isSignMask() &&
41911 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
      return TLO.CombineTo(Op, Op.getOperand(1));
    break;
  case X86ISD::MOVMSK: {
41915 SDValue Src = Op.getOperand(0);
41916 MVT SrcVT = Src.getSimpleValueType();
41917 unsigned SrcBits = SrcVT.getScalarSizeInBits();
41918 unsigned NumElts = SrcVT.getVectorNumElements();
41920 // If we don't need the sign bits at all just return zero.
41921 if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
41922 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
41924 // See if we only demand bits from the lower 128-bit vector.
41925 if (SrcVT.is256BitVector() &&
41926 OriginalDemandedBits.getActiveBits() <= (NumElts / 2)) {
41927 SDValue NewSrc = extract128BitVector(Src, 0, TLO.DAG, SDLoc(Src));
41928 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
41931 // Only demand the vector elements of the sign bits we need.
41932 APInt KnownUndef, KnownZero;
41933 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
    if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
                                   TLO, Depth + 1))
      return true;

    Known.Zero = KnownZero.zext(BitWidth);
41939 Known.Zero.setHighBits(BitWidth - NumElts);
41941 // MOVMSK only uses the MSB from each vector element.
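    // e.g. for v4i32, MOVMSK returns
    // (sign(e3) << 3) | (sign(e2) << 2) | (sign(e1) << 1) | sign(e0);
    // no other bit of the source elements is observable in the result.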
41942 KnownBits KnownSrc;
41943 APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    if (KnownSrc.One[SrcBits - 1])
41949 Known.One.setLowBits(NumElts);
41950 else if (KnownSrc.Zero[SrcBits - 1])
41951 Known.Zero.setLowBits(NumElts);
    // Attempt to avoid multi-use ops if we don't need anything from them.
41954 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
41955 Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
    return false;
  }
  case X86ISD::BEXTR:
41960 case X86ISD::BEXTRI: {
41961 SDValue Op0 = Op.getOperand(0);
41962 SDValue Op1 = Op.getOperand(1);
41964 // Only bottom 16-bits of the control bits are required.
41965 if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
41966 // NOTE: SimplifyDemandedBits won't do this for constants.
41967 uint64_t Val1 = Cst1->getZExtValue();
41968 uint64_t MaskedVal1 = Val1 & 0xFFFF;
      if (Opc == X86ISD::BEXTR && MaskedVal1 != Val1) {
        SDLoc DL(Op);
        return TLO.CombineTo(
41972 Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
                                 TLO.DAG.getConstant(MaskedVal1, DL, VT)));
      }

      unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
41977 unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
      // If the length is 0, the result is 0.
      if (Length == 0) {
        Known.setAllZero();
        return false;
      }

      if ((Shift + Length) <= BitWidth) {
41986 APInt DemandedMask = APInt::getBitsSet(BitWidth, Shift, Shift + Length);
        if (SimplifyDemandedBits(Op0, DemandedMask, Known, TLO, Depth + 1))
          return true;

        Known = Known.extractBits(Length, Shift);
        Known = Known.zextOrTrunc(BitWidth);
        return false;
      }
    } else {
      assert(Opc == X86ISD::BEXTR && "Unexpected opcode!");
      KnownBits Known1;
      APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
      if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
        return true;

      // If the length is 0, replace with 0.
42002 KnownBits LengthBits = Known1.extractBits(8, 8);
42003 if (LengthBits.isZero())
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
    }
    break;
  }
  case X86ISD::PDEP: {
42010 SDValue Op0 = Op.getOperand(0);
42011 SDValue Op1 = Op.getOperand(1);
42013 unsigned DemandedBitsLZ = OriginalDemandedBits.countLeadingZeros();
42014 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
    // If the demanded bits have leading zeroes, we don't demand those from the
    // mask.
    if (SimplifyDemandedBits(Op1, LoMask, Known, TLO, Depth + 1))
      return true;
42021 // The number of possible 1s in the mask determines the number of LSBs of
42022 // operand 0 used. Undemanded bits from the mask don't matter so filter
    // them before counting.
    KnownBits Known2;
    uint64_t Count = (~Known.Zero & LoMask).countPopulation();
42026 APInt DemandedMask(APInt::getLowBitsSet(BitWidth, Count));
    if (SimplifyDemandedBits(Op0, DemandedMask, Known2, TLO, Depth + 1))
      return true;

    // Zeroes are retained from the mask, but not ones.
42031 Known.One.clearAllBits();
42032 // The result will have at least as many trailing zeros as the non-mask
42033 // operand since bits can only map to the same or higher bit position.
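    // e.g. if Op0 has two known trailing zeros, the two lowest set mask
    // positions receive zeros and every other Op0 bit is deposited at an
    // equal or higher position, so the result keeps >= 2 trailing zeros.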
    Known.Zero.setLowBits(Known2.countMinTrailingZeros());
    return false;
  }
  }

  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}

SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
42044 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
42045 SelectionDAG &DAG, unsigned Depth) const {
42046 int NumElts = DemandedElts.getBitWidth();
42047 unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  switch (Opc) {
  case X86ISD::PINSRB:
42052 case X86ISD::PINSRW: {
42053 // If we don't demand the inserted element, return the base vector.
42054 SDValue Vec = Op.getOperand(0);
42055 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
42056 MVT VecVT = Vec.getSimpleValueType();
42057 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case X86ISD::VSHLI: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    SDValue Op0 = Op.getOperand(0);
42066 unsigned ShAmt = Op.getConstantOperandVal(1);
42067 unsigned BitWidth = DemandedBits.getBitWidth();
42068 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
42069 unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
    if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
      return Op0;
    break;
  }
  case X86ISD::VSRAI:
42075 // iff we only need the sign bit then we can use the source directly.
42076 // TODO: generalize where we only demand extended signbits.
42077 if (DemandedBits.isSignMask())
      return Op.getOperand(0);
    break;
  case X86ISD::PCMPGT:
42081 // icmp sgt(0, R) == ashr(R, BitWidth-1).
42082 // iff we only need the sign bit then we can use R directly.
42083 if (DemandedBits.isSignMask() &&
42084 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
      return Op.getOperand(1);
    break;
  }

  APInt ShuffleUndef, ShuffleZero;
42090 SmallVector<int, 16> ShuffleMask;
42091 SmallVector<SDValue, 2> ShuffleOps;
42092 if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
42093 ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
42094 // If all the demanded elts are from one operand and are inline,
42095 // then we can use the operand directly.
42096 int NumOps = ShuffleOps.size();
42097 if (ShuffleMask.size() == (unsigned)NumElts &&
42098 llvm::all_of(ShuffleOps, [VT](SDValue V) {
          return VT.getSizeInBits() == V.getValueSizeInBits();
        })) {
      if (DemandedElts.isSubsetOf(ShuffleUndef))
42103 return DAG.getUNDEF(VT);
42104 if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
42105 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
42107 // Bitmask that indicates which ops have only been accessed 'inline'.
42108 APInt IdentityOp = APInt::getAllOnes(NumOps);
42109 for (int i = 0; i != NumElts; ++i) {
42110 int M = ShuffleMask[i];
        if (!DemandedElts[i] || ShuffleUndef[i])
          continue;
        int OpIdx = M / NumElts;
42114 int EltIdx = M % NumElts;
42115 if (M < 0 || EltIdx != i) {
          IdentityOp.clearAllBits();
          break;
        }
        IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
        if (IdentityOp == 0)
          break;
      }
      assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
42124 "Multiple identity shuffles detected");
      if (IdentityOp != 0)
        return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
    }
  }

  return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
      Op, DemandedBits, DemandedElts, DAG, Depth);
}

bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op,
                                                  const APInt &DemandedElts,
                                                  APInt &UndefElts,
                                                  unsigned Depth) const {
42139 unsigned NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  case X86ISD::VBROADCAST:
  case X86ISD::VBROADCAST_LOAD:
    UndefElts = APInt::getNullValue(NumElts);
    return true;
  }

  return TargetLowering::isSplatValueForTargetNode(Op, DemandedElts, UndefElts,
                                                   Depth);
}
42153 // Helper to peek through bitops/trunc/setcc to determine size of source vector.
42154 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
42155 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size,
42156 bool AllowTruncate) {
42157 switch (Src.getOpcode()) {
42158 case ISD::TRUNCATE:
    if (!AllowTruncate)
      return false;
    LLVM_FALLTHROUGH;
  case ISD::SETCC:
    return Src.getOperand(0).getValueSizeInBits() == Size;
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR:
    return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate) &&
           checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate);
  }
  return false;
}

// Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
static unsigned getAltBitOpcode(unsigned Opcode) {
  switch (Opcode) {
  case ISD::AND: return X86ISD::FAND;
42177 case ISD::OR: return X86ISD::FOR;
42178 case ISD::XOR: return X86ISD::FXOR;
  case X86ISD::ANDNP: return X86ISD::FANDN;
  }
  llvm_unreachable("Unknown bitwise opcode");
}

// Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
                                          const SDLoc &DL) {
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::v4i1)
    return SDValue();

  switch (Src.getOpcode()) {
  case ISD::SETCC:
    if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
42194 ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
42195 cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
42196 SDValue Op0 = Src.getOperand(0);
42197 if (ISD::isNormalLoad(Op0.getNode()))
42198 return DAG.getBitcast(MVT::v4f32, Op0);
42199 if (Op0.getOpcode() == ISD::BITCAST &&
42200 Op0.getOperand(0).getValueType() == MVT::v4f32)
        return Op0.getOperand(0);
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
    SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
    if (Op0 && Op1)
      return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
                         Op1);
    break;
  }
  }
  return SDValue();
}

// Helper to push sign extension of vXi1 SETCC result through bitops.
42219 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
42220 SDValue Src, const SDLoc &DL) {
  switch (Src.getOpcode()) {
  case ISD::SETCC:
  case ISD::TRUNCATE:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return DAG.getNode(
42229 Src.getOpcode(), DL, SExtVT,
42230 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
  }
  llvm_unreachable("Unexpected node type for vXi1 sign extension");
}

// Try to match patterns such as
// (i16 bitcast (v16i1 x))
// ->
// (i16 movmsk (v16i8 sext (v16i1 x)))
// before the illegal vector is scalarized on subtargets that don't have legal
// vxi1 types.
static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
                                  const SDLoc &DL,
                                  const X86Subtarget &Subtarget) {
42245 EVT SrcVT = Src.getValueType();
  if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
    return SDValue();

  // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
42250 // legalization destroys the v4i32 type.
42251 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
42252 if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
42253 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
42254 DAG.getBitcast(MVT::v4f32, V));
      return DAG.getZExtOrTrunc(V, DL, VT);
    }
    return SDValue();
  }

  // If the input is a truncate from v16i8 or v32i8 go ahead and use a
42260 // movmskb even with avx512. This will be better than truncating to vXi1 and
42261 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
42262 // vpcmpeqb/vpcmpgtb.
42263 bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
42264 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
42265 Src.getOperand(0).getValueType() == MVT::v32i8 ||
42266 Src.getOperand(0).getValueType() == MVT::v64i8);
42268 // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
42269 // directly with vpmovmskb/vmovmskps/vmovmskpd.
42270 if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
42271 cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
42272 ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
42273 EVT CmpVT = Src.getOperand(0).getValueType();
42274 EVT EltVT = CmpVT.getVectorElementType();
42275 if (CmpVT.getSizeInBits() <= 256 &&
42276 (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
      PreferMovMsk = true;
  }

  // With AVX512 vxi1 types are legal and we prefer using k-regs.
42281 // MOVMSK is supported in SSE2 or later.
  if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
    return SDValue();

  // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
42286 // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
42287 // v8i16 and v16i16.
42288 // For these two cases, we can shuffle the upper element bytes to a
42289 // consecutive sequence at the start of the vector and treat the results as
42290 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
42291 // for v16i16 this is not the case, because the shuffle is expensive, so we
42292 // avoid sign-extending to this type entirely.
42293 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
  // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
  MVT SExtVT;
  bool PropagateSExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default:
    return SDValue();
  case MVT::v2i1:
    SExtVT = MVT::v2i64;
    break;
  case MVT::v4i1:
    SExtVT = MVT::v4i32;
42305 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
42306 // sign-extend to a 256-bit operation to avoid truncation.
42307 if (Subtarget.hasAVX() &&
42308 checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) {
42309 SExtVT = MVT::v4i64;
      PropagateSExt = true;
    }
    break;
  case MVT::v8i1:
    SExtVT = MVT::v8i16;
42315 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
42316 // sign-extend to a 256-bit operation to match the compare.
42317 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
    // 256-bit because the shuffle is cheaper than sign extending the result of
    // the setcc.
    if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true) ||
42321 checkBitcastSrcVectorSize(Src, 512, true))) {
42322 SExtVT = MVT::v8i32;
      PropagateSExt = true;
    }
    break;
  case MVT::v16i1:
    SExtVT = MVT::v16i8;
42328 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
42329 // it is not profitable to sign-extend to 256-bit because this will
42330 // require an extra cross-lane shuffle which is more expensive than
    // truncating the result of the compare to 128-bits.
    break;
  case MVT::v32i1:
    SExtVT = MVT::v32i8;
    break;
  case MVT::v64i1:
    // If we have AVX512F, but not AVX512BW and the input is truncated from
42338 // v64i8 checked earlier. Then split the input and make two pmovmskbs.
42339 if (Subtarget.hasAVX512()) {
      if (Subtarget.hasBWI())
        return SDValue();
      SExtVT = MVT::v64i8;
      break;
    }
    // Split if this is a <64 x i8> comparison result.
    if (checkBitcastSrcVectorSize(Src, 512, false)) {
      SExtVT = MVT::v64i8;
      break;
    }
    return SDValue();
  }
42353 SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
42354 : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
42356 if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
    V = getPMOVMSKB(DL, V, DAG, Subtarget);
  } else {
    if (SExtVT == MVT::v8i16)
42360 V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
42361 DAG.getUNDEF(MVT::v8i16));
    V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
  }

  EVT IntVT =
      EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
42367 V = DAG.getZExtOrTrunc(V, DL, IntVT);
  return DAG.getBitcast(VT, V);
}

// Convert a vXi1 constant build vector to the same width scalar integer.
42372 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
42373 EVT SrcVT = Op.getValueType();
42374 assert(SrcVT.getVectorElementType() == MVT::i1 &&
42375 "Expected a vXi1 vector");
42376 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
42377 "Expected a constant build vector");
42379 APInt Imm(SrcVT.getVectorNumElements(), 0);
42380 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
42381 SDValue In = Op.getOperand(Idx);
    if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
      Imm.setBit(Idx);
  }

  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
  return DAG.getConstant(Imm, SDLoc(Op), IntVT);
}

static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
42390 TargetLowering::DAGCombinerInfo &DCI,
42391 const X86Subtarget &Subtarget) {
42392 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  // Only do this if we have k-registers.
  if (!Subtarget.hasAVX512())
    return SDValue();

  EVT DstVT = N->getValueType(0);
42402 SDValue Op = N->getOperand(0);
42403 EVT SrcVT = Op.getValueType();
  if (!Op.hasOneUse())
    return SDValue();

  // Look for logic ops.
42409 if (Op.getOpcode() != ISD::AND &&
42410 Op.getOpcode() != ISD::OR &&
      Op.getOpcode() != ISD::XOR)
    return SDValue();

  // Make sure we have a bitcast between mask registers and a scalar type.
42415 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
42416 DstVT.isScalarInteger()) &&
42417 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
        SrcVT.isScalarInteger()))
    return SDValue();

  SDValue LHS = Op.getOperand(0);
42422 SDValue RHS = Op.getOperand(1);
42424 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
42425 LHS.getOperand(0).getValueType() == DstVT)
42426 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
42427 DAG.getBitcast(DstVT, RHS));
42429 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
42430 RHS.getOperand(0).getValueType() == DstVT)
42431 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
42432 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
42434 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
42435 // Most of these have to move a constant from the scalar domain anyway.
42436 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
42437 RHS = combinevXi1ConstantToInteger(RHS, DAG);
42438 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
                       DAG.getBitcast(DstVT, LHS), RHS);
  }

  return SDValue();
}

static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  SDLoc DL(BV);
42448 unsigned NumElts = BV->getNumOperands();
42449 SDValue Splat = BV->getSplatValue();
42451 // Build MMX element from integer GPR or SSE float values.
  auto CreateMMXElement = [&](SDValue V) {
    if (V.isUndef())
      return DAG.getUNDEF(MVT::x86mmx);
42455 if (V.getValueType().isFloatingPoint()) {
42456 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
42457 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
42458 V = DAG.getBitcast(MVT::v2i64, V);
        return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
      }
      V = DAG.getBitcast(MVT::i32, V);
    } else {
      V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
    }
    return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
  };
42468 // Convert build vector ops to MMX data in the bottom elements.
42469 SmallVector<SDValue, 8> Ops;
42471 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
  if (Splat) {
    if (Splat.isUndef())
42476 return DAG.getUNDEF(MVT::x86mmx);
42478 Splat = CreateMMXElement(Splat);
42480 if (Subtarget.hasSSE1()) {
      // Unpack v8i8 to splat i8 elements to lowest 16-bits.
      if (NumElts == 8)
        Splat = DAG.getNode(
42484 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
42485 DAG.getTargetConstant(Intrinsic::x86_mmx_punpcklbw, DL,
                                  TLI.getPointerTy(DAG.getDataLayout())),
            Splat, Splat);

      // Use PSHUFW to repeat 16-bit elements.
42490 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
42491 return DAG.getNode(
42492 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
42493 DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL,
42494 TLI.getPointerTy(DAG.getDataLayout())),
          Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
    }
    Ops.append(NumElts, Splat);
  } else {
    for (unsigned i = 0; i != NumElts; ++i)
      Ops.push_back(CreateMMXElement(BV->getOperand(i)));
  }
42503 // Use tree of PUNPCKLs to build up general MMX vector.
42504 while (Ops.size() > 1) {
42505 unsigned NumOps = Ops.size();
42506 unsigned IntrinOp =
42507 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
42508 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
42509 : Intrinsic::x86_mmx_punpcklbw));
42510 SDValue Intrin = DAG.getTargetConstant(
42511 IntrinOp, DL, TLI.getPointerTy(DAG.getDataLayout()));
42512 for (unsigned i = 0; i != NumOps; i += 2)
42513 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
42514 Ops[i], Ops[i + 1]);
    Ops.resize(NumOps / 2);
  }

  return Ops[0];
}

// Recursive function that attempts to find if a bool vector node was originally
42522 // a vector/float/double that got truncated/extended/bitcast to/from a scalar
// integer. If so, replace the scalar ops with bool vector equivalents back
// down the chain.
static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
                                          SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget) {
42528 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned Opc = V.getOpcode();
  switch (Opc) {
  case ISD::BITCAST: {
42532 // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
42533 SDValue Src = V.getOperand(0);
42534 EVT SrcVT = Src.getValueType();
42535 if (SrcVT.isVector() || SrcVT.isFloatingPoint())
      return DAG.getBitcast(VT, Src);
    break;
  }
  case ISD::TRUNCATE: {
42540 // If we find a suitable source, a truncated scalar becomes a subvector.
    SDValue Src = V.getOperand(0);
    EVT NewSrcVT =
        EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
    if (TLI.isTypeLegal(NewSrcVT))
      if (SDValue N0 =
              combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
42547 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
                           DAG.getIntPtrConstant(0, DL));
    break;
  }
  case ISD::ANY_EXTEND:
42552 case ISD::ZERO_EXTEND: {
42553 // If we find a suitable source, an extended scalar becomes a subvector.
42554 SDValue Src = V.getOperand(0);
42555 EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
42556 Src.getScalarValueSizeInBits());
    if (TLI.isTypeLegal(NewSrcVT))
      if (SDValue N0 =
              combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
42560 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
42561 Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
42562 : DAG.getConstant(0, DL, VT),
                           N0, DAG.getIntPtrConstant(0, DL));
    break;
  }
  case ISD::OR: {
    // If we find suitable sources, we can just move an OR to the vector domain.
42568 SDValue Src0 = V.getOperand(0);
42569 SDValue Src1 = V.getOperand(1);
42570 if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
42571 if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
        return DAG.getNode(Opc, DL, VT, N0, N1);
    break;
  }
  case ISD::SHL: {
    // If we find a suitable source, a SHL becomes a KSHIFTL.
42577 SDValue Src0 = V.getOperand(0);
42578 if ((VT == MVT::v8i1 && !Subtarget.hasDQI()) ||
        ((VT == MVT::v32i1 || VT == MVT::v64i1) && !Subtarget.hasBWI()))
      break;

    if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
42583 if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
42584 return DAG.getNode(
42585 X86ISD::KSHIFTL, DL, VT, N0,
            DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
    break;
  }
  }
  return SDValue();
}

static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
42594 TargetLowering::DAGCombinerInfo &DCI,
42595 const X86Subtarget &Subtarget) {
42596 SDValue N0 = N->getOperand(0);
42597 EVT VT = N->getValueType(0);
42598 EVT SrcVT = N0.getValueType();
42599 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42601 // Try to match patterns such as
  // (i16 bitcast (v16i1 x))
  // ->
  // (i16 movmsk (v16i8 sext (v16i1 x)))
  // before the setcc result is scalarized on subtargets that don't have legal
  // vxi1 types.
  if (DCI.isBeforeLegalize()) {
    SDLoc dl(N);
    if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
      return V;

    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
42613 // type, widen both sides to avoid a trip through memory.
42614 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
42615 Subtarget.hasAVX512()) {
42616 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
42617 N0 = DAG.getBitcast(MVT::v8i1, N0);
42618 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
                         DAG.getIntPtrConstant(0, dl));
    }

    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
42623 // type, widen both sides to avoid a trip through memory.
42624 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
42625 Subtarget.hasAVX512()) {
42626 // Use zeros for the widening if we already have some zeroes. This can
      // allow SimplifyDemandedBits to remove scalar ANDs that may be down
      // the road.
      // FIXME: It might make sense to detect a concat_vectors with a mix of
42630 // zeroes and undef and turn it into insert_subvector for i1 vectors as
42631 // a separate combine. What we can't do is canonicalize the operands of
42632 // such a concat or we'll get into a loop with SimplifyDemandedBits.
42633 if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
42634 SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
42635 if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
42636 SrcVT = LastOp.getValueType();
42637 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
42638 SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
42639 Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
42640 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
42641 N0 = DAG.getBitcast(MVT::i8, N0);
          return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
        }
      }

      unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
      SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
      Ops[0] = N0;
      N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
42650 N0 = DAG.getBitcast(MVT::i8, N0);
      return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
    }

    // If we're bitcasting from iX to vXi1, see if the integer originally
42655 // began as a vXi1 and whether we can remove the bitcast entirely.
42656 if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
        SrcVT.isScalarInteger() && TLI.isTypeLegal(VT)) {
      if (SDValue V =
              combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
        return V;
    }
  }

  // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
42665 // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
42666 // due to insert_subvector legalization on KNL. By promoting the copy to i16
  // we can help with known bits propagation from the vXi1 domain to the
  // scalar domain.
  if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
42670 !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
42671 N0.getOperand(0).getValueType() == MVT::v16i1 &&
42672 isNullConstant(N0.getOperand(1)))
42673 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
42674 DAG.getBitcast(MVT::i16, N0.getOperand(0)));
42676 // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
42677 // and the vbroadcast_load are both integer or both fp. In some cases this
42678 // will remove the bitcast entirely.
42679 if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
42680 VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
42681 auto *BCast = cast<MemIntrinsicSDNode>(N0);
42682 unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
42683 unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
42684 // Don't swap i8/i16 since don't have fp types that size.
42685 if (MemSize >= 32) {
42686 MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
42687 : MVT::getIntegerVT(MemSize);
42688 MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
42689 : MVT::getIntegerVT(SrcVTSize);
42690 LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());
42692 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
42693 SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
42695 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
42696 MemVT, BCast->getMemOperand());
42697 DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
      return DAG.getBitcast(VT, ResNode);
    }
  }

  // Since MMX types are special and don't usually play with other vector types,
42703 // it's better to handle them early to be sure we emit efficient code by
42704 // avoiding store-load conversions.
42705 if (VT == MVT::x86mmx) {
    // Detect MMX constant vectors.
    APInt UndefElts;
    SmallVector<APInt, 1> EltBits;
    if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
      SDLoc DL(N0);

      // Handle zero-extension of i32 with MOVD.
42712 if (EltBits[0].countLeadingZeros() >= 32)
42713 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
42714 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
42715 // Else, bitcast to a double.
42716 // TODO - investigate supporting sext 32-bit immediates on x86_64.
42717 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
      return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
    }

    // Detect bitcasts to x86mmx low word.
42722 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
42723 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
42724 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
42725 bool LowUndef = true, AllUndefOrZero = true;
42726 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
42727 SDValue Op = N0.getOperand(i);
42728 LowUndef &= Op.isUndef() || (i >= e/2);
        AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
      }
      if (AllUndefOrZero) {
        SDValue N00 = N0.getOperand(0);
        SDLoc dl(N00);
        N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
42735 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
        return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
      }
    }

    // Detect bitcasts of 64-bit build vectors and convert to a
    // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
    // bottom element.
    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
42744 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
42745 SrcVT == MVT::v8i8))
42746 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
42748 // Detect bitcasts between element or subvector extraction to x86mmx.
42749 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
42750 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
42751 isNullConstant(N0.getOperand(1))) {
42752 SDValue N00 = N0.getOperand(0);
42753 if (N00.getValueType().is128BitVector())
42754 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
                           DAG.getBitcast(MVT::v2i64, N00));
    }

    // Detect bitcasts from FP_TO_SINT to x86mmx.
    if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
      SDLoc DL(N0);
      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
42762 DAG.getUNDEF(MVT::v2i32));
42763 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
                         DAG.getBitcast(MVT::v2i64, Res));
    }
  }

  // Try to remove a bitcast of constant vXi1 vector. We have to legalize
42769 // most of these to scalar anyway.
42770 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
42771 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
42772 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
    return combinevXi1ConstantToInteger(N0, DAG);
  }

  if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
42777 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
42778 isa<ConstantSDNode>(N0)) {
42779 auto *C = cast<ConstantSDNode>(N0);
42780 if (C->isAllOnes())
      return DAG.getConstant(1, SDLoc(N0), VT);
    if (C->isZero())
      return DAG.getConstant(0, SDLoc(N0), VT);
  }

  // Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
42787 // Turn it into a sign bit compare that produces a k-register. This avoids
42788 // a trip through a GPR.
42789 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
42790 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
42791 isPowerOf2_32(VT.getVectorNumElements())) {
    unsigned NumElts = VT.getVectorNumElements();
    SDLoc dl(N);
    SDValue Src = N0;

    // Peek through truncate.
42796 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
42797 Src = N0.getOperand(0);
42799 if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
42800 SDValue MovmskIn = Src.getOperand(0);
42801 MVT MovmskVT = MovmskIn.getSimpleValueType();
42802 unsigned MovMskElts = MovmskVT.getVectorNumElements();
42804 // We allow extra bits of the movmsk to be used since they are known zero.
42805 // We can't convert a VPMOVMSKB without avx512bw.
42806 if (MovMskElts <= NumElts &&
42807 (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
42808 EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
42809 MovmskIn = DAG.getBitcast(IntVT, MovmskIn);
42811 MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
42812 SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
42813 DAG.getConstant(0, dl, IntVT), ISD::SETLT);
        if (EVT(CmpVT) == VT)
          return Cmp;

        // Pad with zeroes up to original VT to replace the zeroes that were
42818 // being used from the MOVMSK.
42819 unsigned NumConcats = NumElts / MovMskElts;
        SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
        Ops[0] = Cmp;
        return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
      }
    }
  }

  // Try to remove bitcasts from input and output of mask arithmetic to
42828 // remove GPR<->K-register crossings.
  if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
    return V;

  // Convert a bitcasted integer logic operation that has one bitcasted
42833 // floating-point operand into a floating-point logic operation. This may
42834 // create a load of a constant, but that is cheaper than materializing the
42835 // constant in an integer register and transferring it to an SSE register or
  // transferring the SSE operand to integer register and back.
  unsigned FPOpcode;
  switch (N0.getOpcode()) {
42839 case ISD::AND: FPOpcode = X86ISD::FAND; break;
42840 case ISD::OR: FPOpcode = X86ISD::FOR; break;
42841 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
  default: return SDValue();
  }

  // Check if we have a bitcast from another integer type as well.
42846 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
42847 (Subtarget.hasSSE2() && VT == MVT::f64) ||
42848 (Subtarget.hasFP16() && VT == MVT::f16) ||
42849 (Subtarget.hasSSE2() && VT.isInteger() && VT.isVector() &&
         TLI.isTypeLegal(VT))))
    return SDValue();

  SDValue LogicOp0 = N0.getOperand(0);
  SDValue LogicOp1 = N0.getOperand(1);
  SDLoc DL0(N);

  // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
42858 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
42859 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).hasOneUse() &&
42860 LogicOp0.getOperand(0).getValueType() == VT &&
42861 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
42862 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
42863 unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
    return DAG.getNode(Opcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
  }
  // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
42867 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
42868 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).hasOneUse() &&
42869 LogicOp1.getOperand(0).getValueType() == VT &&
42870 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
42871 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
42872 unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
    return DAG.getNode(Opcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
  }

  return SDValue();
}

// (mul (zext a), (sext b))
static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
                         SDValue &Op1) {
  Op0 = Mul.getOperand(0);
42883 Op1 = Mul.getOperand(1);
  // Canonicalize so that Op1 is the sign-extended operand.
42886 if (Op0.getOpcode() == ISD::SIGN_EXTEND)
42887 std::swap(Op0, Op1);
42889 auto IsFreeTruncation = [](SDValue &Op) -> bool {
42890 if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
42891 Op.getOpcode() == ISD::SIGN_EXTEND) &&
        Op.getOperand(0).getScalarValueSizeInBits() <= 8)
      return true;

    auto *BV = dyn_cast<BuildVectorSDNode>(Op);
    return (BV && BV->isConstant());
  };

  // (dpbusd (zext a), (sext b)). Since the first operand should be an unsigned
42900 // value, we need to check Op0 is zero extended value. Op1 should be signed
42901 // value, so we just check the signed bits.
42902 if ((IsFreeTruncation(Op0) &&
42903 DAG.computeKnownBits(Op0).countMaxActiveBits() <= 8) &&
      (IsFreeTruncation(Op1) && DAG.ComputeMaxSignificantBits(Op1) <= 8))
    return true;

  return false;
}

// Given an ABS node, detect the following pattern:
42911 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
42912 // This is useful as it is the input into a SAD pattern.
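// e.g. PSADBW computes sum(abs(A[i] - B[i])) over each group of 8 bytes,
// which is exactly the (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))) pattern
// accumulated horizontally.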
42913 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
42914 SDValue AbsOp1 = Abs->getOperand(0);
  if (AbsOp1.getOpcode() != ISD::SUB)
    return false;

  Op0 = AbsOp1.getOperand(0);
42919 Op1 = AbsOp1.getOperand(1);
42921 // Check if the operands of the sub are zero-extended from vectors of i8.
42922 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
42923 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
42924 Op1.getOpcode() != ISD::ZERO_EXTEND ||
      Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
    return false;

  return true;
}

static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS,
42932 unsigned &LogBias, const SDLoc &DL,
42933 const X86Subtarget &Subtarget) {
  // Extend or truncate to MVT::i8 first.
  MVT Vi8VT =
      MVT::getVectorVT(MVT::i8, LHS.getValueType().getVectorElementCount());
42937 LHS = DAG.getZExtOrTrunc(LHS, DL, Vi8VT);
42938 RHS = DAG.getSExtOrTrunc(RHS, DL, Vi8VT);
42940 // VPDPBUSD(<16 x i32>C, <16 x i8>A, <16 x i8>B). For each dst element
42941 // C[0] = C[0] + A[0]B[0] + A[1]B[1] + A[2]B[2] + A[3]B[3].
42942 // The src A, B element type is i8, but the dst C element type is i32.
// When we count the reduction stages we use the source vector type vXi8,
// so we need a LogBias of 2 to avoid 2 extra stages.
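//
// To make the bias concrete (an illustrative note, not from the original
// comments): reducing 16 i32 products to a scalar would normally take
// Log2(16) == 4 shuffle+add stages, but a single VPDPBUSD over v16i8
// inputs already folds each group of 4 byte products into one i32 lane:
//   C[i] += A[4*i+0]*B[4*i+0] + ... + A[4*i+3]*B[4*i+3]
// which covers Log2(4) == 2 of those stages, hence a LogBias of 2.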
42947 unsigned RegSize = std::max(128u, (unsigned)Vi8VT.getSizeInBits());
42948 if (Subtarget.hasVNNI() && !Subtarget.hasVLX())
42949 RegSize = std::max(512u, RegSize);
42951 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
42952 // fill in the missing vector elements with 0.
42953 unsigned NumConcat = RegSize / Vi8VT.getSizeInBits();
42954 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, Vi8VT));
42956 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
42957 SDValue DpOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
42959 SDValue DpOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
42961 // Actually build the DotProduct, split as 256/512 bits for
42962 // AVXVNNI/AVX512VNNI.
42963 auto DpBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
42964 ArrayRef<SDValue> Ops) {
42965 MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
42966 return DAG.getNode(X86ISD::VPDPBUSD, DL, VT, Ops);
42968 MVT DpVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
42969 SDValue Zero = DAG.getConstant(0, DL, DpVT);
42971 return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1},
42975 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
42977 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
42978 const SDValue &Zext1, const SDLoc &DL,
42979 const X86Subtarget &Subtarget) {
42980 // Find the appropriate width for the PSADBW.
42981 EVT InVT = Zext0.getOperand(0).getValueType();
42982 unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
42984 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
42985 // fill in the missing vector elements with 0.
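//
// For example (a sketch added for clarity): a v4i8 input with
// RegSize == 128 gives NumConcat == 4, so the operand becomes
//   concat_vectors(x, zero, zero, zero) : v16i8
// and the zero lanes contribute nothing to the PSADBW sums.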
42986 unsigned NumConcat = RegSize / InVT.getSizeInBits();
42987 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
42988 Ops[0] = Zext0.getOperand(0);
42989 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
42990 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
42991 Ops[0] = Zext1.getOperand(0);
42992 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
42994 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
42995 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
42996 ArrayRef<SDValue> Ops) {
42997 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
42998 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
43000 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
43001 return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
// Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
// PHMINPOSUW.
43007 static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG,
43008 const X86Subtarget &Subtarget) {
43009 // Bail without SSE41.
43010 if (!Subtarget.hasSSE41())
43013 EVT ExtractVT = Extract->getValueType(0);
43014 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
43017 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
43018 ISD::NodeType BinOp;
43019 SDValue Src = DAG.matchBinOpReduction(
43020 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
43024 EVT SrcVT = Src.getValueType();
43025 EVT SrcSVT = SrcVT.getScalarType();
43026 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
43030 SDValue MinPos = Src;
43032 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
43033 while (SrcVT.getSizeInBits() > 128) {
43035 std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
43036 SrcVT = Lo.getValueType();
43037 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
43039 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
43040 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
43041 "Unexpected value type");
// PHMINPOSUW only performs UMIN(v8i16); for SMIN/SMAX/UMAX we must first
// apply a mask to flip the values into unsigned-min order.
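//
// For instance (illustrative, not from the original comments), with i16
// elements:
//   SMAX: x ^ 0x7FFF maps the signed order onto the reversed unsigned
//         order, so a UMIN of the flipped values finds the signed maximum.
//   SMIN: x ^ 0x8000 biases signed values into ascending unsigned order.
//   UMAX: ~x reverses the unsigned order.
// XORing with the same mask afterwards recovers the original value.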
43046 unsigned MaskEltsBits = ExtractVT.getSizeInBits();
43047 if (BinOp == ISD::SMAX)
43048 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
43049 else if (BinOp == ISD::SMIN)
43050 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
43051 else if (BinOp == ISD::UMAX)
43052 Mask = DAG.getAllOnesConstant(DL, SrcVT);
43055 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
// For v16i8 cases we need to perform UMIN on pairs of byte elements,
// shuffling each upper element down and inserting zeros. This means that
// the v16i8 UMIN will leave the upper byte of each pair as zero, performing
// the zero-extension ready for the PHMINPOS.
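//
// Sketch of the pairing step (added for illustration): with the mask below,
// each odd byte is placed next to a zero from the second operand, so after
// the byte UMIN every i16 lane holds min(b[2k], b[2k+1]) zero-extended to
// 16 bits - exactly the form PHMINPOSUW expects.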
43061 if (ExtractVT == MVT::i8) {
43062 SDValue Upper = DAG.getVectorShuffle(
43063 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
43064 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
43065 MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
// Perform the PHMINPOS on a v8i16 vector.
43069 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
43070 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
43071 MinPos = DAG.getBitcast(SrcVT, MinPos);
43074 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
43076 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
43077 DAG.getIntPtrConstant(0, DL));
// Attempt to replace an all_of/any_of/parity style horizontal reduction with
// a MOVMSK.
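//
// For example (an illustrative sketch, not from the original comments),
// given a v16i8 compare mask M:
//   any_of(M) -> PMOVMSKB(M) != 0
//   all_of(M) -> PMOVMSKB(M) == 0xFFFF
//   parity(M) -> PARITY(PMOVMSKB(M))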
43081 static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
43082 const X86Subtarget &Subtarget) {
43083 // Bail without SSE2.
43084 if (!Subtarget.hasSSE2())
43087 EVT ExtractVT = Extract->getValueType(0);
43088 unsigned BitWidth = ExtractVT.getSizeInBits();
43089 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
43090 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
43093 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
43094 ISD::NodeType BinOp;
43095 SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
43096 if (!Match && ExtractVT == MVT::i1)
43097 Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
43101 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
43102 // which we can't support here for now.
43103 if (Match.getScalarValueSizeInBits() != BitWidth)
43108 EVT MatchVT = Match.getValueType();
43109 unsigned NumElts = MatchVT.getVectorNumElements();
43110 unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
43111 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43113 if (ExtractVT == MVT::i1) {
43114 // Special case for (pre-legalization) vXi1 reductions.
43115 if (NumElts > 64 || !isPowerOf2_32(NumElts))
43117 if (TLI.isTypeLegal(MatchVT)) {
43118 // If this is a legal AVX512 predicate type then we can just bitcast.
43119 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
43120 Movmsk = DAG.getBitcast(MovmskVT, Match);
43122 // For all_of(setcc(x,y,eq)) - use PMOVMSKB(PCMPEQB()).
43123 if (BinOp == ISD::AND && Match.getOpcode() == ISD::SETCC &&
43124 cast<CondCodeSDNode>(Match.getOperand(2))->get() ==
43125 ISD::CondCode::SETEQ) {
43126 EVT VecSVT = Match.getOperand(0).getValueType().getScalarType();
43127 if (VecSVT != MVT::i8) {
43128 NumElts *= VecSVT.getSizeInBits() / 8;
43129 EVT CmpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, NumElts);
43130 MatchVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
43131 Match = DAG.getSetCC(
43132 DL, MatchVT, DAG.getBitcast(CmpVT, Match.getOperand(0)),
43133 DAG.getBitcast(CmpVT, Match.getOperand(1)), ISD::CondCode::SETEQ);
43137 // Use combineBitcastvxi1 to create the MOVMSK.
43138 while (NumElts > MaxElts) {
43140 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
43141 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
43144 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
43145 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
43149 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
43151 // FIXME: Better handling of k-registers or 512-bit vectors?
43152 unsigned MatchSizeInBits = Match.getValueSizeInBits();
43153 if (!(MatchSizeInBits == 128 ||
43154 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
43157 // Make sure this isn't a vector of 1 element. The perf win from using
// MOVMSK diminishes with fewer elements in the reduction, but it is
43159 // generally better to get the comparison over to the GPRs as soon as
43160 // possible to reduce the number of vector ops.
43161 if (Match.getValueType().getVectorNumElements() < 2)
43164 // Check that we are extracting a reduction of all sign bits.
43165 if (DAG.ComputeNumSignBits(Match) != BitWidth)
43168 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
43170 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
43171 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
43172 MatchSizeInBits = Match.getValueSizeInBits();
43175 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
43177 if (64 == BitWidth || 32 == BitWidth)
43178 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
43179 MatchSizeInBits / BitWidth);
43181 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
43183 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
43184 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
43185 NumElts = MaskSrcVT.getVectorNumElements();
43187 assert((NumElts <= 32 || NumElts == 64) &&
43188 "Not expecting more than 64 elements");
43190 MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
43191 if (BinOp == ISD::XOR) {
43192 // parity -> (PARITY(MOVMSK X))
43193 SDValue Result = DAG.getNode(ISD::PARITY, DL, CmpVT, Movmsk);
43194 return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
43198 ISD::CondCode CondCode;
43199 if (BinOp == ISD::OR) {
43200 // any_of -> MOVMSK != 0
43201 CmpC = DAG.getConstant(0, DL, CmpVT);
43202 CondCode = ISD::CondCode::SETNE;
43204 // all_of -> MOVMSK == ((1 << NumElts) - 1)
43205 CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
43207 CondCode = ISD::CondCode::SETEQ;
43210 // The setcc produces an i8 of 0/1, so extend that to the result width and
43211 // negate to get the final 0/-1 mask value.
43213 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
43214 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
43215 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
43216 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
43217 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
43220 static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG,
43221 const X86Subtarget &Subtarget) {
43222 if (!Subtarget.hasVNNI() && !Subtarget.hasAVXVNNI())
43225 EVT ExtractVT = Extract->getValueType(0);
43226 // Verify the type we're extracting is i32, as the output element type of
43227 // vpdpbusd is i32.
43228 if (ExtractVT != MVT::i32)
43231 EVT VT = Extract->getOperand(0).getValueType();
43232 if (!isPowerOf2_32(VT.getVectorNumElements()))
43235 // Match shuffle + add pyramid.
43236 ISD::NodeType BinOp;
43237 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
// We can't combine to vpdpbusd for zext, because each of the 4 multiplies
// done by vpdpbusd computes a signed 16-bit product that will be sign
// extended before adding into the accumulator.
43243 // We also need to verify that the multiply has at least 2x the number of bits
43244 // of the input. We shouldn't match
// (sign_extend (mul (vXi9 (zext (vXi8 X))), (vXi9 (zext (vXi8 Y))))).
43246 // if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND))
43247 // Root = Root.getOperand(0);
43249 // If there was a match, we want Root to be a mul.
43250 if (!Root || Root.getOpcode() != ISD::MUL)
43253 // Check whether we have an extend and mul pattern
43255 if (!detectExtMul(DAG, Root, LHS, RHS))
43258 // Create the dot product instruction.
43260 unsigned StageBias;
43261 SDValue DP = createVPDPBUSD(DAG, LHS, RHS, StageBias, DL, Subtarget);
43263 // If the original vector was wider than 4 elements, sum over the results
43264 // in the DP vector.
43265 unsigned Stages = Log2_32(VT.getVectorNumElements());
43266 EVT DpVT = DP.getValueType();
43268 if (Stages > StageBias) {
43269 unsigned DpElems = DpVT.getVectorNumElements();
43271 for (unsigned i = Stages - StageBias; i > 0; --i) {
43272 SmallVector<int, 16> Mask(DpElems, -1);
43273 for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
43274 Mask[j] = MaskEnd + j;
43277 DAG.getVectorShuffle(DpVT, DL, DP, DAG.getUNDEF(DpVT), Mask);
43278 DP = DAG.getNode(ISD::ADD, DL, DpVT, DP, Shuffle);
43282 // Return the lowest ExtractSizeInBits bits.
43284 EVT::getVectorVT(*DAG.getContext(), ExtractVT,
43285 DpVT.getSizeInBits() / ExtractVT.getSizeInBits());
43286 DP = DAG.getBitcast(ResVT, DP);
43287 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, DP,
43288 Extract->getOperand(1));
43291 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
43292 const X86Subtarget &Subtarget) {
43293 // PSADBW is only supported on SSE2 and up.
43294 if (!Subtarget.hasSSE2())
43297 EVT ExtractVT = Extract->getValueType(0);
43298 // Verify the type we're extracting is either i32 or i64.
43299 // FIXME: Could support other types, but this is what we have coverage for.
43300 if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
43303 EVT VT = Extract->getOperand(0).getValueType();
43304 if (!isPowerOf2_32(VT.getVectorNumElements()))
43307 // Match shuffle + add pyramid.
43308 ISD::NodeType BinOp;
43309 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
// The operands are expected to be zero extended from i8
// (verified in detectZextAbsDiff).
// In order to convert to i64 and above, an additional any/zero/sign
// extend is expected.
// The zero extend from 32 bits has no mathematical effect on the result.
// The sign extend is also effectively a zero extend
// (it extends the sign bit, which is zero).
// So it is correct to skip the sign/zero extend instruction.
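//
// As a sanity check (added for illustration): the reduced value is a sum of
// byte-sized absolute differences, so for any realistic vector length it is
// far below 2^31; its sign bit is zero and zero- and sign-extension to i64
// agree.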
43319 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
43320 Root.getOpcode() == ISD::ZERO_EXTEND ||
43321 Root.getOpcode() == ISD::ANY_EXTEND))
43322 Root = Root.getOperand(0);
// If there was a match, we want Root to be an ABS node that is the root of
// an abs-diff pattern.
43326 if (!Root || Root.getOpcode() != ISD::ABS)
// Check whether we have an abs-diff pattern feeding into the ABS.
43330 SDValue Zext0, Zext1;
43331 if (!detectZextAbsDiff(Root, Zext0, Zext1))
43334 // Create the SAD instruction.
43336 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
43338 // If the original vector was wider than 8 elements, sum over the results
43339 // in the SAD vector.
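//
// Illustrative walk-through (not from the original comments): for a
// reduction of 64 byte values, Stages == Log2(64) == 6, and the PSADBW has
// already summed each group of 8 bytes into an i64 lane, covering 3 of
// those stages; the loop below performs the remaining 3 shuffle+add rounds,
// halving the number of live partial sums each time.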
43340 unsigned Stages = Log2_32(VT.getVectorNumElements());
43341 EVT SadVT = SAD.getValueType();
43343 unsigned SadElems = SadVT.getVectorNumElements();
43345 for(unsigned i = Stages - 3; i > 0; --i) {
43346 SmallVector<int, 16> Mask(SadElems, -1);
43347 for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
43348 Mask[j] = MaskEnd + j;
43351 DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
43352 SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
43356 unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
43357 // Return the lowest ExtractSizeInBits bits.
43358 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
43359 SadVT.getSizeInBits() / ExtractSizeInBits);
43360 SAD = DAG.getBitcast(ResVT, SAD);
43361 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
43362 Extract->getOperand(1));
// Attempt to peek through a target shuffle and extract the scalar from the
// source vector.
43367 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
43368 TargetLowering::DAGCombinerInfo &DCI,
43369 const X86Subtarget &Subtarget) {
43370 if (DCI.isBeforeLegalizeOps())
43374 SDValue Src = N->getOperand(0);
43375 SDValue Idx = N->getOperand(1);
43377 EVT VT = N->getValueType(0);
43378 EVT SrcVT = Src.getValueType();
43379 EVT SrcSVT = SrcVT.getVectorElementType();
43380 unsigned SrcEltBits = SrcSVT.getSizeInBits();
43381 unsigned NumSrcElts = SrcVT.getVectorNumElements();
43383 // Don't attempt this for boolean mask vectors or unknown extraction indices.
43384 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
43387 const APInt &IdxC = N->getConstantOperandAPInt(1);
43388 if (IdxC.uge(NumSrcElts))
43391 SDValue SrcBC = peekThroughBitcasts(Src);
43393 // Handle extract(bitcast(broadcast(scalar_value))).
43394 if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
43395 SDValue SrcOp = SrcBC.getOperand(0);
43396 EVT SrcOpVT = SrcOp.getValueType();
43397 if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
43398 (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
43399 unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
43400 unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
// TODO: support non-zero offsets.
43403 SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
43404 SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
43410 // If we're extracting a single element from a broadcast load and there are
43411 // no other users, just create a single load.
43412 if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
43413 auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
43414 unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
43415 if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
43416 VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
43417 SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
43418 MemIntr->getBasePtr(),
43419 MemIntr->getPointerInfo(),
43420 MemIntr->getOriginalAlign(),
43421 MemIntr->getMemOperand()->getFlags());
43422 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
43427 // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
43428 // TODO: Move to DAGCombine?
43429 if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
43430 SrcBC.getValueType().isInteger() &&
43431 (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
43432 SrcBC.getScalarValueSizeInBits() ==
43433 SrcBC.getOperand(0).getValueSizeInBits()) {
43434 unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
43435 if (IdxC.ult(Scale)) {
43436 unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
43437 SDValue Scl = SrcBC.getOperand(0);
43438 EVT SclVT = Scl.getValueType();
43440 Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
43441 DAG.getShiftAmountConstant(Offset, SclVT, dl));
43443 Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
43444 Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
43449 // Handle extract(truncate(x)) for 0'th index.
43450 // TODO: Treat this as a faux shuffle?
43451 // TODO: When can we use this for general indices?
43452 if (ISD::TRUNCATE == Src.getOpcode() && IdxC == 0 &&
43453 (SrcVT.getSizeInBits() % 128) == 0) {
43454 Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
43455 MVT ExtractVT = MVT::getVectorVT(SrcSVT.getSimpleVT(), 128 / SrcEltBits);
43456 return DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(ExtractVT, Src),
43460 // We can only legally extract other elements from 128-bit vectors and in
43461 // certain circumstances, depending on SSE-level.
43462 // TODO: Investigate float/double extraction if it will be just stored.
43463 auto GetLegalExtract = [&Subtarget, &DAG, &dl](SDValue Vec, EVT VecVT,
43465 EVT VecSVT = VecVT.getScalarType();
43466 if ((VecVT.is256BitVector() || VecVT.is512BitVector()) &&
43467 (VecSVT == MVT::i8 || VecSVT == MVT::i16 || VecSVT == MVT::i32 ||
43468 VecSVT == MVT::i64)) {
43469 unsigned EltSizeInBits = VecSVT.getSizeInBits();
43470 unsigned NumEltsPerLane = 128 / EltSizeInBits;
43471 unsigned LaneOffset = (Idx & ~(NumEltsPerLane - 1)) * EltSizeInBits;
43472 unsigned LaneIdx = LaneOffset / Vec.getScalarValueSizeInBits();
43473 VecVT = EVT::getVectorVT(*DAG.getContext(), VecSVT, NumEltsPerLane);
43474 Vec = extract128BitVector(Vec, LaneIdx, DAG, dl);
43475 Idx &= (NumEltsPerLane - 1);
43477 if ((VecVT == MVT::v4i32 || VecVT == MVT::v2i64) &&
43478 ((Idx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
43479 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VecVT.getScalarType(),
43480 DAG.getBitcast(VecVT, Vec),
43481 DAG.getIntPtrConstant(Idx, dl));
43483 if ((VecVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
43484 (VecVT == MVT::v16i8 && Subtarget.hasSSE41())) {
43485 unsigned OpCode = (VecVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
43486 return DAG.getNode(OpCode, dl, MVT::i32, DAG.getBitcast(VecVT, Vec),
43487 DAG.getTargetConstant(Idx, dl, MVT::i8));
43492 // Resolve the target shuffle inputs and mask.
43493 SmallVector<int, 16> Mask;
43494 SmallVector<SDValue, 2> Ops;
43495 if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
43498 // Shuffle inputs must be the same size as the result.
43499 if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
43500 return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
43504 // Attempt to narrow/widen the shuffle mask to the correct size.
43505 if (Mask.size() != NumSrcElts) {
43506 if ((NumSrcElts % Mask.size()) == 0) {
43507 SmallVector<int, 16> ScaledMask;
43508 int Scale = NumSrcElts / Mask.size();
43509 narrowShuffleMaskElts(Scale, Mask, ScaledMask);
43510 Mask = std::move(ScaledMask);
43511 } else if ((Mask.size() % NumSrcElts) == 0) {
// Simplify Mask based on the demanded element.
43513 int ExtractIdx = (int)IdxC.getZExtValue();
43514 int Scale = Mask.size() / NumSrcElts;
43515 int Lo = Scale * ExtractIdx;
43516 int Hi = Scale * (ExtractIdx + 1);
43517 for (int i = 0, e = (int)Mask.size(); i != e; ++i)
43518 if (i < Lo || Hi <= i)
43519 Mask[i] = SM_SentinelUndef;
43521 SmallVector<int, 16> WidenedMask;
43522 while (Mask.size() > NumSrcElts &&
43523 canWidenShuffleElements(Mask, WidenedMask))
43524 Mask = std::move(WidenedMask);
43528 // If narrowing/widening failed, see if we can extract+zero-extend.
43531 if (Mask.size() == NumSrcElts) {
43532 ExtractIdx = Mask[IdxC.getZExtValue()];
43535 unsigned Scale = Mask.size() / NumSrcElts;
43536 if ((Mask.size() % NumSrcElts) != 0 || SrcVT.isFloatingPoint())
43538 unsigned ScaledIdx = Scale * IdxC.getZExtValue();
43539 if (!isUndefOrZeroInRange(Mask, ScaledIdx + 1, Scale - 1))
43541 ExtractIdx = Mask[ScaledIdx];
43542 EVT ExtractSVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltBits / Scale);
43543 ExtractVT = EVT::getVectorVT(*DAG.getContext(), ExtractSVT, Mask.size());
43544 assert(SrcVT.getSizeInBits() == ExtractVT.getSizeInBits() &&
43545 "Failed to widen vector type");
43548 // If the shuffle source element is undef/zero then we can just accept it.
43549 if (ExtractIdx == SM_SentinelUndef)
43550 return DAG.getUNDEF(VT);
43552 if (ExtractIdx == SM_SentinelZero)
43553 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
43554 : DAG.getConstant(0, dl, VT);
43556 SDValue SrcOp = Ops[ExtractIdx / Mask.size()];
43557 ExtractIdx = ExtractIdx % Mask.size();
43558 if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
43559 return DAG.getZExtOrTrunc(V, dl, VT);
43564 /// Extracting a scalar FP value from vector element 0 is free, so extract each
43565 /// operand first, then perform the math as a scalar op.
43566 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG,
43567 const X86Subtarget &Subtarget) {
43568 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
43569 SDValue Vec = ExtElt->getOperand(0);
43570 SDValue Index = ExtElt->getOperand(1);
43571 EVT VT = ExtElt->getValueType(0);
43572 EVT VecVT = Vec.getValueType();
43574 // TODO: If this is a unary/expensive/expand op, allow extraction from a
43575 // non-zero element because the shuffle+scalar op will be cheaper?
43576 if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
43579 // Vector FP compares don't fit the pattern of FP math ops (propagate, not
43580 // extract, the condition code), so deal with those as a special-case.
43581 if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
43582 EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
43583 if (OpVT != MVT::f32 && OpVT != MVT::f64)
43586 // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
43588 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
43589 Vec.getOperand(0), Index);
43590 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
43591 Vec.getOperand(1), Index);
43592 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
43595 if (!(VT == MVT::f16 && Subtarget.hasFP16()) && VT != MVT::f32 &&
43599 // Vector FP selects don't fit the pattern of FP math ops (because the
43600 // condition has a different type and we have to change the opcode), so deal
43601 // with those here.
43602 // FIXME: This is restricted to pre type legalization by ensuring the setcc
// has i1 elements. If we loosen this we need to convert vector bool to a
// full mask.
43605 if (Vec.getOpcode() == ISD::VSELECT &&
43606 Vec.getOperand(0).getOpcode() == ISD::SETCC &&
43607 Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
43608 Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
43609 // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
43611 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
43612 Vec.getOperand(0).getValueType().getScalarType(),
43613 Vec.getOperand(0), Index);
43614 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
43615 Vec.getOperand(1), Index);
43616 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
43617 Vec.getOperand(2), Index);
43618 return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
43621 // TODO: This switch could include FNEG and the x86-specific FP logic ops
43622 // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
43623 // missed load folding and fma+fneg combining.
43624 switch (Vec.getOpcode()) {
43625 case ISD::FMA: // Begin 3 operands
43627 case ISD::FADD: // Begin 2 operands
43632 case ISD::FCOPYSIGN:
43635 case ISD::FMINNUM_IEEE:
43636 case ISD::FMAXNUM_IEEE:
43637 case ISD::FMAXIMUM:
43638 case ISD::FMINIMUM:
43641 case ISD::FABS: // Begin 1 operand
43646 case ISD::FNEARBYINT:
43650 case X86ISD::FRSQRT: {
43651 // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
43653 SmallVector<SDValue, 4> ExtOps;
43654 for (SDValue Op : Vec->ops())
43655 ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
43656 return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
43661 llvm_unreachable("All opcodes should return within switch");
43664 /// Try to convert a vector reduction sequence composed of binops and shuffles
43665 /// into horizontal ops.
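//
// A minimal sketch of one target shape (added for illustration), for a
// v4f32 fadd reduction ending in an extract from index 0:
//   Rdx = fhadd(V, V);     // lane0 = V0+V1, lane1 = V2+V3, ...
//   Rdx = fhadd(Rdx, Rdx); // lane0 = V0+V1+V2+V3
//   extract_vector_elt Rdx, 0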
43666 static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
43667 const X86Subtarget &Subtarget) {
43668 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
// We need at least SSE2 to do anything here.
43671 if (!Subtarget.hasSSE2())
43675 SDValue Rdx = DAG.matchBinOpReduction(ExtElt, Opc,
43676 {ISD::ADD, ISD::MUL, ISD::FADD}, true);
43680 SDValue Index = ExtElt->getOperand(1);
43681 assert(isNullConstant(Index) &&
43682 "Reduction doesn't end in an extract from index 0");
43684 EVT VT = ExtElt->getValueType(0);
43685 EVT VecVT = Rdx.getValueType();
43686 if (VecVT.getScalarType() != VT)
43690 unsigned NumElts = VecVT.getVectorNumElements();
43691 unsigned EltSizeInBits = VecVT.getScalarSizeInBits();
43693 // Extend v4i8/v8i8 vector to v16i8, with undef upper 64-bits.
43694 auto WidenToV16I8 = [&](SDValue V, bool ZeroExtend) {
43695 if (V.getValueType() == MVT::v4i8) {
43696 if (ZeroExtend && Subtarget.hasSSE41()) {
43697 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
43698 DAG.getConstant(0, DL, MVT::v4i32),
43699 DAG.getBitcast(MVT::i32, V),
43700 DAG.getIntPtrConstant(0, DL));
43701 return DAG.getBitcast(MVT::v16i8, V);
43703 V = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, V,
43704 ZeroExtend ? DAG.getConstant(0, DL, MVT::v4i8)
43705 : DAG.getUNDEF(MVT::v4i8));
43707 return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V,
43708 DAG.getUNDEF(MVT::v8i8));
43711 // vXi8 mul reduction - promote to vXi16 mul reduction.
43712 if (Opc == ISD::MUL) {
43713 if (VT != MVT::i8 || NumElts < 4 || !isPowerOf2_32(NumElts))
43715 if (VecVT.getSizeInBits() >= 128) {
43716 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts / 2);
43717 SDValue Lo = getUnpackl(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
43718 SDValue Hi = getUnpackh(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
43719 Lo = DAG.getBitcast(WideVT, Lo);
43720 Hi = DAG.getBitcast(WideVT, Hi);
43721 Rdx = DAG.getNode(Opc, DL, WideVT, Lo, Hi);
43722 while (Rdx.getValueSizeInBits() > 128) {
43723 std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
43724 Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
43727 Rdx = WidenToV16I8(Rdx, false);
43728 Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
43729 Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
43732 Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
43733 DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
43734 {4, 5, 6, 7, -1, -1, -1, -1}));
43735 Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
43736 DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
43737 {2, 3, -1, -1, -1, -1, -1, -1}));
43738 Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
43739 DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
43740 {1, -1, -1, -1, -1, -1, -1, -1}));
43741 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
43742 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
// vXi8 add reduction - sub-128-bit vector.
43746 if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
43747 Rdx = WidenToV16I8(Rdx, true);
43748 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
43749 DAG.getConstant(0, DL, MVT::v16i8));
43750 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
43751 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
43754 // Must be a >=128-bit vector with pow2 elements.
43755 if ((VecVT.getSizeInBits() % 128) != 0 || !isPowerOf2_32(NumElts))
43758 // vXi8 add reduction - sum lo/hi halves then use PSADBW.
43759 if (VT == MVT::i8) {
43760 while (Rdx.getValueSizeInBits() > 128) {
43762 std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
43763 VecVT = Lo.getValueType();
43764 Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
43766 assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
43768 SDValue Hi = DAG.getVectorShuffle(
43769 MVT::v16i8, DL, Rdx, Rdx,
43770 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
43771 Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
43772 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
43773 getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
43774 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
43775 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
43778 // See if we can use vXi8 PSADBW add reduction for larger zext types.
43779 // If the source vector values are 0-255, then we can use PSADBW to
43780 // sum+zext v8i8 subvectors to vXi64, then perform the reduction.
// TODO: See if it's worth avoiding vXi16/i32 truncations?
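//
// Worked example (illustrative, not from the original comments): for a
// v8i32 add reduction where every element is known to be <= 255, truncate
// to v8i8 and widen to v16i8 with zeros; a single PSADBW against zero then
// leaves the whole sum in the low i64 lane, replacing the remaining
// shuffle+add pyramid.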
43782 if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
43783 DAG.computeKnownBits(Rdx).getMaxValue().ule(255) &&
43784 (EltSizeInBits == 16 || Rdx.getOpcode() == ISD::ZERO_EXTEND ||
43785 Subtarget.hasAVX512())) {
43786 EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
43787 Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
43788 if (ByteVT.getSizeInBits() < 128)
43789 Rdx = WidenToV16I8(Rdx, true);
43791 // Build the PSADBW, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
43792 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43793 ArrayRef<SDValue> Ops) {
43794 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
43795 SDValue Zero = DAG.getConstant(0, DL, Ops[0].getValueType());
43796 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops[0], Zero);
43798 MVT SadVT = MVT::getVectorVT(MVT::i64, Rdx.getValueSizeInBits() / 64);
43799 Rdx = SplitOpsAndApply(DAG, Subtarget, DL, SadVT, {Rdx}, PSADBWBuilder);
43801 // TODO: We could truncate to vXi16/vXi32 before performing the reduction.
43802 while (Rdx.getValueSizeInBits() > 128) {
43804 std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
43805 VecVT = Lo.getValueType();
43806 Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
43808 assert(Rdx.getValueType() == MVT::v2i64 && "v2i64 reduction expected");
43811 SDValue RdxHi = DAG.getVectorShuffle(MVT::v2i64, DL, Rdx, Rdx, {1, -1});
43812 Rdx = DAG.getNode(ISD::ADD, DL, MVT::v2i64, Rdx, RdxHi);
43815 VecVT = MVT::getVectorVT(VT.getSimpleVT(), 128 / VT.getSizeInBits());
43816 Rdx = DAG.getBitcast(VecVT, Rdx);
43817 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
// Only use (F)HADD opcodes if they aren't microcoded or we're minimizing
// code size.
43821 if (!shouldUseHorizontalOp(true, DAG, Subtarget))
43824 unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
43826 // 256-bit horizontal instructions operate on 128-bit chunks rather than
43827 // across the whole vector, so we need an extract + hop preliminary stage.
43828 // This is the only step where the operands of the hop are not the same value.
43829 // TODO: We could extend this to handle 512-bit or even longer vectors.
43830 if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
43831 ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
43832 unsigned NumElts = VecVT.getVectorNumElements();
43833 SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
43834 SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
43835 Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
43836 VecVT = Rdx.getValueType();
43838 if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
43839 !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
43842 // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
43843 unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
43844 for (unsigned i = 0; i != ReductionSteps; ++i)
43845 Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
43847 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
43850 /// Detect vector gather/scatter index generation and convert it from being a
43851 /// bunch of shuffles and extracts into a somewhat faster sequence.
43852 /// For i686, the best sequence is apparently storing the value and loading
43853 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
43854 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
43855 TargetLowering::DAGCombinerInfo &DCI,
43856 const X86Subtarget &Subtarget) {
43857 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
43860 SDValue InputVector = N->getOperand(0);
43861 SDValue EltIdx = N->getOperand(1);
43862 auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
43864 EVT SrcVT = InputVector.getValueType();
43865 EVT VT = N->getValueType(0);
43866 SDLoc dl(InputVector);
43867 bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
43868 unsigned NumSrcElts = SrcVT.getVectorNumElements();
43870 if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
43871 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
43873 // Integer Constant Folding.
43874 if (CIdx && VT.isInteger()) {
43875 APInt UndefVecElts;
43876 SmallVector<APInt, 16> EltBits;
43877 unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
43878 if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
43879 EltBits, true, false)) {
43880 uint64_t Idx = CIdx->getZExtValue();
43881 if (UndefVecElts[Idx])
43882 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
43883 return DAG.getConstant(EltBits[Idx].zext(VT.getScalarSizeInBits()), dl,
43889 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43890 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
43891 APInt::getAllOnes(VT.getSizeInBits()), DCI))
43892 return SDValue(N, 0);
43894 // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
43895 if ((InputVector.getOpcode() == X86ISD::PINSRB ||
43896 InputVector.getOpcode() == X86ISD::PINSRW) &&
43897 InputVector.getOperand(2) == EltIdx) {
43898 assert(SrcVT == InputVector.getOperand(0).getValueType() &&
43899 "Vector type mismatch");
43900 SDValue Scl = InputVector.getOperand(1);
43901 Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
43902 return DAG.getZExtOrTrunc(Scl, dl, VT);
43905 // TODO - Remove this once we can handle the implicit zero-extension of
43906 // X86ISD::PEXTRW/X86ISD::PEXTRB in combinePredicateReduction and
43907 // combineBasicSADPattern.
// Detect mmx extraction of all bits as an i64. It works better as a bitcast.
43912 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
43913 VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
43914 SDValue MMXSrc = InputVector.getOperand(0);
43916 // The bitcast source is a direct mmx result.
43917 if (MMXSrc.getValueType() == MVT::x86mmx)
43918 return DAG.getBitcast(VT, InputVector);
43921 // Detect mmx to i32 conversion through a v2i32 elt extract.
43922 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
43923 VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
43924 SDValue MMXSrc = InputVector.getOperand(0);
43926 // The bitcast source is a direct mmx result.
43927 if (MMXSrc.getValueType() == MVT::x86mmx)
43928 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
43931 // Check whether this extract is the root of a sum of absolute differences
43932 // pattern. This has to be done here because we really want it to happen
// pre-legalization.
43934 if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
43937 if (SDValue VPDPBUSD = combineVPDPBUSDPattern(N, DAG, Subtarget))
43940 // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
43941 if (SDValue Cmp = combinePredicateReduction(N, DAG, Subtarget))
43944 // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
43945 if (SDValue MinMax = combineMinMaxReduction(N, DAG, Subtarget))
43948 // Attempt to optimize ADD/FADD/MUL reductions with HADD, promotion etc..
43949 if (SDValue V = combineArithReduction(N, DAG, Subtarget))
43952 if (SDValue V = scalarizeExtEltFP(N, DAG, Subtarget))
// Attempt to extract an i1 element by using MOVMSK to extract the signbits
43956 // and then testing the relevant element.
43958 // Note that we only combine extracts on the *same* result number, i.e.
43959 // t0 = merge_values a0, a1, a2, a3
43960 // i1 = extract_vector_elt t0, Constant:i64<2>
43961 // i1 = extract_vector_elt t0, Constant:i64<3>
// but not
// i1 = extract_vector_elt t0:1, Constant:i64<2>
43964 // since the latter would need its own MOVMSK.
43965 if (SrcVT.getScalarType() == MVT::i1) {
43966 bool IsVar = !CIdx;
43967 SmallVector<SDNode *, 16> BoolExtracts;
43968 unsigned ResNo = InputVector.getResNo();
43969 auto IsBoolExtract = [&BoolExtracts, &ResNo, &IsVar](SDNode *Use) {
43970 if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
43971 Use->getOperand(0).getResNo() == ResNo &&
43972 Use->getValueType(0) == MVT::i1) {
43973 BoolExtracts.push_back(Use);
43974 IsVar |= !isa<ConstantSDNode>(Use->getOperand(1));
43979 // TODO: Can we drop the oneuse check for constant extracts?
43980 if (all_of(InputVector->uses(), IsBoolExtract) &&
43981 (IsVar || BoolExtracts.size() > 1)) {
43982 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
43984 combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
43985 for (SDNode *Use : BoolExtracts) {
43986 // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
43987 // Mask = 1 << MaskIdx
43988 SDValue MaskIdx = DAG.getZExtOrTrunc(Use->getOperand(1), dl, MVT::i8);
43989 SDValue MaskBit = DAG.getConstant(1, dl, BCVT);
43990 SDValue Mask = DAG.getNode(ISD::SHL, dl, BCVT, MaskBit, MaskIdx);
43991 SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
43992 Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
43993 DCI.CombineTo(Use, Res);
43995 return SDValue(N, 0);
44000 // If this extract is from a loaded vector value and will be used as an
44001 // integer, that requires a potentially expensive XMM -> GPR transfer.
44002 // Additionally, if we can convert to a scalar integer load, that will likely
44003 // be folded into a subsequent integer op.
44004 // Note: Unlike the related fold for this in DAGCombiner, this is not limited
44005 // to a single-use of the loaded vector. For the reasons above, we
44006 // expect this to be profitable even if it creates an extra load.
44007 bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
44008 return Use->getOpcode() == ISD::STORE ||
44009 Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
44010 Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
44012 auto *LoadVec = dyn_cast<LoadSDNode>(InputVector);
44013 if (LoadVec && CIdx && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
44014 SrcVT.getVectorElementType() == VT && DCI.isAfterLegalizeDAG() &&
44015 !LikelyUsedAsVector && LoadVec->isSimple()) {
44016 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44018 TLI.getVectorElementPointer(DAG, LoadVec->getBasePtr(), SrcVT, EltIdx);
44019 unsigned PtrOff = VT.getSizeInBits() * CIdx->getZExtValue() / 8;
44020 MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
44021 Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
44023 DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
44024 LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
44025 DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
44032 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
44033 // This is more or less the reverse of combineBitcastvxi1.
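//
// Rough shape of the expansion (a sketch, not from the original comments),
// for sext(v8i1 bitcast(i8 X)) to v8i16:
//   V = broadcast X into all 8 lanes
//   V = and V, <1, 2, 4, ..., 128>
//   lane[i] = sext(V[i] == (1 << i))  // all-ones iff bit i of X was set
// For ZERO/ANY_EXTEND the result is instead shifted right to give 0/1.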
44034 static SDValue combineToExtendBoolVectorInReg(
44035 unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG,
44036 TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {
44037 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
44038 Opcode != ISD::ANY_EXTEND)
44040 if (!DCI.isBeforeLegalizeOps())
44042 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
44045 EVT SVT = VT.getScalarType();
44046 EVT InSVT = N0.getValueType().getScalarType();
44047 unsigned EltSizeInBits = SVT.getSizeInBits();
44049 // Input type must be extending a bool vector (bit-casted from a scalar
44050 // integer) to legal integer types.
44051 if (!VT.isVector())
44053 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
44055 if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
44058 SDValue N00 = N0.getOperand(0);
44059 EVT SclVT = N00.getValueType();
44060 if (!SclVT.isScalarInteger())
44064 SmallVector<int> ShuffleMask;
44065 unsigned NumElts = VT.getVectorNumElements();
44066 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
44068 // Broadcast the scalar integer to the vector elements.
44069 if (NumElts > EltSizeInBits) {
44070 // If the scalar integer is greater than the vector element size, then we
44071 // must split it down into sub-sections for broadcasting. For example:
44072 // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
44073 // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
44074 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
44075 unsigned Scale = NumElts / EltSizeInBits;
44076 EVT BroadcastVT = EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
44077 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
44078 Vec = DAG.getBitcast(VT, Vec);
44080 for (unsigned i = 0; i != Scale; ++i)
44081 ShuffleMask.append(EltSizeInBits, i);
44082 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
44083 } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
44084 (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
44085 // If we have register broadcast instructions, use the scalar size as the
44086 // element type for the shuffle. Then cast to the wider element type. The
// widened bits won't be used, and this might allow the use of a broadcast
// load.
44089 assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
44090 unsigned Scale = EltSizeInBits / NumElts;
44092 EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
44093 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
44094 ShuffleMask.append(NumElts * Scale, 0);
44095 Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
44096 Vec = DAG.getBitcast(VT, Vec);
// For a smaller scalar integer, we can simply any-extend it to the vector
// element size (we don't care about the upper bits) and broadcast it to all
// vector elements.
44101 SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
44102 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
44103 ShuffleMask.append(NumElts, 0);
44104 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
44107 // Now, mask the relevant bit in each element.
44108 SmallVector<SDValue, 32> Bits;
44109 for (unsigned i = 0; i != NumElts; ++i) {
44110 int BitIdx = (i % EltSizeInBits);
44111 APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
44112 Bits.push_back(DAG.getConstant(Bit, DL, SVT));
44114 SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
44115 Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
44117 // Compare against the bitmask and extend the result.
44118 EVT CCVT = VT.changeVectorElementType(MVT::i1);
44119 Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
44120 Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
// For SEXT, this is now done; otherwise shift the result down for
// zero-extension.
44124 if (Opcode == ISD::SIGN_EXTEND)
44126 return DAG.getNode(ISD::SRL, DL, VT, Vec,
44127 DAG.getConstant(EltSizeInBits - 1, DL, VT));
44130 /// If a vector select has an operand that is -1 or 0, try to simplify the
44131 /// select to a bitwise logic operation.
44132 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
44134 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
44135 TargetLowering::DAGCombinerInfo &DCI,
44136 const X86Subtarget &Subtarget) {
44137 SDValue Cond = N->getOperand(0);
44138 SDValue LHS = N->getOperand(1);
44139 SDValue RHS = N->getOperand(2);
44140 EVT VT = LHS.getValueType();
44141 EVT CondVT = Cond.getValueType();
44143 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44145 if (N->getOpcode() != ISD::VSELECT)
44148 assert(CondVT.isVector() && "Vector select expects a vector selector!");
44150 // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
44151 // TODO: Can we assert that both operands are not zeros (because that should
44152 // get simplified at node creation time)?
44153 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
44154 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
44156 // If both inputs are 0/undef, create a complete zero vector.
44157 // FIXME: As noted above this should be handled by DAGCombiner/getNode.
44158 if (TValIsAllZeros && FValIsAllZeros) {
44159 if (VT.isFloatingPoint())
44160 return DAG.getConstantFP(0.0, DL, VT);
44161 return DAG.getConstant(0, DL, VT);
44164 // To use the condition operand as a bitwise mask, it must have elements that
// are the same size as the select elements. I.e., the condition operand must
44166 // have already been promoted from the IR select condition type <N x i1>.
44167 // Don't check if the types themselves are equal because that excludes
44168 // vector floating-point selects.
44169 if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
44172 // Try to invert the condition if true value is not all 1s and false value is
44173 // not all 0s. Only do this if the condition has one use.
44174 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
44175 if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
44176 // Check if the selector will be produced by CMPP*/PCMP*.
44177 Cond.getOpcode() == ISD::SETCC &&
44178 // Check if SETCC has already been promoted.
44179 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
44181 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
44183 if (TValIsAllZeros || FValIsAllOnes) {
44184 SDValue CC = Cond.getOperand(2);
44185 ISD::CondCode NewCC = ISD::getSetCCInverse(
44186 cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
44187 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
44189 std::swap(LHS, RHS);
44190 TValIsAllOnes = FValIsAllOnes;
44191 FValIsAllZeros = TValIsAllZeros;
44195 // Cond value must be 'sign splat' to be converted to a logical op.
44196 if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
44199 // vselect Cond, 111..., 000... -> Cond
44200 if (TValIsAllOnes && FValIsAllZeros)
44201 return DAG.getBitcast(VT, Cond);
44203 if (!TLI.isTypeLegal(CondVT))
44206 // vselect Cond, 111..., X -> or Cond, X
44207 if (TValIsAllOnes) {
44208 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
44209 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
44210 return DAG.getBitcast(VT, Or);
44213 // vselect Cond, X, 000... -> and Cond, X
44214 if (FValIsAllZeros) {
44215 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
44216 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
44217 return DAG.getBitcast(VT, And);
44220 // vselect Cond, 000..., X -> andn Cond, X
44221 if (TValIsAllZeros) {
44222 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
// The canonical form differs for i1 vectors - x86andnp is not used.
44225 if (CondVT.getScalarType() == MVT::i1)
44226 AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
44229 AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
44230 return DAG.getBitcast(VT, AndN);
44236 /// If both arms of a vector select are concatenated vectors, split the select,
44237 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
44238 /// vselect Cond, (concat T0, T1), (concat F0, F1) -->
44239 /// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
44240 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
44241 const X86Subtarget &Subtarget) {
44242 unsigned Opcode = N->getOpcode();
44243 if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
44246 // TODO: Split 512-bit vectors too?
44247 EVT VT = N->getValueType(0);
44248 if (!VT.is256BitVector())
44251 // TODO: Split as long as any 2 of the 3 operands are concatenated?
44252 SDValue Cond = N->getOperand(0);
44253 SDValue TVal = N->getOperand(1);
44254 SDValue FVal = N->getOperand(2);
44255 SmallVector<SDValue, 4> CatOpsT, CatOpsF;
44256 if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
44257 !collectConcatOps(TVal.getNode(), CatOpsT, DAG) ||
44258 !collectConcatOps(FVal.getNode(), CatOpsF, DAG))
44261 auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
44262 ArrayRef<SDValue> Ops) {
44263 return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
44265 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
44266 makeBlend, /*CheckBWI*/ false);
44269 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
44270 SDValue Cond = N->getOperand(0);
44271 SDValue LHS = N->getOperand(1);
44272 SDValue RHS = N->getOperand(2);
44275 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
44276 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
44277 if (!TrueC || !FalseC)
// Don't do this for integer types that aren't legal on the target.
44281 EVT VT = N->getValueType(0);
44282 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
44285 // We're going to use the condition bit in math or logic ops. We could allow
44286 // this with a wider condition value (post-legalization it becomes an i8),
44287 // but if nothing is creating selects that late, it doesn't matter.
44288 if (Cond.getValueType() != MVT::i1)
44291 // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
44292 // 3, 5, or 9 with i32/i64, so those get transformed too.
44293 // TODO: For constants that overflow or do not differ by power-of-2 or small
44294 // multiplier, convert to 'and' + 'add'.
44295 const APInt &TrueVal = TrueC->getAPIntValue();
44296 const APInt &FalseVal = FalseC->getAPIntValue();
44298 // We have a more efficient lowering for "(X == 0) ? Y : -1" using SBB.
44299 if ((TrueVal.isAllOnes() || FalseVal.isAllOnes()) &&
44300 Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
44301 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
44302 if (CC == ISD::SETEQ || CC == ISD::SETNE)
44307 APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
44311 APInt AbsDiff = Diff.abs();
44312 if (AbsDiff.isPowerOf2() ||
44313 ((VT == MVT::i32 || VT == MVT::i64) &&
44314 (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
44316 // We need a positive multiplier constant for shift/LEA codegen. The 'not'
44317 // of the condition can usually be folded into a compare predicate, but even
44318 // without that, the sequence should be cheaper than a CMOV alternative.
44319 if (TrueVal.slt(FalseVal)) {
44320 Cond = DAG.getNOT(DL, Cond, MVT::i1);
44321 std::swap(TrueC, FalseC);
44324 // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
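// e.g. (added for illustration) select Cond, 11, 3
//      --> (zext(Cond) * 8) + 3, where the multiply lowers to a shift.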
44325 SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
44327 // Multiply condition by the difference if non-one.
44328 if (!AbsDiff.isOne())
44329 R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
44331 // Add the base if non-zero.
44332 if (!FalseC->isZero())
44333 R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
44341 /// If this is a *dynamic* select (non-constant condition) and we can match
44342 /// this node with one of the variable blend instructions, restructure the
44343 /// condition so that blends can use the high (sign) bit of each element.
44344 /// This function will also call SimplifyDemandedBits on already created
44345 /// BLENDV to perform additional simplifications.
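//
// Put differently (an illustrative note, not from the original comments):
// the variable blend instructions (e.g. PBLENDVB) test only the top bit of
// each condition element, so it suffices to get the selection decision into
// each lane's sign bit rather than materializing a full 0/-1 mask.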
44346 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
44347 TargetLowering::DAGCombinerInfo &DCI,
44348 const X86Subtarget &Subtarget) {
44349 SDValue Cond = N->getOperand(0);
44350 if ((N->getOpcode() != ISD::VSELECT &&
44351 N->getOpcode() != X86ISD::BLENDV) ||
44352 ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
44355 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44356 unsigned BitWidth = Cond.getScalarValueSizeInBits();
44357 EVT VT = N->getValueType(0);
44359 // We can only handle the cases where VSELECT is directly legal on the
44360 // subtarget. We custom lower VSELECT nodes with constant conditions and
44361 // this makes it hard to see whether a dynamic VSELECT will correctly
44362 // lower, so we both check the operation's status and explicitly handle the
44363 // cases where a *dynamic* blend will fail even though a constant-condition
44364 // blend could be custom lowered.
44365 // FIXME: We should find a better way to handle this class of problems.
44366 // Potentially, we should combine constant-condition vselect nodes
44367 // pre-legalization into shuffles and not mark as many types as custom
44369 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
44371 // FIXME: We don't support i16-element blends currently. We could and
44372 // should support them by making *all* the bits in the condition be set
44373 // rather than just the high bit and using an i8-element blend.
44374 if (VT.getVectorElementType() == MVT::i16)
44376 // Dynamic blending was only available from SSE4.1 onward.
44377 if (VT.is128BitVector() && !Subtarget.hasSSE41())
// Byte blends are only available in AVX2.
44380 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
44382 // There are no 512-bit blend instructions that use sign bits.
44383 if (VT.is512BitVector())
44386 // Don't optimize before the condition has been transformed to a legal type
44387 // and don't ever optimize vector selects that map to AVX512 mask-registers.
44388 if (BitWidth < 8 || BitWidth > 64)
44391 auto OnlyUsedAsSelectCond = [](SDValue Cond) {
44392 for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
44394 if ((UI->getOpcode() != ISD::VSELECT &&
44395 UI->getOpcode() != X86ISD::BLENDV) ||
44396 UI.getOperandNo() != 0)
44402 APInt DemandedBits(APInt::getSignMask(BitWidth));
44404 if (OnlyUsedAsSelectCond(Cond)) {
44406 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
44407 !DCI.isBeforeLegalizeOps());
44408 if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
44411 // If we changed the computation somewhere in the DAG, this change will
44412 // affect all users of Cond. Update all the nodes so that we do not use
44413 // the generic VSELECT anymore. Otherwise, we may perform wrong
44414 // optimizations as we messed with the actual expectation for the vector
44416 for (SDNode *U : Cond->uses()) {
44417 if (U->getOpcode() == X86ISD::BLENDV)
44420 SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
44421 Cond, U->getOperand(1), U->getOperand(2));
44422 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
44423 DCI.AddToWorklist(U);
44425 DCI.CommitTargetLoweringOpt(TLO);
44426 return SDValue(N, 0);
44429 // Otherwise we can still at least try to simplify multiple use bits.
44430 if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
44431 return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
44432 N->getOperand(1), N->getOperand(2));
// Try to match:
//   (or (and (M, (sub 0, X)), (pandn M, X)))
// which is a special case of:
//   (select M, (sub 0, X), X)
// Per:
// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
// We know that, if fNegate is 0 or 1:
//   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
//
// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
//   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
//   ( M      ? -X : X) == ((X ^  M     ) + (M & 1))
// This lets us transform our vselect to:
//   (add (xor X, M), (and M, 1))
// And further to:
//   (sub (xor X, M), M)
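// Concretely: when M is all-ones, (X ^ M) - M == ~X + 1 == -X, and when M is
// zero, (X ^ 0) - 0 == X, so the xor/sub pair implements the conditional
// negation without needing a blend.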
static SDValue combineLogicBlendIntoConditionalNegate(
    EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  EVT MaskVT = Mask.getValueType();
  assert(MaskVT.isInteger() &&
         DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
         "Mask must be zero/all-bits");

  if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
    return SDValue();
  if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
    return SDValue();

  auto IsNegV = [](SDNode *N, SDValue V) {
    return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
           ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
  };

  SDValue V;
  if (IsNegV(Y.getNode(), X))
    V = X;
  else if (IsNegV(X.getNode(), Y))
    V = Y;
  else
    return SDValue();

  SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
  SDValue SubOp2 = Mask;

  // If the negate was on the false side of the select, then
  // the operands of the SUB need to be swapped. PR 27251.
  // This is because the pattern being matched above is
  //   (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
  // but if the pattern matched was
  //   (vselect M, X, (sub 0, X)), that is really negation of the pattern
  // above, -(vselect M, (sub 0, X), X), and therefore the replacement
  // pattern also needs to be a negation of the replacement pattern above.
  // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
  // sub accomplishes the negation of the replacement pattern.
  if (V == Y)
    std::swap(SubOp1, SubOp2);

  SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
  return DAG.getBitcast(VT, Res);
}
/// Do target-specific dag combines on SELECT and VSELECT nodes.
static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  // Try simplification again because we use this function to optimize
  // BLENDV nodes that are not handled by the generic combiner.
  if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
    return V;

  EVT VT = LHS.getValueType();
  EVT CondVT = Cond.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());

  // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
  // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
  // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
  if (CondVT.isVector() && CondVT.isInteger() &&
      CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
      (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
      DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
    if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
                                                           DL, DAG, Subtarget))
      return V;

  // Convert vselects with constant condition into shuffles.
  if (CondConstantVector && DCI.isBeforeLegalizeOps() &&
      (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::BLENDV)) {
    SmallVector<int, 64> Mask;
    if (createShuffleMaskFromVSELECT(Mask, Cond,
                                     N->getOpcode() == X86ISD::BLENDV))
      return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
  }

  // fold vselect(cond, pshufb(x), pshufb(y)) -> or (pshufb(x), pshufb(y))
  // by forcing the unselected elements to zero.
  // TODO: Can we handle more shuffles with this?
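  // A PSHUFB control byte with its high bit set (0x80) zeroes the destination
  // lane, so rewriting each shuffle to zero its unselected lanes lets a plain
  // OR merge the two results exactly as the select would.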
  if (N->getOpcode() == ISD::VSELECT && CondVT.isVector() &&
      LHS.getOpcode() == X86ISD::PSHUFB && RHS.getOpcode() == X86ISD::PSHUFB &&
      LHS.hasOneUse() && RHS.hasOneUse()) {
    MVT SimpleVT = VT.getSimpleVT();
    SmallVector<SDValue, 1> LHSOps, RHSOps;
    SmallVector<int, 64> LHSMask, RHSMask, CondMask;
    if (createShuffleMaskFromVSELECT(CondMask, Cond) &&
        getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask) &&
        getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
      int NumElts = VT.getVectorNumElements();
      for (int i = 0; i != NumElts; ++i) {
        // getConstVector sets negative shuffle mask values as undef, so ensure
        // we hardcode SM_SentinelZero values to zero (0x80).
        if (CondMask[i] < NumElts) {
          LHSMask[i] = isUndefOrZero(LHSMask[i]) ? 0x80 : LHSMask[i];
          RHSMask[i] = 0x80;
        } else {
          LHSMask[i] = 0x80;
          RHSMask[i] = isUndefOrZero(RHSMask[i]) ? 0x80 : RHSMask[i];
        }
      }
      LHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, LHS.getOperand(0),
                        getConstVector(LHSMask, SimpleVT, DAG, DL, true));
      RHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, RHS.getOperand(0),
                        getConstVector(RHSMask, SimpleVT, DAG, DL, true));
      return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
    }
  }

  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
  // instructions match the semantics of the common C idiom x<y?x:y but not
  // x<=y?x:y, because of how they handle negative zero (which can be
  // ignored in unsafe-math mode).
  // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
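  // MINPS/MAXPS always return the second source operand when the inputs are
  // unordered (NaN) or form a +0.0/-0.0 pair, so the switch below must either
  // prove those cases cannot occur, swap the operands, or give up.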
  if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
      VT != MVT::f80 && VT != MVT::f128 && !isSoftFP16(VT, Subtarget) &&
      (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
      (Subtarget.hasSSE2() ||
       (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    unsigned Opcode = 0;
    // Check for x CC y ? x : y.
    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
      switch (CC) {
      default: break;
      case ISD::SETULT:
        // Converting this to a min would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
              !(DAG.isKnownNeverZeroFloat(LHS) ||
                DAG.isKnownNeverZeroFloat(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETOLE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETULE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        [[fallthrough]];
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETOGE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGT:
        // Converting this to a max would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
              !(DAG.isKnownNeverZeroFloat(LHS) ||
                DAG.isKnownNeverZeroFloat(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        [[fallthrough]];
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMAX;
        break;
      }
    // Check for x CC y ? y : x -- a min/max with reversed arms.
    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
      switch (CC) {
      default: break;
      case ISD::SETOGE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
            !(DAG.isKnownNeverZeroFloat(LHS) ||
              DAG.isKnownNeverZeroFloat(RHS))) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGT:
        // Converting this to a min would handle NaNs incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        [[fallthrough]];
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETULT:
        // Converting this to a max would handle NaNs incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETOLE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) &&
            !DAG.isKnownNeverZeroFloat(RHS)) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETULE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        [[fallthrough]];
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMAX;
        break;
      }
    }

    if (Opcode)
      return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
  }
  // Some mask scalar intrinsics rely on checking if only one bit is set
  // and implement it in C code like this:
  //   A[0] = (U & 1) ? A[0] : W[0];
  // This creates some redundant instructions that break pattern matching.
  // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
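  // With AVX512 the "(U & 1)" here typically ends up as the k-register mask
  // of a masked scalar operation, which is presumably why this fold is gated
  // on hasAVX512() and f32/f64 types below.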
  if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
      Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    SDValue AndNode = Cond.getOperand(0);
    if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
        isNullConstant(Cond.getOperand(1)) &&
        isOneConstant(AndNode.getOperand(1))) {
      // LHS and RHS swapped due to
      // setcc outputting 1 when AND resulted in 0 and vice versa.
      AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
      return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
    }
  }

  // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
  // lowering on KNL. In this case we convert it to
  // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
  // The same situation holds for all vectors of i8 and i16 without BWI.
  // Make sure we extend these even before type legalization gets a chance to
  // split wide vectors.
  // Since SKX these selects have a proper lowering.
  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1 &&
      (VT.getVectorElementType() == MVT::i8 ||
       VT.getVectorElementType() == MVT::i16)) {
    Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
    return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
  }
  // AVX512 - Extend select with zero to merge with target shuffle.
  // select(mask, extract_subvector(shuffle(x)), zero) -->
  // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
  // TODO - support non target shuffles as well.
  if (Subtarget.hasAVX512() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1) {
    auto SelectableOp = [&TLI](SDValue Op) {
      return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
             isTargetShuffle(Op.getOperand(0).getOpcode()) &&
             isNullConstant(Op.getOperand(1)) &&
             TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
             Op.hasOneUse() && Op.getOperand(0).hasOneUse();
    };

    bool SelectableLHS = SelectableOp(LHS);
    bool SelectableRHS = SelectableOp(RHS);
    bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
    bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());

    if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
      EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
                                : RHS.getOperand(0).getValueType();
      EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
      LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
                            VT.getSizeInBits());
      RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
                            VT.getSizeInBits());
      Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
                         DAG.getUNDEF(SrcCondVT), Cond,
                         DAG.getIntPtrConstant(0, DL));
      SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
      return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
    }
  }
  if (SDValue V = combineSelectOfTwoConstants(N, DAG))
    return V;

  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
      Cond.hasOneUse()) {
    EVT CondVT = Cond.getValueType();
    SDValue Cond0 = Cond.getOperand(0);
    SDValue Cond1 = Cond.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    // Canonicalize min/max:
    // (x > 0) ? x : 0 -> (x >= 0) ? x : 0
    // (x < -1) ? x : -1 -> (x <= -1) ? x : -1
    // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
    // the need for an extra compare against zero. e.g.
    // (a - b) > 0 ? (a - b) : 0 -> (a - b) >= 0 ? (a - b) : 0
    //
    // subl   %esi, %edi
    // testl  %edi, %edi
    // movl   $0, %eax
    // cmovgl %edi, %eax
    // =>
    // xorl   %eax, %eax
    // subl   %esi, %edi
    // cmovsl %eax, %edi
    //
    // We can also canonicalize
    //  (x s> 1) ? x : 1 -> (x s>= 1) ? x : 1 -> (x s> 0) ? x : 1
    //  (x u> 1) ? x : 1 -> (x u>= 1) ? x : 1 -> (x != 0) ? x : 1
    // This allows the use of a test instruction for the compare.
    if (LHS == Cond0 && RHS == Cond1) {
      if ((CC == ISD::SETGT && (isNullConstant(RHS) || isOneConstant(RHS))) ||
          (CC == ISD::SETLT && isAllOnesConstant(RHS))) {
        ISD::CondCode NewCC = CC == ISD::SETGT ? ISD::SETGE : ISD::SETLE;
        Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
        return DAG.getSelect(DL, VT, Cond, LHS, RHS);
      }
      if (CC == ISD::SETUGT && isOneConstant(RHS)) {
        ISD::CondCode NewCC = ISD::SETUGE;
        Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
        return DAG.getSelect(DL, VT, Cond, LHS, RHS);
      }
    }
    // Similar to DAGCombine's select(or(CC0,CC1),X,Y) fold but for legal types.
    // fold eq + gt/lt nested selects into ge/le selects
    // select (cmpeq Cond0, Cond1), LHS, (select (cmpugt Cond0, Cond1), LHS, Y)
    // --> (select (cmpuge Cond0, Cond1), LHS, Y)
    // select (cmpslt Cond0, Cond1), LHS, (select (cmpeq Cond0, Cond1), LHS, Y)
    // --> (select (cmpsle Cond0, Cond1), LHS, Y)
    if (RHS.getOpcode() == ISD::SELECT && RHS.getOperand(1) == LHS &&
        RHS.getOperand(0).getOpcode() == ISD::SETCC) {
      SDValue InnerSetCC = RHS.getOperand(0);
      ISD::CondCode InnerCC =
          cast<CondCodeSDNode>(InnerSetCC.getOperand(2))->get();
      if ((CC == ISD::SETEQ || InnerCC == ISD::SETEQ) &&
          Cond0 == InnerSetCC.getOperand(0) &&
          Cond1 == InnerSetCC.getOperand(1)) {
        ISD::CondCode NewCC;
        switch (CC == ISD::SETEQ ? InnerCC : CC) {
        case ISD::SETGT:  NewCC = ISD::SETGE; break;
        case ISD::SETLT:  NewCC = ISD::SETLE; break;
        case ISD::SETUGT: NewCC = ISD::SETUGE; break;
        case ISD::SETULT: NewCC = ISD::SETULE; break;
        default: NewCC = ISD::SETCC_INVALID; break;
        }
        if (NewCC != ISD::SETCC_INVALID) {
          Cond = DAG.getSetCC(DL, CondVT, Cond0, Cond1, NewCC);
          return DAG.getSelect(DL, VT, Cond, LHS, RHS.getOperand(2));
        }
      }
    }
  }
  // Check if the first operand is all zeros and Cond type is vXi1.
  // If this is an avx512 target we can improve the use of zero masking by
  // swapping the operands and inverting the condition.
  if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
      Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
      ISD::isBuildVectorAllZeros(LHS.getNode()) &&
      !ISD::isBuildVectorAllZeros(RHS.getNode())) {
    // Invert the cond to not(cond) : xor(op,allones)=not(op)
    SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
    // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
    return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
  }

  // Attempt to convert a (vXi1 bitcast(iX Cond)) selection mask before it might
  // get split by legalization.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::BITCAST &&
      CondVT.getVectorElementType() == MVT::i1 && Cond.hasOneUse() &&
      TLI.isTypeLegal(VT.getScalarType())) {
    EVT ExtCondVT = VT.changeVectorElementTypeToInteger();
    if (SDValue ExtCond = combineToExtendBoolVectorInReg(
            ISD::SIGN_EXTEND, DL, ExtCondVT, Cond, DAG, DCI, Subtarget)) {
      ExtCond = DAG.getNode(ISD::TRUNCATE, DL, CondVT, ExtCond);
      return DAG.getSelect(DL, VT, ExtCond, LHS, RHS);
    }
  }
  // Early exit check
  if (!TLI.isTypeLegal(VT) || isSoftFP16(VT, Subtarget))
    return SDValue();

  if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
    return V;

  // select(~Cond, X, Y) -> select(Cond, Y, X)
  if (CondVT.getScalarType() != MVT::i1) {
    if (SDValue CondNot = IsNOT(Cond, DAG))
      return DAG.getNode(N->getOpcode(), DL, VT,
                         DAG.getBitcast(CondVT, CondNot), RHS, LHS);

    if (Cond.getOpcode() == X86ISD::PCMPGT && Cond.hasOneUse()) {
      // pcmpgt(X, -1) -> pcmpgt(0, X) to help select/blendv just use the
      // signbit.
      if (ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode())) {
        Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
                           DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
        return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
      }

      // smin(LHS, RHS) : select(pcmpgt(RHS, LHS), LHS, RHS)
      //               -> select(pcmpgt(LHS, RHS), RHS, LHS)
      // iff the commuted pcmpgt() already exists.
      // TODO: Could DAGCombiner::combine cse search for SETCC nodes, like it
      // does for commutative binops?
      if (Cond.getOperand(0) == RHS && Cond.getOperand(1) == LHS) {
        if (SDNode *FlipCond =
                DAG.getNodeIfExists(X86ISD::PCMPGT, DAG.getVTList(CondVT),
                                    {Cond.getOperand(1), Cond.getOperand(0)})) {
          return DAG.getNode(N->getOpcode(), DL, VT, SDValue(FlipCond, 0), RHS,
                             LHS);
        }
      }
    }
  }
  // Try to optimize vXi1 selects if both operands are either all constants or
  // bitcasts from scalar integer type. In that case we can convert the operands
  // to integer and use an integer select which will be converted to a CMOV.
  // We need to take a little bit of care to avoid creating an i64 type after
  // type legalization.
  if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1 &&
      (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
    bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
    bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());

    if ((LHSIsConst ||
         (LHS.getOpcode() == ISD::BITCAST &&
          LHS.getOperand(0).getValueType() == IntVT)) &&
        (RHSIsConst ||
         (RHS.getOpcode() == ISD::BITCAST &&
          RHS.getOperand(0).getValueType() == IntVT))) {
      if (LHSIsConst)
        LHS = combinevXi1ConstantToInteger(LHS, DAG);
      else
        LHS = LHS.getOperand(0);

      if (RHSIsConst)
        RHS = combinevXi1ConstantToInteger(RHS, DAG);
      else
        RHS = RHS.getOperand(0);

      SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
      return DAG.getBitcast(VT, Select);
    }
  }
  // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
  // single bits, then invert the predicate and swap the select operands.
  // This can lower using a vector shift bit-hack rather than mask and compare.
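  // e.g. for v4i32 with C[i] == (1 << b), shifting element i left by (31 - b)
  // moves the tested bit into the sign bit, so the select can key on
  // "(X << C') < 0" and a sign-bit blend instead of an AND plus compare.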
  if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
      N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
      Cond.getOperand(0).getOpcode() == ISD::AND &&
      isNullOrNullSplat(Cond.getOperand(1)) &&
      cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
      Cond.getOperand(0).getValueType() == VT) {
    // The 'and' mask must be composed of power-of-2 constants.
    SDValue And = Cond.getOperand(0);
    auto *C = isConstOrConstSplat(And.getOperand(1));
    if (C && C->getAPIntValue().isPowerOf2()) {
      // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
      SDValue NotCond =
          DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
      return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
    }

    // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
    // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
    // 16-bit lacks a proper blendv.
    unsigned EltBitWidth = VT.getScalarSizeInBits();
    bool CanShiftBlend =
        TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
                                (Subtarget.hasAVX2() && EltBitWidth == 64) ||
                                (Subtarget.hasXOP()));
    if (CanShiftBlend &&
        ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
          return C->getAPIntValue().isPowerOf2();
        })) {
      // Create a left-shift constant to get the mask bits over to the sign-bit.
      SDValue Mask = And.getOperand(1);
      SmallVector<int, 32> ShlVals;
      for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
        auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
        ShlVals.push_back(EltBitWidth - 1 -
                          MaskVal->getAPIntValue().exactLogBase2());
      }

      // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
      SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
      SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
      SDValue NewCond =
          DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
      return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
    }
  }

  return SDValue();
}
/// Combine:
///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
/// to:
///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
/// Note that this is only legal for some op/cc combinations.
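/// For example, "if (atomic_fetch_add(&x, 1) < 0)" tests the sign of the old
/// value; absent overflow that is the same as "new value <= 0", so the branch
/// can consume the flags of the LOCK ADD directly instead of re-comparing.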
static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  // This combine only operates on CMP-like nodes.
  if (!(Cmp.getOpcode() == X86ISD::CMP ||
        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
    return SDValue();

  // Can't replace the cmp if it has more uses than the one we're looking at.
  // FIXME: We would like to be able to handle this, but would need to make sure
  // all uses were updated.
  if (!Cmp.hasOneUse())
    return SDValue();

  // This only applies to variations of the common case:
  //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
  //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
  //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
  //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
  // Using the proper condcodes (see below), overflow is checked for.
  //
  // FIXME: We can generalize both constraints:
  // - XOR/OR/AND (if they were made to survive AtomicExpand)
  // - LHS != 1
  // if the result is compared.

  SDValue CmpLHS = Cmp.getOperand(0);
  SDValue CmpRHS = Cmp.getOperand(1);
  EVT CmpVT = CmpLHS.getValueType();

  if (!CmpLHS.hasOneUse())
    return SDValue();

  unsigned Opc = CmpLHS.getOpcode();
  if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
    return SDValue();

  SDValue OpRHS = CmpLHS.getOperand(2);
  auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
  if (!OpRHSC)
    return SDValue();

  APInt Addend = OpRHSC->getAPIntValue();
  if (Opc == ISD::ATOMIC_LOAD_SUB)
    Addend = -Addend;

  auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
  if (!CmpRHSC)
    return SDValue();

  APInt Comparison = CmpRHSC->getAPIntValue();
  APInt NegAddend = -Addend;

  // See if we can adjust the CC to make the comparison match the negated
  // addend.
  if (Comparison != NegAddend) {
    APInt IncComparison = Comparison + 1;
    if (IncComparison == NegAddend) {
      if (CC == X86::COND_A && !Comparison.isMaxValue()) {
        Comparison = IncComparison;
        CC = X86::COND_AE;
      } else if (CC == X86::COND_LE && !Comparison.isMaxSignedValue()) {
        Comparison = IncComparison;
        CC = X86::COND_L;
      }
    }
    APInt DecComparison = Comparison - 1;
    if (DecComparison == NegAddend) {
      if (CC == X86::COND_AE && !Comparison.isMinValue()) {
        Comparison = DecComparison;
        CC = X86::COND_A;
      } else if (CC == X86::COND_L && !Comparison.isMinSignedValue()) {
        Comparison = DecComparison;
        CC = X86::COND_LE;
      }
    }
  }

  // If the addend is the negation of the comparison value, then we can do
  // a full comparison by emitting the atomic arithmetic as a locked sub.
  if (Comparison == NegAddend) {
    // The CC is fine, but we need to rewrite the LHS of the comparison as an
    // atomic sub.
    auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
    auto AtomicSub = DAG.getAtomic(
        ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpVT,
        /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
        /*RHS*/ DAG.getConstant(NegAddend, SDLoc(CmpRHS), CmpVT),
        AN->getMemOperand());
    auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
    return LockOp;
  }

  // We can handle comparisons with zero in a number of cases by manipulating
  // the CC used.
  if (!Comparison.isZero())
    return SDValue();

  if (CC == X86::COND_S && Addend == 1)
    CC = X86::COND_LE;
  else if (CC == X86::COND_NS && Addend == 1)
    CC = X86::COND_G;
  else if (CC == X86::COND_G && Addend == -1)
    CC = X86::COND_GE;
  else if (CC == X86::COND_LE && Addend == -1)
    CC = X86::COND_L;
  else
    return SDValue();

  SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
  return LockOp;
}
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
  // This combine only operates on CMP-like nodes.
  if (!(Cmp.getOpcode() == X86ISD::CMP ||
        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
    return SDValue();

  // Quit if not used as a boolean value.
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  // Check CMP operands. One of them should be 0 or 1 and the other should be
  // an SetCC or extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = nullptr;
  bool needOppositeCond = (CC == X86::COND_E);
  bool checkAgainstTrue = false; // Is it a comparison against 1?

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if all operands are not constants.
    return SDValue();

  if (C->getZExtValue() == 1) {
    needOppositeCond = !needOppositeCond;
    checkAgainstTrue = true;
  } else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();

  bool truncatedToBoolWithAnd = false;
  // Skip (zext $x), (trunc $x), or (and $x, 1) node.
  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
         SetCC.getOpcode() == ISD::TRUNCATE ||
         SetCC.getOpcode() == ISD::AND) {
    if (SetCC.getOpcode() == ISD::AND) {
      int OpIdx = -1;
      if (isOneConstant(SetCC.getOperand(0)))
        OpIdx = 1;
      if (isOneConstant(SetCC.getOperand(1)))
        OpIdx = 0;
      if (OpIdx < 0)
        break;
      SetCC = SetCC.getOperand(OpIdx);
      truncatedToBoolWithAnd = true;
    } else
      SetCC = SetCC.getOperand(0);
  }

  switch (SetCC.getOpcode()) {
  case X86ISD::SETCC_CARRY:
    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
    // truncated to i1 using 'and'.
    if (checkAgainstTrue && !truncatedToBoolWithAnd)
      break;
    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
           "Invalid use of SETCC_CARRY!");
    [[fallthrough]];
  case X86ISD::SETCC:
    // Set the condition code or opposite one if necessary.
    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(1);
  case X86ISD::CMOV: {
    // Check whether false/true value has canonical one, i.e. 0 or 1.
    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
    // Quit if true value is not a constant.
    if (!TVal)
      return SDValue();
    // Quit if false value is not a constant.
    if (!FVal) {
      SDValue Op = SetCC.getOperand(0);
      // Skip 'zext' or 'trunc' node.
      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
          Op.getOpcode() == ISD::TRUNCATE)
        Op = Op.getOperand(0);
      // A special case for rdrand/rdseed, where 0 is set if false cond is
      // found.
      if ((Op.getOpcode() != X86ISD::RDRAND &&
           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
        return SDValue();
    }
    // Quit if false value is not the constant 0 or 1.
    bool FValIsFalse = true;
    if (FVal && FVal->getZExtValue() != 0) {
      if (FVal->getZExtValue() != 1)
        return SDValue();
      // If FVal is 1, opposite cond is needed.
      needOppositeCond = !needOppositeCond;
      FValIsFalse = false;
    }
    // Quit if TVal is not the constant opposite of FVal.
    if (FValIsFalse && TVal->getZExtValue() != 1)
      return SDValue();
    if (!FValIsFalse && TVal->getZExtValue() != 0)
      return SDValue();
    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(3);
  }
  }

  return SDValue();
}
/// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
/// Match:
///   (X86or (X86setcc) (X86setcc))
///   (X86cmp (and (X86setcc) (X86setcc)), 0)
static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
                                           X86::CondCode &CC1, SDValue &Flags,
                                           bool &isAnd) {
  if (Cond->getOpcode() == X86ISD::CMP) {
    if (!isNullConstant(Cond->getOperand(1)))
      return false;

    Cond = Cond->getOperand(0);
  }

  isAnd = false;

  SDValue SetCC0, SetCC1;
  switch (Cond->getOpcode()) {
  default: return false;
  case ISD::AND:
  case X86ISD::AND:
    isAnd = true;
    [[fallthrough]];
  case ISD::OR:
  case X86ISD::OR:
    SetCC0 = Cond->getOperand(0);
    SetCC1 = Cond->getOperand(1);
    break;
  }

  // Make sure we have SETCC nodes, using the same flags value.
  if (SetCC0.getOpcode() != X86ISD::SETCC ||
      SetCC1.getOpcode() != X86ISD::SETCC ||
      SetCC0->getOperand(1) != SetCC1->getOperand(1))
    return false;

  CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
  CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
  Flags = SetCC0->getOperand(1);
  return true;
}
// When legalizing carry, we create carries via add X, -1.
// If that comes from an actual carry, via setcc, we use the
// carry directly.
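// (Adding -1 to a value known to be 0 or 1 sets CF exactly when the value is
// 1, which is how the carry is rematerialized here.)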
static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
  if (EFLAGS.getOpcode() == X86ISD::ADD) {
    if (isAllOnesConstant(EFLAGS.getOperand(1))) {
      bool FoundAndLSB = false;
      SDValue Carry = EFLAGS.getOperand(0);
      while (Carry.getOpcode() == ISD::TRUNCATE ||
             Carry.getOpcode() == ISD::ZERO_EXTEND ||
             (Carry.getOpcode() == ISD::AND &&
              isOneConstant(Carry.getOperand(1)))) {
        FoundAndLSB |= Carry.getOpcode() == ISD::AND;
        Carry = Carry.getOperand(0);
      }
      if (Carry.getOpcode() == X86ISD::SETCC ||
          Carry.getOpcode() == X86ISD::SETCC_CARRY) {
        // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
        uint64_t CarryCC = Carry.getConstantOperandVal(0);
        SDValue CarryOp1 = Carry.getOperand(1);
        if (CarryCC == X86::COND_B)
          return CarryOp1;
        if (CarryCC == X86::COND_A) {
          // Try to convert COND_A into COND_B in an attempt to facilitate
          // materializing "setb reg".
          //
          // Do not flip "e > c", where "c" is a constant, because Cmp
          // instruction cannot take an immediate as its first operand.
          //
          if (CarryOp1.getOpcode() == X86ISD::SUB &&
              CarryOp1.getNode()->hasOneUse() &&
              CarryOp1.getValueType().isInteger() &&
              !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
            SDValue SubCommute =
                DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
                            CarryOp1.getOperand(1), CarryOp1.getOperand(0));
            return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
          }
        }
        // If this is a check of the z flag of an add with 1, switch to the
        // C flag.
        if (CarryCC == X86::COND_E &&
            CarryOp1.getOpcode() == X86ISD::ADD &&
            isOneConstant(CarryOp1.getOperand(1)))
          return CarryOp1;
      } else if (FoundAndLSB) {
        SDLoc DL(Carry);
        SDValue BitNo = DAG.getConstant(0, DL, Carry.getValueType());
        if (Carry.getOpcode() == ISD::SRL) {
          BitNo = Carry.getOperand(1);
          Carry = Carry.getOperand(0);
        }
        return getBT(Carry, BitNo, DL, DAG);
      }
    }
  }

  return SDValue();
}
/// If we are inverting a PTEST/TESTP operand, attempt to adjust the CC
/// to avoid the inversion.
static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
                              SelectionDAG &DAG,
                              const X86Subtarget &Subtarget) {
  // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
  if (EFLAGS.getOpcode() != X86ISD::PTEST &&
      EFLAGS.getOpcode() != X86ISD::TESTP)
    return SDValue();

  // PTEST/TESTP sets EFLAGS as:
  // TESTZ: ZF = (Op0 & Op1) == 0
  // TESTC: CF = (~Op0 & Op1) == 0
  // TESTNZC: ZF == 0 && CF == 0
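  // Because ZF tests (Op0 & Op1) while CF tests (~Op0 & Op1), replacing Op0
  // with its inverse simply exchanges the roles of ZF and CF; the CC remapping
  // below exploits exactly that symmetry.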
  EVT VT = EFLAGS.getValueType();
  SDValue Op0 = EFLAGS.getOperand(0);
  SDValue Op1 = EFLAGS.getOperand(1);
  EVT OpVT = Op0.getValueType();

  // TEST*(~X,Y) == TEST*(X,Y)
  if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
    X86::CondCode InvCC;
    switch (CC) {
    case X86::COND_B:
      // testc -> testz.
      InvCC = X86::COND_E;
      break;
    case X86::COND_AE:
      // !testc -> !testz.
      InvCC = X86::COND_NE;
      break;
    case X86::COND_E:
      // testz -> testc.
      InvCC = X86::COND_B;
      break;
    case X86::COND_NE:
      // !testz -> !testc.
      InvCC = X86::COND_AE;
      break;
    case X86::COND_A:
    case X86::COND_BE:
      // testnzc -> testnzc (no change).
      InvCC = CC;
      break;
    default:
      InvCC = X86::COND_INVALID;
      break;
    }

    if (InvCC != X86::COND_INVALID) {
      CC = InvCC;
      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                         DAG.getBitcast(OpVT, NotOp0), Op1);
    }
  }

  if (CC == X86::COND_E || CC == X86::COND_NE) {
    // TESTZ(X,~Y) == TESTC(Y,X)
    if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
      CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                         DAG.getBitcast(OpVT, NotOp1), Op0);
    }

    if (Op0 == Op1) {
      SDValue BC = peekThroughBitcasts(Op0);
      EVT BCVT = BC.getValueType();
      assert(BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
             "Unexpected vector type");

      // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
      if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
        return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                           DAG.getBitcast(OpVT, BC.getOperand(0)),
                           DAG.getBitcast(OpVT, BC.getOperand(1)));
      }

      // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
      if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
        CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
        return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                           DAG.getBitcast(OpVT, BC.getOperand(0)),
                           DAG.getBitcast(OpVT, BC.getOperand(1)));
      }
      // If every element is an all-sign value, see if we can use MOVMSK to
      // more efficiently extract the sign bits and compare that.
      // TODO: Handle TESTC with comparison inversion.
      // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
      // MOVMSK combines to make sure it's never worse than PTEST?
      unsigned EltBits = BCVT.getScalarSizeInBits();
      if (DAG.ComputeNumSignBits(BC) == EltBits) {
        assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
        APInt SignMask = APInt::getSignMask(EltBits);
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (SDValue Res =
                TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
          // For vXi16 cases we need to use pmovmskb and extract every other
          // legal byte.
          SDLoc DL(EFLAGS);
          if (EltBits == 16) {
            MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
            Res = DAG.getBitcast(MovmskVT, Res);
            Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
            Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
                              DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
          } else {
            Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
          }
          return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
                             DAG.getConstant(0, DL, MVT::i32));
        }
      }
    }
    // TESTZ(-1,X) == TESTZ(X,X)
    if (ISD::isBuildVectorAllOnes(Op0.getNode()))
      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);

    // TESTZ(X,-1) == TESTZ(X,X)
    if (ISD::isBuildVectorAllOnes(Op1.getNode()))
      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);

    // TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
    // TODO: Add COND_NE handling?
    if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
      SDValue Src0 = peekThroughBitcasts(Op0);
      SDValue Src1 = peekThroughBitcasts(Op1);
      if (Src0.getOpcode() == ISD::OR && Src1.getOpcode() == ISD::OR) {
        Src0 = getSplitVectorSrc(peekThroughBitcasts(Src0.getOperand(0)),
                                 peekThroughBitcasts(Src0.getOperand(1)), true);
        Src1 = getSplitVectorSrc(peekThroughBitcasts(Src1.getOperand(0)),
                                 peekThroughBitcasts(Src1.getOperand(1)), true);
        if (Src0 && Src1)
          return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                             DAG.getBitcast(MVT::v4i64, Src0),
                             DAG.getBitcast(MVT::v4i64, Src1));
      }
    }
  }

  return SDValue();
}
// Attempt to simplify the MOVMSK input based on the comparison type.
static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
                                  SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  // Handle eq/ne against zero (any_of).
  // Handle eq/ne against -1 (all_of).
  if (!(CC == X86::COND_E || CC == X86::COND_NE))
    return SDValue();
  if (EFLAGS.getValueType() != MVT::i32)
    return SDValue();
  unsigned CmpOpcode = EFLAGS.getOpcode();
  if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
    return SDValue();
  auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
  if (!CmpConstant)
    return SDValue();
  const APInt &CmpVal = CmpConstant->getAPIntValue();

  SDValue CmpOp = EFLAGS.getOperand(0);
  unsigned CmpBits = CmpOp.getValueSizeInBits();
  assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");

  // Peek through any truncate.
  if (CmpOp.getOpcode() == ISD::TRUNCATE)
    CmpOp = CmpOp.getOperand(0);

  // Bail if we don't find a MOVMSK.
  if (CmpOp.getOpcode() != X86ISD::MOVMSK)
    return SDValue();

  SDValue Vec = CmpOp.getOperand(0);
  MVT VecVT = Vec.getSimpleValueType();
  assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
         "Unexpected MOVMSK operand");
  unsigned NumElts = VecVT.getVectorNumElements();
  unsigned NumEltBits = VecVT.getScalarSizeInBits();

  bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isZero();
  bool IsAllOf = (CmpOpcode == X86ISD::SUB || CmpOpcode == X86ISD::CMP) &&
                 NumElts <= CmpBits && CmpVal.isMask(NumElts);
  if (!IsAnyOf && !IsAllOf)
    return SDValue();
  // TODO: Check more combining cases.
  // Here we check the cmp use count to decide whether to do the combine.
  // Currently only the "MOVMSK(CONCAT(..))" and "MOVMSK(PCMPEQ(..))" combines
  // below rely on this one-use constraint.
  bool IsOneUse = CmpOp.getNode()->hasOneUse();

  // See if we can peek through to a vector with a wider element type, if the
  // signbits extend down to all the sub-elements as well.
  // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
  // potential SimplifyDemandedBits/Elts cases.
  // If we looked through a truncate that discarded bits, we can't do this
  // transform.
  // FIXME: We could do this transform for truncates that discarded bits by
  // inserting an AND mask between the new MOVMSK and the CMP.
  if (Vec.getOpcode() == ISD::BITCAST && NumElts <= CmpBits) {
    SDValue BC = peekThroughBitcasts(Vec);
    MVT BCVT = BC.getSimpleValueType();
    unsigned BCNumElts = BCVT.getVectorNumElements();
    unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
    if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
        BCNumEltBits > NumEltBits &&
        DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
      SDLoc DL(EFLAGS);
      APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : BCNumElts);
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                         DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
                         DAG.getConstant(CmpMask, DL, MVT::i32));
    }
  }
  // MOVMSK(CONCAT(X,Y)) == 0  -> MOVMSK(OR(X,Y)).
  // MOVMSK(CONCAT(X,Y)) != 0  -> MOVMSK(OR(X,Y)).
  // MOVMSK(CONCAT(X,Y)) == -1 -> MOVMSK(AND(X,Y)).
  // MOVMSK(CONCAT(X,Y)) != -1 -> MOVMSK(AND(X,Y)).
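  // i.e. any_of over a concatenation equals any_of of the OR of the halves,
  // and all_of equals all_of of the AND, so the vector width can be halved
  // before taking the MOVMSK.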
  if (VecVT.is256BitVector() && NumElts <= CmpBits && IsOneUse) {
    SmallVector<SDValue> Ops;
    if (collectConcatOps(peekThroughBitcasts(Vec).getNode(), Ops, DAG) &&
        Ops.size() == 2) {
      SDLoc DL(EFLAGS);
      EVT SubVT = Ops[0].getValueType().changeTypeToInteger();
      APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
      SDValue V = DAG.getNode(IsAnyOf ? ISD::OR : ISD::AND, DL, SubVT,
                              DAG.getBitcast(SubVT, Ops[0]),
                              DAG.getBitcast(SubVT, Ops[1]));
      V = DAG.getBitcast(VecVT.getHalfNumVectorElementsVT(), V);
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                         DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V),
                         DAG.getConstant(CmpMask, DL, MVT::i32));
    }
  }
  // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
  // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
  // MOVMSK(PCMPEQ(X,Y)) == -1 -> PTESTZ(SUB(X,Y),SUB(X,Y)).
  // MOVMSK(PCMPEQ(X,Y)) != -1 -> !PTESTZ(SUB(X,Y),SUB(X,Y)).
  if (IsAllOf && Subtarget.hasSSE41() && IsOneUse) {
    MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
    SDValue BC = peekThroughBitcasts(Vec);
    // Ensure MOVMSK was testing every signbit of BC.
    if (BC.getValueType().getVectorNumElements() <= NumElts) {
      if (BC.getOpcode() == X86ISD::PCMPEQ) {
        SDValue V = DAG.getNode(ISD::SUB, SDLoc(BC), BC.getValueType(),
                                BC.getOperand(0), BC.getOperand(1));
        V = DAG.getBitcast(TestVT, V);
        return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
      }

      // Check for 256-bit split vector cases.
      if (BC.getOpcode() == ISD::AND &&
          BC.getOperand(0).getOpcode() == X86ISD::PCMPEQ &&
          BC.getOperand(1).getOpcode() == X86ISD::PCMPEQ) {
        SDValue LHS = BC.getOperand(0);
        SDValue RHS = BC.getOperand(1);
        LHS = DAG.getNode(ISD::SUB, SDLoc(LHS), LHS.getValueType(),
                          LHS.getOperand(0), LHS.getOperand(1));
        RHS = DAG.getNode(ISD::SUB, SDLoc(RHS), RHS.getValueType(),
                          RHS.getOperand(0), RHS.getOperand(1));
        LHS = DAG.getBitcast(TestVT, LHS);
        RHS = DAG.getBitcast(TestVT, RHS);
        SDValue V = DAG.getNode(ISD::OR, SDLoc(EFLAGS), TestVT, LHS, RHS);
        return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
      }
    }
  }
  // See if we can avoid a PACKSS by calling MOVMSK on the sources.
  // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
  // sign bits prior to the comparison with zero unless we know that
  // the vXi16 splats the sign bit down to the lower i8 half.
  // TODO: Handle all_of patterns.
  if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
    SDValue VecOp0 = Vec.getOperand(0);
    SDValue VecOp1 = Vec.getOperand(1);
    bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
    bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
    // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
    if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
      SDLoc DL(EFLAGS);
      SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
      Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
      Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
      if (!SignExt0) {
        Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
                             DAG.getConstant(0xAAAA, DL, MVT::i16));
      }
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
                         DAG.getConstant(0, DL, MVT::i16));
    }

    // PMOVMSKB(PACKSSBW(LO(X), HI(X)))
    // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
    if (CmpBits >= 16 && Subtarget.hasInt256() &&
        (IsAnyOf || (SignExt0 && SignExt1))) {
      if (SDValue Src = getSplitVectorSrc(VecOp0, VecOp1, true)) {
        SDLoc DL(EFLAGS);
        SDValue Result = peekThroughBitcasts(Src);
        if (IsAllOf && Result.getOpcode() == X86ISD::PCMPEQ &&
            Result.getValueType().getVectorNumElements() <= NumElts) {
          SDValue V = DAG.getNode(ISD::SUB, DL, Result.getValueType(),
                                  Result.getOperand(0), Result.getOperand(1));
          V = DAG.getBitcast(MVT::v4i64, V);
          return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
        }
        Result = DAG.getBitcast(MVT::v32i8, Result);
        Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
        unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
        if (!SignExt0 || !SignExt1) {
          assert(IsAnyOf &&
                 "Only perform v16i16 signmasks for any_of patterns");
          Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
                               DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
        }
        return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
                           DAG.getConstant(CmpMask, DL, MVT::i32));
      }
    }
  }
  // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
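  // A unary shuffle only permutes (or repeats) lanes, so as long as every
  // source element appears at least once, the any_of/all_of result over the
  // shuffle equals the result over the original source vector.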
  SmallVector<int, 32> ShuffleMask;
  SmallVector<SDValue, 2> ShuffleInputs;
  if (NumElts <= CmpBits &&
      getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
                             ShuffleMask, DAG) &&
      ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
      ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits()) {
    unsigned NumShuffleElts = ShuffleMask.size();
    APInt DemandedElts = APInt::getZero(NumShuffleElts);
    for (int M : ShuffleMask) {
      assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
      DemandedElts.setBit(M);
    }
    if (DemandedElts.isAllOnes()) {
      SDLoc DL(EFLAGS);
      SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
      Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
      Result =
          DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
                         EFLAGS.getOperand(1));
    }
  }

  return SDValue();
}
/// Optimize an EFLAGS definition used according to the condition code \p CC
/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
/// uses of chain values.
static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
                                  SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  if (CC == X86::COND_B)
    if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
      return Flags;

  if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
    return R;

  if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
    return R;

  if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
    return R;

  return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
}
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  SDLoc DL(N);

  SDValue FalseOp = N->getOperand(0);
  SDValue TrueOp = N->getOperand(1);
  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
  SDValue Cond = N->getOperand(3);

  // cmov X, X, ?, ? --> X
  if (TrueOp == FalseOp)
    return TrueOp;

  // Try to simplify the EFLAGS and condition code operands.
  // We can't always do this as FCMOV only supports a subset of X86 cond.
  if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
    if (!(FalseOp.getValueType() == MVT::f80 ||
          (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
          (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
        !Subtarget.canUseCMOV() || hasFPCMov(CC)) {
      SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
                       Flags};
      return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
    }
  }
  // If this is a select between two integer constants, try to do some
  // optimizations. Note that the operands are ordered the opposite of SELECT
  // operands.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
      // larger than FalseC (the false value).
      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueC, FalseC);
        std::swap(TrueOp, FalseOp);
      }

      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
      // This is efficient for any integer data type (including i8/i16) and
      // shift amount.
      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
        Cond = getSETCC(CC, Cond, DL, DAG);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);

        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
                           DAG.getConstant(ShAmt, DL, MVT::i8));
        return Cond;
      }

      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
      // for any integer data type, including i8/i16.
      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
        Cond = getSETCC(CC, Cond, DL, DAG);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                           FalseC->getValueType(0), Cond);
        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                           SDValue(FalseC, 0));
        return Cond;
      }

      // Optimize cases that will turn into an LEA instruction. This requires
      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
        APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
        assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
               "Implicit constant truncation");

        bool isFastMultiplier = false;
        if (Diff.ult(10)) {
          switch (Diff.getZExtValue()) {
          default: break;
          case 1:  // result = add base, cond
          case 2:  // result = lea base(    , cond*2)
          case 3:  // result = lea base(cond, cond*2)
          case 4:  // result = lea base(    , cond*4)
          case 5:  // result = lea base(cond, cond*4)
          case 8:  // result = lea base(    , cond*8)
          case 9:  // result = lea base(cond, cond*8)
            isFastMultiplier = true;
            break;
          }
        }

        if (isFastMultiplier) {
          Cond = getSETCC(CC, Cond, DL, DAG);
          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, DL, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          return Cond;
        }
      }
    }
  }
  // Handle these cases:
  //   (select (x != c), e, c) -> select (x != c), e, x),
  //   (select (x == c), c, e) -> select (x == c), x, e)
  // where the c is an integer constant, and the "select" is the combination
  // of CMOV and CMP.
  //
  // The rationale for this change is that the conditional-move from a constant
  // needs two instructions, however, conditional-move from a register needs
  // only one instruction.
  //
  // CAVEAT: By replacing a constant with a symbolic value, it may obscure
  //  some instruction-combining opportunities. This opt needs to be
  //  postponed as late as possible.
  //
  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
    // the DCI.xxxx conditions are provided to postpone the optimization as
    // late as possible.

    ConstantSDNode *CmpAgainst = nullptr;
    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
        !isa<ConstantSDNode>(Cond.getOperand(0))) {

      if (CC == X86::COND_NE &&
          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueOp, FalseOp);
      }

      if (CC == X86::COND_E &&
          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
        SDValue Ops[] = {FalseOp, Cond.getOperand(0),
                         DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
        return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
      }
    }
  }
  // Fold and/or of setcc's to double CMOV:
  //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
  //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
  //
  // This combine lets us generate:
  //   cmovcc1 (jcc1 if we don't have CMOV)
  //   cmovcc2 (same)
  // instead of:
  //   setcc1
  //   setcc2
  //   and/or
  //   cmovne (jne if we don't have CMOV)
  // When we can't use the CMOV instruction, it might increase branch
  // mispredicts.
  // When we can use CMOV, or when there is no mispredict, this improves
  // throughput and reduces register pressure.
  //
  if (CC == X86::COND_NE) {
    SDValue Flags;
    X86::CondCode CC0, CC1;
    bool isAndSetCC;
    if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
      if (isAndSetCC) {
        std::swap(FalseOp, TrueOp);
        CC0 = X86::GetOppositeBranchCondition(CC0);
        CC1 = X86::GetOppositeBranchCondition(CC1);
      }

      SDValue LOps[] = {FalseOp, TrueOp,
                        DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
      SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
      SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
                       Flags};
      SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
      return CMOV;
    }
  }
  // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
  //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
  // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
  //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
  if ((CC == X86::COND_NE || CC == X86::COND_E) &&
      Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
    SDValue Add = TrueOp;
    SDValue Const = FalseOp;
    // Canonicalize the condition code for easier matching and output.
    if (CC == X86::COND_E)
      std::swap(Add, Const);

    // We might have replaced the constant in the cmov with the LHS of the
    // compare. If so change it to the RHS of the compare.
    if (Const == Cond.getOperand(0))
      Const = Cond.getOperand(1);

    // Ok, now make sure that Add is (add (cttz X), C2) and Const is a
    // constant.
    if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
        Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
        (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
         Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
        Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
      EVT VT = N->getValueType(0);
      // This should constant fold.
      SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
      SDValue CMov =
          DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
                      DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
      return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
    }
  }

  return SDValue();
}
/// Different mul shrinking modes.
enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };

static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
  EVT VT = N->getOperand(0).getValueType();
  if (VT.getScalarSizeInBits() != 32)
    return false;

  assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
  unsigned SignBits[2] = {1, 1};
  bool IsPositive[2] = {false, false};
  for (unsigned i = 0; i < 2; i++) {
    SDValue Opd = N->getOperand(i);

    SignBits[i] = DAG.ComputeNumSignBits(Opd);
    IsPositive[i] = DAG.SignBitIsZero(Opd);
  }

  bool AllPositive = IsPositive[0] && IsPositive[1];
  unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
  // When ranges are from -128 ~ 127, use MULS8 mode.
  if (MinSignBits >= 25)
    Mode = ShrinkMode::MULS8;
  // When ranges are from 0 ~ 255, use MULU8 mode.
  else if (AllPositive && MinSignBits >= 24)
    Mode = ShrinkMode::MULU8;
  // When ranges are from -32768 ~ 32767, use MULS16 mode.
  else if (MinSignBits >= 17)
    Mode = ShrinkMode::MULS16;
  // When ranges are from 0 ~ 65535, use MULU16 mode.
  else if (AllPositive && MinSignBits >= 16)
    Mode = ShrinkMode::MULU16;
  else
    return false;
  return true;
}
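
// For intuition: a v4i32 operand sign-extended from v4i8 has at least 25
// sign bits, so it survives an i8 round-trip and lands in MULS8; a v4i32
// zero-extended from v4i16 has exactly 16 sign bits and only qualifies for
// MULU16 (informal examples of the thresholds above).
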
/// When the operands of vector mul are extended from smaller size values,
/// like i8 and i16, the type of mul may be shrunk to generate more
/// efficient code. Two typical patterns are handled:
/// Pattern1:
///     %2 = sext/zext <N x i8> %1 to <N x i32>
///     %4 = sext/zext <N x i8> %3 to <N x i32>
///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
///     %5 = mul <N x i32> %2, %4
///
/// Pattern2:
///     %2 = zext/sext <N x i16> %1 to <N x i32>
///     %4 = zext/sext <N x i16> %3 to <N x i32>
///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
///     %5 = mul <N x i32> %2, %4
///
/// There are four mul shrinking modes:
/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
/// generate pmullw+sext32 for it (MULS8 mode).
/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
/// generate pmullw+zext32 for it (MULU8 mode).
/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
/// generate pmullw+pmulhw for it (MULS16 mode).
/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
/// generate pmullw+pmulhuw for it (MULU16 mode).
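///
/// For example (sketch), in MULU16 mode with <8 x i32> operands zext'd from
/// <8 x i16>, the lowering is roughly:
///   %lo = pmullw  a16, b16   ; low 16 bits of each 16x16 product
///   %hi = pmulhuw a16, b16   ; high 16 bits of each product
/// followed by interleaving %lo/%hi back into <8 x i32> results.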
static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  // Check for legality
  // pmullw/pmulhw are not supported by SSE.
  if (!Subtarget.hasSSE2())
    return SDValue();

  // Check for profitability
  // pmulld is supported since SSE41. It is better to use pmulld
  // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
  // pmullw+pmulhw.
  bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
  if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
    return SDValue();

  ShrinkMode Mode;
  if (!canReduceVMulWidth(N, DAG, Mode))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getOperand(0).getValueType();
  unsigned NumElts = VT.getVectorNumElements();
  if ((NumElts % 2) != 0)
    return SDValue();

  EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);

  // Shrink the operands of mul.
  SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
  SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);

  // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
  // lower part is needed.
  SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
  if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
    return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
                                                   : ISD::SIGN_EXTEND,
                       DL, VT, MulLo);

  EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
  // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
  // the higher part is also needed.
  SDValue MulHi =
      DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
                  ReducedVT, NewN0, NewN1);

  // Repack the lower part and higher part result of mul into a wider
  // result.
  // Generate shuffle functioning as punpcklwd.
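  // E.g. for NumElts == 8 this mask is {0, 8, 1, 9, 2, 10, 3, 11}: each low
  // half of a product is paired with its high half, so the bitcast to vXi32
  // recovers full 32-bit products (worked instance of the loop below).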
  SmallVector<int, 16> ShuffleMask(NumElts);
  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
    ShuffleMask[2 * i] = i;
    ShuffleMask[2 * i + 1] = i + NumElts;
  }
  SDValue ResLo =
      DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
  ResLo = DAG.getBitcast(ResVT, ResLo);
  // Generate shuffle functioning as punpckhwd.
  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
    ShuffleMask[2 * i] = i + NumElts / 2;
    ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
  }
  SDValue ResHi =
      DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
  ResHi = DAG.getBitcast(ResVT, ResHi);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
}

static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
                                 EVT VT, const SDLoc &DL) {

  auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(Mult, DL, VT));
    Result = DAG.getNode(ISD::SHL, DL, VT, Result,
                         DAG.getConstant(Shift, DL, MVT::i8));
    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
                         N->getOperand(0));
    return Result;
  };

  auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(Mul1, DL, VT));
    Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
                         DAG.getConstant(Mul2, DL, VT));
    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
                         N->getOperand(0));
    return Result;
  };

  switch (MulAmt) {
  default:
    break;
  case 11:
    // mul x, 11 => add ((shl (mul x, 5), 1), x)
    return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
  case 21:
    // mul x, 21 => add ((shl (mul x, 5), 2), x)
    return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
  case 41:
    // mul x, 41 => add ((shl (mul x, 5), 3), x)
    return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
  case 22:
    // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
                       combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
  case 19:
    // mul x, 19 => add ((shl (mul x, 9), 1), x)
    return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
  case 37:
    // mul x, 37 => add ((shl (mul x, 9), 2), x)
    return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
  case 73:
    // mul x, 73 => add ((shl (mul x, 9), 3), x)
    return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
  case 13:
    // mul x, 13 => add ((shl (mul x, 3), 2), x)
    return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
  case 23:
    // mul x, 23 => sub ((shl (mul x, 3), 3), x)
    return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
  case 26:
    // mul x, 26 => add ((mul (mul x, 5), 5), x)
    return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
  case 28:
    // mul x, 28 => add ((mul (mul x, 9), 3), x)
    return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
  case 29:
    // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
                       combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
  }

  // Another trick. If this is a power 2 + 2/4/8, we can use a shift followed
  // by a single LEA.
  // First check if this a sum of two power of 2s because that's easy. Then
  // count how many zeros are up to the first bit.
  // TODO: We can do this even without LEA at a cost of two shifts and an add.
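  // E.g. MulAmt == 36 == 32 + 4: ScaleShift == 2 and ShiftAmt == 5, giving
  // (add (shl x, 5), (shl x, 2)); the shl-by-2 is expected to fold into an
  // LEA scale of 4 during selection (an illustrative case).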
  if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
    unsigned ScaleShift = countTrailingZeros(MulAmt);
    if (ScaleShift >= 1 && ScaleShift < 4) {
      unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
      SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                                   DAG.getConstant(ShiftAmt, DL, MVT::i8));
      SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                                   DAG.getConstant(ScaleShift, DL, MVT::i8));
      return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
    }
  }

  return SDValue();
}

// If the upper 17 bits of either operand are zero and the upper bits of the
// other are all zero/sign bits, then we can use PMADDWD, which is always at
// least as quick as PMULLD, except on KNL.
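// Why 17 bits: VPMADDWD computes, per i32 lane,
//   lo16(a) * lo16(b) + hi16(a) * hi16(b)  (signed),
// so if one operand's upper 17 bits are zero the hi16*hi16 term vanishes
// and the result is exactly the desired low i32 multiply (informal sketch).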
static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (Subtarget.isPMADDWDSlow())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Only support vXi32 vectors.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
    return SDValue();

  // Make sure the type is legal or can split/widen to a legal type.
  // With AVX512 but without BWI, we would need to split v32i16.
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 1 || !isPowerOf2_32(NumElts))
    return SDValue();

  // With AVX512 but without BWI, we would need to split v32i16.
  if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // If we are zero/sign extending two steps without SSE4.1, it's better to
  // reduce the vmul width instead.
  if (!Subtarget.hasSSE41() &&
      (((N0.getOpcode() == ISD::ZERO_EXTEND &&
         N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
        (N1.getOpcode() == ISD::ZERO_EXTEND &&
         N1.getOperand(0).getScalarValueSizeInBits() <= 8)) ||
       ((N0.getOpcode() == ISD::SIGN_EXTEND &&
         N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
        (N1.getOpcode() == ISD::SIGN_EXTEND &&
         N1.getOperand(0).getScalarValueSizeInBits() <= 8))))
    return SDValue();

  // If we are sign extending a wide vector without SSE4.1, it's better to
  // reduce the vmul width instead.
  if (!Subtarget.hasSSE41() &&
      (N0.getOpcode() == ISD::SIGN_EXTEND &&
       N0.getOperand(0).getValueSizeInBits() > 128) &&
      (N1.getOpcode() == ISD::SIGN_EXTEND &&
       N1.getOperand(0).getValueSizeInBits() > 128))
    return SDValue();

  // Sign bits must extend down to the lowest i16.
  if (DAG.ComputeMaxSignificantBits(N1) > 16 ||
      DAG.ComputeMaxSignificantBits(N0) > 16)
    return SDValue();

  // At least one of the elements must be zero in the upper 17 bits, or can be
  // safely made zero without altering the final result.
  auto GetZeroableOp = [&](SDValue Op) {
    APInt Mask17 = APInt::getHighBitsSet(32, 17);
    if (DAG.MaskedValueIsZero(Op, Mask17))
      return Op;
    // Mask off upper 16-bits of sign-extended constants.
    if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()))
      return DAG.getNode(ISD::AND, SDLoc(N), VT, Op,
                         DAG.getConstant(0xFFFF, SDLoc(N), VT));
    if (Op.getOpcode() == ISD::SIGN_EXTEND && N->isOnlyUserOf(Op.getNode())) {
      SDValue Src = Op.getOperand(0);
      // Convert sext(vXi16) to zext(vXi16).
      if (Src.getScalarValueSizeInBits() == 16 && VT.getSizeInBits() <= 128)
        return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
      // Convert sext(vXi8) to zext(vXi16 sext(vXi8)) on pre-SSE41 targets
      // which will expand the extension.
      if (Src.getScalarValueSizeInBits() < 16 && !Subtarget.hasSSE41()) {
        EVT ExtVT = VT.changeVectorElementType(MVT::i16);
        Src = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), ExtVT, Src);
        return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
      }
    }
    // Convert SIGN_EXTEND_VECTOR_INREG to ZERO_EXTEND_VECTOR_INREG.
    if (Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
        N->isOnlyUserOf(Op.getNode())) {
      SDValue Src = Op.getOperand(0);
      if (Src.getScalarValueSizeInBits() == 16)
        return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(N), VT, Src);
    }
    // Convert VSRAI(Op, 16) to VSRLI(Op, 16).
    if (Op.getOpcode() == X86ISD::VSRAI && Op.getConstantOperandVal(1) == 16 &&
        N->isOnlyUserOf(Op.getNode())) {
      return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, Op.getOperand(0),
                         Op.getOperand(1));
    }
    return SDValue();
  };

  SDValue ZeroN0 = GetZeroableOp(N0);
  SDValue ZeroN1 = GetZeroableOp(N1);
  if (!ZeroN0 && !ZeroN1)
    return SDValue();
  N0 = ZeroN0 ? ZeroN0 : N0;
  N1 = ZeroN1 ? ZeroN1 : N1;

  // Use SplitOpsAndApply to handle AVX splitting.
  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
    MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
    MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
                       DAG.getBitcast(OpVT, Ops[0]),
                       DAG.getBitcast(OpVT, Ops[1]));
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
                          PMADDWDBuilder);
}

static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Only support vXi64 vectors.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
      VT.getVectorNumElements() < 2 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // PMULDQ returns the 64-bit result of the signed multiplication of the lower
  // 32-bits. We can lower with this if the sign bits stretch that far.
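  // E.g. two v2i64 operands sign-extended from v2i32 each have more than 32
  // sign bits, so PMULDQ of their low halves reproduces the full 64-bit
  // product (a representative case).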
  if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
      DAG.ComputeNumSignBits(N1) > 32) {
    auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
                            PMULDQBuilder, /*CheckBWI*/false);
  }

  // If the upper bits are zero we can use a single pmuludq.
  APInt Mask = APInt::getHighBitsSet(64, 32);
  if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
    auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                             ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
                            PMULUDQBuilder, /*CheckBWI*/false);
  }

  return SDValue();
}

static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);

  if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
    return V;

  if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
    return V;

  if (DCI.isBeforeLegalize() && VT.isVector())
    return reduceVMULWidth(N, DAG, Subtarget);

  // Optimize a single multiply with constant into two operations in order to
  // implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
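  // E.g. mul x, 45 can become two LEAs: t = x*9 via lea (x,x,8), then t*5
  // via lea (t,t,4), so no general-purpose imul is needed (sketch).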
  if (!MulConstantOptimization)
    return SDValue();

  // An imul is usually smaller than the alternative sequence.
  if (DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (VT != MVT::i64 && VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  if (isPowerOf2_64(C->getZExtValue()))
    return SDValue();

  int64_t SignMulAmt = C->getSExtValue();
  assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
  uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;

  SDLoc DL(N);
  if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
    SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(AbsMulAmt, DL, VT));
    if (SignMulAmt < 0)
      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                           NewMul);

    return NewMul;
  }

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((AbsMulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = AbsMulAmt / 9;
  } else if ((AbsMulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = AbsMulAmt / 5;
  } else if ((AbsMulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = AbsMulAmt / 3;
  }

  SDValue NewMul = SDValue();
  // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) ||
       (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {

    if (isPowerOf2_64(MulAmt2) &&
        !(SignMulAmt >= 0 && N->hasOneUse() &&
          N->use_begin()->getOpcode() == ISD::ADD))
      // If second multiplier is pow2, issue it first. We want the multiply
      // by 3, 5, or 9 to be folded into the addressing mode unless the lone
      // use is an add. Only do this for positive multiply amounts since the
      // negate would prevent it from being used as an address mode anyway.
      std::swap(MulAmt1, MulAmt2);

    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, DL, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, DL, VT));

    // Negate the result.
    if (SignMulAmt < 0)
      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                           NewMul);
  } else if (!Subtarget.slowLEA())
    NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);

  if (!NewMul) {
    assert(C->getZExtValue() != 0 &&
           C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
           "Both cases that could cause potential overflows should have "
           "already been handled.");
    if (isPowerOf2_64(AbsMulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
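      // E.g. mul x, 17 => (add (shl x, 4), x): one shift plus one add.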
      NewMul = DAG.getNode(
          ISD::ADD, DL, VT, N->getOperand(0),
          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                      DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
                                      MVT::i8)));
      // To negate, subtract the number from zero
      if (SignMulAmt < 0)
        NewMul = DAG.getNode(ISD::SUB, DL, VT,
                             DAG.getConstant(0, DL, VT), NewMul);
    } else if (isPowerOf2_64(AbsMulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt + 1),
                                           DL, MVT::i8));
      // To negate, reverse the operands of the subtract.
      if (SignMulAmt < 0)
        NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
      else
        NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
      // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt - 2),
                                           DL, MVT::i8));
      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
      // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt + 2),
                                           DL, MVT::i8));
      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
    }
  }

  return NewMul;
}

// Try to form a MULHU or MULHS node by looking for
// (srl (mul ext, ext), 16)
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
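// E.g. (srl (mul (zext v8i16:a to v8i32), (zext v8i16:b to v8i32)), 16)
// becomes (zext (mulhu a, b) to v8i32), i.e. a single pmulhuw (sketch).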
static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "SRL or SRA node is required here!");
  SDLoc DL(N);

  if (!Subtarget.hasSSE2())
    return SDValue();

  // The operation feeding into the shift must be a multiply.
  SDValue ShiftOperand = N->getOperand(0);
  if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
    return SDValue();

  // Input type should be at least vXi32.
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
    return SDValue();

  // Need a shift by 16.
  APInt ShiftAmt;
  if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
      ShiftAmt != 16)
    return SDValue();

  SDValue LHS = ShiftOperand.getOperand(0);
  SDValue RHS = ShiftOperand.getOperand(1);

  unsigned ExtOpc = LHS.getOpcode();
  if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
      RHS.getOpcode() != ExtOpc)
    return SDValue();

  // Peek through the extends.
  LHS = LHS.getOperand(0);
  RHS = RHS.getOperand(0);

  // Ensure the input types match.
  EVT MulVT = LHS.getValueType();
  if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
    return SDValue();

  unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
  SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);

  ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
  return DAG.getNode(ExtOpc, DL, VT, Mulh);
}

static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zero's or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    APInt Mask = N0.getConstantOperandAPInt(1);
    Mask <<= N1C->getAPIntValue();
    bool MaskOK = false;
    // We can handle cases concerning bit-widening nodes containing setcc_c if
    // we carefully interrogate the mask to make sure we are semantics
    // preserving.
    // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
    // of the underlying setcc_c operation if the setcc_c was zero extended.
    // Consider the following example:
    //   zext(setcc_c)                 -> i32 0x0000FFFF
    //   c1                            -> i32 0x0000FFFF
    //   c2                            -> i32 0x00000001
    //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
    //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
                N00.getOpcode() == ISD::ANY_EXTEND) &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
    }
    if (MaskOK && Mask != 0) {
      SDLoc DL(N);
      return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
    }
  }

  // Hardware support for vector shifts is sparse which makes us scalarize the
  // vector operations in many cases. Also, on sandybridge ADD is faster than
  // shl.
  // (shl V, 1) -> add V,V
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
      assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an
      // ADD of two values.
      if (N1SplatC->isOne())
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}

static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned Size = VT.getSizeInBits();

  if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
    return V;

  // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
  // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
  // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
  // depending on sign of (SarConst - [56,48,32,24,16])
  //
  // sexts in X86 are MOVs. The MOVs have the same code size
  // as above SHIFTs (only SHIFT on 1 has lower code size).
  // However the MOVs have 2 advantages to a SHIFT:
  // 1. MOVs can write to a register that differs from source
  // 2. MOVs accept memory operands
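  //
  // Worked example: (sar (shl x, 56), 61) becomes (sext_inreg x, i8) with a
  // residual shift of 61 - 56 == 5, i.e. (sra (sext_inreg x, i8), 5): a
  // movsx plus a short sar (illustrative walk-through of the fold below).
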
  if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
      N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
      N0.getOperand(1).getOpcode() != ISD::Constant)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
  APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
  EVT CVT = N1.getValueType();

  if (SarConst.isNegative())
    return SDValue();

  for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
    unsigned ShiftSize = SVT.getSizeInBits();
    // skipping types without corresponding sext/zext and
    // ShlConst that is not one of [56,48,32,24,16]
    if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
      continue;
    SDLoc DL(N);
    SDValue NN =
        DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
    SarConst = SarConst - (Size - ShiftSize);
    if (SarConst == 0)
      return NN;
    if (SarConst.isNegative())
      return DAG.getNode(ISD::SHL, DL, VT, NN,
                         DAG.getConstant(-SarConst, DL, CVT));
    return DAG.getNode(ISD::SRA, DL, VT, NN,
                       DAG.getConstant(SarConst, DL, CVT));
  }
  return SDValue();
}

static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();

  if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
    return V;

  // Only do this on the last DAG combine as it can interfere with other
  // combines.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
  // TODO: This is a generic DAG combine that became an x86-only combine to
  // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
  // and-not ('andn').
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();

  auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
  auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!ShiftC || !AndC)
    return SDValue();

  // If we can shrink the constant mask below 8-bits or 32-bits, then this
  // transform should reduce code size. It may also enable secondary transforms
  // from improved known-bits analysis or instruction selection.
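  // E.g. (srl (and X, 0xFC), 2) -> (and (srl X, 2), 0x3F): 0xFC does not
  // fit a sign-extended 8-bit immediate but the shifted mask 0x3F does
  // (one such case).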
  APInt MaskVal = AndC->getAPIntValue();

  // If this can be matched by a zero extend, don't optimize.
  if (MaskVal.isMask()) {
    unsigned TO = MaskVal.countTrailingOnes();
    if (TO >= 8 && isPowerOf2_32(TO))
      return SDValue();
  }

  APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
  unsigned OldMaskSize = MaskVal.getMinSignedBits();
  unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
  if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
      (OldMaskSize > 32 && NewMaskSize <= 32)) {
    // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
    SDLoc DL(N);
    SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
    SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
    return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
  }
  return SDValue();
}

static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert(isHorizOp(Opcode) && "Unexpected hadd/hsub/pack opcode");

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT SrcVT = N0.getValueType();

  SDValue BC0 =
      N->isOnlyUserOf(N0.getNode()) ? peekThroughOneUseBitcasts(N0) : N0;
  SDValue BC1 =
      N->isOnlyUserOf(N1.getNode()) ? peekThroughOneUseBitcasts(N1) : N1;

  // Attempt to fold HOP(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
  // to SHUFFLE(HOP(LOSUBVECTOR(X),HISUBVECTOR(X))), this is mainly for
  // truncation trees that help us avoid lane crossing shuffles.
  // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
  // TODO: We don't handle vXf64 shuffles yet.
  if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
    if (SDValue BCSrc = getSplitVectorSrc(BC0, BC1, false)) {
      SmallVector<SDValue> ShuffleOps;
      SmallVector<int> ShuffleMask, ScaledMask;
      SDValue Vec = peekThroughBitcasts(BCSrc);
      if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
        resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
        // To keep the HOP LHS/RHS coherency, we must be able to scale the unary
        // shuffle to a v4X64 width - we can probably relax this in the future.
        if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
            ShuffleOps[0].getValueType().is256BitVector() &&
            scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
          SDValue Lo, Hi;
          MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
          std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
          Lo = DAG.getBitcast(SrcVT, Lo);
          Hi = DAG.getBitcast(SrcVT, Hi);
          SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
          Res = DAG.getBitcast(ShufVT, Res);
          Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
          return DAG.getBitcast(VT, Res);
        }
      }
    }
  }

  // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(Z,W)) -> SHUFFLE(HOP()).
  if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
    // If either/both ops are a shuffle that can scale to v2x64,
    // then see if we can perform this as a v4x32 post shuffle.
    SmallVector<SDValue> Ops0, Ops1;
    SmallVector<int> Mask0, Mask1, ScaledMask0, ScaledMask1;
    bool IsShuf0 =
        getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
        scaleShuffleElements(Mask0, 2, ScaledMask0) &&
        all_of(Ops0, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
    bool IsShuf1 =
        getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
        scaleShuffleElements(Mask1, 2, ScaledMask1) &&
        all_of(Ops1, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
    if (IsShuf0 || IsShuf1) {
      if (!IsShuf0) {
        Ops0.assign({BC0});
        ScaledMask0.assign({0, 1});
      }
      if (!IsShuf1) {
        Ops1.assign({BC1});
        ScaledMask1.assign({0, 1});
      }

      SDValue LHS, RHS;
      int PostShuffle[4] = {-1, -1, -1, -1};
      auto FindShuffleOpAndIdx = [&](int M, int &Idx, ArrayRef<SDValue> Ops) {
        if (M < 0)
          return true;
        Idx = M % 2;
        SDValue Src = Ops[M / 2];
        if (!LHS || LHS == Src) {
          LHS = Src;
          return true;
        }
        if (!RHS || RHS == Src) {
          Idx += 2;
          RHS = Src;
          return true;
        }
        return false;
      };
      if (FindShuffleOpAndIdx(ScaledMask0[0], PostShuffle[0], Ops0) &&
          FindShuffleOpAndIdx(ScaledMask0[1], PostShuffle[1], Ops0) &&
          FindShuffleOpAndIdx(ScaledMask1[0], PostShuffle[2], Ops1) &&
          FindShuffleOpAndIdx(ScaledMask1[1], PostShuffle[3], Ops1)) {
        LHS = DAG.getBitcast(SrcVT, LHS);
        RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
        MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
        SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
        Res = DAG.getBitcast(ShufVT, Res);
        Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
        return DAG.getBitcast(VT, Res);
      }
    }
  }

  // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
  if (VT.is256BitVector() && Subtarget.hasInt256()) {
    SmallVector<int> Mask0, Mask1;
    SmallVector<SDValue> Ops0, Ops1;
    SmallVector<int, 2> ScaledMask0, ScaledMask1;
    if (getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
        getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
        !Ops0.empty() && !Ops1.empty() &&
        all_of(Ops0,
               [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
        all_of(Ops1,
               [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
        scaleShuffleElements(Mask0, 2, ScaledMask0) &&
        scaleShuffleElements(Mask1, 2, ScaledMask1)) {
      SDValue Op00 = peekThroughBitcasts(Ops0.front());
      SDValue Op10 = peekThroughBitcasts(Ops1.front());
      SDValue Op01 = peekThroughBitcasts(Ops0.back());
      SDValue Op11 = peekThroughBitcasts(Ops1.back());
      if ((Op00 == Op11) && (Op01 == Op10)) {
        std::swap(Op10, Op11);
        ShuffleVectorSDNode::commuteMask(ScaledMask1);
      }
      if ((Op00 == Op10) && (Op01 == Op11)) {
        const int Map[4] = {0, 2, 1, 3};
        SmallVector<int, 4> ShuffleMask(
            {Map[ScaledMask0[0]], Map[ScaledMask1[0]], Map[ScaledMask0[1]],
             Map[ScaledMask1[1]]});
        MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
        SDValue Res = DAG.getNode(Opcode, DL, VT, DAG.getBitcast(SrcVT, Op00),
                                  DAG.getBitcast(SrcVT, Op01));
        Res = DAG.getBitcast(ShufVT, Res);
        Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
        return DAG.getBitcast(VT, Res);
      }
    }
  }

  return SDValue();
}

static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
         "Unexpected pack opcode");

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned NumDstElts = VT.getVectorNumElements();
  unsigned DstBitsPerElt = VT.getScalarSizeInBits();
  unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
  assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
         N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
         "Unexpected PACKSS/PACKUS input type");

  bool IsSigned = (X86ISD::PACKSS == Opcode);

  // Constant Folding.
  APInt UndefElts0, UndefElts1;
  SmallVector<APInt, 32> EltBits0, EltBits1;
  if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
      (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
      getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
      getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
    unsigned NumLanes = VT.getSizeInBits() / 128;
    unsigned NumSrcElts = NumDstElts / 2;
    unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
    unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;

    APInt Undefs(NumDstElts, 0);
    SmallVector<APInt, 32> Bits(NumDstElts, APInt::getZero(DstBitsPerElt));
    for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
      for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
        unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
        auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
        auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);

        if (UndefElts[SrcIdx]) {
          Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
          continue;
        }

        APInt &Val = EltBits[SrcIdx];
        if (IsSigned) {
          // PACKSS: Truncate signed value with signed saturation.
          // Source values less than dst minint are saturated to minint.
          // Source values greater than dst maxint are saturated to maxint.
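          // E.g. i16 -> i8: 300 -> 127, -200 -> -128, 5 -> 5 (worked values
          // for the saturation rules above).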
          if (Val.isSignedIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getSignedMinValue(DstBitsPerElt);
          else
            Val = APInt::getSignedMaxValue(DstBitsPerElt);
        } else {
          // PACKUS: Truncate signed value with unsigned saturation.
          // Source values less than zero are saturated to zero.
          // Source values greater than dst maxuint are saturated to maxuint.
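          // E.g. i16 -> i8: 300 -> 255, -1 -> 0, 5 -> 5 (the unsigned
          // counterpart of the example above).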
          if (Val.isIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getZero(DstBitsPerElt);
          else
            Val = APInt::getAllOnes(DstBitsPerElt);
        }
        Bits[Lane * NumDstEltsPerLane + Elt] = Val;
      }
    }

    return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
  }

  // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
  if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
    return V;

  // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
  // truncate to create a larger truncate.
  if (Subtarget.hasAVX512() &&
      N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
      N0.getOperand(0).getValueType() == MVT::v8i32) {
    if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
        (!IsSigned &&
         DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
      if (Subtarget.hasVLX())
        return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));

      // Widen input to v16i32 so we can truncate that.
      SDLoc dl(N);
      SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
                                   N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
    }
  }

  // Try to fold PACK(EXTEND(X),EXTEND(Y)) -> CONCAT(X,Y) subvectors.
  if (VT.is128BitVector()) {
    unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue Src0, Src1;
    if (N0.getOpcode() == ExtOpc &&
        N0.getOperand(0).getValueType().is64BitVector() &&
        N0.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
      Src0 = N0.getOperand(0);
    }
    if (N1.getOpcode() == ExtOpc &&
        N1.getOperand(0).getValueType().is64BitVector() &&
        N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
      Src1 = N1.getOperand(0);
    }
    if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
      assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
      Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
      Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Src0, Src1);
    }

    // Try again with pack(*_extend_vector_inreg, undef).
    unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
                                    : ISD::ZERO_EXTEND_VECTOR_INREG;
    if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
        N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
      return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
                                    DAG);
  }

  // Attempt to combine as shuffle.
  SDValue Op(N, 0);
  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
    return Res;

  return SDValue();
}

static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  assert((X86ISD::HADD == N->getOpcode() || X86ISD::FHADD == N->getOpcode() ||
          X86ISD::HSUB == N->getOpcode() || X86ISD::FHSUB == N->getOpcode()) &&
         "Unexpected horizontal add/sub opcode");

  if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
    MVT VT = N->getSimpleValueType(0);
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    // HOP(HOP'(X,X),HOP'(Y,Y)) -> HOP(PERMUTE(HOP'(X,Y)),PERMUTE(HOP'(X,Y))).
    if (LHS != RHS && LHS.getOpcode() == N->getOpcode() &&
        LHS.getOpcode() == RHS.getOpcode() &&
        LHS.getValueType() == RHS.getValueType() &&
        N->isOnlyUserOf(LHS.getNode()) && N->isOnlyUserOf(RHS.getNode())) {
      SDValue LHS0 = LHS.getOperand(0);
      SDValue LHS1 = LHS.getOperand(1);
      SDValue RHS0 = RHS.getOperand(0);
      SDValue RHS1 = RHS.getOperand(1);
      if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
          (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
        SDLoc DL(N);
        SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
                                  LHS0.isUndef() ? LHS1 : LHS0,
                                  RHS0.isUndef() ? RHS1 : RHS0);
        MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
        Res = DAG.getBitcast(ShufVT, Res);
        SDValue NewLHS =
            DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
                        getV4X86ShuffleImm8ForMask({0, 1, 0, 1}, DL, DAG));
        SDValue NewRHS =
            DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
                        getV4X86ShuffleImm8ForMask({2, 3, 2, 3}, DL, DAG));
        return DAG.getNode(N->getOpcode(), DL, VT, DAG.getBitcast(VT, NewLHS),
                           DAG.getBitcast(VT, NewRHS));
      }
    }
  }

  // Try to fold HOP(SHUFFLE(),SHUFFLE()) -> SHUFFLE(HOP()).
  if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
    return V;

  return SDValue();
}

static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
          X86ISD::VSRL == N->getOpcode()) &&
         "Unexpected shift opcode");
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Shift zero -> zero.
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  // Detect constant shift amounts.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
    unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
    return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
                                      EltBits[0].getZExtValue(), DAG);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
          X86ISD::VSRLI == Opcode) &&
         "Unexpected shift opcode");
  bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
  assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
         "Unexpected value type");
  assert(N->getOperand(1).getValueType() == MVT::i8 &&
         "Unexpected shift amount type");

  // (shift undef, X) -> 0
  if (N0.isUndef())
    return DAG.getConstant(0, SDLoc(N), VT);

  // Out of range logical bit shifts are guaranteed to be zero.
  // Out of range arithmetic bit shifts splat the sign bit.
  unsigned ShiftVal = N->getConstantOperandVal(1);
  if (ShiftVal >= NumBitsPerElt) {
    if (LogicalShift)
      return DAG.getConstant(0, SDLoc(N), VT);
    ShiftVal = NumBitsPerElt - 1;
  }

  // (shift X, 0) -> X
  if (!ShiftVal)
    return N0;

  // (shift 0, C) -> 0
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    // N0 is all zeros or undef. We guarantee that the bits shifted into the
    // result are all zeros, not undef.
    return DAG.getConstant(0, SDLoc(N), VT);

  // (VSRAI -1, C) -> -1
  if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
    // N0 is all ones or undef. We guarantee that the bits shifted into the
    // result are all ones, not undef.
    return DAG.getConstant(-1, SDLoc(N), VT);

  // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
  if (Opcode == N0.getOpcode()) {
    unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    unsigned NewShiftVal = ShiftVal + ShiftVal2;
    if (NewShiftVal >= NumBitsPerElt) {
      // Out of range logical bit shifts are guaranteed to be zero.
      // Out of range arithmetic bit shifts splat the sign bit.
      if (LogicalShift)
        return DAG.getConstant(0, SDLoc(N), VT);
      NewShiftVal = NumBitsPerElt - 1;
    }
    return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
                       DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
  }

  // We can decode 'whole byte' logical bit shifts as shuffles.
  if (LogicalShift && (ShiftVal % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  // Constant Folding.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (N->isOnlyUserOf(N0.getNode()) &&
      getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
    assert(EltBits.size() == VT.getVectorNumElements() &&
           "Unexpected shift value type");
    // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
    // created an undef input due to no input bits being demanded, but user
    // still expects 0 in other bits.
    for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
      APInt &Elt = EltBits[i];
      if (UndefElts[i])
        Elt = 0;
      else if (X86ISD::VSHLI == Opcode)
        Elt <<= ShiftVal;
      else if (X86ISD::VSRAI == Opcode)
        Elt.ashrInPlace(ShiftVal);
      else
        Elt.lshrInPlace(ShiftVal);
    }
    // Reset undef elements since they were zeroed above.
    UndefElts = 0;
    return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBitsPerElt),
                               DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
          (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16) ||
          N->getOpcode() == ISD::INSERT_VECTOR_ELT) &&
         "Unexpected vector insertion");

  if (N->getOpcode() == X86ISD::PINSRB || N->getOpcode() == X86ISD::PINSRW) {
    unsigned NumBitsPerElt = VT.getScalarSizeInBits();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLI.SimplifyDemandedBits(SDValue(N, 0),
                                 APInt::getAllOnes(NumBitsPerElt), DCI))
      return SDValue(N, 0);
  }

  // Attempt to combine insertion patterns to a shuffle.
  if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  return SDValue();
}

/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
/// OR -> CMPNEQSS.
static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
  if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue CMP0 = N0.getOperand(1);
    SDValue CMP1 = N1.getOperand(1);
    SDLoc DL(N);

    // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
      return SDValue();

    SDValue CMP00 = CMP0->getOperand(0);
    SDValue CMP01 = CMP0->getOperand(1);
    EVT VT = CMP00.getValueType();

    if (VT == MVT::f32 || VT == MVT::f64 ||
        (VT == MVT::f16 && Subtarget.hasFP16())) {
      bool ExpectingFlags = false;
      // Check for any users that want flags:
      for (const SDNode *U : N->uses()) {
        if (ExpectingFlags)
          break;

        switch (U->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }
      }

      if (!ExpectingFlags) {
        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);

        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

        if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget.hasAVX512()) {
            SDValue FSetCC =
                DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
                            DAG.getTargetConstant(x86cc, DL, MVT::i8));
            // Need to fill with zeros to ensure the bitcast will produce zeroes
            // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
            SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
                                      DAG.getConstant(0, DL, MVT::v16i1),
                                      FSetCC, DAG.getIntPtrConstant(0, DL));
            return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
                                      N->getSimpleValueType(0));
          }
          SDValue OnesOrZeroesF =
              DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
                          CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));

          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;

          if (is64BitFP && !Subtarget.is64Bit()) {
            // On a 32-bit target, we cannot bitcast the 64-bit float to a
            // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
            // bits, but can do this little dance to extract the lowest 32 bits
            // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                                           OnesOrZeroesF);
            SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0, DL));
            IntVT = MVT::i32;
          }

          SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
                                      DAG.getConstant(1, DL, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                              ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }

  return SDValue();
}

/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
static SDValue combineAndNotIntoANDNP(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::AND);

  MVT VT = N->getSimpleValueType(0);
  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  SDValue X, Y;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  auto GetNot = [&VT, &DAG](SDValue V) {
    // Basic X = NOT(Y) detection.
    if (SDValue Not = IsNOT(V, DAG))
      return Not;
    // Fold BROADCAST(NOT(Y)) -> BROADCAST(Y).
    if (V.getOpcode() == X86ISD::VBROADCAST) {
      SDValue Src = V.getOperand(0);
      EVT SrcVT = Src.getValueType();
      if (!SrcVT.isVector())
        return SDValue();
      if (SDValue Not = IsNOT(Src, DAG))
        return DAG.getNode(X86ISD::VBROADCAST, SDLoc(V), VT,
                           DAG.getBitcast(SrcVT, Not));
    }
    return SDValue();
  };

  if (SDValue Not = GetNot(N0)) {
    X = Not;
    Y = N1;
  } else if (SDValue Not = GetNot(N1)) {
    X = Not;
    Y = N0;
  } else
    return SDValue();

  X = DAG.getBitcast(VT, X);
  Y = DAG.getBitcast(VT, Y);
  return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
}

// Try to widen AND, OR and XOR nodes to VT in order to remove casts around
// logical operations, like in the example below.
// or (and (truncate x, truncate y)),
//    (xor (truncate z, build_vector (constants)))
// Given a target type \p VT, we generate
// or (and x, y), (xor z, zext(build_vector (constants)))
// given x, y and z are of type \p VT. We can do so, if operands are either
// truncates from VT types, the second operand is a vector of constants or can
// be recursively promoted.
static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
                                     unsigned Depth) {
  // Limit recursion to avoid excessive compile times.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
      N->getOpcode() != ISD::OR)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
    return SDValue();

  if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
    N0 = NN0;
  else {
    // The Left side has to be a trunc.
    if (N0.getOpcode() != ISD::TRUNCATE)
      return SDValue();

    // The type of the truncated inputs.
    if (N0.getOperand(0).getValueType() != VT)
      return SDValue();

    N0 = N0.getOperand(0);
  }

  if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
    N1 = NN1;
  else {
    // The right side has to be a 'trunc' or a constant vector.
    bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
                    N1.getOperand(0).getValueType() == VT;
    if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
      return SDValue();

    if (RHSTrunc)
      N1 = N1.getOperand(0);
    else
      N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
  }

  return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
}

// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
// Even with AVX-512 this is still useful for removing casts around logical
// operations on vXi1 mask types.
static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  assert(VT.isVector() && "Expected vector type");

  SDLoc DL(N);
  assert((N->getOpcode() == ISD::ANY_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND ||
          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");

  SDValue Narrow = N->getOperand(0);
  EVT NarrowVT = Narrow.getValueType();

  // Generate the wide operation.
  SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
  if (!Op)
    return SDValue();
  switch (N->getOpcode()) {
  default: llvm_unreachable("Unexpected opcode");
  case ISD::ANY_EXTEND:
    return Op;
  case ISD::ZERO_EXTEND:
    return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  }
}

static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
  unsigned FPOpcode;
  switch (Opcode) {
  default: llvm_unreachable("Unexpected input node for FP logic conversion");
  case ISD::AND: FPOpcode = X86ISD::FAND; break;
  case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
  case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
  }
  return FPOpcode;
}

/// If both input operands of a logic op are being cast from floating-point
/// types or FP compares, try to convert this into a floating-point logic node
/// to avoid unnecessary moves from SSE to integer registers.
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  if (!((N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) ||
        (N0.getOpcode() == ISD::SETCC && N1.getOpcode() == ISD::SETCC)))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);
  EVT N00Type = N00.getValueType();
  EVT N10Type = N10.getValueType();

  // Ensure that both types are the same and are legal scalar fp types.
  if (N00Type != N10Type || !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
                              (Subtarget.hasSSE2() && N00Type == MVT::f64) ||
                              (Subtarget.hasFP16() && N00Type == MVT::f16)))
    return SDValue();

  if (N0.getOpcode() == ISD::BITCAST && !DCI.isBeforeLegalizeOps()) {
    unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
    SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
    return DAG.getBitcast(VT, FPLogic);
  }

  if (VT != MVT::i1 || N0.getOpcode() != ISD::SETCC || !N0.hasOneUse() ||
      !N1.hasOneUse())
    return SDValue();

  ISD::CondCode CC0 = cast<CondCodeSDNode>(N0.getOperand(2))->get();
  ISD::CondCode CC1 = cast<CondCodeSDNode>(N1.getOperand(2))->get();

  // The vector ISA for FP predicates is incomplete before AVX, so converting
  // COMIS* to CMPS* may not be a win before AVX.
  if (!Subtarget.hasAVX() &&
      !(cheapX86FSETCC_SSE(CC0) && cheapX86FSETCC_SSE(CC1)))
    return SDValue();

  // Convert scalar FP compares and logic to vector compares (COMIS* to CMPS*)
  // and vector logic:
  // logic (setcc N00, N01), (setcc N10, N11) -->
  // extelt (logic (setcc (s2v N00), (s2v N01)), setcc (s2v N10), (s2v N11))), 0
  unsigned NumElts = 128 / N00Type.getSizeInBits();
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), N00Type, NumElts);
  EVT BoolVecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
  SDValue ZeroIndex = DAG.getVectorIdxConstant(0, DL);
  SDValue N01 = N0.getOperand(1);
  SDValue N11 = N1.getOperand(1);
  SDValue Vec00 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N00);
  SDValue Vec01 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N01);
  SDValue Vec10 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N10);
  SDValue Vec11 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N11);
  SDValue Setcc0 = DAG.getSetCC(DL, BoolVecVT, Vec00, Vec01, CC0);
  SDValue Setcc1 = DAG.getSetCC(DL, BoolVecVT, Vec10, Vec11, CC1);
  SDValue Logic = DAG.getNode(N->getOpcode(), DL, BoolVecVT, Setcc0, Setcc1);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Logic, ZeroIndex);
}

// Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
// to reduce XMM->GPR traffic.
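// E.g. (or (movmsk X), (movmsk Y)) -> (movmsk (or X, Y)): one GPR transfer
// and a vector OR instead of two transfers plus a scalar OR (sketch).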
static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
  unsigned Opc = N->getOpcode();
  assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
         "Unexpected bit opcode");

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Both operands must be single use MOVMSK.
  if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
      N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
    return SDValue();

  SDValue Vec0 = N0.getOperand(0);
  SDValue Vec1 = N1.getOperand(0);
  EVT VecVT0 = Vec0.getValueType();
  EVT VecVT1 = Vec1.getValueType();

  // Both MOVMSK operands must be from vectors of the same size and same
  // element size, but it's OK for an fp/int difference.
  if (VecVT0.getSizeInBits() != VecVT1.getSizeInBits() ||
      VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
    return SDValue();

  SDLoc DL(N);
  unsigned VecOpc =
      VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
  SDValue Result =
      DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
  return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
}
// Attempt to fold BITOP(SHIFT(X,Z),SHIFT(Y,Z)) -> SHIFT(BITOP(X,Y),Z).
// NOTE: This is a very limited case of what SimplifyUsingDistributiveLaws
// handles in InstCombine.
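// e.g. (or (vshli X, 4), (vshli Y, 4)) --> (vshli (or X, Y), 4),
// trading two shifts for one.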
static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
  unsigned Opc = N->getOpcode();
  assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
         "Unexpected bit opcode");

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // Both operands must be single use.
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  // Search for matching shifts.
  SDValue BC0 = peekThroughOneUseBitcasts(N0);
  SDValue BC1 = peekThroughOneUseBitcasts(N1);

  unsigned BCOpc = BC0.getOpcode();
  EVT BCVT = BC0.getValueType();
  if (BCOpc != BC1->getOpcode() || BCVT != BC1.getValueType())
    return SDValue();

  switch (BCOpc) {
  case X86ISD::VSHLI:
  case X86ISD::VSRLI:
  case X86ISD::VSRAI: {
    if (BC0.getOperand(1) != BC1.getOperand(1))
      return SDValue();

    SDLoc DL(N);
    SDValue BitOp =
        DAG.getNode(Opc, DL, BCVT, BC0.getOperand(0), BC1.getOperand(0));
    SDValue Shift = DAG.getNode(BCOpc, DL, BCVT, BitOp, BC0.getOperand(1));
    return DAG.getBitcast(VT, Shift);
  }
  }

  return SDValue();
}
/// If this is a zero/all-bits result that is bitwise-anded with a low-bits
/// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
/// with a shift-right to eliminate loading the vector constant mask value.
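/// e.g. (and (pcmpeqd X, Y), (build_vector 1,1,1,1))
///      --> (psrld (pcmpeqd X, Y), 31)
/// since each compare result element is known to be all-ones or all-zeros.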
static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
  SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
  EVT VT = Op0.getValueType();
  if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
    return SDValue();

  // Try to convert an "is positive" signbit masking operation into arithmetic
  // shift and "andn". This saves a materialization of a -1 vector constant.
  // The "is negative" variant should be handled more generally because it only
  // requires "and" rather than "andn":
  // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
  //
  // This is limited to the original type to avoid producing even more bitcasts.
  // If the bitcasts can't be eliminated, then it is unlikely that this fold
  // will be profitable.
  if (N->getValueType(0) == VT &&
      supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRA)) {
    SDValue X, Y;
    if (Op1.hasOneUse() && Op1.getOpcode() == X86ISD::PCMPGT &&
        isAllOnesOrAllOnesSplat(Op1.getOperand(1))) {
      X = Op1.getOperand(0);
      Y = Op0;
    } else if (Op0.hasOneUse() && Op0.getOpcode() == X86ISD::PCMPGT &&
               isAllOnesOrAllOnesSplat(Op0.getOperand(1))) {
      X = Op0.getOperand(0);
      Y = Op1;
    }
    if (X && Y) {
      SDLoc DL(N);
      SDValue Sra =
          getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
                                     VT.getScalarSizeInBits() - 1, DAG);
      return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
    }
  }

  APInt SplatVal;
  if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
      !SplatVal.isMask())
    return SDValue();

  // Don't prevent creation of ANDN.
  if (isBitwiseNot(Op0))
    return SDValue();

  if (!supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRL))
    return SDValue();

  unsigned EltBitWidth = VT.getScalarSizeInBits();
  if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
    return SDValue();

  SDLoc DL(N);
  unsigned ShiftVal = SplatVal.countTrailingOnes();
  SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
  SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT, Op0, ShAmt);
  return DAG.getBitcast(N->getValueType(0), Shift);
}
// Get the index node from the lowered DAG of a GEP IR instruction with one
// indexing dimension.
static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
  if (Ld->isIndexed())
    return SDValue();

  SDValue Base = Ld->getBasePtr();
  if (Base.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue ShiftedIndex = Base.getOperand(0);
  if (ShiftedIndex.getOpcode() != ISD::SHL)
    return SDValue();

  return ShiftedIndex.getOperand(0);
}
static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
  if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
    switch (VT.getSizeInBits()) {
    default: return false;
    case 64: return Subtarget.is64Bit();
    case 32: return true;
    }
  }
  return false;
}
// This function recognizes cases where the X86 bzhi instruction can replace an
// 'and + load' sequence: loading an integer value from an array of constants
// defined as
//
//   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
//
// and then applying a bitwise and on the result with another input.
// It's equivalent to performing bzhi (zero high bits) on the input, with the
// same index as the index of the load.
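// For example, array[5] == 0x1F, so (and (load array[5]), x) keeps the low 5
// bits of x, which is exactly what bzhi computes for an index of 5.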
static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT VT = Node->getSimpleValueType(0);
  SDLoc dl(Node);

  // Check if subtarget has BZHI instruction for the node's type
  if (!hasBZHI(Subtarget, VT))
    return SDValue();

  // Try matching the pattern for both operands.
  for (unsigned i = 0; i < 2; i++) {
    SDValue N = Node->getOperand(i);
    LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());

    // Bail if the operand is not a load instruction.
    if (!Ld)
      return SDValue();

    const Value *MemOp = Ld->getMemOperand()->getValue();
    if (!MemOp)
      return SDValue();

    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
        if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
          Constant *Init = GV->getInitializer();
          Type *Ty = Init->getType();
          if (!isa<ConstantDataArray>(Init) ||
              !Ty->getArrayElementType()->isIntegerTy() ||
              Ty->getArrayElementType()->getScalarSizeInBits() !=
                  VT.getSizeInBits() ||
              Ty->getArrayNumElements() >
                  Ty->getArrayElementType()->getScalarSizeInBits())
            continue;

          // Check if the array's constant elements are suitable to our case.
          uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
          bool ConstantsMatch = true;
          for (uint64_t j = 0; j < ArrayElementCount; j++) {
            auto *Elem = cast<ConstantInt>(Init->getAggregateElement(j));
            if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
              ConstantsMatch = false;
              break;
            }
          }
          if (!ConstantsMatch)
            continue;

          // Do the transformation (For 32-bit type):
          // -> (and (load arr[idx]), inp)
          // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
          // that will be replaced with one bzhi instruction.
          SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
          SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);

          // Get the Node which indexes into the array.
          SDValue Index = getIndexFromUnindexedLoad(Ld);
          if (!Index)
            return SDValue();
          Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);

          SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
          Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);

          SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
          SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);

          return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
        }
      }
    }
  }
  return SDValue();
}
// Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef,))), C)
// where C is a mask containing the same number of bits as the setcc and
// where the setcc will freely zero the upper bits of the k-register. We can
// replace the undef in the concat with 0s and remove the AND. This mainly
// helps with v2i1/v4i1 setcc being cast to scalar.
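// e.g. (and (bitcast (v8i1 (concat_vectors (v2i1 setcc), undef, ...)) to i8), 3)
//      --> (bitcast (v8i1 (concat_vectors (v2i1 setcc), zero, ...)) to i8)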
static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
                                             const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");

  EVT VT = N->getValueType(0);

  // Make sure this is an AND with constant. We will check the value of the
  // constant later.
  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  // This is implied by the ConstantSDNode.
  assert(!VT.isVector() && "Expected scalar VT!");

  if (N->getOperand(0).getOpcode() != ISD::BITCAST ||
      !N->getOperand(0).hasOneUse() ||
      !N->getOperand(0).getOperand(0).hasOneUse())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Src = N->getOperand(0).getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
      !TLI.isTypeLegal(SrcVT))
    return SDValue();

  if (Src.getOpcode() != ISD::CONCAT_VECTORS)
    return SDValue();

  // We only care about the first subvector of the concat, we expect the
  // other subvectors to be ignored due to the AND if we make the change.
  SDValue SubVec = Src.getOperand(0);
  EVT SubVecVT = SubVec.getValueType();

  // First subvector should be a setcc with a legal result type. The RHS of the
  // AND should be a mask with this many bits.
  if (SubVec.getOpcode() != ISD::SETCC || !TLI.isTypeLegal(SubVecVT) ||
      !N->getConstantOperandAPInt(1).isMask(SubVecVT.getVectorNumElements()))
    return SDValue();

  EVT SetccVT = SubVec.getOperand(0).getValueType();
  if (!TLI.isTypeLegal(SetccVT) ||
      !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
    return SDValue();

  if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
    return SDValue();

  // We passed all the checks. Rebuild the concat_vectors with zeroes
  // and cast it back to VT.
  SDLoc dl(N);
  SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
                              DAG.getConstant(0, dl, SubVecVT));
  Ops[0] = SubVec;
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, Ops);
  return DAG.getBitcast(VT, Concat);
}
static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If this is SSE1 only convert to FAND to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
    return DAG.getBitcast(MVT::v4i32,
                          DAG.getNode(X86ISD::FAND, dl, MVT::v4f32,
                                      DAG.getBitcast(MVT::v4f32, N0),
                                      DAG.getBitcast(MVT::v4f32, N1)));
  }
  // Use a 32-bit and+zext if upper bits known zero.
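  // e.g. (and i64:X, i64:Y) with the high 32 bits of X or Y known zero
  //      --> (zext (and (trunc X to i32), (trunc Y to i32)) to i64)
  // which can use the shorter 32-bit instruction encoding.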
  if (VT == MVT::i64 && Subtarget.is64Bit() && !isa<ConstantSDNode>(N1)) {
    APInt HiMask = APInt::getHighBitsSet(64, 32);
    if (DAG.MaskedValueIsZero(N1, HiMask) ||
        DAG.MaskedValueIsZero(N0, HiMask)) {
      SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N0);
      SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N1);
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
                         DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
    }
  }

  // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
  // TODO: Support multiple SrcOps.
  if (VT == MVT::i1) {
    SmallVector<SDValue, 2> SrcOps;
    SmallVector<APInt, 2> SrcPartials;
    if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
        SrcOps.size() == 1) {
      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
      if (Mask) {
        assert(SrcPartials[0].getBitWidth() == NumElts &&
               "Unexpected partial reduction mask");
        SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
        Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
        return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
      }
    }
  }
  if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
    return V;

  if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
    return R;

  if (SDValue R = combineBitOpWithShift(N, DAG))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
    return FPLogic;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue R = combineAndNotIntoANDNP(N, DAG))
    return R;

  if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
    return ShiftRight;

  if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
    return R;
  // fold (and (mul x, c1), c2) -> (mul x, (and c1, c2))
  // iff c2 is all/no bits mask - i.e. a select-with-zero mask.
  // TODO: Handle PMULDQ/PMULUDQ/VPMADDWD/VPMADDUBSW?
  if (VT.isVector() && getTargetConstantFromNode(N1)) {
    unsigned Opc0 = N0.getOpcode();
    if ((Opc0 == ISD::MUL || Opc0 == ISD::MULHU || Opc0 == ISD::MULHS) &&
        getTargetConstantFromNode(N0.getOperand(1)) &&
        DAG.ComputeNumSignBits(N1) == VT.getScalarSizeInBits() &&
        N0->hasOneUse() && N0.getOperand(1)->hasOneUse()) {
      SDValue MaskMul = DAG.getNode(ISD::AND, dl, VT, N0.getOperand(1), N1);
      return DAG.getNode(Opc0, dl, VT, N0.getOperand(0), MaskMul);
    }
  }
  // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant;
  // this avoids a slow variable shift (moving the shift amount to ECX etc.)
  if (isOneConstant(N1) && N0->hasOneUse()) {
    SDValue Src = N0;
    while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
            Src.getOpcode() == ISD::TRUNCATE) &&
           Src.getOperand(0)->hasOneUse())
      Src = Src.getOperand(0);
    bool ContainsNOT = false;
    X86::CondCode X86CC = X86::COND_B;
    // Peek through AND(NOT(SRL(X,Y)),1).
    if (isBitwiseNot(Src)) {
      Src = Src.getOperand(0);
      X86CC = X86::COND_AE;
      ContainsNOT = true;
    }
    if (Src.getOpcode() == ISD::SRL &&
        !isa<ConstantSDNode>(Src.getOperand(1))) {
      SDValue BitNo = Src.getOperand(1);
      Src = Src.getOperand(0);
      // Peek through AND(SRL(NOT(X),Y),1).
      if (isBitwiseNot(Src)) {
        Src = Src.getOperand(0);
        X86CC = X86CC == X86::COND_AE ? X86::COND_B : X86::COND_AE;
        ContainsNOT = true;
      }
      // If we have BMI2 then SHRX should be faster for i32/i64 cases.
      if (!(Subtarget.hasBMI2() && !ContainsNOT && VT.getSizeInBits() >= 32))
        if (SDValue BT = getBT(Src, BitNo, dl, DAG))
          return DAG.getZExtOrTrunc(getSETCC(X86CC, BT, dl, DAG), dl, VT);
    }
  }
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    // Attempt to recursively combine a bitmask AND with shuffles.
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;

    // If either operand is a constant mask, then only the elements that aren't
    // zero are actually demanded by the other operand.
    auto GetDemandedMasks = [&](SDValue Op) {
      APInt UndefElts;
      SmallVector<APInt> EltBits;
      int NumElts = VT.getVectorNumElements();
      int EltSizeInBits = VT.getScalarSizeInBits();
      APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
      APInt DemandedElts = APInt::getAllOnes(NumElts);
      if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
                                        EltBits)) {
        DemandedBits.clearAllBits();
        DemandedElts.clearAllBits();
        for (int I = 0; I != NumElts; ++I) {
          if (UndefElts[I]) {
            // We can't assume an undef src element gives an undef dst - the
            // other src might be zero.
            DemandedBits.setAllBits();
            DemandedElts.setBit(I);
          } else if (!EltBits[I].isZero()) {
            DemandedBits |= EltBits[I];
            DemandedElts.setBit(I);
          }
        }
      }
      return std::make_pair(DemandedBits, DemandedElts);
    };
    APInt Bits0, Elts0;
    APInt Bits1, Elts1;
    std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
    std::tie(Bits1, Elts1) = GetDemandedMasks(N0);

    if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
        TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
        TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
        TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }

    SDValue NewN0 = TLI.SimplifyMultipleUseDemandedBits(N0, Bits0, Elts0, DAG);
    SDValue NewN1 = TLI.SimplifyMultipleUseDemandedBits(N1, Bits1, Elts1, DAG);
    if (NewN0 || NewN1)
      return DAG.getNode(ISD::AND, dl, VT, NewN0 ? NewN0 : N0,
                         NewN1 ? NewN1 : N1);
  }
  // Attempt to combine a scalar bitmask AND with an extracted shuffle.
  if ((VT.getScalarSizeInBits() % 8) == 0 &&
      N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      isa<ConstantSDNode>(N0.getOperand(1))) {
    SDValue BitMask = N1;
    SDValue SrcVec = N0.getOperand(0);
    EVT SrcVecVT = SrcVec.getValueType();

    // Check that the constant bitmask masks whole bytes.
    APInt UndefElts;
    SmallVector<APInt, 64> EltBits;
    if (VT == SrcVecVT.getScalarType() && N0->isOnlyUserOf(SrcVec.getNode()) &&
        getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
        llvm::all_of(EltBits, [](const APInt &M) {
          return M.isZero() || M.isAllOnes();
        })) {
      unsigned NumElts = SrcVecVT.getVectorNumElements();
      unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
      unsigned Idx = N0.getConstantOperandVal(1);

      // Create a root shuffle mask from the byte mask and the extracted index.
      SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
      for (unsigned i = 0; i != Scale; ++i) {
        if (UndefElts[i])
          continue;
        int VecIdx = Scale * Idx + i;
        ShuffleMask[VecIdx] = EltBits[i].isZero() ? SM_SentinelZero : VecIdx;
      }

      if (SDValue Shuffle = combineX86ShufflesRecursively(
              {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
              X86::MaxShuffleCombineDepth,
              /*HasVarMask*/ false, /*AllowVarCrossLaneMask*/ true,
              /*AllowVarPerLaneMask*/ true, DAG, Subtarget))
        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Shuffle,
                           N0.getOperand(1));
    }
  }

  return SDValue();
}
// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");

  MVT VT = N->getSimpleValueType(0);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  if (!VT.isVector() || (EltSizeInBits % 8) != 0)
    return SDValue();

  SDValue N0 = peekThroughBitcasts(N->getOperand(0));
  SDValue N1 = peekThroughBitcasts(N->getOperand(1));
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
    return SDValue();

  // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
  // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
  if (!(Subtarget.hasXOP() || useVPTERNLOG(Subtarget, VT) ||
        !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
    return SDValue();

  // Attempt to extract constant byte masks.
  APInt UndefElts0, UndefElts1;
  SmallVector<APInt, 32> EltBits0, EltBits1;
  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
                                     false, false))
    return SDValue();
  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
                                     false, false))
    return SDValue();

  for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
    // TODO - add UNDEF elts support.
    if (UndefElts0[i] || UndefElts1[i])
      return SDValue();
    if (EltBits0[i] != ~EltBits1[i])
      return SDValue();
  }

  SDLoc DL(N);

  if (useVPTERNLOG(Subtarget, VT)) {
    // Emit a VPTERNLOG node directly - 0xCA is the imm code for A?B:C.
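    // In the VPTERNLOG immediate, each result bit is looked up at index
    // (A<<2)|(B<<1)|C, so 0xCA (0b11001010) reads B wherever A is set and C
    // wherever A is clear, i.e. A?B:C.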
    // VPTERNLOG is only available as vXi32/64-bit types.
    MVT OpSVT = EltSizeInBits == 32 ? MVT::i32 : MVT::i64;
    MVT OpVT =
        MVT::getVectorVT(OpSVT, VT.getSizeInBits() / OpSVT.getSizeInBits());
    SDValue A = DAG.getBitcast(OpVT, N0.getOperand(1));
    SDValue B = DAG.getBitcast(OpVT, N0.getOperand(0));
    SDValue C = DAG.getBitcast(OpVT, N1.getOperand(0));
    SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
    SDValue Res = getAVX512Node(X86ISD::VPTERNLOG, DL, OpVT, {A, B, C, Imm},
                                DAG, Subtarget);
    return DAG.getBitcast(VT, Res);
  }

  SDValue X = N->getOperand(0);
  SDValue Y =
      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
                  DAG.getBitcast(VT, N1.getOperand(0)));
  return DAG.getNode(ISD::OR, DL, VT, X, Y);
}
// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
  if (N->getOpcode() != ISD::OR)
    return false;

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Canonicalize AND to LHS.
  if (N1.getOpcode() == ISD::AND)
    std::swap(N0, N1);

  // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
    return false;

  Mask = N1.getOperand(0);
  X = N1.getOperand(1);

  // Check to see if the mask appeared in both the AND and ANDNP.
  if (N0.getOperand(0) == Mask)
    Y = N0.getOperand(1);
  else if (N0.getOperand(1) == Mask)
    Y = N0.getOperand(0);
  else
    return false;

  // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
  // ANDNP combine allows other combines to happen that prevent matching.
  return true;
}
// Try to fold:
//   (or (and (m, y), (pandn m, x)))
// into:
//   (vselect m, x, y)
// As a special case, try to fold:
//   (or (and (m, (sub 0, x)), (pandn m, x)))
// into:
//   (sub (xor X, M), M)
static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
                                            const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");

  EVT VT = N->getValueType(0);
  if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
        (VT.is256BitVector() && Subtarget.hasInt256())))
    return SDValue();

  SDValue X, Y, Mask;
  if (!matchLogicBlend(N, X, Y, Mask))
    return SDValue();

  // Validate that X, Y, and Mask are bitcasts, and see through them.
  Mask = peekThroughBitcasts(Mask);
  X = peekThroughBitcasts(X);
  Y = peekThroughBitcasts(Y);

  EVT MaskVT = Mask.getValueType();
  unsigned EltBits = MaskVT.getScalarSizeInBits();

  // TODO: Attempt to handle floating point cases as well?
  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
    return SDValue();

  SDLoc DL(N);

  // Attempt to combine to conditional negate: (sub (xor X, M), M)
  if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
                                                           DAG, Subtarget))
    return Res;

  // PBLENDVB is only available on SSE 4.1.
  if (!Subtarget.hasSSE41())
    return SDValue();

  // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
  if (Subtarget.hasVLX())
    return SDValue();

  MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;

  X = DAG.getBitcast(BlendVT, X);
  Y = DAG.getBitcast(BlendVT, Y);
  Mask = DAG.getBitcast(BlendVT, Mask);
  Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
  return DAG.getBitcast(VT, Mask);
}
// Helper function for combineOrCmpEqZeroToCtlzSrl
// Transforms:
//   seteq(cmp x, 0)
// into:
//   srl(ctlz x), log2(bitsize(x))
// Input pattern is checked by caller.
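// For i32, ctlz(x) == 32 iff x == 0, and 32 is the only possible result with
// bit 5 set, so (srl (ctlz x), 5) is exactly the i1 value of x == 0.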
static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) {
  SDValue Cmp = Op.getOperand(1);
  EVT VT = Cmp.getOperand(0).getValueType();
  unsigned Log2b = Log2_32(VT.getSizeInBits());
  SDLoc dl(Op);
  SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
  // The result of the shift is true or false, and on X86, the 32-bit
  // encoding of shr and lzcnt is more desirable.
  SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
  SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
                            DAG.getConstant(Log2b, dl, MVT::i8));
  return Scc;
}
// Try to transform:
//   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
// into:
//   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x))
// Will also attempt to match more generic cases, eg:
//   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
// Only applies if the target supports the FastLZCNT feature.
static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
    return SDValue();

  auto isORCandidate = [](SDValue N) {
    return (N->getOpcode() == ISD::OR && N->hasOneUse());
  };

  // Check the zero extend is extending to 32-bit or more. The code generated by
  // srl(ctlz) for 16-bit or less variants of the pattern would require extra
  // instructions to clear the upper bits.
  if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
      !isORCandidate(N->getOperand(0)))
    return SDValue();

  // Check the node matches: setcc(eq, cmp 0)
  auto isSetCCCandidate = [](SDValue N) {
    return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
           X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
           N->getOperand(1).getOpcode() == X86ISD::CMP &&
           isNullConstant(N->getOperand(1).getOperand(1)) &&
           N->getOperand(1).getValueType().bitsGE(MVT::i32);
  };

  SDNode *OR = N->getOperand(0).getNode();
  SDValue LHS = OR->getOperand(0);
  SDValue RHS = OR->getOperand(1);

  // Save nodes matching or(or, setcc(eq, cmp 0)).
  SmallVector<SDNode *, 2> ORNodes;
  while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
          (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
    ORNodes.push_back(OR);
    OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
    LHS = OR->getOperand(0);
    RHS = OR->getOperand(1);
  }

  // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
  if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
      !isORCandidate(SDValue(OR, 0)))
    return SDValue();

  // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower it
  // to or(srl(ctlz),srl(ctlz)).
  // The dag combiner can then fold it into:
  // srl(or(ctlz, ctlz)).
  SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, DAG);
  SDValue Ret, NewRHS;
  if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG)))
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, NewLHS, NewRHS);

  if (!Ret)
    return SDValue();

  // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
  while (ORNodes.size() > 0) {
    OR = ORNodes.pop_back_val();
    LHS = OR->getOperand(0);
    RHS = OR->getOperand(1);
    // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
    if (RHS->getOpcode() == ISD::OR)
      std::swap(LHS, RHS);
    NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG);
    if (!NewRHS)
      return SDValue();
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, Ret, NewRHS);
  }

  return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
}
static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
                                   SDValue And1_L, SDValue And1_R,
                                   const SDLoc &DL, SelectionDAG &DAG) {
  if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
    return SDValue();
  SDValue NotOp = And0_L->getOperand(0);
  if (NotOp == And1_R)
    std::swap(And1_R, And1_L);
  if (NotOp != And1_L)
    return SDValue();

  // (~(NotOp) & And0_R) | (NotOp & And1_R)
  // --> ((And0_R ^ And1_R) & NotOp) ^ And0_R
  EVT VT = And1_L->getValueType(0);
  SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
  SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
  SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
  return Xor1;
}
/// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
/// equivalent `((x ^ y) & m) ^ y` pattern.
/// This is typically a better representation for targets without a fused
/// "and-not" operation. This function is intended to be called from a
/// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
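/// For example, with m = 0b1100, x = 0b1010, y = 0b0101:
///   (m & x) | (~m & y)  == 0b1000 | 0b0001          == 0b1001
///   ((x ^ y) & m) ^ y   == (0b1111 & 0b1100) ^ 0b0101 == 0b1001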
static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
  // Note that masked-merge variants using XOR or ADD expressions are
  // normalized to OR by InstCombine so we only check for OR.
  assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
  SDValue N0 = Node->getOperand(0);
  if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
    return SDValue();
  SDValue N1 = Node->getOperand(1);
  if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
    return SDValue();

  SDLoc DL(Node);
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  SDValue N10 = N1->getOperand(0);
  SDValue N11 = N1->getOperand(1);
  if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
    return Result;
  return SDValue();
}
static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
                         TargetLowering::DAGCombinerInfo &DCI,
                         const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If this is SSE1 only convert to FOR to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
    return DAG.getBitcast(MVT::v4i32,
                          DAG.getNode(X86ISD::FOR, dl, MVT::v4f32,
                                      DAG.getBitcast(MVT::v4f32, N0),
                                      DAG.getBitcast(MVT::v4f32, N1)));
  }
  // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
  // TODO: Support multiple SrcOps.
  if (VT == MVT::i1) {
    SmallVector<SDValue, 2> SrcOps;
    SmallVector<APInt, 2> SrcPartials;
    if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
        SrcOps.size() == 1) {
      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
      if (Mask) {
        assert(SrcPartials[0].getBitWidth() == NumElts &&
               "Unexpected partial reduction mask");
        SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
        SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
        Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
        return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
      }
    }
  }
  if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
    return R;

  if (SDValue R = combineBitOpWithShift(N, DAG))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
    return FPLogic;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
    return R;

  if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
    return R;
  // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
  // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
  // iff the upper elements of the non-shifted arg are zero.
  // KUNPCK requires 16+ bool vector elements.
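  // e.g. v32i1 OR(X, KSHIFTL(Y, 16)), with the upper 16 elements of X zero,
  //      --> CONCAT_VECTORS(extract_v16i1(X), extract_v16i1(Y))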
  if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
    unsigned NumElts = VT.getVectorNumElements();
    unsigned HalfElts = NumElts / 2;
    APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
    if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
        N1.getConstantOperandAPInt(1) == HalfElts &&
        DAG.MaskedVectorIsZero(N0, UpperElts)) {
      return DAG.getNode(
          ISD::CONCAT_VECTORS, dl, VT,
          extractSubVector(N0, 0, DAG, dl, HalfElts),
          extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
    }
    if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
        N0.getConstantOperandAPInt(1) == HalfElts &&
        DAG.MaskedVectorIsZero(N1, UpperElts)) {
      return DAG.getNode(
          ISD::CONCAT_VECTORS, dl, VT,
          extractSubVector(N1, 0, DAG, dl, HalfElts),
          extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
    }
  }
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    // Attempt to recursively combine an OR of shuffles.
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;

    // If either operand is a constant mask, then only the elements that aren't
    // allones are actually demanded by the other operand.
    auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
      APInt UndefElts;
      SmallVector<APInt> EltBits;
      int NumElts = VT.getVectorNumElements();
      int EltSizeInBits = VT.getScalarSizeInBits();
      if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
        return false;

      APInt DemandedElts = APInt::getZero(NumElts);
      for (int I = 0; I != NumElts; ++I)
        if (!EltBits[I].isAllOnes())
          DemandedElts.setBit(I);

      return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, DCI);
    };
    if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  // We should fold "masked merge" patterns when `andn` is not available.
  if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
    if (SDValue R = foldMaskedMerge(N, DAG))
      return R;

  return SDValue();
}
/// Try to turn tests against the signbit in the form of:
///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
/// into:
///   SETGT(X, -1)
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
  // This is only worth doing if the output type is i8 or i1.
  EVT ResultType = N->getValueType(0);
  if (ResultType != MVT::i8 && ResultType != MVT::i1)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // We should be performing an xor against a truncated shift.
  if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
    return SDValue();

  // Make sure we are performing an xor against one.
  if (!isOneConstant(N1))
    return SDValue();

  // SetCC on x86 zero extends so only act on this if it's a logical shift.
  SDValue Shift = N0.getOperand(0);
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
    return SDValue();

  // Make sure we are truncating from one of i16, i32 or i64.
  EVT ShiftTy = Shift.getValueType();
  if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
    return SDValue();

  // Make sure the shift amount extracts the sign bit.
  if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
      Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
    return SDValue();

  // Create a greater-than comparison against -1.
  // N.B. Using SETGE against 0 works but we want a canonical looking
  // comparison, and using SETGT matches up with what TranslateX86CC does.
  SDLoc DL(N);
  SDValue ShiftOp = Shift.getOperand(0);
  EVT ShiftOpTy = ShiftOp.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
                                               *DAG.getContext(), ResultType);
  SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
                              DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
  if (SetCCResultType != ResultType)
    Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
  return Cond;
}
/// Turn vector tests of the signbit in the form of:
///   xor (sra X, elt_size(X)-1), -1
/// into:
///   pcmpgt X, -1
///
/// This should be called before type legalization because the pattern may not
/// persist after that.
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.isSimple())
    return SDValue();

  switch (VT.getSimpleVT().SimpleTy) {
  default: return SDValue();
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
  case MVT::v32i8:
  case MVT::v16i16:
  case MVT::v8i32:
  case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
  }

  // There must be a shift right algebraic before the xor, and the xor must be a
  // 'not' operation.
  SDValue Shift = N->getOperand(0);
  SDValue Ones = N->getOperand(1);
  if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
      !ISD::isBuildVectorAllOnes(Ones.getNode()))
    return SDValue();

  // The shift should be smearing the sign bit across each vector element.
  auto *ShiftAmt =
      isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
  if (!ShiftAmt ||
      ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
    return SDValue();

  // Create a greater-than comparison against -1. We don't use the more obvious
  // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
  return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
}
/// Detect patterns of truncation with unsigned saturation:
///
/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
///   Return the source value x to be truncated or SDValue() if the pattern was
///   not matched.
///
/// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
///   where C1 >= 0 and C2 is unsigned max of destination type.
///
/// 3. (truncate (smax (smin (x, C2), C1)) to dest_type)
///   where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
///
/// These two patterns are equivalent to:
///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
///   So return the smax(x, C1) value to be truncated or SDValue() if the
///   pattern was not matched.
static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                 const SDLoc &DL) {
  EVT InVT = In.getValueType();

  // Saturation with truncation. We truncate from InVT to VT.
  assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
         "Unexpected types for truncate operation");

  // Match min/max and return limit value as a parameter.
  auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
    if (V.getOpcode() == Opcode &&
        ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
      return V.getOperand(0);
    return SDValue();
  };

  APInt C1, C2;
  if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
    // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
    // the element size of the destination type.
    if (C2.isMask(VT.getScalarSizeInBits()))
      return UMin;

  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
    if (MatchMinMax(SMin, ISD::SMAX, C1))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
        return SMin;

  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
          C2.uge(C1))
        return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));

  return SDValue();
}
/// Detect patterns of truncation with signed saturation:
/// (truncate (smin ((smax (x, signed_min_of_dest_type)),
///                  signed_max_of_dest_type)) to dest_type)
/// or:
/// (truncate (smax ((smin (x, signed_max_of_dest_type)),
///                  signed_min_of_dest_type)) to dest_type).
/// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
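/// e.g. a vXi16 -> vXi8 signed-saturating truncate matches
///   (truncate (smin (smax (x, -128), 127)) to vXi8)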
static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
  unsigned NumDstBits = VT.getScalarSizeInBits();
  unsigned NumSrcBits = In.getScalarValueSizeInBits();
  assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");

  auto MatchMinMax = [](SDValue V, unsigned Opcode,
                        const APInt &Limit) -> SDValue {
    APInt C;
    if (V.getOpcode() == Opcode &&
        ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
      return V.getOperand(0);
    return SDValue();
  };

  APInt SignedMax, SignedMin;
  if (MatchPackUS) {
    SignedMax = APInt::getAllOnes(NumDstBits).zext(NumSrcBits);
    SignedMin = APInt(NumSrcBits, 0);
  } else {
    SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
    SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
  }

  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
    if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
      return SMax;

  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
      return SMin;

  return SDValue();
}
static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
                                      SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2() || !VT.isVector())
    return SDValue();

  EVT SVT = VT.getVectorElementType();
  EVT InVT = In.getValueType();
  EVT InSVT = InVT.getVectorElementType();

  // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
  // split across two registers, we can use a packusdw+perm to clamp to 0-65535
  // and concatenate at the same time. Then we can use a final vpmovuswb to
  // clip to 0-255.
  if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
      InVT == MVT::v16i32 && VT == MVT::v16i8) {
    if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
      // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
      SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
                                           DL, DAG, Subtarget);
      assert(Mid && "Failed to pack!");
      return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
    }
  }

  // vXi32 truncate instructions are available with AVX512F.
  // vXi16 truncate instructions are only available with AVX512BW.
  // For 256-bit or smaller vectors, we require VLX.
  // FIXME: We could widen truncates to 512 to remove the VLX restriction.
  // If the result type is 256-bits or larger and we have disabled 512-bit
  // registers, we should go ahead and use the pack instructions if possible.
  bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
                       (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
                      (InVT.getSizeInBits() > 128) &&
                      (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
                      !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);

  if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
      VT.getSizeInBits() >= 64 &&
      (SVT == MVT::i8 || SVT == MVT::i16) &&
      (InSVT == MVT::i16 || InSVT == MVT::i32)) {
    if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
      // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
      // Only do this when the result is at least 64 bits or we'll leave
      // dangling PACKSSDW nodes.
      if (SVT == MVT::i8 && InSVT == MVT::i32) {
        EVT MidVT = VT.changeVectorElementType(MVT::i16);
        SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
                                             DAG, Subtarget);
        assert(Mid && "Failed to pack!");
        SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
                                           Subtarget);
        assert(V && "Failed to pack!");
        return V;
      } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
        return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
                                      Subtarget);
    }
    if (SDValue SSatVal = detectSSatPattern(In, VT))
      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
                                    Subtarget);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
      Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI()) &&
      (SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8)) {
    unsigned TruncOpc = 0;
    SDValue SatVal;
    if (SDValue SSatVal = detectSSatPattern(In, VT)) {
      SatVal = SSatVal;
      TruncOpc = X86ISD::VTRUNCS;
    } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
      SatVal = USatVal;
      TruncOpc = X86ISD::VTRUNCUS;
    }
    if (SatVal) {
      unsigned ResElts = VT.getVectorNumElements();
      // If the input type is less than 512 bits and we don't have VLX, we need
      // to widen to 512 bits.
      if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
        unsigned NumConcats = 512 / InVT.getSizeInBits();
        ResElts *= NumConcats;
        SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
        ConcatOps[0] = SatVal;
        InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
                                NumConcats * InVT.getVectorNumElements());
        SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
      }
      // Widen the result if it's narrower than 128 bits.
      if (ResElts * SVT.getSizeInBits() < 128)
        ResElts = 128 / SVT.getSizeInBits();
      EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
      SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                         DAG.getIntPtrConstant(0, DL));
    }
  }

  return SDValue();
}
/// This function detects the AVG pattern between vectors of unsigned i8/i16,
/// which is c = (a + b + 1) / 2, and replaces this operation with the
/// efficient ISD::AVGCEILU (AVG) instruction.
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget,
                                const SDLoc &DL) {
  if (!VT.isVector())
    return SDValue();
  EVT InVT = In.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  EVT ScalarVT = VT.getVectorElementType();
  if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && NumElems >= 2))
    return SDValue();

  // InScalarVT is the intermediate type in AVG pattern and it should be greater
  // than the original input type (i8/i16).
  EVT InScalarVT = InVT.getVectorElementType();
  if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
    return SDValue();

  if (!Subtarget.hasSSE2())
    return SDValue();

  // Detect the following pattern:
  //
  //   %1 = zext <N x i8> %a to <N x i32>
  //   %2 = zext <N x i8> %b to <N x i32>
  //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
  //   %4 = add nuw nsw <N x i32> %3, %2
  //   %5 = lshr <N x i32> %4, <i32 1 x N>
  //   %6 = trunc <N x i32> %5 to <N x i8>
  //
  // In AVX512, the last instruction can also be a trunc store.
  if (In.getOpcode() != ISD::SRL)
    return SDValue();

  // A lambda checking the given SDValue is a constant vector and each element
  // is in the range [Min, Max].
  auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
    return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
      return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
    });
  };

  auto IsZExtLike = [DAG = &DAG, ScalarVT](SDValue V) {
    unsigned MaxActiveBits = DAG->computeKnownBits(V).countMaxActiveBits();
    return MaxActiveBits <= ScalarVT.getSizeInBits();
  };

  // Check if each element of the vector is right-shifted by one.
  SDValue LHS = In.getOperand(0);
  SDValue RHS = In.getOperand(1);
  if (!IsConstVectorInRange(RHS, 1, 1))
    return SDValue();
  if (LHS.getOpcode() != ISD::ADD)
    return SDValue();

  // Detect a pattern of a + b + 1 where the order doesn't matter.
  SDValue Operands[3];
  Operands[0] = LHS.getOperand(0);
  Operands[1] = LHS.getOperand(1);

  auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                       ArrayRef<SDValue> Ops) {
    return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
  };

  auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
    for (SDValue &Op : Ops)
      if (Op.getValueType() != VT)
        Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
    // Pad to a power-of-2 vector, split+apply and extract the original vector.
    unsigned NumElemsPow2 = PowerOf2Ceil(NumElems);
    EVT Pow2VT = EVT::getVectorVT(*DAG.getContext(), ScalarVT, NumElemsPow2);
    if (NumElemsPow2 != NumElems) {
      for (SDValue &Op : Ops) {
        SmallVector<SDValue, 32> EltsOfOp(NumElemsPow2, DAG.getUNDEF(ScalarVT));
        for (unsigned i = 0; i != NumElems; ++i) {
          SDValue Idx = DAG.getIntPtrConstant(i, DL);
          EltsOfOp[i] =
              DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Op, Idx);
        }
        Op = DAG.getBuildVector(Pow2VT, DL, EltsOfOp);
      }
    }
    SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, Pow2VT, Ops, AVGBuilder);
    if (NumElemsPow2 == NumElems)
      return Res;
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                       DAG.getIntPtrConstant(0, DL));
  };

  // Take care of the case when one of the operands is a constant vector whose
  // element is in the range [1, 256].
  if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
      IsZExtLike(Operands[0])) {
    // The pattern is detected. Subtract one from the constant vector, then
    // demote it and emit X86ISD::AVG instruction.
    SDValue VecOnes = DAG.getConstant(1, DL, InVT);
    Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
    return AVGSplitter({Operands[0], Operands[1]});
  }

  // Matches 'add like' patterns: add(Op0,Op1) + zext(or(Op0,Op1)).
  // Match the or case only if it's 'add-like' - can be replaced by an add.
  auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
    if (ISD::ADD == V.getOpcode()) {
      Op0 = V.getOperand(0);
      Op1 = V.getOperand(1);
      return true;
    }
    if (ISD::ZERO_EXTEND != V.getOpcode())
      return false;
    V = V.getOperand(0);
    if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
        !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
      return false;
    Op0 = V.getOperand(0);
    Op1 = V.getOperand(1);
    return true;
  };

  SDValue Op0, Op1;
  if (FindAddLike(Operands[0], Op0, Op1))
    std::swap(Operands[0], Operands[1]);
  else if (!FindAddLike(Operands[1], Op0, Op1))
    return SDValue();
  Operands[2] = Op0;
  Operands[1] = Op1;

  // Now we have three operands of two additions. Check that one of them is a
  // constant vector with ones, and the other two can be promoted from i8/i16.
  for (int i = 0; i < 3; ++i) {
    if (!IsConstVectorInRange(Operands[i], 1, 1))
      continue;
    std::swap(Operands[i], Operands[2]);

    // Check if Operands[0] and Operands[1] are results of type promotion.
    for (int j = 0; j < 2; ++j)
      if (Operands[j].getValueType() != VT)
        if (!IsZExtLike(Operands[j]))
          return SDValue();

    // The pattern is detected, emit X86ISD::AVG instruction(s).
    return AVGSplitter({Operands[0], Operands[1]});
  }

  return SDValue();
}
static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations. Also split non-temporal aligned loads on
  // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
  ISD::LoadExtType Ext = Ld->getExtensionType();
  bool Fast;
  if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
      Ext == ISD::NON_EXTLOAD &&
      ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
        Ld->getAlign() >= Align(16)) ||
       (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
                               *Ld->getMemOperand(), &Fast) &&
        !Fast))) {
    unsigned NumElems = RegVT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    unsigned HalfOffset = 16;
    SDValue Ptr1 = Ld->getBasePtr();
    SDValue Ptr2 =
        DAG.getMemBasePlusOffset(Ptr1, TypeSize::Fixed(HalfOffset), dl);
    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems / 2);
    SDValue Load1 =
        DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
                    Ld->getOriginalAlign(),
                    Ld->getMemOperand()->getFlags());
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
                                Ld->getPointerInfo().getWithOffset(HalfOffset),
                                Ld->getOriginalAlign(),
                                Ld->getMemOperand()->getFlags());
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1), Load2.getValue(1));

    SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  // Bool vector load - attempt to cast to an integer, as we have good
  // (vXiY *ext(vXi1 bitcast(iX))) handling.
  if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
      RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
    unsigned NumElts = RegVT.getVectorNumElements();
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
    if (TLI.isTypeLegal(IntVT)) {
      SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
                                    Ld->getPointerInfo(),
                                    Ld->getOriginalAlign(),
                                    Ld->getMemOperand()->getFlags());
      SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
      return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
    }
  }

  // If we also broadcast this as a subvector to a wider type, then just extract
  // the lowest subvector.
  if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
      (RegVT.is128BitVector() || RegVT.is256BitVector())) {
    SDValue Ptr = Ld->getBasePtr();
    SDValue Chain = Ld->getChain();
    for (SDNode *User : Ptr->uses()) {
      if (User != N && User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
          cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
          cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
          cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
              MemVT.getSizeInBits() &&
          !User->hasAnyUseOfValue(1) &&
          User->getValueSizeInBits(0).getFixedSize() >
              RegVT.getFixedSizeInBits()) {
        SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
                                           RegVT.getSizeInBits());
        Extract = DAG.getBitcast(RegVT, Extract);
        return DCI.CombineTo(N, Extract, SDValue(User, 1));
      }
    }
  }

  // Cast ptr32 and ptr64 pointers to the default address space before a load.
  unsigned AddrSpace = Ld->getAddressSpace();
  if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
      AddrSpace == X86AS::PTR32_UPTR) {
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
      SDValue Cast =
          DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
      return DAG.getLoad(RegVT, dl, Ld->getChain(), Cast, Ld->getPointerInfo(),
                         Ld->getOriginalAlign(),
                         Ld->getMemOperand()->getFlags());
    }
  }

  return SDValue();
}
/// If V is a build vector of boolean constants and exactly one of those
/// constants is true, return the operand index of that true element.
/// Otherwise, return -1.
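/// e.g. <i1 0, i1 1, i1 0, i1 0> --> 1, while <i1 1, i1 1, i1 0, i1 0> --> -1.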
static int getOneTrueElt(SDValue V) {
  // This needs to be a build vector of booleans.
  // TODO: Checking for the i1 type matches the IR definition for the mask,
  // but the mask check could be loosened to i8 or other types. That might
  // also require checking more than 'allOnesValue'; eg, the x86 HW
  // instructions only require that the MSB is set for each mask element.
  // The ISD::MSTORE comments/definition do not specify how the mask operand
  // is formatted.
  auto *BV = dyn_cast<BuildVectorSDNode>(V);
  if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
    return -1;

  int TrueIndex = -1;
  unsigned NumElts = BV->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    const SDValue &Op = BV->getOperand(i);
    if (Op.isUndef())
      continue;
    auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
    if (!ConstNode)
      return -1;
    if (ConstNode->getAPIntValue().countTrailingOnes() >= 1) {
      // If we already found a one, this is too many.
      if (TrueIndex >= 0)
        return -1;
      TrueIndex = i;
    }
  }
  return TrueIndex;
}
/// Given a masked memory load/store operation, return true if it has one mask
/// bit set. If it has one mask bit set, then also return the memory address of
/// the scalar element to load/store, the vector index to insert/extract that
/// scalar element, and the alignment for the scalar memory access.
static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
                                         SelectionDAG &DAG, SDValue &Addr,
                                         SDValue &Index, Align &Alignment,
                                         unsigned &Offset) {
  int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
  if (TrueMaskElt < 0)
    return false;

  // Get the address of the one scalar element that is specified by the mask
  // using the appropriate offset from the base pointer.
  EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
  Offset = 0;
  Addr = MaskedOp->getBasePtr();
  if (TrueMaskElt != 0) {
    Offset = TrueMaskElt * EltVT.getStoreSize();
    Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::Fixed(Offset),
                                    SDLoc(MaskedOp));
  }

  Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
  Alignment = commonAlignment(MaskedOp->getOriginalAlign(),
                              EltVT.getStoreSize());
  return true;
}
/// If exactly one element of the mask is set for a non-extending masked load,
/// it is a scalar load and vector insert.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
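/// e.g. a masked load with mask <0,0,1,0> becomes a scalar load of element 2
/// followed by an insert_vector_elt into the pass-through value.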
49119 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
49120 TargetLowering::DAGCombinerInfo &DCI,
49121 const X86Subtarget &Subtarget) {
49122 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
49123 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
49124 // However, some target hooks may need to be added to know when the transform
49125 // is profitable. Endianness would also have to be considered.
49127 SDValue Addr, VecIndex;
49130 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment, Offset))
49133 // Load the one scalar element that is specified by the mask using the
49134 // appropriate offset from the base pointer.
49136 EVT VT = ML->getValueType(0);
49137 EVT EltVT = VT.getVectorElementType();
49140 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
49142 CastVT = VT.changeVectorElementType(EltVT);
49146 DAG.getLoad(EltVT, DL, ML->getChain(), Addr,
49147 ML->getPointerInfo().getWithOffset(Offset),
49148 Alignment, ML->getMemOperand()->getFlags());
49150 SDValue PassThru = DAG.getBitcast(CastVT, ML->getPassThru());
49152 // Insert the loaded element into the appropriate place in the vector.
49154 DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, CastVT, PassThru, Load, VecIndex);
49155 Insert = DAG.getBitcast(VT, Insert);
49156 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
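
// Illustrative example (added commentary, not from the original source):
//   (v4f32 masked_load Ptr, Mask = <0, 0, 1, 0>, PassThru)
// becomes a scalar f32 load from Ptr+8 followed by an insert into PassThru at
// index 2, avoiding the masked-memory instruction entirely.
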
static SDValue
combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI) {
  assert(ML->isUnindexed() && "Unexpected indexed masked load!");
  if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
    return SDValue();

  SDLoc DL(ML);
  EVT VT = ML->getValueType(0);

  // If we are loading the first and last elements of a vector, it is safe and
  // always faster to load the whole vector. Replace the masked load with a
  // vector load and select.
  unsigned NumElts = VT.getVectorNumElements();
  BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
  bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
  bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
  if (LoadFirstElt && LoadLastElt) {
    SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
                                ML->getMemOperand());
    SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
                                  ML->getPassThru());
    return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
  }

  // Convert a masked load with a constant mask into a masked load and a select.
  // This allows the select operation to use a faster kind of select instruction
  // (for example, vblendvps -> vblendps).

  // Don't try this if the pass-through operand is already undefined. That would
  // cause an infinite loop because that's what we're about to create.
  if (ML->getPassThru().isUndef())
    return SDValue();

  if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
    return SDValue();

  // The new masked load has an undef pass-through operand. The select uses the
  // original pass-through operand.
  SDValue NewML = DAG.getMaskedLoad(
      VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
      DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
      ML->getAddressingMode(), ML->getExtensionType());
  SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
                                ML->getPassThru());

  return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
}
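
// Illustrative example (added commentary, not from the original source): for
//   (v8f32 masked_load Ptr, Mask = <1, 1, 1, 1, 0, 0, 0, 1>, PassThru)
// the first and last elements are both loaded, so the full 32 bytes must be
// dereferenceable; a plain vector load plus a constant-mask blend (vblendps)
// is then cheaper than the variable blend behavior of vmaskmovps.
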
static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  auto *Mld = cast<MaskedLoadSDNode>(N);

  // TODO: Expanding load with constant mask may be optimized as well.
  if (Mld->isExpandingLoad())
    return SDValue();

  if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
    if (SDValue ScalarLoad =
            reduceMaskedLoadToScalarLoad(Mld, DAG, DCI, Subtarget))
      return ScalarLoad;

    // TODO: Do some AVX512 subsets benefit from this transform?
    if (!Subtarget.hasAVX512())
      if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
        return Blend;
  }

  // If the mask value has been legalized to a non-boolean vector, try to
  // simplify ops leading up to it. We only demand the MSB of each lane.
  SDValue Mask = Mld->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    EVT VT = Mld->getValueType(0);
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
    if (SDValue NewMask =
            TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
      return DAG.getMaskedLoad(
          VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
          NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
          Mld->getAddressingMode(), Mld->getExtensionType());
  }

  return SDValue();
}
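
// Note (added commentary, not from the original source): demanding only the
// sign bit above mirrors the hardware - AVX's VMASKMOV family and the
// SSE4.1/AVX BLENDV instructions inspect only the most significant bit of
// each mask element, so any computation feeding the mask that merely changes
// the lower bits is dead and can be simplified away.
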
/// If exactly one element of the mask is set for a non-truncating masked store,
/// it is a vector extract and scalar store.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
                                              SelectionDAG &DAG,
                                              const X86Subtarget &Subtarget) {
  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
  // However, some target hooks may need to be added to know when the transform
  // is profitable. Endianness would also have to be considered.

  SDValue Addr, VecIndex;
  Align Alignment;
  unsigned Offset;
  if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment, Offset))
    return SDValue();

  // Extract the one scalar element that is actually being stored.
  SDLoc DL(MS);
  SDValue Value = MS->getValue();
  EVT VT = Value.getValueType();
  EVT EltVT = VT.getVectorElementType();
  if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
    EltVT = MVT::f64;
    EVT CastVT = VT.changeVectorElementType(EltVT);
    Value = DAG.getBitcast(CastVT, Value);
  }
  SDValue Extract =
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Value, VecIndex);

  // Store that element at the appropriate offset from the base pointer.
  return DAG.getStore(MS->getChain(), DL, Extract, Addr,
                      MS->getPointerInfo().getWithOffset(Offset),
                      Alignment, MS->getMemOperand()->getFlags());
}
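
// Illustrative example (added commentary, not from the original source):
//   (masked_store (v4i32 V), Ptr, Mask = <0, 1, 0, 0>)
// becomes an extract of element 1 and a plain i32 store to Ptr+4.
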
static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget &Subtarget) {
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (Mst->isCompressingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  SDLoc dl(Mst);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (Mst->isTruncatingStore())
    return SDValue();

  if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG, Subtarget))
    return ScalarStore;

  // If the mask value has been legalized to a non-boolean vector, try to
  // simplify ops leading up to it. We only demand the MSB of each lane.
  SDValue Mask = Mst->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
    if (SDValue NewMask =
            TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
      return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
                                Mst->getBasePtr(), Mst->getOffset(), NewMask,
                                Mst->getMemoryVT(), Mst->getMemOperand(),
                                Mst->getAddressingMode());
  }

  SDValue Value = Mst->getValue();
  if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            Mst->getMemoryVT())) {
    return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
                              Mst->getBasePtr(), Mst->getOffset(), Mask,
                              Mst->getMemoryVT(), Mst->getMemOperand(),
                              Mst->getAddressingMode(), true);
  }

  return SDValue();
}
static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  SDValue StoredVal = St->getValue();
  EVT VT = StoredVal.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Convert a store of vXi1 into a store of iX and a bitcast.
  if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1) {

    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
    StoredVal = DAG.getBitcast(NewVT, StoredVal);

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }
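
  // Illustrative example (added commentary, not from the original source):
  // without AVX512 there is no k-register to hold a v8i1 value, so
  //   (store (v8i1 V), Ptr)
  // is rewritten as (store (i8 (bitcast V)), Ptr), a single byte store.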
  // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
  // This will avoid a copy to k-register.
  if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
      StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
      StoredVal.getOperand(0).getValueType() == MVT::i8) {
    SDValue Val = StoredVal.getOperand(0);
    // We must store zeros to the unused bits.
    Val = DAG.getZeroExtendInReg(Val, dl, MVT::i1);
    return DAG.getStore(St->getChain(), dl, Val,
                        St->getBasePtr(), St->getPointerInfo(),
                        St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  // Widen v2i1/v4i1 stores to v8i1.
  if ((VT == MVT::v1i1 || VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
      Subtarget.hasAVX512()) {
    unsigned NumConcats = 8 / VT.getVectorNumElements();
    // We must store zeros to the unused bits.
    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, VT));
    Ops[0] = StoredVal;
    StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  // Turn vXi1 stores of constants into a scalar store.
  if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
       VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
      ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
    // If it's a v64i1 store without 64-bit support, we need two stores.
    if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
      SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(0, 32));
      Lo = combinevXi1ConstantToInteger(Lo, DAG);
      SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(32, 32));
      Hi = combinevXi1ConstantToInteger(Hi, DAG);

      SDValue Ptr0 = St->getBasePtr();
      SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(4), dl);

      SDValue Ch0 =
          DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
                       St->getOriginalAlign(),
                       St->getMemOperand()->getFlags());
      SDValue Ch1 =
          DAG.getStore(St->getChain(), dl, Hi, Ptr1,
                       St->getPointerInfo().getWithOffset(4),
                       St->getOriginalAlign(),
                       St->getMemOperand()->getFlags());
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
    }

    StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }
  // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
  // Sandy Bridge, perform two 16-byte stores.
  bool Fast;
  if (VT.is256BitVector() && StVT == VT &&
      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                             *St->getMemOperand(), &Fast) &&
      !Fast) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    return splitVectorStore(St, DAG);
  }

  // Split under-aligned vector non-temporal stores.
  if (St->isNonTemporal() && StVT == VT &&
      St->getAlign().value() < VT.getStoreSize()) {
    // ZMM/YMM nt-stores - either it can be stored as a series of shorter
    // vectors or the legalizer can scalarize it to use MOVNTI.
    if (VT.is256BitVector() || VT.is512BitVector()) {
      unsigned NumElems = VT.getVectorNumElements();
      if (NumElems < 2)
        return SDValue();
      return splitVectorStore(St, DAG);
    }

    // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
    // to use MOVNTI.
    if (VT.is128BitVector() && Subtarget.hasSSE2()) {
      MVT NTVT = Subtarget.hasSSE4A()
                     ? MVT::v2f64
                     : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
      return scalarizeVectorStore(St, NTVT, DAG);
    }
  }
  // Try to optimize v16i16->v16i8 truncating stores when BWI is not
  // supported, but avx512f is by extending to v16i32 and truncating.
  if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
      St->getValue().getOpcode() == ISD::TRUNCATE &&
      St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
      TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
      St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
    SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32,
                              St->getValue().getOperand(0));
    return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
                             MVT::v16i8, St->getMemOperand());
  }

  // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
  if (!St->isTruncatingStore() &&
      (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
       StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
      StoredVal.hasOneUse() &&
      TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
    bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
    return EmitTruncSStore(IsSigned, St->getChain(),
                           dl, StoredVal.getOperand(0), St->getBasePtr(),
                           VT, St->getMemOperand(), DAG);
  }

  // Try to fold an extract_element(VTRUNC) pattern into a truncating store.
  if (!St->isTruncatingStore()) {
    auto IsExtractedElement = [](SDValue V) {
      if (V.getOpcode() == ISD::TRUNCATE && V.hasOneUse())
        V = V.getOperand(0);
      unsigned Opc = V.getOpcode();
      if ((Opc == ISD::EXTRACT_VECTOR_ELT || Opc == X86ISD::PEXTRW) &&
          isNullConstant(V.getOperand(1)) && V.hasOneUse() &&
          V.getOperand(0).hasOneUse())
        return V.getOperand(0);
      return SDValue();
    };
    if (SDValue Extract = IsExtractedElement(StoredVal)) {
      SDValue Trunc = peekThroughOneUseBitcasts(Extract);
      if (Trunc.getOpcode() == X86ISD::VTRUNC) {
        SDValue Src = Trunc.getOperand(0);
        MVT DstVT = Trunc.getSimpleValueType();
        MVT SrcVT = Src.getSimpleValueType();
        unsigned NumSrcElts = SrcVT.getVectorNumElements();
        unsigned NumTruncBits = DstVT.getScalarSizeInBits() * NumSrcElts;
        MVT TruncVT = MVT::getVectorVT(DstVT.getScalarType(), NumSrcElts);
        if (NumTruncBits == VT.getSizeInBits() &&
            TLI.isTruncStoreLegal(SrcVT, TruncVT)) {
          return DAG.getTruncStore(St->getChain(), dl, Src, St->getBasePtr(),
                                   TruncVT, St->getMemOperand());
        }
      }
    }
  }
  // Optimize trunc store (of multiple scalars) to shuffle and store.
  // First, pack all of the elements in one place. Next, store to memory
  // in fewer chunks.
  if (St->isTruncatingStore() && VT.isVector()) {
    // Check if we can detect an AVG pattern from the truncation. If yes,
    // replace the trunc store by a normal store with the result of X86ISD::AVG
    // instruction.
    if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
      if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
                                         Subtarget, dl))
        return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
                            St->getPointerInfo(), St->getOriginalAlign(),
                            St->getMemOperand()->getFlags());

    if (TLI.isTruncStoreLegal(VT, StVT)) {
      if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
        return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
                               dl, Val, St->getBasePtr(),
                               St->getMemoryVT(), St->getMemOperand(), DAG);
      if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
                                          DAG, dl))
        return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
                               dl, Val, St->getBasePtr(),
                               St->getMemoryVT(), St->getMemOperand(), DAG);
    }

    return SDValue();
  }
  // Cast ptr32 and ptr64 pointers to the default address space before a store.
  unsigned AddrSpace = St->getAddressSpace();
  if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
      AddrSpace == X86AS::PTR32_UPTR) {
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    if (PtrVT != St->getBasePtr().getSimpleValueType()) {
      SDValue Cast =
          DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
      return DAG.getStore(St->getChain(), dl, StoredVal, Cast,
                          St->getPointerInfo(), St->getOriginalAlign(),
                          St->getMemOperand()->getFlags(), St->getAAInfo());
    }
  }

  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.

  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function &F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
  bool F64IsLegal =
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
  if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
      isa<LoadSDNode>(St->getValue()) &&
      cast<LoadSDNode>(St->getValue())->isSimple() &&
      St->getChain().hasOneUse() && St->isSimple()) {
    LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());

    if (!ISD::isNormalLoad(Ld))
      return SDValue();

    // Avoid the transformation if there are multiple uses of the loaded value.
    if (!Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld);
    SDLoc StDL(N);
    // Lower to a single movq load/store pair.
    SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
                                Ld->getBasePtr(), Ld->getMemOperand());

    // Make sure new load is placed in same chain order.
    DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
    return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
                        St->getMemOperand());
  }
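
  // Illustrative example (added commentary, not from the original source): on
  // a 32-bit target with SSE2, copying an i64 through memory as
  //   %v = load i64, i64* %src
  //   store i64 %v, i64* %dst
  // would otherwise take two 32-bit GPR load/store pairs; reloading and
  // restoring the value as an f64 lets a single movq/movsd pair do the copy.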
  // This is similar to the above case, but here we handle a scalar 64-bit
  // integer store that is extracted from a vector on a 32-bit target.
  // If we have SSE2, then we can treat it like a floating-point double
  // to get past legalization. The execution dependencies fixup pass will
  // choose the optimal machine instruction for the store if this really is
  // an integer or v2f32 rather than an f64.
  if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue OldExtract = St->getOperand(1);
    SDValue ExtOp0 = OldExtract.getOperand(0);
    unsigned VecSize = ExtOp0.getValueSizeInBits();
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
    SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                     BitCast, OldExtract.getOperand(1));
    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  return SDValue();
}
static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  auto *St = cast<MemIntrinsicSDNode>(N);

  SDValue StoredVal = N->getOperand(1);
  MVT VT = StoredVal.getSimpleValueType();
  EVT MemVT = St->getMemoryVT();

  // Figure out which elements we demand.
  unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
  APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, DCI)) {
    if (N->getOpcode() != ISD::DELETED_NODE)
      DCI.AddToWorklist(N);
    return SDValue(N, 0);
  }

  return SDValue();
}
/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS. A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector. For example, if
///   A = < float a0, float a1, float a2, float a3 >
/// and
///   B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
                              SelectionDAG &DAG, const X86Subtarget &Subtarget,
                              bool IsCommutative,
                              SmallVectorImpl<int> &PostShuffleMask) {
  // If either operand is undef, bail out. The binop should be simplified.
  if (LHS.isUndef() || RHS.isUndef())
    return false;

  // Look for the following pattern:
  //   A = < float a0, float a1, float a2, float a3 >
  //   B = < float b0, float b1, float b2, float b3 >
  // and
  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
  // which is A horizontal-op B.

  MVT VT = LHS.getSimpleValueType();
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for horizontal add/sub");
  unsigned NumElts = VT.getVectorNumElements();

  auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
                        SmallVectorImpl<int> &ShuffleMask) {
    bool UseSubVector = false;
    if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        Op.getOperand(0).getValueType().is256BitVector() &&
        llvm::isNullConstant(Op.getOperand(1))) {
      Op = Op.getOperand(0);
      UseSubVector = true;
    }
    SmallVector<SDValue, 2> SrcOps;
    SmallVector<int, 16> SrcMask, ScaledMask;
    SDValue BC = peekThroughBitcasts(Op);
    if (getTargetShuffleInputs(BC, SrcOps, SrcMask, DAG) &&
        !isAnyZero(SrcMask) && all_of(SrcOps, [BC](SDValue Op) {
          return Op.getValueSizeInBits() == BC.getValueSizeInBits();
        })) {
      resolveTargetShuffleInputsAndMask(SrcOps, SrcMask);
      if (!UseSubVector && SrcOps.size() <= 2 &&
          scaleShuffleElements(SrcMask, NumElts, ScaledMask)) {
        N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
        N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
        ShuffleMask.assign(ScaledMask.begin(), ScaledMask.end());
      }
      if (UseSubVector && SrcOps.size() == 1 &&
          scaleShuffleElements(SrcMask, 2 * NumElts, ScaledMask)) {
        std::tie(N0, N1) = DAG.SplitVector(SrcOps[0], SDLoc(Op));
        ArrayRef<int> Mask = ArrayRef<int>(ScaledMask).slice(0, NumElts);
        ShuffleMask.assign(Mask.begin(), Mask.end());
      }
    }
  };
  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle, then pretend it is the identity shuffle:
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: A default initialized SDValue represents an UNDEF of type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask;
  GetShuffle(LHS, A, B, LMask);

  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask;
  GetShuffle(RHS, C, D, RMask);

  // At least one of the operands should be a vector shuffle.
  unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
  if (NumShuffles == 0)
    return false;

  if (LMask.empty()) {
    A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask.push_back(i);
  }

  if (RMask.empty()) {
    C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask.push_back(i);
  }

  // If we have a unary mask, ensure the other op is set to null.
  if (isUndefOrInRange(LMask, 0, NumElts))
    B = SDValue();
  else if (isUndefOrInRange(LMask, NumElts, NumElts * 2))
    A = SDValue();

  if (isUndefOrInRange(RMask, 0, NumElts))
    D = SDValue();
  else if (isUndefOrInRange(RMask, NumElts, NumElts * 2))
    C = SDValue();

  // If A and B occur in reverse order in RHS, then canonicalize by commuting
  // RHS operands and shuffle mask.
  if (A != C) {
    std::swap(C, D);
    ShuffleVectorSDNode::commuteMask(RMask);
  }
  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D))
    return false;

  PostShuffleMask.clear();
  PostShuffleMask.append(NumElts, SM_SentinelUndef);

  // LHS and RHS are now:
  //   LHS = shuffle A, B, LMask
  //   RHS = shuffle A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
  // so we just repeat the inner loop if this is a 256-bit op.
  unsigned Num128BitChunks = VT.getSizeInBits() / 128;
  unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
  unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
  assert((NumEltsPer128BitChunk % 2 == 0) &&
         "Vector type should have an even number of elements in each lane");
  for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
    for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
      // Ignore undefined components.
      int LIdx = LMask[i + j], RIdx = RMask[i + j];
      if (LIdx < 0 || RIdx < 0 ||
          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;

      // Check that successive odd/even elements are being operated on. If not,
      // this is not a horizontal operation.
      if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
          !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
        return false;

      // Compute the post-shuffle mask index based on where the element
      // is stored in the HOP result, and where it needs to be moved to.
      int Base = LIdx & ~1u;
      int Index = ((Base % NumEltsPer128BitChunk) / 2) +
                  ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));

      // The low half of the 128-bit result must choose from A.
      // The high half of the 128-bit result must choose from B,
      // unless B is undef. In that case, we are always choosing from A.
      if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
        Index += NumEltsPer64BitChunk;
      PostShuffleMask[i + j] = Index;
    }
  }

  SDValue NewLHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  SDValue NewRHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.

  bool IsIdentityPostShuffle =
      isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
  if (IsIdentityPostShuffle)
    PostShuffleMask.clear();

  // Avoid 128-bit multi lane shuffles if pre-AVX2 and FP (integer will split).
  if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() &&
      isMultiLaneShuffleMask(128, VT.getScalarSizeInBits(), PostShuffleMask))
    return false;

  // If the source nodes are already used in HorizOps then always accept this.
  // Shuffle folding should merge these back together.
  bool FoundHorizLHS = llvm::any_of(NewLHS->uses(), [&](SDNode *User) {
    return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
  });
  bool FoundHorizRHS = llvm::any_of(NewRHS->uses(), [&](SDNode *User) {
    return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
  });
  bool ForceHorizOp = FoundHorizLHS && FoundHorizRHS;

  // Assume a SingleSource HOP if we only shuffle one input and don't need to
  // shuffle the result.
  if (!ForceHorizOp &&
      !shouldUseHorizontalOp(NewLHS == NewRHS &&
                                 (NumShuffles < 2 || !IsIdentityPostShuffle),
                             DAG, Subtarget))
    return false;

  LHS = DAG.getBitcast(VT, NewLHS);
  RHS = DAG.getBitcast(VT, NewRHS);
  return true;
}
// Try to synthesize horizontal (f)hadd/hsub from (f)adds/subs of shuffles.
static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  unsigned Opcode = N->getOpcode();
  bool IsAdd = (Opcode == ISD::FADD) || (Opcode == ISD::ADD);
  SmallVector<int, 8> PostShuffleMask;

  switch (Opcode) {
  case ISD::FADD:
  case ISD::FSUB:
    if ((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
        (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
      SDValue LHS = N->getOperand(0);
      SDValue RHS = N->getOperand(1);
      auto HorizOpcode = IsAdd ? X86ISD::FHADD : X86ISD::FHSUB;
      if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
                            PostShuffleMask)) {
        SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
        if (!PostShuffleMask.empty())
          HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
                                            DAG.getUNDEF(VT), PostShuffleMask);
        return HorizBinOp;
      }
    }
    break;
  case ISD::ADD:
  case ISD::SUB:
    if (Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
                                 VT == MVT::v16i16 || VT == MVT::v8i32)) {
      SDValue LHS = N->getOperand(0);
      SDValue RHS = N->getOperand(1);
      auto HorizOpcode = IsAdd ? X86ISD::HADD : X86ISD::HSUB;
      if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
                            PostShuffleMask)) {
        auto HOpBuilder = [HorizOpcode](SelectionDAG &DAG, const SDLoc &DL,
                                        ArrayRef<SDValue> Ops) {
          return DAG.getNode(HorizOpcode, DL, Ops[0].getValueType(), Ops);
        };
        SDValue HorizBinOp = SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
                                              {LHS, RHS}, HOpBuilder);
        if (!PostShuffleMask.empty())
          HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
                                            DAG.getUNDEF(VT), PostShuffleMask);
        return HorizBinOp;
      }
    }
    break;
  }

  return SDValue();
}
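
// Illustrative example (added commentary, not from the original source):
//   t1 = (v4f32 vector_shuffle A, B, <0, 2, 4, 6>)
//   t2 = (v4f32 vector_shuffle A, B, <1, 3, 5, 7>)
//   t3 = (fadd t1, t2)
// becomes a single (X86ISD::FHADD A, B), i.e. one HADDPS. For the 256-bit
// integer types, SplitOpsAndApply splits the op into two 128-bit halves when
// the target lacks AVX2.
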
//  Try to combine the following nodes
//  t29: i64 = X86ISD::Wrapper TargetConstantPool:i64
//    <i32 -2147483648[float -0.000000e+00]> 0
//  t27: v16i32[v16f32],ch = X86ISD::VBROADCAST_LOAD
//    <(load 4 from constant-pool)> t0, t29
//  [t30: v16i32 = bitcast t27]
//  t6: v16i32 = xor t7, t27[t30]
//  t11: v16f32 = bitcast t6
//  t21: v16f32 = X86ISD::VFMULC[X86ISD::VCFMULC] t11, t8
//  into X86ISD::VFCMULC[X86ISD::VFMULC] if possible:
//  t22: v16f32 = bitcast t7
//  t23: v16f32 = X86ISD::VFCMULC[X86ISD::VFMULC] t8, t22
//  t24: v32f16 = bitcast t23
static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  int CombineOpcode =
      N->getOpcode() == X86ISD::VFCMULC ? X86ISD::VFMULC : X86ISD::VFCMULC;
  auto isConjugationConstant = [](const Constant *c) {
    if (const auto *CI = dyn_cast<ConstantInt>(c)) {
      APInt ConjugationInt32 = APInt(32, 0x80000000, true);
      APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL, true);
      switch (CI->getBitWidth()) {
      case 16:
        return false;
      case 32:
        return CI->getValue() == ConjugationInt32;
      case 64:
        return CI->getValue() == ConjugationInt64;
      default:
        llvm_unreachable("Unexpected bit width");
      }
    }
    if (const auto *CF = dyn_cast<ConstantFP>(c))
      return CF->isNegativeZeroValue();
    return false;
  };
  auto combineConjugation = [&](SDValue &r) {
    if (LHS->getOpcode() == ISD::BITCAST && RHS.hasOneUse()) {
      SDValue XOR = LHS.getOperand(0);
      if (XOR->getOpcode() == ISD::XOR && XOR.hasOneUse()) {
        SDValue XORRHS = XOR.getOperand(1);
        if (XORRHS.getOpcode() == ISD::BITCAST && XORRHS.hasOneUse())
          XORRHS = XORRHS.getOperand(0);
        if (XORRHS.getOpcode() == X86ISD::VBROADCAST_LOAD &&
            XORRHS.getOperand(1).getNumOperands()) {
          ConstantPoolSDNode *CP =
              dyn_cast<ConstantPoolSDNode>(XORRHS.getOperand(1).getOperand(0));
          if (CP && isConjugationConstant(CP->getConstVal())) {
            SelectionDAG::FlagInserter FlagsInserter(DAG, N);
            SDValue I2F = DAG.getBitcast(VT, LHS.getOperand(0).getOperand(0));
            SDValue FCMulC = DAG.getNode(CombineOpcode, SDLoc(N), VT, RHS, I2F);
            r = DAG.getBitcast(VT, FCMulC);
            return true;
          }
        }
      }
    }
    return false;
  };
  SDValue Res;
  if (combineConjugation(Res))
    return Res;
  std::swap(LHS, RHS);
  if (combineConjugation(Res))
    return Res;
  return Res;
}
//  Try to combine the following nodes:
//  FADD(A, FMA(B, C, 0)) and FADD(A, FMUL(B, C)) to FMA(B, C, A)
static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
  auto AllowContract = [&DAG](const SDNodeFlags &Flags) {
    return DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
           Flags.hasAllowContract();
  };

  auto HasNoSignedZero = [&DAG](const SDNodeFlags &Flags) {
    return DAG.getTarget().Options.NoSignedZerosFPMath ||
           Flags.hasNoSignedZeros();
  };
  auto IsVectorAllNegativeZero = [](const SDNode *N) {
    if (N->getOpcode() != X86ISD::VBROADCAST_LOAD)
      return false;
    assert(N->getSimpleValueType(0).getScalarType() == MVT::f32 &&
           "Unexpected vector type!");
    if (ConstantPoolSDNode *CP =
            dyn_cast<ConstantPoolSDNode>(N->getOperand(1)->getOperand(0))) {
      APInt AI = APInt(32, 0x80008000, true);
      if (const auto *CI = dyn_cast<ConstantInt>(CP->getConstVal()))
        return CI->getValue() == AI;
      if (const auto *CF = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CF->getValue() == APFloat(APFloat::IEEEsingle(), AI);
    }
    return false;
  };

  if (N->getOpcode() != ISD::FADD || !Subtarget.hasFP16() ||
      !AllowContract(N->getFlags()))
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::v8f16 && VT != MVT::v16f16 && VT != MVT::v32f16)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  bool IsConj;
  SDValue FAddOp1, MulOp0, MulOp1;
  auto GetCFmulFrom = [&MulOp0, &MulOp1, &IsConj, &AllowContract,
                       &IsVectorAllNegativeZero,
                       &HasNoSignedZero](SDValue N) -> bool {
    if (!N.hasOneUse() || N.getOpcode() != ISD::BITCAST)
      return false;
    SDValue Op0 = N.getOperand(0);
    unsigned Opcode = Op0.getOpcode();
    if (Op0.hasOneUse() && AllowContract(Op0->getFlags())) {
      if ((Opcode == X86ISD::VFMULC || Opcode == X86ISD::VFCMULC)) {
        MulOp0 = Op0.getOperand(0);
        MulOp1 = Op0.getOperand(1);
        IsConj = Opcode == X86ISD::VFCMULC;
        return true;
      }
      if ((Opcode == X86ISD::VFMADDC || Opcode == X86ISD::VFCMADDC) &&
          ((ISD::isBuildVectorAllZeros(Op0->getOperand(2).getNode()) &&
            HasNoSignedZero(Op0->getFlags())) ||
           IsVectorAllNegativeZero(Op0->getOperand(2).getNode()))) {
        MulOp0 = Op0.getOperand(0);
        MulOp1 = Op0.getOperand(1);
        IsConj = Opcode == X86ISD::VFCMADDC;
        return true;
      }
    }
    return false;
  };

  if (GetCFmulFrom(LHS))
    FAddOp1 = RHS;
  else if (GetCFmulFrom(RHS))
    FAddOp1 = LHS;
  else
    return SDValue();

  MVT CVT = MVT::getVectorVT(MVT::f32, VT.getVectorNumElements() / 2);
  FAddOp1 = DAG.getBitcast(CVT, FAddOp1);
  unsigned NewOp = IsConj ? X86ISD::VFCMADDC : X86ISD::VFMADDC;
  // FIXME: How do we handle when fast math flags of FADD are different from
  // CFMUL's?
  SDValue CFmul =
      DAG.getNode(NewOp, SDLoc(N), CVT, MulOp0, MulOp1, FAddOp1, N->getFlags());
  return DAG.getBitcast(VT, CFmul);
}
/// Do target-specific dag combines on floating-point adds/subs.
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  if (SDValue HOp = combineToHorizontalAddSub(N, DAG, Subtarget))
    return HOp;

  if (SDValue COp = combineFaddCFmul(N, DAG, Subtarget))
    return COp;

  return SDValue();
}
/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
/// the codegen.
/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
///       anything that is guaranteed to be transformed by DAGCombiner.
static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget,
                                          const SDLoc &DL) {
  assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
  SDValue Src = N->getOperand(0);
  unsigned SrcOpcode = Src.getOpcode();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  auto IsFreeTruncation = [VT](SDValue Op) {
    unsigned TruncSizeInBits = VT.getScalarSizeInBits();

    // See if this has been extended from a smaller/equal size to
    // the truncation size, allowing a truncation to combine with the extend.
    unsigned Opcode = Op.getOpcode();
    if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
         Opcode == ISD::ZERO_EXTEND) &&
        Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
      return true;

    // See if this is a single use constant which can be constant folded.
    // NOTE: We don't peek through bitcasts here because there is currently
    // no support for constant folding truncate+bitcast+vector_of_constants. So
    // we'll just end up with a truncate on both operands which will
    // get turned back into (truncate (binop)) causing an infinite loop.
    return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
  };

  auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
    SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
    SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
    return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
  };

  // Don't combine if the operation has other uses.
  if (!Src.hasOneUse())
    return SDValue();

  // Only support vector truncation for now.
  // TODO: i64 scalar math would benefit as well.
  if (!VT.isVector())
    return SDValue();

  // In most cases it's only worth pre-truncating if we're only facing the cost
  // of one truncation.
  // i.e. if one of the inputs will constant fold or the input is repeated.
  switch (SrcOpcode) {
  case ISD::MUL:
    // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) -
    // it's better to truncate if we have the chance.
    if (SrcVT.getScalarType() == MVT::i64 &&
        TLI.isOperationLegal(SrcOpcode, VT) &&
        !TLI.isOperationLegal(SrcOpcode, SrcVT))
      return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
    LLVM_FALLTHROUGH;
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR:
  case ISD::ADD:
  case ISD::SUB: {
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegal(SrcOpcode, VT) &&
        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
      return TruncateArithmetic(Op0, Op1);
    break;
  }
  }

  return SDValue();
}
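
// Illustrative example (added commentary, not from the original source):
//   (v4i32 trunc (v4i64 add (zext X:v4i32), (zext Y:v4i32)))
// --> (v4i32 add X, Y)
// Both inputs truncate for free through their extends, so one narrow add
// replaces the wide add plus its truncation.
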
/// Truncate using ISD::AND mask and X86ISD::PACKUS.
/// e.g. trunc <8 x i32> X to <8 x i16> -->
/// MaskX = X & 0xffff (clear high bits to prevent saturation)
/// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  EVT OutVT = N->getValueType(0);

  APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
                                    OutVT.getScalarSizeInBits());
  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
  return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
}

/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  EVT OutVT = N->getValueType(0);
  In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
                   DAG.getValueType(OutVT));
  return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
}
/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
/// legalization the truncation will be translated into a BUILD_VECTOR with each
/// element that is extracted from a vector and then truncated, and it is
/// difficult to do this optimization based on them.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  EVT OutVT = N->getValueType(0);
  if (!OutVT.isVector())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  EVT InVT = In.getValueType();
  unsigned NumElems = OutVT.getVectorNumElements();

  // AVX512 provides fast truncate ops.
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
    return SDValue();

  EVT OutSVT = OutVT.getVectorElementType();
  EVT InSVT = InVT.getVectorElementType();
  if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
        NumElems >= 8))
    return SDValue();

  // SSSE3's pshufb results in fewer instructions in the cases below.
  if (Subtarget.hasSSSE3() && NumElems == 8) {
    if (InSVT == MVT::i16)
      return SDValue();
    if (InSVT == MVT::i32 &&
        (OutSVT == MVT::i8 || !Subtarget.hasSSE41() || Subtarget.hasInt256()))
      return SDValue();
  }

  SDLoc DL(N);
  // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
  // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
  // truncate 2 x v4i32 to v8i16.
  if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
    return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
  if (InSVT == MVT::i32)
    return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);

  return SDValue();
}
/// This function transforms vector truncation of 'extended sign-bits' or
/// 'extended zero-bits' values.
/// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS/PACKUS operations.
static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
                                               SelectionDAG &DAG,
                                               const X86Subtarget &Subtarget) {
  // Requires SSE2.
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  MVT VT = N->getValueType(0).getSimpleVT();
  MVT SVT = VT.getScalarType();

  MVT InVT = In.getValueType().getSimpleVT();
  MVT InSVT = InVT.getScalarType();

  // Check we have a truncation suited for PACKSS/PACKUS.
  if (!isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();
  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
    return SDValue();
  if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
    return SDValue();

  // Truncation to sub-128bit vXi32 can be better handled with shuffles.
  if (SVT == MVT::i32 && VT.getSizeInBits() < 128)
    return SDValue();

  // AVX512 has fast truncate, but if the input is already going to be split,
  // there's no harm in trying pack.
  if (Subtarget.hasAVX512() &&
      !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
        InVT.is512BitVector())) {
    // PACK should still be worth it for 128-bit vectors if the sources were
    // originally concatenated from subvectors.
    SmallVector<SDValue> ConcatOps;
    if (VT.getSizeInBits() > 128 ||
        !collectConcatOps(In.getNode(), ConcatOps, DAG))
      return SDValue();
  }

  unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;

  // Use PACKUS if the input has zero-bits that extend all the way to the
  // packed/truncated value. e.g. masks, zext_in_reg, etc.
  KnownBits Known = DAG.computeKnownBits(In);
  unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
  if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
    return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);

  // Use PACKSS if the input has sign-bits that extend all the way to the
  // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
  unsigned NumSignBits = DAG.ComputeNumSignBits(In);

  // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
  // a sign splat. ComputeNumSignBits struggles to see through BITCASTs later
  // on and combines/simplifications can't then use it.
  if (SVT == MVT::i32 && NumSignBits != InSVT.getSizeInBits())
    return SDValue();

  unsigned MinSignBits = InSVT.getSizeInBits() - NumPackedSignBits;
  if (NumSignBits > MinSignBits)
    return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);

  // If we have a srl that only generates signbits that we will discard in
  // the truncation then we can use PACKSS by converting the srl to a sra.
  // SimplifyDemandedBits often relaxes sra to srl so we need to reverse it.
  if (In.getOpcode() == ISD::SRL && N->isOnlyUserOf(In.getNode()))
    if (const APInt *ShAmt = DAG.getValidShiftAmountConstant(
            In, APInt::getAllOnes(VT.getVectorNumElements()))) {
      if (*ShAmt == MinSignBits) {
        SDValue NewIn = DAG.getNode(ISD::SRA, DL, InVT, In->ops());
        return truncateVectorWithPACK(X86ISD::PACKSS, VT, NewIn, DL, DAG,
                                      Subtarget);
      }
    }

  return SDValue();
}
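
// Illustrative example (added commentary, not from the original source): a
// comparison result such as
//   (v8i16 trunc (v8i32 setcc ...))
// is all-zeros or all-ones per element, so every input sign bit extends into
// the truncated value and the truncation can be lowered with PACKSSDW.
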
// Try to form a MULHU or MULHS node by looking for
// (trunc (srl (mul ext, ext), 16))
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
                            SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // First instruction should be a right shift of a multiply.
  if (Src.getOpcode() != ISD::SRL ||
      Src.getOperand(0).getOpcode() != ISD::MUL)
    return SDValue();

  if (!Subtarget.hasSSE2())
    return SDValue();

  // Only handle vXi16 types that are at least 128-bits unless they will be
  // widened.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
    return SDValue();

  // Input type should be at least vXi32.
  EVT InVT = Src.getValueType();
  if (InVT.getVectorElementType().getSizeInBits() < 32)
    return SDValue();

  // Need a shift by 16.
  APInt ShiftAmt;
  if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
      ShiftAmt != 16)
    return SDValue();

  SDValue LHS = Src.getOperand(0).getOperand(0);
  SDValue RHS = Src.getOperand(0).getOperand(1);

  // Count leading sign/zero bits on both inputs - if there are enough then
  // truncation back to vXi16 will be cheap - either as a pack/shuffle
  // sequence or using AVX512 truncations. If the inputs are sext/zext then the
  // truncations may actually be free by peeking through to the ext source.
  auto IsSext = [&DAG](SDValue V) {
    return DAG.ComputeMaxSignificantBits(V) <= 16;
  };
  auto IsZext = [&DAG](SDValue V) {
    return DAG.computeKnownBits(V).countMaxActiveBits() <= 16;
  };

  bool IsSigned = IsSext(LHS) && IsSext(RHS);
  bool IsUnsigned = IsZext(LHS) && IsZext(RHS);
  if (!IsSigned && !IsUnsigned)
    return SDValue();

  // Check if both inputs are extensions, which will be removed by truncation.
  bool IsTruncateFree = (LHS.getOpcode() == ISD::SIGN_EXTEND ||
                         LHS.getOpcode() == ISD::ZERO_EXTEND) &&
                        (RHS.getOpcode() == ISD::SIGN_EXTEND ||
                         RHS.getOpcode() == ISD::ZERO_EXTEND) &&
                        LHS.getOperand(0).getScalarValueSizeInBits() <= 16 &&
                        RHS.getOperand(0).getScalarValueSizeInBits() <= 16;

  // For AVX2+ targets, with the upper bits known zero, we can perform MULHU on
  // the (bitcasted) inputs directly, and then cheaply pack/truncate the result
  // (upper elts will be zero). Don't attempt this with just AVX512F as MULHU
  // will have to split anyway.
  unsigned InSizeInBits = InVT.getSizeInBits();
  if (IsUnsigned && !IsTruncateFree && Subtarget.hasInt256() &&
      !(Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.is256BitVector()) &&
      (InSizeInBits % 16) == 0) {
    EVT BCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                InVT.getSizeInBits() / 16);
    SDValue Res = DAG.getNode(ISD::MULHU, DL, BCVT, DAG.getBitcast(BCVT, LHS),
                              DAG.getBitcast(BCVT, RHS));
    return DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getBitcast(InVT, Res));
  }

  // Truncate back to source type.
  LHS = DAG.getNode(ISD::TRUNCATE, DL, VT, LHS);
  RHS = DAG.getNode(ISD::TRUNCATE, DL, VT, RHS);

  unsigned Opc = IsSigned ? ISD::MULHS : ISD::MULHU;
  return DAG.getNode(Opc, DL, VT, LHS, RHS);
}
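
// Illustrative example (added commentary, not from the original source):
//   (v8i16 trunc (v8i32 srl (mul (zext X:v8i16), (zext Y:v8i16)), 16))
// keeps only the high 16 bits of each 16x16->32 product, which is exactly
// PMULHUW (or PMULHW when the operands are sign-extended).
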
// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
// from one vector with signed bytes from another vector, adds together
// adjacent pairs of 16-bit products, and saturates the result before
// truncating to 16-bits.
//
// Which looks something like this:
// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
//                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget,
                               const SDLoc &DL) {
  if (!VT.isVector() || !Subtarget.hasSSSE3())
    return SDValue();

  unsigned NumElems = VT.getVectorNumElements();
  EVT ScalarVT = VT.getVectorElementType();
  if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
    return SDValue();

  SDValue SSatVal = detectSSatPattern(In, VT);
  if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
    return SDValue();

  // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
  // of multiplies from even/odd elements.
  SDValue N0 = SSatVal.getOperand(0);
  SDValue N1 = SSatVal.getOperand(1);

  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  SDValue N10 = N1.getOperand(0);
  SDValue N11 = N1.getOperand(1);

  // TODO: Handle constant vectors and use knownbits/computenumsignbits?
  // Canonicalize zero_extend to LHS.
  if (N01.getOpcode() == ISD::ZERO_EXTEND)
    std::swap(N00, N01);
  if (N11.getOpcode() == ISD::ZERO_EXTEND)
    std::swap(N10, N11);

  // Ensure we have a zero_extend and a sign_extend.
  if (N00.getOpcode() != ISD::ZERO_EXTEND ||
      N01.getOpcode() != ISD::SIGN_EXTEND ||
      N10.getOpcode() != ISD::ZERO_EXTEND ||
      N11.getOpcode() != ISD::SIGN_EXTEND)
    return SDValue();

  // Peek through the extends.
  N00 = N00.getOperand(0);
  N01 = N01.getOperand(0);
  N10 = N10.getOperand(0);
  N11 = N11.getOperand(0);

  // Ensure the extend is from vXi8.
  if (N00.getValueType().getVectorElementType() != MVT::i8 ||
      N01.getValueType().getVectorElementType() != MVT::i8 ||
      N10.getValueType().getVectorElementType() != MVT::i8 ||
      N11.getValueType().getVectorElementType() != MVT::i8)
    return SDValue();

  // All inputs should be build_vectors.
  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
      N01.getOpcode() != ISD::BUILD_VECTOR ||
      N10.getOpcode() != ISD::BUILD_VECTOR ||
      N11.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // N00/N10 are zero extended. N01/N11 are sign extended.

  // For each element, we need to ensure we have an odd element from one vector
  // multiplied by the odd element of another vector and the even element from
  // one of the same vectors being multiplied by the even element from the
  // other vector. So we need to make sure for each element i, this operator
  // is being performed:
  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
  SDValue ZExtIn, SExtIn;
  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue N00Elt = N00.getOperand(i);
    SDValue N01Elt = N01.getOperand(i);
    SDValue N10Elt = N10.getOperand(i);
    SDValue N11Elt = N11.getOperand(i);
    // TODO: Be more tolerant to undefs.
    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
      return SDValue();
    unsigned IdxN00 = ConstN00Elt->getZExtValue();
    unsigned IdxN01 = ConstN01Elt->getZExtValue();
    unsigned IdxN10 = ConstN10Elt->getZExtValue();
    unsigned IdxN11 = ConstN11Elt->getZExtValue();
    // Add is commutative so indices can be reordered.
    if (IdxN00 > IdxN10) {
      std::swap(IdxN00, IdxN10);
      std::swap(IdxN01, IdxN11);
    }
    // N0 indices must be the even element. N1 indices must be the next odd
    // element.
    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
      return SDValue();
    SDValue N00In = N00Elt.getOperand(0);
    SDValue N01In = N01Elt.getOperand(0);
    SDValue N10In = N10Elt.getOperand(0);
    SDValue N11In = N11Elt.getOperand(0);
    // First time we find an input capture it.
    if (!ZExtIn) {
      ZExtIn = N00In;
      SExtIn = N01In;
    }
    if (ZExtIn != N00In || SExtIn != N01In ||
        ZExtIn != N10In || SExtIn != N11In)
      return SDValue();
  }

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    // Shrink by adding truncate nodes and let DAGCombine fold with the
    // sources.
    EVT InVT = Ops[0].getValueType();
    assert(InVT.getScalarType() == MVT::i8 &&
           "Unexpected scalar element type");
    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                 InVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
                          PMADDBuilder);
}
static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  SDLoc DL(N);

  // Attempt to pre-truncate inputs to arithmetic ops instead.
  if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
    return V;

  // Try to detect AVG pattern first.
  if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
    return Avg;

  // Try to detect PMADD
  if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
    return PMAdd;

  // Try to combine truncation with signed/unsigned saturation.
  if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
    return Val;

  // Try to combine PMULHUW/PMULHW for vXi16.
  if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
    return V;

  // The bitcast source is a direct mmx result.
  // Detect bitcasts between i32 to x86mmx
  if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
  }

  // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
  if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
    return V;

  return combineVectorTruncation(N, DAG, Subtarget);
}
static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  SDValue In = N->getOperand(0);
  SDLoc DL(N);

  if (SDValue SSatVal = detectSSatPattern(In, VT))
    return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
  if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL))
    return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedMask(APInt::getAllOnes(VT.getScalarSizeInBits()));
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}
50585 /// Returns the negated value if the node \p N flips sign of FP value.
50587 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
50589 /// AVX512F does not have FXOR, so FNEG is lowered as
50590 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
50591 /// In this case we go though all bitcasts.
50592 /// This also recognizes splat of a negated value and returns the splat of that
50594 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
50595 if (N->getOpcode() == ISD::FNEG)
50596 return N->getOperand(0);
50598 // Don't recurse exponentially.
50599 if (Depth > SelectionDAG::MaxRecursionDepth)
50602 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
50604 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
50605 EVT VT = Op->getValueType(0);
50607 // Make sure the element size doesn't change.
50608 if (VT.getScalarSizeInBits() != ScalarSize)
50611 unsigned Opc = Op.getOpcode();
50613 case ISD::VECTOR_SHUFFLE: {
50614 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
50615 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
50616 if (!Op.getOperand(1).isUndef())
50618 if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
50619 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
50620 return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
50621 cast<ShuffleVectorSDNode>(Op)->getMask());
50624 case ISD::INSERT_VECTOR_ELT: {
50625 // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
50626 // -V, INDEX).
50627 SDValue InsVector = Op.getOperand(0);
50628 SDValue InsVal = Op.getOperand(1);
50629 if (!InsVector.isUndef())
50631 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
50632 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
50633 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
50634 NegInsVal, Op.getOperand(2));
50635 break;
50636 }
50637 case ISD::FSUB:
50638 case ISD::XOR:
50639 case X86ISD::FXOR: {
50640 SDValue Op1 = Op.getOperand(1);
50641 SDValue Op0 = Op.getOperand(0);
50643 // For XOR and FXOR, we want to check if constant
50644 // bits of Op1 are sign bit masks. For FSUB, we
50645 // have to check if constant bits of Op0 are sign
50646 // bit masks and hence we swap the operands.
50647 if (Opc == ISD::FSUB)
50648 std::swap(Op0, Op1);
50651 SmallVector<APInt, 16> EltBits;
50652 // Extract constant bits and see if they are all
50653 // sign bit masks. Ignore the undef elements.
50654 if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
50655 /* AllowWholeUndefs */ true,
50656 /* AllowPartialUndefs */ false)) {
50657 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
50658 if (!UndefElts[I] && !EltBits[I].isSignMask())
50661 // Only allow bitcast from a correctly-sized constant.
50662 Op0 = peekThroughBitcasts(Op0);
50663 if (Op0.getScalarValueSizeInBits() == ScalarSize)
50664 return Op0;
50673 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
50674 bool NegRes) {
50675 if (NegMul) {
50676 switch (Opcode) {
50677 default: llvm_unreachable("Unexpected opcode");
50678 case ISD::FMA: Opcode = X86ISD::FNMADD; break;
50679 case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FNMADD; break;
50680 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
50681 case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
50682 case X86ISD::STRICT_FMSUB: Opcode = X86ISD::STRICT_FNMSUB; break;
50683 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
50684 case X86ISD::FNMADD: Opcode = ISD::FMA; break;
50685 case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA; break;
50686 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
50687 case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
50688 case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB; break;
50689 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
50690 }
50691 }
50693 if (NegAcc) {
50694 switch (Opcode) {
50695 default: llvm_unreachable("Unexpected opcode");
50696 case ISD::FMA: Opcode = X86ISD::FMSUB; break;
50697 case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FMSUB; break;
50698 case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
50699 case X86ISD::FMSUB: Opcode = ISD::FMA; break;
50700 case X86ISD::STRICT_FMSUB: Opcode = ISD::STRICT_FMA; break;
50701 case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
50702 case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
50703 case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
50704 case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
50705 case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
50706 case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
50707 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
50708 case X86ISD::FMADDSUB: Opcode = X86ISD::FMSUBADD; break;
50709 case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
50710 case X86ISD::FMSUBADD: Opcode = X86ISD::FMADDSUB; break;
50711 case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
50712 }
50713 }
50715 if (NegRes) {
50716 switch (Opcode) {
50717 // For accuracy reasons, we never combine fneg and fma under strict FP.
50718 default: llvm_unreachable("Unexpected opcode");
50719 case ISD::FMA: Opcode = X86ISD::FNMSUB; break;
50720 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
50721 case X86ISD::FMSUB: Opcode = X86ISD::FNMADD; break;
50722 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
50723 case X86ISD::FNMADD: Opcode = X86ISD::FMSUB; break;
50724 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
50725 case X86ISD::FNMSUB: Opcode = ISD::FMA; break;
50726 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
50727 }
50728 }
50730 return Opcode;
50731 }
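// Illustrative uses of the mapping above:
//   negateFMAOpcode(ISD::FMA, /*NegMul=*/true,  /*NegAcc=*/false, false)
//     == X86ISD::FNMADD   // -(a*b)+c
//   negateFMAOpcode(ISD::FMA, /*NegMul=*/false, /*NegAcc=*/true,  false)
//     == X86ISD::FMSUB    // (a*b)-c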
50733 /// Do target-specific dag combines on floating point negations.
50734 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
50735 TargetLowering::DAGCombinerInfo &DCI,
50736 const X86Subtarget &Subtarget) {
50737 EVT OrigVT = N->getValueType(0);
50738 SDValue Arg = isFNEG(DAG, N);
50739 if (!Arg)
50740 return SDValue();
50741 SDLoc DL(N);
50742 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50743 EVT VT = Arg.getValueType();
50744 EVT SVT = VT.getScalarType();
50747 // Let legalize expand this if it isn't a legal type yet.
50748 if (!TLI.isTypeLegal(VT))
50751 // If we're negating a FMUL node on a target with FMA, then we can avoid the
50752 // use of a constant by performing (-0 - A*B) instead.
50753 // FIXME: Check rounding control flags as well once it becomes available.
50754 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
50755 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
50756 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
50757 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
50758 Arg.getOperand(1), Zero);
50759 return DAG.getBitcast(OrigVT, NewNode);
50762 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
50763 bool LegalOperations = !DCI.isBeforeLegalizeOps();
50764 if (SDValue NegArg =
50765 TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
50766 return DAG.getBitcast(OrigVT, NegArg);
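// e.g. with FMA available and no-signed-zeros set, (fneg (fmul x, y)) is
// rebuilt above as (fnmsub x, y, 0.0), computing (-0 - x*y) without
// materializing a sign-mask constant from the constant pool.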
50771 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
50772 bool LegalOperations,
50774 NegatibleCost &Cost,
50775 unsigned Depth) const {
50776 // fneg patterns are removable even if they have multiple uses.
50777 if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
50778 Cost = NegatibleCost::Cheaper;
50779 return DAG.getBitcast(Op.getValueType(), Arg);
50782 EVT VT = Op.getValueType();
50783 EVT SVT = VT.getScalarType();
50784 unsigned Opc = Op.getOpcode();
50785 SDNodeFlags Flags = Op.getNode()->getFlags();
50788 case X86ISD::FMSUB:
50789 case X86ISD::FNMADD:
50790 case X86ISD::FNMSUB:
50791 case X86ISD::FMADD_RND:
50792 case X86ISD::FMSUB_RND:
50793 case X86ISD::FNMADD_RND:
50794 case X86ISD::FNMSUB_RND: {
50795 if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
50796 !(SVT == MVT::f32 || SVT == MVT::f64) ||
50797 !isOperationLegal(ISD::FMA, VT))
50800 // Don't fold (fneg (fma (fneg x), y, (fneg z))) to (fma x, y, z)
50801 // if it may have signed zeros.
50802 if (!Flags.hasNoSignedZeros())
50805 // This is always negatible for free but we might be able to remove some
50806 // extra operand negations as well.
50807 SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
50808 for (int i = 0; i != 3; ++i)
50809 NewOps[i] = getCheaperNegatedExpression(
50810 Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);
50812 bool NegA = !!NewOps[0];
50813 bool NegB = !!NewOps[1];
50814 bool NegC = !!NewOps[2];
50815 unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
50817 Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
50818 : NegatibleCost::Neutral;
50820 // Fill in the non-negated ops with the original values.
50821 for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
50822 if (!NewOps[i])
50823 NewOps[i] = Op.getOperand(i);
50824 return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
50827 if (SDValue NegOp0 =
50828 getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
50829 ForCodeSize, Cost, Depth + 1))
50830 return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
50834 return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
50835 ForCodeSize, Cost, Depth);
50838 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
50839 const X86Subtarget &Subtarget) {
50840 MVT VT = N->getSimpleValueType(0);
50841 // If we have integer vector types available, use the integer opcodes.
50842 if (!VT.isVector() || !Subtarget.hasSSE2())
50847 unsigned IntBits = VT.getScalarSizeInBits();
50848 MVT IntSVT = MVT::getIntegerVT(IntBits);
50849 MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
50851 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
50852 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
50853 unsigned IntOpcode;
50854 switch (N->getOpcode()) {
50855 default: llvm_unreachable("Unexpected FP logic op");
50856 case X86ISD::FOR: IntOpcode = ISD::OR; break;
50857 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
50858 case X86ISD::FAND: IntOpcode = ISD::AND; break;
50859 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
50861 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
50862 return DAG.getBitcast(VT, IntOp);
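// e.g. (X86ISD::FAND v4f32:a, v4f32:b) becomes
//   (v4f32 bitcast (and (v4i32 bitcast a), (v4i32 bitcast b)))
// so the integer-domain PAND form can be used on SSE2+ targets.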
50866 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
50867 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
50868 if (N->getOpcode() != ISD::XOR)
50871 SDValue LHS = N->getOperand(0);
50872 if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
50875 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
50876 X86::CondCode(LHS->getConstantOperandVal(0)));
50878 return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
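// e.g. (xor (x86setcc COND_E, flags), 1) folds to (x86setcc COND_NE, flags),
// removing the explicit bit flip after the flag materialization.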
50881 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
50882 TargetLowering::DAGCombinerInfo &DCI,
50883 const X86Subtarget &Subtarget) {
50884 SDValue N0 = N->getOperand(0);
50885 SDValue N1 = N->getOperand(1);
50886 EVT VT = N->getValueType(0);
50888 // If this is SSE1 only convert to FXOR to avoid scalarization.
50889 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
50890 return DAG.getBitcast(MVT::v4i32,
50891 DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
50892 DAG.getBitcast(MVT::v4f32, N0),
50893 DAG.getBitcast(MVT::v4f32, N1)));
50896 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
50899 if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
50902 if (SDValue R = combineBitOpWithShift(N, DAG))
50905 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
50908 if (DCI.isBeforeLegalizeOps())
50911 if (SDValue SetCC = foldXor1SetCC(N, DAG))
50914 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
50917 // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
50918 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50919 if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&
50920 N0.getOperand(0).getValueType().isVector() &&
50921 N0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
50922 TLI.isTypeLegal(N0.getOperand(0).getValueType()) && N0.hasOneUse()) {
50923 return DAG.getBitcast(VT, DAG.getNOT(SDLoc(N), N0.getOperand(0),
50924 N0.getOperand(0).getValueType()));
50927 // Handle AVX512 mask widening.
50928 // Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
50929 if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
50930 VT.getVectorElementType() == MVT::i1 &&
50931 N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
50932 TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
50933 return DAG.getNode(
50934 ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
50935 DAG.getNOT(SDLoc(N), N0.getOperand(1), N0.getOperand(1).getValueType()),
50939 // Fold xor(zext(xor(x,c1)),c2) -> xor(zext(x),xor(zext(c1),c2))
50940 // Fold xor(truncate(xor(x,c1)),c2) -> xor(truncate(x),xor(truncate(c1),c2))
50941 // TODO: Under what circumstances could this be performed in DAGCombine?
50942 if ((N0.getOpcode() == ISD::TRUNCATE || N0.getOpcode() == ISD::ZERO_EXTEND) &&
50943 N0.getOperand(0).getOpcode() == N->getOpcode()) {
50944 SDValue TruncExtSrc = N0.getOperand(0);
50945 auto *N1C = dyn_cast<ConstantSDNode>(N1);
50946 auto *N001C = dyn_cast<ConstantSDNode>(TruncExtSrc.getOperand(1));
50947 if (N1C && !N1C->isOpaque() && N001C && !N001C->isOpaque()) {
50949 SDValue LHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(0), DL, VT);
50950 SDValue RHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(1), DL, VT);
50951 return DAG.getNode(ISD::XOR, DL, VT, LHS,
50952 DAG.getNode(ISD::XOR, DL, VT, RHS, N1));
50956 return combineFneg(N, DAG, DCI, Subtarget);
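// As an example of the zext/trunc fold above,
//   (xor (zext (xor x, 1)), 2)
// becomes (xor (zext x), (xor (zext 1), 2)) = (xor (zext x), 3), merging the
// two constants at the wider type.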
50959 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
50960 TargetLowering::DAGCombinerInfo &DCI,
50961 const X86Subtarget &Subtarget) {
50962 EVT VT = N->getValueType(0);
50963 unsigned NumBits = VT.getSizeInBits();
50965 // TODO - Constant Folding.
50967 // Simplify the inputs.
50968 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50969 APInt DemandedMask(APInt::getAllOnes(NumBits));
50970 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
50971 return SDValue(N, 0);
50976 static bool isNullFPScalarOrVectorConst(SDValue V) {
50977 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
50980 /// If a value is a scalar FP zero or a vector FP zero (potentially including
50981 /// undefined elements), return a zero constant that may be used to fold away
50982 /// that value. In the case of a vector, the returned constant will not contain
50983 /// undefined elements even if the input parameter does. This makes it suitable
50984 /// to be used as a replacement operand with operations (e.g., bitwise-and) where
50985 /// an undef should not propagate.
50986 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
50987 const X86Subtarget &Subtarget) {
50988 if (!isNullFPScalarOrVectorConst(V))
50991 if (V.getValueType().isVector())
50992 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
50997 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
50998 const X86Subtarget &Subtarget) {
50999 SDValue N0 = N->getOperand(0);
51000 SDValue N1 = N->getOperand(1);
51001 EVT VT = N->getValueType(0);
51004 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
51005 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
51006 (VT == MVT::f64 && Subtarget.hasSSE2()) ||
51007 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
51010 auto isAllOnesConstantFP = [](SDValue V) {
51011 if (V.getSimpleValueType().isVector())
51012 return ISD::isBuildVectorAllOnes(V.getNode());
51013 auto *C = dyn_cast<ConstantFPSDNode>(V);
51014 return C && C->getConstantFPValue()->isAllOnesValue();
51017 // fand (fxor X, -1), Y --> fandn X, Y
51018 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
51019 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
51021 // fand X, (fxor Y, -1) --> fandn Y, X
51022 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
51023 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
51028 /// Do target-specific dag combines on X86ISD::FAND nodes.
51029 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
51030 const X86Subtarget &Subtarget) {
51031 // FAND(0.0, x) -> 0.0
51032 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
51035 // FAND(x, 0.0) -> 0.0
51036 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
51039 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
51042 return lowerX86FPLogicOp(N, DAG, Subtarget);
51045 /// Do target-specific dag combines on X86ISD::FANDN nodes.
51046 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
51047 const X86Subtarget &Subtarget) {
51048 // FANDN(0.0, x) -> x
51049 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
51050 return N->getOperand(1);
51052 // FANDN(x, 0.0) -> 0.0
51053 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
51056 return lowerX86FPLogicOp(N, DAG, Subtarget);
51059 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
51060 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
51061 TargetLowering::DAGCombinerInfo &DCI,
51062 const X86Subtarget &Subtarget) {
51063 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
51065 // F[X]OR(0.0, x) -> x
51066 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
51067 return N->getOperand(1);
51069 // F[X]OR(x, 0.0) -> x
51070 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
51071 return N->getOperand(0);
51073 if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
51076 return lowerX86FPLogicOp(N, DAG, Subtarget);
51079 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
51080 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
51081 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
51083 // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
51084 if (!DAG.getTarget().Options.NoNaNsFPMath ||
51085 !DAG.getTarget().Options.NoSignedZerosFPMath)
51088 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
51089 // into FMINC and FMAXC, which are commutative operations.
51090 unsigned NewOp = 0;
51091 switch (N->getOpcode()) {
51092 default: llvm_unreachable("unknown opcode");
51093 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
51094 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
51097 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
51098 N->getOperand(0), N->getOperand(1));
51101 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
51102 const X86Subtarget &Subtarget) {
51103 if (Subtarget.useSoftFloat())
51106 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51108 EVT VT = N->getValueType(0);
51109 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
51110 (Subtarget.hasSSE2() && VT == MVT::f64) ||
51111 (Subtarget.hasFP16() && VT == MVT::f16) ||
51112 (VT.isVector() && TLI.isTypeLegal(VT))))
51115 SDValue Op0 = N->getOperand(0);
51116 SDValue Op1 = N->getOperand(1);
51118 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
51120 // If we don't have to respect NaN inputs, this is a direct translation to x86
51121 // min/max instructions.
51122 if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
51123 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
51125 // If one of the operands is known non-NaN use the native min/max instructions
51126 // with the non-NaN input as second operand.
51127 if (DAG.isKnownNeverNaN(Op1))
51128 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
51129 if (DAG.isKnownNeverNaN(Op0))
51130 return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
51132 // If we have to respect NaN inputs, this takes at least 3 instructions.
51133 // Favor a library call when operating on a scalar and minimizing code size.
51134 if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
51137 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
51140 // There are 4 possibilities involving NaN inputs, and these are the required
51141 // outputs:
51142 //                   Op1
51143 //               Num     NaN
51144 //            ----------------
51145 //     Num    |  Max  |  Op0 |
51146 // Op0        ----------------
51147 //     NaN    |  Op1  |  NaN |
51148 //            ----------------
51150 // The SSE FP max/min instructions were not designed for this case, but rather
51151 // to implement:
51152 //     Min = Op1 < Op0 ? Op1 : Op0
51153 //     Max = Op1 > Op0 ? Op1 : Op0
51155 // So they always return Op0 if either input is a NaN. However, we can still
51156 // use those instructions for fmaxnum by selecting away a NaN input.
51158 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
51159 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
51160 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
51162 // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
51163 // are NaN, the NaN value of Op1 is the result.
51164 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
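// Roughly, for scalar f32 fmaxnum the sequence built above is (illustrative):
//   t      = MAXSS Op1, Op0        ; passes Op0 through if either is NaN
//   isnan  = CMPUNORDSS Op0, Op0   ; all-ones iff Op0 is NaN
//   result = select isnan, Op1, t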
51167 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
51168 TargetLowering::DAGCombinerInfo &DCI) {
51169 EVT VT = N->getValueType(0);
51170 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51172 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
51173 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
51174 return SDValue(N, 0);
51176 // Convert a full vector load into vzload when not all bits are needed.
51177 SDValue In = N->getOperand(0);
51178 MVT InVT = In.getSimpleValueType();
51179 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
51180 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
51181 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
51182 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
51183 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
51184 MVT MemVT = MVT::getIntegerVT(NumBits);
51185 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
51186 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
51188 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
51189 DAG.getBitcast(InVT, VZLoad));
51190 DCI.CombineTo(N, Convert);
51191 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
51192 DCI.recursivelyDeleteUnusedNodes(LN);
51193 return SDValue(N, 0);
51200 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
51201 TargetLowering::DAGCombinerInfo &DCI) {
51202 bool IsStrict = N->isTargetStrictFPOpcode();
51203 EVT VT = N->getValueType(0);
51205 // Convert a full vector load into vzload when not all bits are needed.
51206 SDValue In = N->getOperand(IsStrict ? 1 : 0);
51207 MVT InVT = In.getSimpleValueType();
51208 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
51209 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
51210 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
51211 LoadSDNode *LN = cast<LoadSDNode>(In);
51212 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
51213 MVT MemVT = MVT::getFloatingPointVT(NumBits);
51214 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
51215 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
51216 SDLoc dl(N);
51217 if (IsStrict) {
51218 SDValue Convert =
51219 DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
51220 {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
51221 DCI.CombineTo(N, Convert, Convert.getValue(1));
51222 } else {
51223 SDValue Convert =
51224 DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
51225 DCI.CombineTo(N, Convert);
51226 }
51227 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
51228 DCI.recursivelyDeleteUnusedNodes(LN);
51229 return SDValue(N, 0);
51236 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
51237 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
51238 TargetLowering::DAGCombinerInfo &DCI,
51239 const X86Subtarget &Subtarget) {
51240 SDValue N0 = N->getOperand(0);
51241 SDValue N1 = N->getOperand(1);
51242 MVT VT = N->getSimpleValueType(0);
51243 int NumElts = VT.getVectorNumElements();
51244 unsigned EltSizeInBits = VT.getScalarSizeInBits();
51246 // ANDNP(undef, x) -> 0
51247 // ANDNP(x, undef) -> 0
51248 if (N0.isUndef() || N1.isUndef())
51249 return DAG.getConstant(0, SDLoc(N), VT);
51251 // ANDNP(0, x) -> x
51252 if (ISD::isBuildVectorAllZeros(N0.getNode()))
51255 // ANDNP(x, 0) -> 0
51256 if (ISD::isBuildVectorAllZeros(N1.getNode()))
51257 return DAG.getConstant(0, SDLoc(N), VT);
51259 // Turn ANDNP back to AND if input is inverted.
51260 if (SDValue Not = IsNOT(N0, DAG))
51261 return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not), N1);
51263 // Constant Folding
51264 APInt Undefs0, Undefs1;
51265 SmallVector<APInt> EltBits0, EltBits1;
51266 if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
51268 APInt ResultUndefs = APInt::getZero(NumElts);
51270 if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
51271 SmallVector<APInt> ResultBits;
51272 for (int I = 0; I != NumElts; ++I)
51273 ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
51274 return getConstVector(ResultBits, ResultUndefs, VT, DAG, DL);
51277 // Constant fold NOT(N0) to allow us to use AND.
51278 // Ensure this is only performed if we can confirm that the bitcasted source
51279 // has one use to prevent an infinite loop with canonicalizeBitSelect.
51280 if (N0->hasOneUse()) {
51281 SDValue BC0 = peekThroughOneUseBitcasts(N0);
51282 if (BC0.getOpcode() != ISD::BITCAST) {
51283 for (APInt &Elt : EltBits0)
51285 SDValue Not = getConstVector(EltBits0, ResultUndefs, VT, DAG, DL);
51286 return DAG.getNode(ISD::AND, DL, VT, Not, N1);
51291 // Attempt to recursively combine a bitmask ANDNP with shuffles.
51292 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
51294 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
51297 // If either operand is a constant mask, then only the elements that aren't
51298 // zero are actually demanded by the other operand.
51299 auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
51301 SmallVector<APInt> EltBits;
51302 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
51303 APInt DemandedElts = APInt::getAllOnes(NumElts);
51304 if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
51306 DemandedBits.clearAllBits();
51307 DemandedElts.clearAllBits();
51308 for (int I = 0; I != NumElts; ++I) {
51309 if (UndefElts[I]) {
51310 // We can't assume an undef src element gives an undef dst - the
51311 // other src might be zero.
51312 DemandedBits.setAllBits();
51313 DemandedElts.setBit(I);
51314 } else if ((Invert && !EltBits[I].isAllOnes()) ||
51315 (!Invert && !EltBits[I].isZero())) {
51316 DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
51317 DemandedElts.setBit(I);
51321 return std::make_pair(DemandedBits, DemandedElts);
51323 APInt Bits0, Elts0;
51324 APInt Bits1, Elts1;
51325 std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
51326 std::tie(Bits1, Elts1) = GetDemandedMasks(N0, true);
51328 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51329 if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
51330 TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
51331 TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
51332 TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
51333 if (N->getOpcode() != ISD::DELETED_NODE)
51334 DCI.AddToWorklist(N);
51335 return SDValue(N, 0);
51342 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
51343 TargetLowering::DAGCombinerInfo &DCI) {
51344 SDValue N1 = N->getOperand(1);
51346 // BT ignores high bits in the bit index operand.
51347 unsigned BitWidth = N1.getValueSizeInBits();
51348 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
51349 if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
51350 if (N->getOpcode() != ISD::DELETED_NODE)
51351 DCI.AddToWorklist(N);
51352 return SDValue(N, 0);
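// e.g. for (x86bt x, (and idx, 63)) on i64, the mask is redundant because BT
// only reads the low log2(width) bits of the index, so SimplifyDemandedBits
// removes the AND above.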
51358 static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
51359 TargetLowering::DAGCombinerInfo &DCI) {
51360 bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
51361 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
51363 if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
51364 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51365 APInt DemandedElts = APInt::getLowBitsSet(8, 4);
51366 if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, DCI)) {
51367 if (N->getOpcode() != ISD::DELETED_NODE)
51368 DCI.AddToWorklist(N);
51369 return SDValue(N, 0);
51372 // Convert a full vector load into vzload when not all bits are needed.
51373 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
51374 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
51375 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
51376 SDLoc dl(N);
51377 if (IsStrict) {
51378 SDValue Convert = DAG.getNode(
51379 N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
51380 {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
51381 DCI.CombineTo(N, Convert, Convert.getValue(1));
51382 } else {
51383 SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
51384 DAG.getBitcast(MVT::v8i16, VZLoad));
51385 DCI.CombineTo(N, Convert);
51386 }
51388 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
51389 DCI.recursivelyDeleteUnusedNodes(LN);
51390 return SDValue(N, 0);
51398 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
51399 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
51400 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
51402 EVT DstVT = N->getValueType(0);
51404 SDValue N0 = N->getOperand(0);
51405 SDValue N1 = N->getOperand(1);
51406 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
51408 if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
51411 // Look through single use any_extends / truncs.
51412 SDValue IntermediateBitwidthOp;
51413 if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
51415 IntermediateBitwidthOp = N0;
51416 N0 = N0.getOperand(0);
51419 // See if we have a single use cmov.
51420 if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
51423 SDValue CMovOp0 = N0.getOperand(0);
51424 SDValue CMovOp1 = N0.getOperand(1);
51426 // Make sure both operands are constants.
51427 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
51428 !isa<ConstantSDNode>(CMovOp1.getNode()))
51433 // If we looked through an any_extend/trunc above, apply the same op to the constants.
51434 if (IntermediateBitwidthOp) {
51435 unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
51436 CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
51437 CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
51440 CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
51441 CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
51443 EVT CMovVT = DstVT;
51444 // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
51445 if (DstVT == MVT::i16) {
51446 CMovVT = MVT::i32;
51447 CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
51448 CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
51451 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
51452 N0.getOperand(2), N0.getOperand(3));
51454 if (CMovVT != DstVT)
51455 CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
51460 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
51461 const X86Subtarget &Subtarget) {
51462 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
51464 if (SDValue V = combineSextInRegCmov(N, DAG))
51467 EVT VT = N->getValueType(0);
51468 SDValue N0 = N->getOperand(0);
51469 SDValue N1 = N->getOperand(1);
51470 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
51473 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
51474 // AVX2 since there is no sign-extended shift right operation on a vector
51475 // with 64-bit elements.
51476 // (sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
51477 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
51478 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
51479 N0.getOpcode() == ISD::SIGN_EXTEND)) {
51480 SDValue N00 = N0.getOperand(0);
51482 // EXTLOAD has a better solution on AVX2: it may be replaced with an
51483 // X86ISD::VSEXT node.
51484 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
51485 if (!ISD::isNormalLoad(N00.getNode()))
51488 // Attempt to promote any comparison mask ops before the
51489 // SIGN_EXTEND_INREG gets in the way.
51490 if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
51491 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);
51493 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
51495 DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
51496 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
51502 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
51503 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
51504 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
51505 /// opportunities to combine math ops, use an LEA, or use a complex addressing
51506 /// mode. This can eliminate extend, add, and shift instructions.
51507 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
51508 const X86Subtarget &Subtarget) {
51509 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
51510 Ext->getOpcode() != ISD::ZERO_EXTEND)
51513 // TODO: This should be valid for other integer types.
51514 EVT VT = Ext->getValueType(0);
51515 if (VT != MVT::i64)
51518 SDValue Add = Ext->getOperand(0);
51519 if (Add.getOpcode() != ISD::ADD)
51522 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
51523 bool NSW = Add->getFlags().hasNoSignedWrap();
51524 bool NUW = Add->getFlags().hasNoUnsignedWrap();
51526 // We need an 'add nsw' feeding into the 'sext' or an 'add nuw' feeding
51527 // into the 'zext'.
51528 if ((Sext && !NSW) || (!Sext && !NUW))
51531 // Having a constant operand to the 'add' ensures that we are not increasing
51532 // the instruction count because the constant is extended for free below.
51533 // A constant operand can also become the displacement field of an LEA.
51534 auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
51538 // Don't make the 'add' bigger if there's no hope of combining it with some
51539 // other 'add' or 'shl' instruction.
51540 // TODO: It may be profitable to generate simpler LEA instructions in place
51541 // of single 'add' instructions, but the cost model for selecting an LEA
51542 // currently has a high threshold.
51543 bool HasLEAPotential = false;
51544 for (auto *User : Ext->uses()) {
51545 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
51546 HasLEAPotential = true;
51550 if (!HasLEAPotential)
51553 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
51554 int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
51555 SDValue AddOp0 = Add.getOperand(0);
51556 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
51557 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
51559 // The wider add is guaranteed to not wrap because both operands are
51560 // sign- or zero-extended, and the narrow add had the matching nsw/nuw flag.
51562 Flags.setNoSignedWrap(NSW);
51563 Flags.setNoUnsignedWrap(NUW);
51564 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
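// Worked example: (i64 sext (i32 add nsw X, 5)) whose result feeds another
// add/shl becomes (i64 add nsw (i64 sext X), 5), so the constant can end up
// as an LEA displacement, e.g. leaq 5(%rax,%rcx,4), %rdx.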
51567 // If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
51568 // operands and the result of CMOV is not used anywhere else - promote CMOV
51569 // itself instead of promoting its result. This could be beneficial, because:
51570 // 1) X86TargetLowering::EmitLoweredSelect later can do merging of two
51571 // (or more) pseudo-CMOVs only when they go one-after-another and
51572 // getting rid of result extension code after CMOV will help that.
51573 // 2) Promotion of constant CMOV arguments is free, hence the
51574 // {ANY,SIGN,ZERO}_EXTEND will just be deleted.
51575 // 3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this
51576 // promotion is also good in terms of code-size.
51577 // (64-bit CMOV is 4 bytes, which is why we don't do a 32-bit => 64-bit
51578 // conversion.)
51579 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
51580 SDValue CMovN = Extend->getOperand(0);
51581 if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
51584 EVT TargetVT = Extend->getValueType(0);
51585 unsigned ExtendOpcode = Extend->getOpcode();
51588 EVT VT = CMovN.getValueType();
51589 SDValue CMovOp0 = CMovN.getOperand(0);
51590 SDValue CMovOp1 = CMovN.getOperand(1);
51592 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
51593 !isa<ConstantSDNode>(CMovOp1.getNode()))
51596 // Only extend to i32 or i64.
51597 if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
51600 // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
51601 // are free.
51602 if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
51605 // If this is a zero extend to i64, we should only extend to i32 and use a free
51606 // zero extend to finish.
51607 EVT ExtendVT = TargetVT;
51608 if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
51609 ExtendVT = MVT::i32;
51611 CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
51612 CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
51614 SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
51615 CMovN.getOperand(2), CMovN.getOperand(3));
51617 // Finish extending if needed.
51618 if (ExtendVT != TargetVT)
51619 Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
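// e.g. (i64 zext (i16 cmov c1, c2, cond)) is rebuilt as a free zero extend of
// (i32 cmov (zext c1), (zext c2), cond), picking the shorter 32-bit CMOV
// encoding and dropping the explicit extension.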
51624 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
51625 // vector result type.
51626 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
51627 const X86Subtarget &Subtarget) {
51628 SDValue N0 = N->getOperand(0);
51629 EVT VT = N->getValueType(0);
51632 // Only do this combine with AVX512 for vector extends.
51633 if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
51636 // Only combine legal element types.
51637 EVT SVT = VT.getVectorElementType();
51638 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
51639 SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
51642 // We don't have a CMPP instruction for vXf16.
51643 if (N0.getOperand(0).getValueType().getVectorElementType() == MVT::f16)
51644 return SDValue();
51645 // We can only do this if the vector size is 256 bits or less.
51646 unsigned Size = VT.getSizeInBits();
51647 if (Size > 256 && Subtarget.useAVX512Regs())
51650 // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
51651 // those are the only integer compares we have.
51652 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
51653 if (ISD::isUnsignedIntSetCC(CC))
51656 // Only do this combine if the extension will be fully consumed by the setcc.
51657 EVT N00VT = N0.getOperand(0).getValueType();
51658 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
51659 if (Size != MatchingVecType.getSizeInBits())
51662 SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
51664 if (N->getOpcode() == ISD::ZERO_EXTEND)
51665 Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());
51670 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
51671 TargetLowering::DAGCombinerInfo &DCI,
51672 const X86Subtarget &Subtarget) {
51673 SDValue N0 = N->getOperand(0);
51674 EVT VT = N->getValueType(0);
51677 // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
51678 if (!DCI.isBeforeLegalizeOps() &&
51679 N0.getOpcode() == X86ISD::SETCC_CARRY) {
51680 SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
51681 N0->getOperand(1));
51682 bool ReplaceOtherUses = !N0.hasOneUse();
51683 DCI.CombineTo(N, Setcc);
51684 // Replace other uses with a truncate of the widened setcc_carry.
51685 if (ReplaceOtherUses) {
51686 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
51687 N0.getValueType(), Setcc);
51688 DCI.CombineTo(N0.getNode(), Trunc);
51691 return SDValue(N, 0);
51694 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
51697 if (!DCI.isBeforeLegalizeOps())
51700 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
51703 if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), DL, VT, N0,
51704 DAG, DCI, Subtarget))
51707 if (VT.isVector()) {
51708 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
51711 if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
51712 return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
51715 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
51721 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
51722 TargetLowering::DAGCombinerInfo &DCI,
51723 const X86Subtarget &Subtarget) {
51725 EVT VT = N->getValueType(0);
51726 bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();
51728 // Let legalize expand this if it isn't a legal type yet.
51729 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51730 if (!TLI.isTypeLegal(VT))
51733 SDValue A = N->getOperand(IsStrict ? 1 : 0);
51734 SDValue B = N->getOperand(IsStrict ? 2 : 1);
51735 SDValue C = N->getOperand(IsStrict ? 3 : 2);
51737 // If the operation allows fast-math and the target does not support FMA,
51738 // split this into mul+add to avoid libcall(s).
51739 SDNodeFlags Flags = N->getFlags();
51740 if (!IsStrict && Flags.hasAllowReassociation() &&
51741 TLI.isOperationExpand(ISD::FMA, VT)) {
51742 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, VT, A, B, Flags);
51743 return DAG.getNode(ISD::FADD, dl, VT, Fmul, C, Flags);
51746 EVT ScalarVT = VT.getScalarType();
51747 if (((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
51748 !Subtarget.hasAnyFMA()) &&
51749 !(ScalarVT == MVT::f16 && Subtarget.hasFP16()))
51752 auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
51753 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
51754 bool LegalOperations = !DCI.isBeforeLegalizeOps();
51755 if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
51756 CodeSize)) {
51757 V = NegV;
51758 return true;
51759 }
51760 // Look through extract_vector_elts. If it comes from an FNEG, create a
51761 // new extract from the FNEG input.
51762 if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
51763 isNullConstant(V.getOperand(1))) {
51764 SDValue Vec = V.getOperand(0);
51765 if (SDValue NegV = TLI.getCheaperNegatedExpression(
51766 Vec, DAG, LegalOperations, CodeSize)) {
51767 V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
51768 NegV, V.getOperand(1));
51776 // Do not convert the passthru input of scalar intrinsics.
51777 // FIXME: We could allow negations of the lower element only.
51778 bool NegA = invertIfNegative(A);
51779 bool NegB = invertIfNegative(B);
51780 bool NegC = invertIfNegative(C);
51782 if (!NegA && !NegB && !NegC)
51785 unsigned NewOpcode =
51786 negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
51788 // Propagate fast-math-flags to new FMA node.
51789 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
51790 if (IsStrict) {
51791 assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
51792 return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
51793 {N->getOperand(0), A, B, C});
51794 }
51795 if (N->getNumOperands() == 4)
51796 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
51797 return DAG.getNode(NewOpcode, dl, VT, A, B, C);
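// e.g. (fma (fneg a), b, c) is caught by invertIfNegative above and rebuilt
// as (x86fnmadd a, b, c), folding the negation into the FMA opcode itself.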
51801 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
51802 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
51803 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
51804 TargetLowering::DAGCombinerInfo &DCI) {
51806 EVT VT = N->getValueType(0);
51807 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51808 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
51809 bool LegalOperations = !DCI.isBeforeLegalizeOps();
51811 SDValue N2 = N->getOperand(2);
51813 SDValue NegN2 =
51814 TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
51815 if (!NegN2)
51816 return SDValue();
51817 unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
51819 if (N->getNumOperands() == 4)
51820 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
51821 NegN2, N->getOperand(3));
51822 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
51826 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
51827 TargetLowering::DAGCombinerInfo &DCI,
51828 const X86Subtarget &Subtarget) {
51830 SDValue N0 = N->getOperand(0);
51831 EVT VT = N->getValueType(0);
51833 // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
51834 // FIXME: Is this needed? We don't seem to have any tests for it.
51835 if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
51836 N0.getOpcode() == X86ISD::SETCC_CARRY) {
51837 SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
51838 N0->getOperand(1));
51839 bool ReplaceOtherUses = !N0.hasOneUse();
51840 DCI.CombineTo(N, Setcc);
51841 // Replace other uses with a truncate of the widened setcc_carry.
51842 if (ReplaceOtherUses) {
51843 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
51844 N0.getValueType(), Setcc);
51845 DCI.CombineTo(N0.getNode(), Trunc);
51848 return SDValue(N, 0);
51851 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
51854 if (DCI.isBeforeLegalizeOps())
51855 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
51858 if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), dl, VT, N0,
51859 DAG, DCI, Subtarget))
51863 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
51866 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
51869 if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
51872 // TODO: Combine with any target/faux shuffle.
51873 if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
51874 VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
51875 SDValue N00 = N0.getOperand(0);
51876 SDValue N01 = N0.getOperand(1);
51877 unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
51878 APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
51879 if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
51880 (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
51881 return concatSubVectors(N00, N01, DAG, dl);
51888 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
51889 /// recognizable memcmp expansion.
51890 static bool isOrXorXorTree(SDValue X, bool Root = true) {
51891 if (X.getOpcode() == ISD::OR)
51892 return isOrXorXorTree(X.getOperand(0), false) &&
51893 isOrXorXorTree(X.getOperand(1), false);
51896 return X.getOpcode() == ISD::XOR;
51899 /// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
51900 /// expansion.
51901 template <typename F>
51902 static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
51903 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
51904 SDValue Op0 = X.getOperand(0);
51905 SDValue Op1 = X.getOperand(1);
51906 if (X.getOpcode() == ISD::OR) {
51907 SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
51908 SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
51909 if (VecVT != CmpVT)
51910 return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
51911 if (HasPT)
51912 return DAG.getNode(ISD::OR, DL, VecVT, A, B);
51913 return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
51915 if (X.getOpcode() == ISD::XOR) {
51916 SDValue A = SToV(Op0);
51917 SDValue B = SToV(Op1);
51918 if (VecVT != CmpVT)
51919 return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
51920 if (HasPT)
51921 return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
51922 return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
51924 llvm_unreachable("Impossible");
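// e.g. a 32-byte memcmp expansion produces
//   (or (xor A, B), (xor C, D))
// which this helper turns into two vector compares whose results are OR'ed
// (PTEST/KORTEST paths) or AND'ed (pre-SSE41 MOVMSK path) before the final
// scalar test.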
51927 /// Try to map a 128-bit or larger integer comparison to vector instructions
51928 /// before type legalization splits it up into chunks.
51929 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
51930 const X86Subtarget &Subtarget) {
51931 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
51932 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
51934 // We're looking for an oversized integer equality comparison.
51935 SDValue X = SetCC->getOperand(0);
51936 SDValue Y = SetCC->getOperand(1);
51937 EVT OpVT = X.getValueType();
51938 unsigned OpSize = OpVT.getSizeInBits();
51939 if (!OpVT.isScalarInteger() || OpSize < 128)
51942 // Ignore a comparison with zero because that gets special treatment in
51943 // EmitTest(). But make an exception for the special case of a pair of
51944 // logically-combined vector-sized operands compared to zero. This pattern may
51945 // be generated by the memcmp expansion pass with oversized integer compares
51947 bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
51948 if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
51951 // Don't perform this combine if constructing the vector will be expensive.
51952 auto IsVectorBitCastCheap = [](SDValue X) {
51953 X = peekThroughBitcasts(X);
51954 return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
51955 X.getOpcode() == ISD::LOAD;
51957 if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
51958 !IsOrXorXorTreeCCZero)
51961 EVT VT = SetCC->getValueType(0);
51964 // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
51965 // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
51966 // Otherwise use PCMPEQ (plus AND) and mask testing.
51967 bool NoImplicitFloatOps =
51968 DAG.getMachineFunction().getFunction().hasFnAttribute(
51969 Attribute::NoImplicitFloat);
51970 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
51971 ((OpSize == 128 && Subtarget.hasSSE2()) ||
51972 (OpSize == 256 && Subtarget.hasAVX()) ||
51973 (OpSize == 512 && Subtarget.useAVX512Regs()))) {
51974 bool HasPT = Subtarget.hasSSE41();
51976 // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
51977 // vector registers are essentially free. (Technically, widening registers
51978 // prevents load folding, but the tradeoff is worth it.)
51979 bool PreferKOT = Subtarget.preferMaskRegisters();
51980 bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
51982 EVT VecVT = MVT::v16i8;
51983 EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
51984 if (OpSize == 256) {
51985 VecVT = MVT::v32i8;
51986 CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
51988 EVT CastVT = VecVT;
51989 bool NeedsAVX512FCast = false;
51990 if (OpSize == 512 || NeedZExt) {
51991 if (Subtarget.hasBWI()) {
51992 VecVT = MVT::v64i8;
51993 CmpVT = MVT::v64i1;
51997 VecVT = MVT::v16i32;
51998 CmpVT = MVT::v16i1;
51999 CastVT = OpSize == 512 ? VecVT :
52000 OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
52001 NeedsAVX512FCast = true;
52005 auto ScalarToVector = [&](SDValue X) -> SDValue {
52006 bool TmpZext = false;
52007 EVT TmpCastVT = CastVT;
52008 if (X.getOpcode() == ISD::ZERO_EXTEND) {
52009 SDValue OrigX = X.getOperand(0);
52010 unsigned OrigSize = OrigX.getScalarValueSizeInBits();
52011 if (OrigSize < OpSize) {
52012 if (OrigSize == 128) {
52013 TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
52014 X = OrigX;
52015 TmpZext = true;
52016 } else if (OrigSize == 256) {
52017 TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
52018 X = OrigX;
52019 TmpZext = true;
52020 }
52021 }
52022 }
52023 X = DAG.getBitcast(TmpCastVT, X);
52024 if (!NeedZExt && !TmpZext)
52025 return X;
52026 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
52027 DAG.getConstant(0, DL, VecVT), X,
52028 DAG.getVectorIdxConstant(0, DL));
52032 if (IsOrXorXorTreeCCZero) {
52033 // This is a bitwise-combined equality comparison of 2 pairs of vectors:
52034 // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
52035 // Use 2 vector equality compares and 'and' the results before doing a
52036 // MOVMSK.
52037 Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
52039 SDValue VecX = ScalarToVector(X);
52040 SDValue VecY = ScalarToVector(Y);
52041 if (VecVT != CmpVT) {
52042 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
52043 } else if (HasPT) {
52044 Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
52046 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
52049 // AVX512 should emit a setcc that will lower to kortest.
52050 if (VecVT != CmpVT) {
52051 EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
52052 CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
52053 return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
52054 DAG.getConstant(0, DL, KRegVT), CC);
52057 SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
52059 SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
52060 X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
52061 SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
52062 return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
52064 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
52065 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
52066 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
52067 assert(Cmp.getValueType() == MVT::v16i8 &&
52068 "Non 128-bit vector on pre-SSE41 target");
52069 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
52070 SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
52071 return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
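// e.g. on an SSE2-only target, (i1 seteq i128:X, i128:Y) becomes roughly:
//   %v = PCMPEQB (bitcast X), (bitcast Y)
//   %m = PMOVMSKB %v
//   %r = seteq %m, 0xFFFF      ; all 16 bytes matched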
52077 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
52078 TargetLowering::DAGCombinerInfo &DCI,
52079 const X86Subtarget &Subtarget) {
52080 const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
52081 const SDValue LHS = N->getOperand(0);
52082 const SDValue RHS = N->getOperand(1);
52083 EVT VT = N->getValueType(0);
52084 EVT OpVT = LHS.getValueType();
52087 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
52088 if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
52091 if (VT == MVT::i1 && isNullConstant(RHS)) {
52094 MatchVectorAllZeroTest(LHS, CC, DL, Subtarget, DAG, X86CC))
52095 return DAG.getNode(ISD::TRUNCATE, DL, VT,
52096 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, X86CC, V));
52099 if (OpVT.isScalarInteger()) {
52100 // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
52101 // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
52102 auto MatchOrCmpEq = [&](SDValue N0, SDValue N1) {
52103 if (N0.getOpcode() == ISD::OR && N0->hasOneUse()) {
52104 if (N0.getOperand(0) == N1)
52105 return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
52107 if (N0.getOperand(1) == N1)
52108 return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
52113 if (SDValue AndN = MatchOrCmpEq(LHS, RHS))
52114 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52115 if (SDValue AndN = MatchOrCmpEq(RHS, LHS))
52116 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52118 // cmpeq(and(X,Y),Y) --> cmpeq(and(~X,Y),0)
52119 // cmpne(and(X,Y),Y) --> cmpne(and(~X,Y),0)
52120 auto MatchAndCmpEq = [&](SDValue N0, SDValue N1) {
52121 if (N0.getOpcode() == ISD::AND && N0->hasOneUse()) {
52122 if (N0.getOperand(0) == N1)
52123 return DAG.getNode(ISD::AND, DL, OpVT, N1,
52124 DAG.getNOT(DL, N0.getOperand(1), OpVT));
52125 if (N0.getOperand(1) == N1)
52126 return DAG.getNode(ISD::AND, DL, OpVT, N1,
52127 DAG.getNOT(DL, N0.getOperand(0), OpVT));
52131 if (SDValue AndN = MatchAndCmpEq(LHS, RHS))
52132 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52133 if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
52134 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52136 // cmpeq(trunc(x),0) --> cmpeq(x,0)
52137 // cmpne(trunc(x),0) --> cmpne(x,0)
52138 // iff x upper bits are zero.
52139 // TODO: Add support for RHS to be truncate as well?
52140 if (LHS.getOpcode() == ISD::TRUNCATE &&
52141 LHS.getOperand(0).getScalarValueSizeInBits() >= 32 &&
52142 isNullConstant(RHS) && !DCI.isBeforeLegalize()) {
52143 EVT SrcVT = LHS.getOperand(0).getValueType();
52144 APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
52145 OpVT.getScalarSizeInBits());
52146 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52147 if (DAG.MaskedValueIsZero(LHS.getOperand(0), UpperBits) &&
52148 TLI.isTypeLegal(LHS.getOperand(0).getValueType()))
52149 return DAG.getSetCC(DL, VT, LHS.getOperand(0),
52150 DAG.getConstant(0, DL, SrcVT), CC);
52155 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
52156 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
52157 // Using temporaries to avoid messing up operand ordering for later
52158 // transformations if this doesn't work.
52159 SDValue Op0 = LHS;
52160 SDValue Op1 = RHS;
52161 ISD::CondCode TmpCC = CC;
52162 // Put build_vector on the right.
52163 if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
52164 std::swap(Op0, Op1);
52165 TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
52166 }
52168 bool IsSEXT0 =
52169 (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
52170 (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
52171 bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
52173 if (IsSEXT0 && IsVZero1) {
52174 assert(VT == Op0.getOperand(0).getValueType() &&
52175 "Unexpected operand type");
52176 if (TmpCC == ISD::SETGT)
52177 return DAG.getConstant(0, DL, VT);
52178 if (TmpCC == ISD::SETLE)
52179 return DAG.getConstant(1, DL, VT);
52180 if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
52181 return DAG.getNOT(DL, Op0.getOperand(0), VT);
52183 assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
52184 "Unexpected condition code!");
52185 return Op0.getOperand(0);
52189 // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
52190 // pre-promote its result type since vXi1 vectors don't get promoted
52191 // during type legalization.
52192 // NOTE: The element count check is to ignore operand types that need to
52193 // go through type promotion to a 128-bit vector.
52194 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
52195 VT.getVectorElementType() == MVT::i1 &&
52196 (OpVT.getVectorElementType() == MVT::i8 ||
52197 OpVT.getVectorElementType() == MVT::i16)) {
52198 SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
52199 return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
52202 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
52203 // to avoid scalarization via legalization because v4i32 is not a legal type.
52204 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
52205 LHS.getValueType() == MVT::v4f32)
52206 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
52208 // X pred 0.0 --> X pred -X
52209 // If the negation of X already exists, use it in the comparison. This removes
52210 // the need to materialize 0.0 and allows matching to SSE's MIN/MAX
52211 // instructions in patterns with a 'select' node.
  if (isNullFPScalarOrVectorConst(RHS)) {
    SDVTList FNegVT = DAG.getVTList(OpVT);
    if (SDNode *FNeg = DAG.getNodeIfExists(ISD::FNEG, FNegVT, {LHS}))
      return DAG.getSetCC(DL, VT, LHS, SDValue(FNeg, 0), CC);
  }

  return SDValue();
}

static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDValue Src = N->getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = N->getSimpleValueType(0);
  unsigned NumBits = VT.getScalarSizeInBits();
  unsigned NumElts = SrcVT.getVectorNumElements();
  unsigned NumBitsPerElt = SrcVT.getScalarSizeInBits();
  assert(VT == MVT::i32 && NumElts <= NumBits && "Unexpected MOVMSK types");

  // Perform constant folding.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
    APInt Imm(32, 0);
    for (unsigned Idx = 0; Idx != NumElts; ++Idx)
      if (!UndefElts[Idx] && EltBits[Idx].isNegative())
        Imm.setBit(Idx);

    return DAG.getConstant(Imm, SDLoc(N), VT);
  }

  // Look through int->fp bitcasts that don't change the element width.
  unsigned EltWidth = SrcVT.getScalarSizeInBits();
  if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
      Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
    return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));

  // Fold movmsk(not(x)) -> not(movmsk(x)) to improve folding of movmsk results
  // with scalar comparisons.
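  // e.g. (illustrative): NOT flips every sign bit, so for v4i32 this is
  // movmsk(not(x)) == movmsk(x) ^ 0b1111, a single scalar XOR of the result.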
  if (SDValue NotSrc = IsNOT(Src, DAG)) {
    SDLoc DL(N);
    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
    NotSrc = DAG.getBitcast(SrcVT, NotSrc);
    return DAG.getNode(ISD::XOR, DL, VT,
                       DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
                       DAG.getConstant(NotMask, DL, VT));
  }

  // Fold movmsk(icmp_sgt(x,-1)) -> not(movmsk(x)) to improve folding of movmsk
  // results with scalar comparisons.
  if (Src.getOpcode() == X86ISD::PCMPGT &&
      ISD::isBuildVectorAllOnes(Src.getOperand(1).getNode())) {
    SDLoc DL(N);
    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
    return DAG.getNode(ISD::XOR, DL, VT,
                       DAG.getNode(X86ISD::MOVMSK, DL, VT, Src.getOperand(0)),
                       DAG.getConstant(NotMask, DL, VT));
  }

  // Fold movmsk(icmp_eq(and(x,c1),0)) -> movmsk(not(shl(x,c2)))
  // iff pow2splat(c1).
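  // e.g. (illustrative): with vXi8 x and c1 = splat(0x10), only bit 4 of each
  // lane matters; shifting left by 3 (the leading-zero count of 0x10 in i8)
  // moves it into the sign bit, and the NOT makes the sign bit set exactly
  // when (x & c1) == 0, matching the original PCMPEQ-with-zero.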
  if (Src.getOpcode() == X86ISD::PCMPEQ &&
      Src.getOperand(0).getOpcode() == ISD::AND &&
      ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
    SDValue LHS = Src.getOperand(0).getOperand(0);
    SDValue RHS = Src.getOperand(0).getOperand(1);
    KnownBits KnownRHS = DAG.computeKnownBits(RHS);
    if (KnownRHS.isConstant() && KnownRHS.getConstant().isPowerOf2()) {
      SDLoc DL(N);
      MVT ShiftVT = SrcVT;
      if (ShiftVT.getScalarType() == MVT::i8) {
        // vXi8 shifts - we only care about the signbit so can use PSLLW.
        ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
        LHS = DAG.getBitcast(ShiftVT, LHS);
      }
      unsigned ShiftAmt = KnownRHS.getConstant().countLeadingZeros();
      LHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT, LHS,
                                       ShiftAmt, DAG);
      LHS = DAG.getNOT(DL, DAG.getBitcast(SrcVT, LHS), SrcVT);
      return DAG.getNode(X86ISD::MOVMSK, DL, VT, LHS);
    }
  }

  // Simplify the inputs.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedMask(APInt::getAllOnes(NumBits));
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const X86Subtarget &Subtarget) {
  auto *MemOp = cast<X86MaskedGatherScatterSDNode>(N);
  SDValue BasePtr = MemOp->getBasePtr();
  SDValue Index = MemOp->getIndex();
  SDValue Scale = MemOp->getScale();
  SDValue Mask = MemOp->getMask();

  // Attempt to fold an index scale into the scale value directly.
  // For smaller indices, implicit sext is performed BEFORE scale, preventing
  // this fold under most circumstances.
  // TODO: Move this into X86DAGToDAGISel::matchVectorAddressRecursively?
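  // e.g. (illustrative): index = vshli(x, 1) with scale 2 can become
  // index = x with scale 4, and (add x, x) is handled as a shift-by-one.
  // This is only safe when the index is already pointer-width, so no implicit
  // sign extension occurs between the shift and the scale.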
  if ((Index.getOpcode() == X86ISD::VSHLI ||
       (Index.getOpcode() == ISD::ADD &&
        Index.getOperand(0) == Index.getOperand(1))) &&
      isa<ConstantSDNode>(Scale) &&
      BasePtr.getScalarValueSizeInBits() == Index.getScalarValueSizeInBits()) {
    unsigned ShiftAmt =
        Index.getOpcode() == ISD::ADD ? 1 : Index.getConstantOperandVal(1);
    uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
    uint64_t NewScaleAmt = ScaleAmt * (1ULL << ShiftAmt);
    if (isPowerOf2_64(NewScaleAmt) && NewScaleAmt <= 8) {
      SDValue NewIndex = Index.getOperand(0);
      SDValue NewScale =
          DAG.getTargetConstant(NewScaleAmt, SDLoc(N), Scale.getValueType());
      if (N->getOpcode() == X86ISD::MGATHER)
        return getAVX2GatherNode(N->getOpcode(), SDValue(N, 0), DAG,
                                 MemOp->getOperand(1), Mask,
                                 MemOp->getBasePtr(), NewIndex, NewScale,
                                 MemOp->getChain(), Subtarget);
      if (N->getOpcode() == X86ISD::MSCATTER)
        return getScatterNode(N->getOpcode(), SDValue(N, 0), DAG,
                              MemOp->getOperand(1), Mask, MemOp->getBasePtr(),
                              NewIndex, NewScale, MemOp->getChain(), Subtarget);
    }
  }

  // With vector masks we only demand the upper bit of the mask.
  if (Mask.getScalarValueSizeInBits() != 1) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
                                    SDValue Index, SDValue Base, SDValue Scale,
                                    SelectionDAG &DAG) {
  SDLoc DL(GorS);

  if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
    SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
                      Gather->getMask(), Base, Index, Scale };
    return DAG.getMaskedGather(Gather->getVTList(),
                               Gather->getMemoryVT(), DL, Ops,
                               Gather->getMemOperand(),
                               Gather->getIndexType(),
                               Gather->getExtensionType());
  }
  auto *Scatter = cast<MaskedScatterSDNode>(GorS);
  SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
                    Scatter->getMask(), Base, Index, Scale };
  return DAG.getMaskedScatter(Scatter->getVTList(),
                              Scatter->getMemoryVT(), DL,
                              Ops, Scatter->getMemOperand(),
                              Scatter->getIndexType(),
                              Scatter->isTruncatingStore());
}

static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc DL(N);
  auto *GorS = cast<MaskedGatherScatterSDNode>(N);
  SDValue Index = GorS->getIndex();
  SDValue Base = GorS->getBasePtr();
  SDValue Scale = GorS->getScale();

  if (DCI.isBeforeLegalize()) {
    unsigned IndexWidth = Index.getScalarValueSizeInBits();

    // Shrink constant indices if they are larger than 32-bits.
    // Only do this before legalize types since v2i64 could become v2i32.
    // FIXME: We could check that the type is legal if we're after legalize
    // types, but then we would need to construct test cases where that happens.
    // FIXME: We could support more than just constant vectors, but we need to
    // be careful with costing. A truncate that can be optimized out would be
    // fine. Otherwise we might only want to create a truncate if it avoids a
    // split.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
      if (BV->isConstant() && IndexWidth > 32 &&
          DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
        EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
        Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
        return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
      }
    }

    // Shrink any sign/zero extends from 32 or smaller to larger than 32 if
    // there are sufficient sign bits. Only do this before legalize types to
    // avoid creating illegal types in truncate.
    if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
         Index.getOpcode() == ISD::ZERO_EXTEND) &&
        IndexWidth > 32 &&
        Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
        DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
      EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
      Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
      return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
    }

    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    // Try to move splat constant adders from the index operand to the base
    // pointer operand, taking care to multiply by the scale. We can only do
    // this when the index element type is the same as the pointer type.
    // Otherwise we need to be sure the math doesn't wrap before the scale.
    if (Index.getOpcode() == ISD::ADD &&
        Index.getValueType().getVectorElementType() == PtrVT &&
        isa<ConstantSDNode>(Scale)) {
      uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
      if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
        BitVector UndefElts;
        if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
          // FIXME: Allow non-constant?
          if (UndefElts.none()) {
            // Apply the scale.
            APInt Adder = C->getAPIntValue() * ScaleAmt;
            // Add it to the existing base.
            Base = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
                               DAG.getConstant(Adder, DL, PtrVT));
            Index = Index.getOperand(0);
            return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
          }
        }

        // It's also possible base is just a constant. In that case, just
        // replace it with 0 and move the displacement into the index.
        if (BV->isConstant() && isa<ConstantSDNode>(Base) &&
            isOneConstant(Scale)) {
          SDValue Splat = DAG.getSplatBuildVector(Index.getValueType(), DL, Base);
          // Combine the constant build_vector and the constant base.
          Splat = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
                              Index.getOperand(1), Splat);
          // Add to the LHS of the original Index add.
          Index = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
                              Index.getOperand(0), Splat);
          Base = DAG.getConstant(0, DL, Base.getValueType());
          return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
        }
      }
    }
  }

  if (DCI.isBeforeLegalizeOps()) {
    unsigned IndexWidth = Index.getScalarValueSizeInBits();

    // Make sure the index is either i32 or i64.
    if (IndexWidth != 32 && IndexWidth != 64) {
      MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
      EVT IndexVT = Index.getValueType().changeVectorElementType(EltVT);
      Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
      return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
    }
  }

  // With vector masks we only demand the upper bit of the mask.
  SDValue Mask = GorS->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
  SDValue EFLAGS = N->getOperand(1);

  // Try to simplify the EFLAGS and condition code operands.
  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
    return getSETCC(CC, Flags, DL, DAG);

  return SDValue();
}

/// Optimize branch condition evaluation.
static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue EFLAGS = N->getOperand(3);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));

  // Try to simplify the EFLAGS and condition code operands.
  // Make sure to not keep references to operands, as combineSetCCEFLAGS can
  // RAUW them under us.
  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
    SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
                       N->getOperand(1), Cond, Flags);
  }

  return SDValue();
}

// TODO: Could we move this to DAGCombine?
static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
                                                  SelectionDAG &DAG) {
  // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
  // to optimize away the operation when it's from a constant.
  //
  // The general transformation is:
  //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
  //       AND(VECTOR_CMP(x,y), constant2)
  //    constant2 = UNARYOP(constant)
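  //
  // e.g. (illustrative): sint_to_fp(and(vector_cmp(x,y), <1,1,1,1>)) has lanes
  // that are all-ones or all-zeros, so each converted lane is 1.0f or 0.0f and
  // the node can become and(vector_cmp(x,y), bitcast <1.0,1.0,1.0,1.0>), since
  // the all-zeros bit pattern is also 0.0f.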
  // Early exit if this isn't a vector operation, the operand of the
  // unary operation isn't a bitwise AND, or if the sizes of the operations
  // aren't the same.
  EVT VT = N->getValueType(0);
  bool IsStrict = N->isStrictFPOpcode();
  unsigned NumEltBits = VT.getScalarSizeInBits();
  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
  if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
      DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
      VT.getSizeInBits() != Op0.getValueSizeInBits())
    return SDValue();

  // Now check that the other operand of the AND is a constant. We could
  // make the transformation for non-constant splats as well, but it's unclear
  // that would be a benefit as it would not eliminate any operations, just
  // perform one more step in scalar code before moving to the vector unit.
  if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
    // Bail out if the vector isn't a constant.
    if (!BV->isConstant())
      return SDValue();

    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
    // Create a new constant of the appropriate type for the transformed
    // DAG.
    SDValue SourceConst;
    if (IsStrict)
      SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
                                {N->getOperand(0), SDValue(BV, 0)});
    else
      SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
    // The AND node needs bitcasts to/from an integer vector type around it.
    SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
                                 MaskConst);
    SDValue Res = DAG.getBitcast(VT, NewAnd);
    if (IsStrict)
      return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
    return Res;
  }

  return SDValue();
}

/// If we are converting a value to floating-point, try to replace scalar
/// truncate of an extracted vector element with a bitcast. This tries to keep
/// the sequence on XMM registers rather than moving between vector and GPRs.
static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
  // TODO: This is currently only used by combineSIntToFP, but it is
  // generalized to allow being called by any similar cast opcode.
  // TODO: Consider merging this into lowering: vectorizeExtractedCast().
  SDValue Trunc = N->getOperand(0);
  if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
    return SDValue();

  SDValue ExtElt = Trunc.getOperand(0);
  if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(ExtElt.getOperand(1)))
    return SDValue();

  EVT TruncVT = Trunc.getValueType();
  EVT SrcVT = ExtElt.getValueType();
  unsigned DestWidth = TruncVT.getSizeInBits();
  unsigned SrcWidth = SrcVT.getSizeInBits();
  if (SrcWidth % DestWidth != 0)
    return SDValue();

  // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
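  // e.g. (illustrative): sitofp(trunc i64->i32 (extelt v2i64 X, 0)) becomes
  // sitofp(extelt (bitcast X to v4i32), 0); on little-endian x86 the low 32
  // bits of element 0 are element 0 of the bitcast vector, so the value stays
  // in an XMM register.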
  EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
  unsigned VecWidth = SrcVecVT.getSizeInBits();
  unsigned NumElts = VecWidth / DestWidth;
  EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
  SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
  SDLoc DL(N);
  SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
                                  BitcastVec, ExtElt.getOperand(1));
  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
}

static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  bool IsStrict = N->isStrictFPOpcode();
  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
  EVT VT = N->getValueType(0);
  EVT InVT = Op0.getValueType();

  // UINT_TO_FP(vXi1~15)  -> UINT_TO_FP(ZEXT(vXi1~15  to vXi16))
  // UINT_TO_FP(vXi17~31) -> UINT_TO_FP(ZEXT(vXi17~31 to vXi32))
  // UINT_TO_FP(vXi33~63) -> UINT_TO_FP(ZEXT(vXi33~63 to vXi64))
  if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
    unsigned ScalarSize = InVT.getScalarSizeInBits();
    if (ScalarSize == 16 || ScalarSize == 32 || ScalarSize >= 64)
      return SDValue();
    SDLoc dl(N);
    EVT DstVT = EVT::getVectorVT(*DAG.getContext(),
                                 ScalarSize < 16   ? MVT::i16
                                 : ScalarSize < 32 ? MVT::i32
                                                   : MVT::i64,
                                 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_UINT_TO_FP, dl, {VT, MVT::Other},
                         {N->getOperand(0), P});
    return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P);
  }

  // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
  // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
  // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
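  // A lane zero-extended from i8/i16 is at most 65535, so the i32 sign bit is
  // always clear and the signed conversion gives identical results; SINT_TO_FP
  // is used because vector UINT_TO_FP is generally only legal with AVX512.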
  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
      VT.getScalarType() != MVT::f16) {
    SDLoc dl(N);
    EVT DstVT = InVT.changeVectorElementType(MVT::i32);
    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);

    // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                         {N->getOperand(0), P});
    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }

  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(Op0)) {
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
                         {N->getOperand(0), Op0});
    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
  }

  return SDValue();
}

static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
  // First try to optimize away the conversion entirely when it's
  // conditionally from a constant. Vectors only.
  bool IsStrict = N->isStrictFPOpcode();
  if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
    return Res;

  // Now move on to more general possibilities.
  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
  EVT VT = N->getValueType(0);
  EVT InVT = Op0.getValueType();

  // SINT_TO_FP(vXi1~15)  -> SINT_TO_FP(SEXT(vXi1~15  to vXi16))
  // SINT_TO_FP(vXi17~31) -> SINT_TO_FP(SEXT(vXi17~31 to vXi32))
  // SINT_TO_FP(vXi33~63) -> SINT_TO_FP(SEXT(vXi33~63 to vXi64))
  if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
    unsigned ScalarSize = InVT.getScalarSizeInBits();
    if (ScalarSize == 16 || ScalarSize == 32 || ScalarSize >= 64)
      return SDValue();
    SDLoc dl(N);
    EVT DstVT = EVT::getVectorVT(*DAG.getContext(),
                                 ScalarSize < 16   ? MVT::i16
                                 : ScalarSize < 32 ? MVT::i32
                                                   : MVT::i64,
                                 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                         {N->getOperand(0), P});
    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }

  // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
  // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
  // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
      VT.getScalarType() != MVT::f16) {
    SDLoc dl(N);
    EVT DstVT = InVT.changeVectorElementType(MVT::i32);
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                         {N->getOperand(0), P});
    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }

  // Without AVX512DQ we only support i64 to float scalar conversion. For both
  // vectors and scalars, see if we know that the upper bits are all the sign
  // bit, in which case we can truncate the input to i32 and convert from that.
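  // e.g. (illustrative): a v2i64 that was sign-extended from v2i32 has at
  // least 33 sign bits, so every element fits in i32 and truncating before
  // the signed conversion is lossless.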
  if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
    unsigned BitWidth = InVT.getScalarSizeInBits();
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
    if (NumSignBits >= (BitWidth - 31)) {
      EVT TruncVT = MVT::i32;
      if (InVT.isVector())
        TruncVT = InVT.changeVectorElementType(TruncVT);
      SDLoc dl(N);
      if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
        if (IsStrict)
          return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                             {N->getOperand(0), Trunc});
        return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
      }
      // If we're after legalize and the type is v2i32 we need to shuffle and
      // use CVTSI2P.
      assert(InVT == MVT::v2i64 && "Unexpected VT!");
      SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
                                          {0, 2, -1, -1});
      if (IsStrict)
        return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
                           {N->getOperand(0), Shuf});
      return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
    }
  }

  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
  // a 32-bit target where SSE doesn't support i64->FP operations.
  if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
      Op0.getOpcode() == ISD::LOAD) {
    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());

    // This transformation is not supported if the result type is f16 or f128.
    if (VT == MVT::f16 || VT == MVT::f128)
      return SDValue();

    // If we have AVX512DQ we can use packed conversion instructions unless
    // the VT is f80.
    if (Subtarget.hasDQI() && VT != MVT::f80)
      return SDValue();

    if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
        Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
      std::pair<SDValue, SDValue> Tmp =
          Subtarget.getTargetLowering()->BuildFILD(
              VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
              Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
      return Tmp.first;
    }
  }

  if (IsStrict)
    return SDValue();

  if (SDValue V = combineToFPTruncExtElt(N, DAG))
    return V;

  return SDValue();
}

static bool needCarryOrOverflowFlag(SDValue Flags) {
  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");

  for (const SDNode *User : Flags->uses()) {
    X86::CondCode CC;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return true;
    case X86ISD::SETCC:
    case X86ISD::SETCC_CARRY:
      CC = (X86::CondCode)User->getConstantOperandVal(0);
      break;
    case X86ISD::BRCOND:
    case X86ISD::CMOV:
      CC = (X86::CondCode)User->getConstantOperandVal(2);
      break;
    }

    switch (CC) {
    default: break;
    case X86::COND_A: case X86::COND_AE:
    case X86::COND_B: case X86::COND_BE:
    case X86::COND_O: case X86::COND_NO:
    case X86::COND_G: case X86::COND_GE:
    case X86::COND_L: case X86::COND_LE:
      return true;
    }
  }

  return false;
}

static bool onlyZeroFlagUsed(SDValue Flags) {
  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");

  for (const SDNode *User : Flags->uses()) {
    unsigned CCOpNo;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return false;
    case X86ISD::SETCC:
    case X86ISD::SETCC_CARRY:
      CCOpNo = 0;
      break;
    case X86ISD::BRCOND:
    case X86ISD::CMOV:
      CCOpNo = 2;
      break;
    }

    X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
    if (CC != X86::COND_E && CC != X86::COND_NE)
      return false;
  }

  return true;
}

/// If this is an add or subtract where one operand is produced by a cmp+setcc,
/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
/// with CMP+{ADC, SBB}.
/// Also try (ADD/SUB)+(AND(SRL,1)) bit extraction pattern with BT+{ADC, SBB}.
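/// For example (illustrative): 'X + (zext (setb (cmp A, B)))' becomes
/// 'CMP A, B' followed by 'ADC X, 0', consuming the carry flag directly
/// instead of materializing the 0/1 setcc result in a register.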
static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
                                         SDValue X, SDValue Y,
                                         SelectionDAG &DAG,
                                         bool ZeroSecondOpOnly = false) {
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // Look through a one-use zext.
  if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse())
    Y = Y.getOperand(0);

  X86::CondCode CC;
  SDValue EFLAGS;
  if (Y.getOpcode() == X86ISD::SETCC && Y.hasOneUse()) {
    CC = (X86::CondCode)Y.getConstantOperandVal(0);
    EFLAGS = Y.getOperand(1);
  } else if (Y.getOpcode() == ISD::AND && isOneConstant(Y.getOperand(1)) &&
             Y.hasOneUse()) {
    EFLAGS = LowerAndToBT(Y, ISD::SETNE, DL, DAG, CC);
  }

  if (!EFLAGS)
    return SDValue();

  // If X is -1 or 0, then we have an opportunity to avoid constants required
  // in the general case below.
  auto *ConstantX = dyn_cast<ConstantSDNode>(X);
  if (ConstantX && !ZeroSecondOpOnly) {
    if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnes()) ||
        (IsSub && CC == X86::COND_B && ConstantX->isZero())) {
      // This is a complicated way to get -1 or 0 from the carry flag:
      // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
      //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         EFLAGS);
    }

    if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnes()) ||
        (IsSub && CC == X86::COND_A && ConstantX->isZero())) {
      if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
          EFLAGS.getValueType().isInteger() &&
          !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
        // Swap the operands of a SUB, and we have the same pattern as above.
        // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
        //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
        SDValue NewSub = DAG.getNode(
            X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
            EFLAGS.getOperand(1), EFLAGS.getOperand(0));
        SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
        return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                           DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                           NewEFLAGS);
      }
    }
  }

  if (CC == X86::COND_B) {
    // X + SETB Z --> adc X, 0
    // X - SETB Z --> sbb X, 0
    return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
                       DAG.getVTList(VT, MVT::i32), X,
                       DAG.getConstant(0, DL, VT), EFLAGS);
  }

  if (ZeroSecondOpOnly)
    return SDValue();

  if (CC == X86::COND_A) {
    // Try to convert COND_A into COND_B in an attempt to facilitate
    // materializing "setb reg".
    //
    // Do not flip "e > c", where "c" is a constant, because Cmp instruction
    // cannot take an immediate as its first operand.
    //
    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
        EFLAGS.getValueType().isInteger() &&
        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub =
          DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
                      EFLAGS.getOperand(1), EFLAGS.getOperand(0));
      SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
      return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
                         DAG.getVTList(VT, MVT::i32), X,
                         DAG.getConstant(0, DL, VT), NewEFLAGS);
    }
  }

  if (CC == X86::COND_AE) {
    // X + SETAE --> sbb X, -1
    // X - SETAE --> adc X, -1
    return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
                       DAG.getVTList(VT, MVT::i32), X,
                       DAG.getConstant(-1, DL, VT), EFLAGS);
  }

  if (CC == X86::COND_BE) {
    // X + SETBE --> sbb X, -1
    // X - SETBE --> adc X, -1
    // Try to convert COND_BE into COND_AE in an attempt to facilitate
    // materializing "setae reg".
    //
    // Do not flip "e <= c", where "c" is a constant, because Cmp instruction
    // cannot take an immediate as its first operand.
    //
    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
        EFLAGS.getValueType().isInteger() &&
        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub =
          DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
                      EFLAGS.getOperand(1), EFLAGS.getOperand(0));
      SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
      return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
                         DAG.getVTList(VT, MVT::i32), X,
                         DAG.getConstant(-1, DL, VT), NewEFLAGS);
    }
  }

  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  if (EFLAGS.getOpcode() != X86ISD::CMP || !EFLAGS.hasOneUse() ||
      !X86::isZeroNode(EFLAGS.getOperand(1)) ||
      !EFLAGS.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue Z = EFLAGS.getOperand(0);
  EVT ZVT = Z.getValueType();

  // If X is -1 or 0, then we have an opportunity to avoid constants required
  // in the general case below.
  if (ConstantX) {
    // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb'
    // with fake operands:
    //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
    // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
    if ((IsSub && CC == X86::COND_NE && ConstantX->isZero()) ||
        (!IsSub && CC == X86::COND_E && ConstantX->isAllOnes())) {
      SDValue Zero = DAG.getConstant(0, DL, ZVT);
      SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
      SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         SDValue(Neg.getNode(), 1));
    }

    // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
    // with fake operands:
    //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
    // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
    if ((IsSub && CC == X86::COND_E && ConstantX->isZero()) ||
        (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnes())) {
      SDValue One = DAG.getConstant(1, DL, ZVT);
      SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
      SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         Cmp1.getValue(1));
    }
  }

  // (cmp Z, 1) sets the carry flag if Z is 0.
  SDValue One = DAG.getConstant(1, DL, ZVT);
  SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
  SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);

  // Add the flags type for ADC/SBB nodes.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
  // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
  if (CC == X86::COND_NE)
    return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
                       DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));

  // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
  // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
  return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
                     DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
}

/// If this is an add or subtract where one operand is produced by a cmp+setcc,
/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
/// with CMP+{ADC, SBB}.
static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
  bool IsSub = N->getOpcode() == ISD::SUB;
  SDValue X = N->getOperand(0);
  SDValue Y = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, X, Y, DAG))
    return ADCOrSBB;

  // Commute and try again (negate the result for subtracts).
  if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, Y, X, DAG)) {
    if (IsSub)
      ADCOrSBB =
          DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), ADCOrSBB);
    return ADCOrSBB;
  }

  return SDValue();
}

static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
  // Only handle test patterns.
  if (!isNullConstant(N->getOperand(1)))
    return SDValue();

  // If we have a CMP of a truncated binop, see if we can make a smaller binop
  // and use its flags directly.
  // TODO: Maybe we should try promoting compares that only use the zero flag
  // first if we can prove the upper bits with computeKnownBits?
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);
  EVT VT = Op.getValueType();

  // If we have a constant logical shift that's only used in a comparison
  // against zero turn it into an equivalent AND. This allows turning it into
  // a TEST instruction later.
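  // e.g. (illustrative): cmp (srl X, 8), 0 is zero iff bits 31..8 of X are
  // all zero, the same condition as 'test X, 0xFFFFFF00', which needs no
  // shift result.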
  if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
      Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
      onlyZeroFlagUsed(SDValue(N, 0))) {
    unsigned BitWidth = VT.getSizeInBits();
    const APInt &ShAmt = Op.getConstantOperandAPInt(1);
    if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
      unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
      APInt Mask = Op.getOpcode() == ISD::SRL
                       ? APInt::getHighBitsSet(BitWidth, MaskBits)
                       : APInt::getLowBitsSet(BitWidth, MaskBits);
      if (Mask.isSignedIntN(32)) {
        Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
                         DAG.getConstant(Mask, dl, VT));
        return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                           DAG.getConstant(0, dl, VT));
      }
    }
  }

  // Peek through any zero-extend if we're only testing for a zero result.
  if (Op.getOpcode() == ISD::ZERO_EXTEND && onlyZeroFlagUsed(SDValue(N, 0))) {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    if (SrcVT.getScalarSizeInBits() >= 8 &&
        DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
      return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Src,
                         DAG.getConstant(0, dl, SrcVT));
  }

  // Look for a truncate.
  if (Op.getOpcode() != ISD::TRUNCATE)
    return SDValue();

  SDValue Trunc = Op;
  Op = Op.getOperand(0);

  // See if we can compare with zero against the truncation source,
  // which should help using the Z flag from many ops. Only do this for
  // i32 truncated op to prevent partial-reg compares of promoted ops.
  EVT OpVT = Op.getValueType();
  APInt UpperBits =
      APInt::getBitsSetFrom(OpVT.getSizeInBits(), VT.getSizeInBits());
  if (OpVT == MVT::i32 && DAG.MaskedValueIsZero(Op, UpperBits) &&
      onlyZeroFlagUsed(SDValue(N, 0))) {
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, OpVT));
  }

  // After this the truncate and arithmetic op must have a single use.
  if (!Trunc.hasOneUse() || !Op.hasOneUse())
    return SDValue();

  unsigned NewOpc;
  switch (Op.getOpcode()) {
  default: return SDValue();
  case ISD::AND:
    // Skip and with constant. We have special handling for and with immediate
    // during isel to generate test instructions.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();
    NewOpc = X86ISD::AND;
    break;
  case ISD::OR:  NewOpc = X86ISD::OR;  break;
  case ISD::XOR: NewOpc = X86ISD::XOR; break;
  case ISD::ADD:
    // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::ADD;
    break;
  case ISD::SUB:
    // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::SUB;
    break;
  }

  // We found an op we can narrow. Truncate its inputs.
  SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
  SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));

  // Use a X86 specific opcode to avoid DAG combine messing with it.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);

  // For AND, keep a CMP so that we can match the test pattern.
  if (NewOpc == X86ISD::AND)
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, VT));

  // Return the flags.
  return Op.getValue(1);
}

static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
         "Expected X86ISD::ADD or X86ISD::SUB");

  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  MVT VT = LHS.getSimpleValueType();
  bool IsSub = X86ISD::SUB == N->getOpcode();
  unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;

  // If we don't use the flag result, simplify back to a generic ADD/SUB.
  if (!N->hasAnyUseOfValue(1)) {
    SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
    return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
  }

  // Fold any similar generic ADD/SUB opcodes to reuse this node.
  auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
    SDValue Ops[] = {N0, N1};
    SDVTList VTs = DAG.getVTList(N->getValueType(0));
    if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
      SDValue Op(N, 0);
      if (Negate)
        Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
      DCI.CombineTo(GenericAddSub, Op);
    }
  };
  MatchGeneric(LHS, RHS, false);
  MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());

  // TODO: Can we drop the ZeroSecondOpOnly limit? This is to guarantee that
  // the EFLAGS result doesn't change.
  return combineAddOrSubToADCOrSBB(IsSub, DL, VT, LHS, RHS, DAG,
                                   /*ZeroSecondOpOnly*/ true);
}

static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue BorrowIn = N->getOperand(2);

  if (SDValue Flags = combineCarryThroughADD(BorrowIn, DAG)) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs, LHS, RHS, Flags);
  }

  // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
  // iff the flag result is dead.
  if (LHS.getOpcode() == ISD::SUB && isNullConstant(RHS) &&
      !N->hasAnyUseOfValue(1))
    return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), LHS.getOperand(0),
                       LHS.getOperand(1), BorrowIn);

  return SDValue();
}

// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue CarryIn = N->getOperand(2);
  auto *LHSC = dyn_cast<ConstantSDNode>(LHS);
  auto *RHSC = dyn_cast<ConstantSDNode>(RHS);

  // Canonicalize constant to RHS.
  if (LHSC && !RHSC)
    return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), RHS, LHS,
                       CarryIn);

  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
  // the result is either zero or one (depending on the input carry bit).
  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
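  // e.g. (illustrative): adc(0, 0, CarryIn) computes 0 + 0 + CF = CF, so the
  // value result is just the carry bit, i.e. (SETCC_CARRY COND_B) & 1.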
  if (LHSC && RHSC && LHSC->isZero() && RHSC->isZero() &&
      // We don't have a good way to replace an EFLAGS use, so only do this when
      // the carry result is dead.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
    SDValue Res1 = DAG.getNode(
        ISD::AND, DL, VT,
        DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                    DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), CarryIn),
        DAG.getConstant(1, DL, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  // Fold ADC(C1,C2,Carry) -> ADC(0,C1+C2,Carry)
  // iff the flag result is dead.
  // TODO: Allow flag result if C1+C2 doesn't signed/unsigned overflow.
  if (LHSC && RHSC && !LHSC->isZero() && !N->hasAnyUseOfValue(1)) {
    SDLoc DL(N);
    APInt Sum = LHSC->getAPIntValue() + RHSC->getAPIntValue();
    return DAG.getNode(X86ISD::ADC, DL, N->getVTList(),
                       DAG.getConstant(0, DL, LHS.getValueType()),
                       DAG.getConstant(Sum, DL, LHS.getValueType()), CarryIn);
  }

  if (SDValue Flags = combineCarryThroughADD(CarryIn, DAG)) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs, LHS, RHS, Flags);
  }

  // Fold ADC(ADD(X,Y),0,Carry) -> ADC(X,Y,Carry)
  // iff the flag result is dead.
  if (LHS.getOpcode() == ISD::ADD && RHSC && RHSC->isZero() &&
      !N->hasAnyUseOfValue(1))
    return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), LHS.getOperand(0),
                       LHS.getOperand(1), CarryIn);

  return SDValue();
}

static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
                            const SDLoc &DL, EVT VT,
                            const X86Subtarget &Subtarget) {
  // Example of pattern we try to detect:
  // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
  //(add (build_vector (extract_elt t, 0),
  //                   (extract_elt t, 2),
  //                   (extract_elt t, 4),
  //                   (extract_elt t, 6)),
  //     (build_vector (extract_elt t, 1),
  //                   (extract_elt t, 3),
  //                   (extract_elt t, 5),
  //                   (extract_elt t, 7)))

  if (!Subtarget.hasSSE2())
    return SDValue();

  if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
      Op1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
      VT.getVectorNumElements() < 4 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  // Check if one of Op0,Op1 is of the form:
  // (build_vector (extract_elt Mul, 0),
  //               (extract_elt Mul, 2),
  //               (extract_elt Mul, 4),
  //                   ...
  // the other is of the form:
  // (build_vector (extract_elt Mul, 1),
  //               (extract_elt Mul, 3),
  //               (extract_elt Mul, 5),
  //                   ...
  // and identify Mul.
  SDValue Mul;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
    SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
            Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
    // TODO: Be more tolerant to undefs.
    if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
    auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
    auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
    auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
    if (!Const0L || !Const1L || !Const0H || !Const1H)
      return SDValue();
    unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
             Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
    // Commutativity of mul allows factors of a product to reorder.
    if (Idx0L > Idx1L)
      std::swap(Idx0L, Idx1L);
    if (Idx0H > Idx1H)
      std::swap(Idx0H, Idx1H);
    // Commutativity of add allows pairs of factors to reorder.
    if (Idx0L > Idx0H) {
      std::swap(Idx0L, Idx0H);
      std::swap(Idx1L, Idx1H);
    }
    if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
        Idx1H != 2 * i + 3)
      return SDValue();
    if (!Mul) {
      // First time an extract_elt's source vector is visited. Must be a MUL
      // with 2X number of vector elements than the BUILD_VECTOR.
      // Both extracts must be from same MUL.
      Mul = Op0L->getOperand(0);
      if (Mul->getOpcode() != ISD::MUL ||
          Mul.getValueType().getVectorNumElements() != 2 * e)
        return SDValue();
    } else {
      // Check that the extract is from the same MUL previously seen.
      if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
          Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
        return SDValue();
    }
  }

  // Check if the Mul source can be safely shrunk.
  ShrinkMode Mode;
  if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
      Mode == ShrinkMode::MULU16)
    return SDValue();

  EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                 VT.getVectorNumElements() * 2);
  SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
  SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    EVT InVT = Ops[0].getValueType();
    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
}

// Attempt to turn this pattern into PMADDWD.
// (add (mul (sext (build_vector)), (sext (build_vector))),
//      (mul (sext (build_vector)), (sext (build_vector)))
static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
                              const SDLoc &DL, EVT VT,
                              const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
    return SDValue();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
      VT.getVectorNumElements() < 4 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  SDValue N10 = N1.getOperand(0);
  SDValue N11 = N1.getOperand(1);

  // All inputs need to be sign extends.
  // TODO: Support ZERO_EXTEND from known positive?
  if (N00.getOpcode() != ISD::SIGN_EXTEND ||
      N01.getOpcode() != ISD::SIGN_EXTEND ||
      N10.getOpcode() != ISD::SIGN_EXTEND ||
      N11.getOpcode() != ISD::SIGN_EXTEND)
    return SDValue();

  // Peek through the extends.
  N00 = N00.getOperand(0);
  N01 = N01.getOperand(0);
  N10 = N10.getOperand(0);
  N11 = N11.getOperand(0);

  // Must be extending from vXi16.
  EVT InVT = N00.getValueType();
  if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
      N10.getValueType() != InVT || N11.getValueType() != InVT)
    return SDValue();

  // All inputs should be build_vectors.
  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
      N01.getOpcode() != ISD::BUILD_VECTOR ||
      N10.getOpcode() != ISD::BUILD_VECTOR ||
      N11.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // For each element, we need to ensure we have an odd element from one vector
  // multiplied by the odd element of another vector and the even element from
  // one of the same vectors being multiplied by the even element from the
  // other vector. So we need to make sure for each element i, this operator
  // is being performed:
  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
  SDValue In0, In1;
  for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
    SDValue N00Elt = N00.getOperand(i);
    SDValue N01Elt = N01.getOperand(i);
    SDValue N10Elt = N10.getOperand(i);
    SDValue N11Elt = N11.getOperand(i);
    // TODO: Be more tolerant to undefs.
    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
      return SDValue();
    unsigned IdxN00 = ConstN00Elt->getZExtValue();
    unsigned IdxN01 = ConstN01Elt->getZExtValue();
    unsigned IdxN10 = ConstN10Elt->getZExtValue();
    unsigned IdxN11 = ConstN11Elt->getZExtValue();
    // Add is commutative so indices can be reordered.
    if (IdxN00 > IdxN10) {
      std::swap(IdxN00, IdxN10);
      std::swap(IdxN01, IdxN11);
    }
    // N0 indices must be the even element. N1 indices must be the next odd
    // element.
    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
      return SDValue();
    SDValue N00In = N00Elt.getOperand(0);
    SDValue N01In = N01Elt.getOperand(0);
    SDValue N10In = N10Elt.getOperand(0);
    SDValue N11In = N11Elt.getOperand(0);

    // First time we find an input capture it.
    if (!In0) {
      In0 = N00In;
      In1 = N01In;

      // The input vectors must be at least as wide as the output.
      // If they are larger than the output, we extract a subvector below.
      if (In0.getValueSizeInBits() < VT.getSizeInBits() ||
          In1.getValueSizeInBits() < VT.getSizeInBits())
        return SDValue();
    }
    // Mul is commutative so the input vectors can be in any order.
    // Canonicalize to make the compares easier.
    if (In0 != N00In)
      std::swap(N00In, N01In);
    if (In0 != N10In)
      std::swap(N10In, N11In);
    if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
      return SDValue();
  }

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    EVT OpVT = Ops[0].getValueType();
    assert(OpVT.getScalarType() == MVT::i16 &&
           "Unexpected scalar element type");
    assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 OpVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
  };

  // If the output is narrower than an input, extract the low part of the input
  // vector.
  EVT OutVT16 = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                 VT.getVectorNumElements() * 2);
  if (OutVT16.bitsLT(In0.getValueType())) {
    In0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In0,
                      DAG.getIntPtrConstant(0, DL));
  }
  if (OutVT16.bitsLT(In1.getValueType())) {
    In1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In1,
                      DAG.getIntPtrConstant(0, DL));
  }
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
                          PMADDBuilder);
}

// ADD(VPMADDWD(X,Y),VPMADDWD(Z,W)) -> VPMADDWD(SHUFFLE(X,Z), SHUFFLE(Y,W))
// If the upper element in each pair of both VPMADDWD operands is zero then we
// can merge the operand elements and use the implicit add of VPMADDWD.
// TODO: Add support for VPMADDUBSW (which isn't commutable).
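// e.g. (illustrative): each vpmaddwd lane computes
// x[2i]*y[2i] + x[2i+1]*y[2i+1], so when every odd element is known zero the
// lane is just x[2i]*y[2i], and the two nodes can be packed into one vpmaddwd
// whose implicit pairwise add performs the outer ADD.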
static SDValue combineAddOfPMADDWD(SelectionDAG &DAG, SDValue N0, SDValue N1,
                                   const SDLoc &DL, EVT VT) {
  if (N0.getOpcode() != N1.getOpcode() || N0.getOpcode() != X86ISD::VPMADDWD)
    return SDValue();

  // TODO: Add 256/512-bit support once VPMADDWD combines with shuffles.
  if (VT.getSizeInBits() > 128)
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();
  MVT OpVT = N0.getOperand(0).getSimpleValueType();
  APInt DemandedBits = APInt::getAllOnes(OpVT.getScalarSizeInBits());
  APInt DemandedHiElts = APInt::getSplat(2 * NumElts, APInt(2, 2));

  bool Op0HiZero =
      DAG.MaskedValueIsZero(N0.getOperand(0), DemandedBits, DemandedHiElts) ||
      DAG.MaskedValueIsZero(N0.getOperand(1), DemandedBits, DemandedHiElts);
  bool Op1HiZero =
      DAG.MaskedValueIsZero(N1.getOperand(0), DemandedBits, DemandedHiElts) ||
      DAG.MaskedValueIsZero(N1.getOperand(1), DemandedBits, DemandedHiElts);

  // TODO: Check for zero lower elements once we have actual codegen that
  // creates them.
  if (!Op0HiZero || !Op1HiZero)
    return SDValue();

  // Create a shuffle mask packing the lower elements from each VPMADDWD.
  SmallVector<int> Mask;
  for (int i = 0; i != (int)NumElts; ++i) {
    Mask.push_back(2 * i);
    Mask.push_back(2 * (i + NumElts));
  }

  SDValue LHS =
      DAG.getVectorShuffle(OpVT, DL, N0.getOperand(0), N1.getOperand(0), Mask);
  SDValue RHS =
      DAG.getVectorShuffle(OpVT, DL, N0.getOperand(1), N1.getOperand(1), Mask);
  return DAG.getNode(X86ISD::VPMADDWD, DL, VT, LHS, RHS);
}

/// CMOV of constants requires materializing constant operands in registers.
/// Try to fold those constants into an 'add' instruction to reduce instruction
/// count. We do this with CMOV rather than the generic 'select' because there
/// are earlier folds that may be used to turn select-of-constants into logic
/// hacks.
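/// For example (illustrative): 'add (cmov C1, C2), %reg' would otherwise need
/// both constants materialized; pushing the add through gives
/// 'cmov (lea C1(%reg)), (lea C2(%reg))', folding each constant into an LEA
/// displacement.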
static SDValue pushAddIntoCmovOfConsts(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  // If an operand is zero, add-of-0 gets simplified away, so that's clearly
  // better because we eliminate 1-2 instructions. This transform is still
  // an improvement without zero operands because we trade 2 move constants
  // and 1 add for 2 adds (LEA) as long as the constants can be represented
  // as immediate asm operands (fit in 32-bits).
  auto isSuitableCmov = [](SDValue V) {
    if (V.getOpcode() != X86ISD::CMOV || !V.hasOneUse())
      return false;
    if (!isa<ConstantSDNode>(V.getOperand(0)) ||
        !isa<ConstantSDNode>(V.getOperand(1)))
      return false;
    return isNullConstant(V.getOperand(0)) || isNullConstant(V.getOperand(1)) ||
           (V.getConstantOperandAPInt(0).isSignedIntN(32) &&
            V.getConstantOperandAPInt(1).isSignedIntN(32));
  };

  // Match an appropriate CMOV as the first operand of the add.
  SDValue Cmov = N->getOperand(0);
  SDValue OtherOp = N->getOperand(1);
  if (!isSuitableCmov(Cmov))
    std::swap(Cmov, OtherOp);
  if (!isSuitableCmov(Cmov))
    return SDValue();

  // Don't remove a load folding opportunity for the add. That would neutralize
  // any improvements from removing constant materializations.
  if (X86::mayFoldLoad(OtherOp, Subtarget))
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue FalseOp = Cmov.getOperand(0);
  SDValue TrueOp = Cmov.getOperand(1);

  // We will push the add through the select, but we can potentially do better
  // if we know there is another add in the sequence and this is pointer math.
  // In that case, we can absorb an add into the trailing memory op and avoid
  // a 3-operand LEA which is likely slower than a 2-operand LEA.
  // TODO: If target has "slow3OpsLEA", do this even without the trailing memop?
  if (OtherOp.getOpcode() == ISD::ADD && OtherOp.hasOneUse() &&
      !isa<ConstantSDNode>(OtherOp.getOperand(0)) &&
      all_of(N->uses(), [&](SDNode *Use) {
        auto *MemNode = dyn_cast<MemSDNode>(Use);
        return MemNode && MemNode->getBasePtr().getNode() == N;
      })) {
    // add (cmov C1, C2), add (X, Y) --> add (cmov (add X, C1), (add X, C2)), Y
    // TODO: We are arbitrarily choosing op0 as the 1st piece of the sum, but
    //       it is possible that choosing op1 might be better.
    SDValue X = OtherOp.getOperand(0), Y = OtherOp.getOperand(1);
    FalseOp = DAG.getNode(ISD::ADD, DL, VT, X, FalseOp);
    TrueOp = DAG.getNode(ISD::ADD, DL, VT, X, TrueOp);
    Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp,
                       Cmov.getOperand(2), Cmov.getOperand(3));
    return DAG.getNode(ISD::ADD, DL, VT, Cmov, Y);
  }

  // add (cmov C1, C2), OtherOp --> cmov (add OtherOp, C1), (add OtherOp, C2)
  FalseOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, FalseOp);
  TrueOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, TrueOp);
  return DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp, Cmov.getOperand(2),
                     Cmov.getOperand(3));
}

static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc DL(N);

  if (SDValue Select = pushAddIntoCmovOfConsts(N, DAG, Subtarget))
    return Select;

  if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, DL, VT, Subtarget))
    return MAdd;
  if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, DL, VT, Subtarget))
    return MAdd;
  if (SDValue MAdd = combineAddOfPMADDWD(DAG, Op0, Op1, DL, VT))
    return MAdd;

  // Try to synthesize horizontal adds from adds of shuffles.
  if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
    return V;

  // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
  // (sub Y, (sext (vXi1 X))).
  // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y)
  // fold in generic DAG combine without a legal type check, but adding this
  // there caused regressions.
  if (VT.isVector()) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
        Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
    }

    if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
        Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
    }
  }

  // Fold ADD(ADC(Y,0,W),X) -> ADC(X,Y,W)
  if (Op0.getOpcode() == X86ISD::ADC && Op0->hasOneUse() &&
      X86::isZeroNode(Op0.getOperand(1))) {
    assert(!Op0->hasAnyUseOfValue(1) && "Overflow bit in use");
    return DAG.getNode(X86ISD::ADC, SDLoc(Op0), Op0->getVTList(), Op1,
                       Op0.getOperand(0), Op0.getOperand(2));
  }

  return combineAddOrSubToADCOrSBB(N, DAG);
}

// Try to fold (sub Y, cmovns X, -X) -> (add Y, cmovns -X, X) if the cmov
// condition comes from the subtract node that produced -X. This matches the
// cmov expansion for absolute value. By swapping the operands we convert abs
// to nabs.
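// e.g. (illustrative): for (sub Y, abs(X)) the cmov selects between X and the
// -X produced by the negate; swapping its operands yields nabs(X) = -abs(X),
// so the sub becomes (add Y, nabs(X)) and the negate's flags are reused as-is.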
53699 static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
53700 SDValue N0 = N->getOperand(0);
53701 SDValue N1 = N->getOperand(1);
53703 if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
53706 X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
53707 if (CC != X86::COND_S && CC != X86::COND_NS)
53710 // Condition should come from a negate operation.
53711 SDValue Cond = N1.getOperand(3);
  if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
    return SDValue();
53714 assert(Cond.getResNo() == 1 && "Unexpected result number");
53716 // Get the X and -X from the negate.
53717 SDValue NegX = Cond.getValue(0);
53718 SDValue X = Cond.getOperand(1);
53720 SDValue FalseOp = N1.getOperand(0);
53721 SDValue TrueOp = N1.getOperand(1);
53723 // Cmov operands should be X and NegX. Order doesn't matter.
  if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
    return SDValue();

  // Build a new CMOV with the operands swapped.
  SDLoc DL(N);
  MVT VT = N->getSimpleValueType(0);
53730 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
53731 N1.getOperand(2), Cond);
53732 // Convert sub to add.
  return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
}
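// Combine ISD::SUB: push an immediate LHS negation into a preceding XOR,
// fold the cmov-based abs pattern above, synthesize horizontal subs, and
// form SBB/ADC from existing carry chains.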
53736 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
53737 TargetLowering::DAGCombinerInfo &DCI,
53738 const X86Subtarget &Subtarget) {
53739 SDValue Op0 = N->getOperand(0);
53740 SDValue Op1 = N->getOperand(1);
53742 // TODO: Add NoOpaque handling to isConstantIntBuildVectorOrConstantInt.
53743 auto IsNonOpaqueConstant = [&](SDValue Op) {
53744 if (SDNode *C = DAG.isConstantIntBuildVectorOrConstantInt(Op)) {
53745 if (auto *Cst = dyn_cast<ConstantSDNode>(C))
        return !Cst->isOpaque();
      return true;
    }
    return false;
  };
53752 // X86 can't encode an immediate LHS of a sub. See if we can push the
53753 // negation into a preceding instruction. If the RHS of the sub is a XOR with
53754 // one use and a constant, invert the immediate, saving one register.
53755 // sub(C1, xor(X, C2)) -> add(xor(X, ~C2), C1+1)
53756 if (Op1.getOpcode() == ISD::XOR && IsNonOpaqueConstant(Op0) &&
      IsNonOpaqueConstant(Op1.getOperand(1)) && Op1->hasOneUse()) {
    SDLoc DL(N);
53759 EVT VT = Op0.getValueType();
53760 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0),
                                 DAG.getNOT(SDLoc(Op1), Op1.getOperand(1), VT));
    SDValue NewAdd =
        DAG.getNode(ISD::ADD, DL, VT, Op0, DAG.getConstant(1, DL, VT));
    return DAG.getNode(ISD::ADD, DL, VT, NewXor, NewAdd);
  }

  if (SDValue V = combineSubABS(N, DAG))
    return V;
53770 // Try to synthesize horizontal subs from subs of shuffles.
  if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
    return V;
53774 // Fold SUB(X,ADC(Y,0,W)) -> SBB(X,Y,W)
53775 if (Op1.getOpcode() == X86ISD::ADC && Op1->hasOneUse() &&
53776 X86::isZeroNode(Op1.getOperand(1))) {
53777 assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
53778 return DAG.getNode(X86ISD::SBB, SDLoc(Op1), Op1->getVTList(), Op0,
                       Op1.getOperand(0), Op1.getOperand(2));
  }
53782 // Fold SUB(X,SBB(Y,Z,W)) -> SUB(ADC(X,Z,W),Y)
53783 // Don't fold to ADC(0,0,W)/SETCC_CARRY pattern which will prevent more folds.
53784 if (Op1.getOpcode() == X86ISD::SBB && Op1->hasOneUse() &&
53785 !(X86::isZeroNode(Op0) && X86::isZeroNode(Op1.getOperand(1)))) {
53786 assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
53787 SDValue ADC = DAG.getNode(X86ISD::ADC, SDLoc(Op1), Op1->getVTList(), Op0,
53788 Op1.getOperand(1), Op1.getOperand(2));
53789 return DAG.getNode(ISD::SUB, SDLoc(N), Op0.getValueType(), ADC.getValue(0),
                       Op1.getOperand(0));
  }

  return combineAddOrSubToADCOrSBB(N, DAG);
}
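// Fold PCMPEQ/PCMPGT comparisons with identical operands: x == x is always
// all-ones and x > x is always all-zeros, so both fold to a constant.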
53796 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
53797 const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);
53801 if (N->getOperand(0) == N->getOperand(1)) {
53802 if (N->getOpcode() == X86ISD::PCMPEQ)
53803 return DAG.getConstant(-1, DL, VT);
53804 if (N->getOpcode() == X86ISD::PCMPGT)
      return DAG.getConstant(0, DL, VT);
  }

  return SDValue();
}
53811 /// Helper that combines an array of subvector ops as if they were the operands
53812 /// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
53813 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
53814 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
53815 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
53816 TargetLowering::DAGCombinerInfo &DCI,
53817 const X86Subtarget &Subtarget) {
53818 assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
53819 unsigned EltSizeInBits = VT.getScalarSizeInBits();
53821 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
53822 return DAG.getUNDEF(VT);
  if (llvm::all_of(Ops, [](SDValue Op) {
        return ISD::isBuildVectorAllZeros(Op.getNode());
      }))
    return getZeroVector(VT, Subtarget, DAG, DL);
53829 SDValue Op0 = Ops[0];
53830 bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
  // Repeated subvectors.
  if (IsSplat &&
      (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
53835 // If this broadcast is inserted into both halves, use a larger broadcast.
53836 if (Op0.getOpcode() == X86ISD::VBROADCAST)
53837 return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
53839 // If this simple subvector or scalar/subvector broadcast_load is inserted
53840 // into both halves, use a larger broadcast_load. Update other uses to use
53841 // an extracted subvector.
53842 if (ISD::isNormalLoad(Op0.getNode()) ||
53843 Op0.getOpcode() == X86ISD::VBROADCAST_LOAD ||
53844 Op0.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
53845 auto *Mem = cast<MemSDNode>(Op0);
53846 unsigned Opc = Op0.getOpcode() == X86ISD::VBROADCAST_LOAD
53847 ? X86ISD::VBROADCAST_LOAD
53848 : X86ISD::SUBV_BROADCAST_LOAD;
      if (SDValue BcastLd =
              getBROADCAST_LOAD(Opc, DL, VT, Mem->getMemoryVT(), Mem, 0, DAG)) {
        SDValue BcastSrc =
            extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits());
        DAG.ReplaceAllUsesOfValueWith(Op0, BcastSrc);
        return BcastLd;
      }
    }
53858 // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
53859 if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
53860 (Subtarget.hasAVX2() ||
53861 X86::mayFoldLoadIntoBroadcastFromMem(Op0.getOperand(0),
53862 VT.getScalarType(), Subtarget)))
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
                                     Op0.getOperand(0),
                                     DAG.getIntPtrConstant(0, DL)));
53868 // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
53869 if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
53870 (Subtarget.hasAVX2() ||
53871 (EltSizeInBits >= 32 &&
53872 X86::mayFoldLoad(Op0.getOperand(0), Subtarget))) &&
53873 Op0.getOperand(0).getValueType() == VT.getScalarType())
53874 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
53876 // concat_vectors(extract_subvector(broadcast(x)),
53877 // extract_subvector(broadcast(x))) -> broadcast(x)
53878 if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
53879 Op0.getOperand(0).getValueType() == VT) {
53880 if (Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST ||
53881 Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST_LOAD)
        return Op0.getOperand(0);
    }
  }
53886 // concat(extract_subvector(v0,c0), extract_subvector(v1,c1)) -> vperm2x128.
53887 // Only concat of subvector high halves which vperm2x128 is best at.
53888 // TODO: This should go in combineX86ShufflesRecursively eventually.
53889 if (VT.is256BitVector() && Ops.size() == 2) {
53890 SDValue Src0 = peekThroughBitcasts(Ops[0]);
53891 SDValue Src1 = peekThroughBitcasts(Ops[1]);
53892 if (Src0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
53893 Src1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
53894 EVT SrcVT0 = Src0.getOperand(0).getValueType();
53895 EVT SrcVT1 = Src1.getOperand(0).getValueType();
53896 unsigned NumSrcElts0 = SrcVT0.getVectorNumElements();
53897 unsigned NumSrcElts1 = SrcVT1.getVectorNumElements();
53898 if (SrcVT0.is256BitVector() && SrcVT1.is256BitVector() &&
53899 Src0.getConstantOperandAPInt(1) == (NumSrcElts0 / 2) &&
53900 Src1.getConstantOperandAPInt(1) == (NumSrcElts1 / 2)) {
53901 return DAG.getNode(X86ISD::VPERM2X128, DL, VT,
53902 DAG.getBitcast(VT, Src0.getOperand(0)),
53903 DAG.getBitcast(VT, Src1.getOperand(0)),
                           DAG.getTargetConstant(0x31, DL, MVT::i8));
      }
    }
  }
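  // Note: the 0x31 immediate above selects the upper 128-bit half of each
  // source, so e.g. concat(extract_subvector(X,4), extract_subvector(Y,4))
  // of two v8f32 sources becomes vperm2f128 X, Y, 0x31.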
53909 // Repeated opcode.
53910 // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
53911 // but it currently struggles with different vector widths.
  if (llvm::all_of(Ops, [Op0](SDValue Op) {
        return Op.getOpcode() == Op0.getOpcode();
      })) {
    auto ConcatSubOperand = [&](MVT VT, ArrayRef<SDValue> SubOps, unsigned I) {
      SmallVector<SDValue> Subs;
      for (SDValue SubOp : SubOps)
        Subs.push_back(SubOp.getOperand(I));
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
    };
    auto IsConcatFree = [](MVT VT, ArrayRef<SDValue> SubOps, unsigned Op) {
      for (unsigned I = 0, E = SubOps.size(); I != E; ++I) {
        SDValue Sub = SubOps[I].getOperand(Op);
        unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
        if (Sub.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
            Sub.getOperand(0).getValueType() != VT ||
            Sub.getConstantOperandAPInt(1) != (I * NumSubElts))
          return false;
      }
      return true;
    };

    unsigned NumOps = Ops.size();
53934 switch (Op0.getOpcode()) {
    case X86ISD::VBROADCAST: {
      if (!IsSplat && VT == MVT::v4f64 && llvm::all_of(Ops, [](SDValue Op) {
            return Op.getOperand(0).getValueType().is128BitVector();
          }))
        return DAG.getNode(X86ISD::MOVDDUP, DL, VT,
                           ConcatSubOperand(VT, Ops, 0));
      break;
    }
    case X86ISD::MOVDDUP:
    case X86ISD::MOVSHDUP:
    case X86ISD::MOVSLDUP: {
      if (!IsSplat)
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0));
      break;
    }
    case X86ISD::SHUFP: {
      // Add SHUFPD support if/when necessary.
      if (!IsSplat && VT.getScalarType() == MVT::f32 &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op.getOperand(2) == Op0.getOperand(2);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0),
                           ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
      }
      break;
    }
    case X86ISD::PSHUFHW:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFD:
      if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
          Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
      }
      LLVM_FALLTHROUGH;
53972 case X86ISD::VPERMILPI:
53973 if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
53974 Op0.getOperand(1) == Ops[1].getOperand(1)) {
53975 SDValue Res = DAG.getBitcast(MVT::v8f32, ConcatSubOperand(VT, Ops, 0));
53976 Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
53977 Op0.getOperand(1));
        return DAG.getBitcast(VT, Res);
      }
      if (!IsSplat && NumOps == 2 && VT == MVT::v4f64) {
        uint64_t Idx0 = Ops[0].getConstantOperandVal(1);
        uint64_t Idx1 = Ops[1].getConstantOperandVal(1);
        uint64_t Idx = ((Idx1 & 3) << 2) | (Idx0 & 3);
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0),
                           DAG.getTargetConstant(Idx, DL, MVT::i8));
      }
      break;
53989 case X86ISD::PSHUFB:
53990 if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
53991 (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
53992 return DAG.getNode(Op0.getOpcode(), DL, VT,
53993 ConcatSubOperand(VT, Ops, 0),
                           ConcatSubOperand(VT, Ops, 1));
      }
      break;
53997 case X86ISD::VPERMV3:
53998 if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
53999 MVT OpVT = Op0.getSimpleValueType();
54000 int NumSrcElts = OpVT.getVectorNumElements();
54001 SmallVector<int, 64> ConcatMask;
54002 for (unsigned i = 0; i != NumOps; ++i) {
54003 SmallVector<int, 64> SubMask;
54004 SmallVector<SDValue, 2> SubOps;
54005 if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
54008 for (int M : SubMask) {
54010 M += M < NumSrcElts ? 0 : NumSrcElts;
54011 M += i * NumSrcElts;
54013 ConcatMask.push_back(M);
54016 if (ConcatMask.size() == (NumOps * NumSrcElts)) {
54017 SDValue Src0 = concatSubVectors(Ops[0].getOperand(0),
54018 Ops[1].getOperand(0), DAG, DL);
54019 SDValue Src1 = concatSubVectors(Ops[0].getOperand(2),
54020 Ops[1].getOperand(2), DAG, DL);
54021 MVT IntMaskSVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
54022 MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
54023 SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
          return DAG.getNode(X86ISD::VPERMV3, DL, VT, Src0, Mask, Src1);
        }
      }
      break;
54028 case X86ISD::VSHLI:
54029 case X86ISD::VSRLI:
54030 // Special case: SHL/SRL AVX1 V4i64 by 32-bits can lower as a shuffle.
54031 // TODO: Move this to LowerShiftByScalarImmediate?
54032 if (VT == MVT::v4i64 && !Subtarget.hasInt256() &&
          llvm::all_of(Ops, [](SDValue Op) {
            return Op.getConstantOperandAPInt(1) == 32;
          })) {
        SDValue Res = DAG.getBitcast(MVT::v8i32, ConcatSubOperand(VT, Ops, 0));
        SDValue Zero = getZeroVector(MVT::v8i32, Subtarget, DAG, DL);
        if (Op0.getOpcode() == X86ISD::VSHLI) {
          Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
                                     {8, 0, 8, 2, 8, 4, 8, 6});
        } else {
          Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
                                     {1, 8, 3, 8, 5, 8, 7, 8});
        }
        return DAG.getBitcast(VT, Res);
      }
      LLVM_FALLTHROUGH;
    case X86ISD::VSRAI:
    case X86ISD::VSHL:
    case X86ISD::VSRL:
    case X86ISD::VSRA:
      if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
           (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
            (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op0.getOperand(1) == Op.getOperand(1);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
      }
      break;
    case X86ISD::VPERMI:
    case X86ISD::VROTLI:
    case X86ISD::VROTRI:
      if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op0.getOperand(1) == Op.getOperand(1);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
      }
      break;
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case X86ISD::ANDNP:
54077 // TODO: Add 256-bit support.
54078 if (!IsSplat && VT.is512BitVector()) {
54079 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
54080 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
54081 NumOps * SrcVT.getVectorNumElements());
54082 return DAG.getNode(Op0.getOpcode(), DL, VT,
54083 ConcatSubOperand(SrcVT, Ops, 0),
                           ConcatSubOperand(SrcVT, Ops, 1));
      }
      break;
    case X86ISD::HADD:
    case X86ISD::HSUB:
    case X86ISD::FHADD:
    case X86ISD::FHSUB:
54091 case X86ISD::PACKSS:
54092 case X86ISD::PACKUS:
54093 if (!IsSplat && VT.is256BitVector() &&
54094 (VT.isFloatingPoint() || Subtarget.hasInt256())) {
54095 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
54096 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
54097 NumOps * SrcVT.getVectorNumElements());
54098 return DAG.getNode(Op0.getOpcode(), DL, VT,
54099 ConcatSubOperand(SrcVT, Ops, 0),
                           ConcatSubOperand(SrcVT, Ops, 1));
      }
      break;
    case X86ISD::PALIGNR:
      if (!IsSplat &&
          ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54106 (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
54107 llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op0.getOperand(2) == Op.getOperand(2);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0),
                           ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
      }
      break;
54116 case X86ISD::BLENDV:
54117 if (!IsSplat && VT.is256BitVector() && Ops.size() == 2 &&
54118 (VT.getScalarSizeInBits() >= 32 || Subtarget.hasInt256()) &&
54119 IsConcatFree(VT, Ops, 1) && IsConcatFree(VT, Ops, 2)) {
54120 EVT SelVT = Ops[0].getOperand(0).getValueType();
54121 SelVT = SelVT.getDoubleNumVectorElementsVT(*DAG.getContext());
54122 return DAG.getNode(Op0.getOpcode(), DL, VT,
54123 ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
54124 ConcatSubOperand(VT, Ops, 1),
                           ConcatSubOperand(VT, Ops, 2));
      }
      break;
    }
  }
  // Fold subvector loads into one.
  // If needed, look through bitcasts to get to the load.
  if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
    bool Fast;
    const X86TargetLowering *TLI = Subtarget.getTargetLowering();
    if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                                *FirstLd->getMemOperand(), &Fast) &&
        Fast) {
      if (SDValue Ld =
              EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
        return Ld;
    }
  }
54145 // Attempt to fold target constant loads.
54146 if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
54147 SmallVector<APInt> EltBits;
54148 APInt UndefElts = APInt::getNullValue(VT.getVectorNumElements());
54149 for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
      APInt OpUndefElts;
      SmallVector<APInt> OpEltBits;
54152 if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
                                         OpEltBits, true, false))
        break;
54155 EltBits.append(OpEltBits);
      UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
    }
    if (EltBits.size() == VT.getVectorNumElements())
      return getConstVector(EltBits, UndefElts, VT, DAG, DL);
  }

  return SDValue();
}
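// Combine CONCAT_VECTORS by deferring to combineConcatVectorOps once both
// the result and the operand types are legal AVX types.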
54165 static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG,
54166 TargetLowering::DAGCombinerInfo &DCI,
54167 const X86Subtarget &Subtarget) {
54168 EVT VT = N->getValueType(0);
54169 EVT SrcVT = N->getOperand(0).getValueType();
54170 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54172 // Don't do anything for i1 vectors.
  if (VT.getVectorElementType() == MVT::i1)
    return SDValue();
54176 if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
54177 SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
    if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
                                           DCI, Subtarget))
      return R;
  }

  return SDValue();
}
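// Combine INSERT_SUBVECTOR: fold inserts of zeros/undefs, rewrite
// insert(extract) pairs as shuffles, recognize concat patterns, and widen
// broadcasts that are inserted into an upper undef half.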
54186 static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
54187 TargetLowering::DAGCombinerInfo &DCI,
54188 const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  MVT OpVT = N->getSimpleValueType(0);
  SDLoc dl(N);

  bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
54197 SDValue Vec = N->getOperand(0);
54198 SDValue SubVec = N->getOperand(1);
54200 uint64_t IdxVal = N->getConstantOperandVal(2);
54201 MVT SubVecVT = SubVec.getSimpleValueType();
54203 if (Vec.isUndef() && SubVec.isUndef())
54204 return DAG.getUNDEF(OpVT);
54206 // Inserting undefs/zeros into zeros/undefs is a zero vector.
54207 if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
54208 (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
54209 return getZeroVector(OpVT, Subtarget, DAG, dl);
54211 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
54212 // If we're inserting into a zero vector and then into a larger zero vector,
54213 // just insert into the larger zero vector directly.
54214 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
54215 ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
54216 uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
54217 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
54218 getZeroVector(OpVT, Subtarget, DAG, dl),
54219 SubVec.getOperand(1),
                         DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
    }
54223 // If we're inserting into a zero vector and our input was extracted from an
54224 // insert into a zero vector of the same type and the extraction was at
54225 // least as large as the original insertion. Just insert the original
54226 // subvector into a zero vector.
54227 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
54228 isNullConstant(SubVec.getOperand(1)) &&
54229 SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
54230 SDValue Ins = SubVec.getOperand(0);
54231 if (isNullConstant(Ins.getOperand(2)) &&
54232 ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
54233 Ins.getOperand(1).getValueSizeInBits().getFixedSize() <=
54234 SubVecVT.getFixedSizeInBits())
54235 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
54236 getZeroVector(OpVT, Subtarget, DAG, dl),
                           Ins.getOperand(1), N->getOperand(2));
    }
  }

  // Stop here if this is an i1 vector.
  if (IsI1Vector)
    return SDValue();
54245 // If this is an insert of an extract, combine to a shuffle. Don't do this
54246 // if the insert or extract can be represented with a subregister operation.
54247 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      SubVec.getOperand(0).getSimpleValueType() == OpVT &&
      (IdxVal != 0 ||
       !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
54251 int ExtIdxVal = SubVec.getConstantOperandVal(1);
54252 if (ExtIdxVal != 0) {
54253 int VecNumElts = OpVT.getVectorNumElements();
54254 int SubVecNumElts = SubVecVT.getVectorNumElements();
54255 SmallVector<int, 64> Mask(VecNumElts);
54256 // First create an identity shuffle mask.
    for (int i = 0; i != VecNumElts; ++i)
      Mask[i] = i;
    // Now insert the extracted portion.
54260 for (int i = 0; i != SubVecNumElts; ++i)
54261 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
      return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
    }
  }
54267 // Match concat_vector style patterns.
54268 SmallVector<SDValue, 2> SubVectorOps;
  if (collectConcatOps(N, SubVectorOps, DAG)) {
    if (SDValue Fold =
            combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
      return Fold;
54274 // If we're inserting all zeros into the upper half, change this to
54275 // a concat with zero. We will match this to a move
54276 // with implicit upper bit zeroing during isel.
54277 // We do this here because we don't want combineConcatVectorOps to
54278 // create INSERT_SUBVECTOR from CONCAT_VECTORS.
54279 if (SubVectorOps.size() == 2 &&
54280 ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
54281 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
54282 getZeroVector(OpVT, Subtarget, DAG, dl),
                         SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
  }
54286 // If this is a broadcast insert into an upper undef, use a larger broadcast.
54287 if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
54288 return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
54290 // If this is a broadcast load inserted into an upper undef, use a larger
54292 if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
54293 SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
54294 auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
54295 SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
    SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
    SDValue BcastLd =
        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
                                MemIntr->getMemoryVT(),
                                MemIntr->getMemOperand());
    DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
    return BcastLd;
  }
54305 // If we're splatting the lower half subvector of a full vector load into the
54306 // upper half, attempt to create a subvector broadcast.
54307 if (IdxVal == (OpVT.getVectorNumElements() / 2) && SubVec.hasOneUse() &&
54308 Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits())) {
54309 auto *VecLd = dyn_cast<LoadSDNode>(Vec);
54310 auto *SubLd = dyn_cast<LoadSDNode>(SubVec);
54311 if (VecLd && SubLd &&
54312 DAG.areNonVolatileConsecutiveLoads(SubLd, VecLd,
                                           SubVec.getValueSizeInBits() / 8, 0))
      return getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, dl, OpVT, SubVecVT,
                               SubLd, 0, DAG);
  }

  return SDValue();
}
54321 /// If we are extracting a subvector of a vector select and the select condition
54322 /// is composed of concatenated vectors, try to narrow the select width. This
54323 /// is a common pattern for AVX1 integer code because 256-bit selects may be
54324 /// legal, but there is almost no integer math/logic available for 256-bit.
54325 /// This function should only be called with legal types (otherwise, the calls
54326 /// to get simple value types will assert).
54327 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
54328 SDValue Sel = Ext->getOperand(0);
54329 SmallVector<SDValue, 4> CatOps;
  if (Sel.getOpcode() != ISD::VSELECT ||
      !collectConcatOps(Sel.getOperand(0).getNode(), CatOps, DAG))
    return SDValue();
54334 // Note: We assume simple value types because this should only be called with
54335 // legal operations/types.
54336 // TODO: This can be extended to handle extraction to 256-bits.
54337 MVT VT = Ext->getSimpleValueType(0);
  if (!VT.is128BitVector())
    return SDValue();
54341 MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
  if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
    return SDValue();
54345 MVT WideVT = Ext->getOperand(0).getSimpleValueType();
54346 MVT SelVT = Sel.getSimpleValueType();
54347 assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
54348 "Unexpected vector type with legal operations");
54350 unsigned SelElts = SelVT.getVectorNumElements();
54351 unsigned CastedElts = WideVT.getVectorNumElements();
54352 unsigned ExtIdx = Ext->getConstantOperandVal(1);
54353 if (SelElts % CastedElts == 0) {
54354 // The select has the same or more (narrower) elements than the extract
54355 // operand. The extraction index gets scaled by that factor.
54356 ExtIdx *= (SelElts / CastedElts);
54357 } else if (CastedElts % SelElts == 0) {
54358 // The select has less (wider) elements than the extract operand. Make sure
54359 // that the extraction index can be divided evenly.
54360 unsigned IndexDivisor = CastedElts / SelElts;
    if (ExtIdx % IndexDivisor != 0)
      return SDValue();
    ExtIdx /= IndexDivisor;
  } else {
    llvm_unreachable("Element count of simple vector types are not divisible?");
  }
54368 unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
54369 unsigned NarrowElts = SelElts / NarrowingFactor;
  MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
  SDLoc DL(Ext);
54372 SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
54373 SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
54374 SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
54375 SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
  return DAG.getBitcast(VT, NarrowSel);
}
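// Combine EXTRACT_SUBVECTOR: constant-fold extracts of build vectors,
// prefer the lowest subvector of broadcasts, and narrow selects, shuffles,
// conversions and shifts when only a low subvector of the input is used.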
54379 static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
54380 TargetLowering::DAGCombinerInfo &DCI,
54381 const X86Subtarget &Subtarget) {
54382 // For AVX1 only, if we are extracting from a 256-bit and+not (which will
54383 // eventually get combined/lowered into ANDNP) with a concatenated operand,
54384 // split the 'and' into 128-bit ops to avoid the concatenate and extract.
54385 // We let generic combining take over from there to simplify the
54386 // insert/extract and 'not'.
54387 // This pattern emerges during AVX1 legalization. We handle it before lowering
54388 // to avoid complications like splitting constant vector loads.
54390 // Capture the original wide type in the likely case that we need to bitcast
54391 // back to this type.
  if (!N->getValueType(0).isSimple())
    return SDValue();
54395 MVT VT = N->getSimpleValueType(0);
54396 SDValue InVec = N->getOperand(0);
54397 unsigned IdxVal = N->getConstantOperandVal(1);
54398 SDValue InVecBC = peekThroughBitcasts(InVec);
54399 EVT InVecVT = InVec.getValueType();
54400 unsigned SizeInBits = VT.getSizeInBits();
54401 unsigned InSizeInBits = InVecVT.getSizeInBits();
54402 unsigned NumSubElts = VT.getVectorNumElements();
54403 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54405 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
54406 TLI.isTypeLegal(InVecVT) &&
54407 InSizeInBits == 256 && InVecBC.getOpcode() == ISD::AND) {
54408 auto isConcatenatedNot = [](SDValue V) {
54409 V = peekThroughBitcasts(V);
      if (!isBitwiseNot(V))
        return false;
      SDValue NotOp = V->getOperand(0);
      return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
    };
54415 if (isConcatenatedNot(InVecBC.getOperand(0)) ||
54416 isConcatenatedNot(InVecBC.getOperand(1))) {
54417 // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
54418 SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
54419 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
                         DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
    }
  }
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue V = narrowExtractedVectorSelect(N, DAG))
    return V;
54430 if (ISD::isBuildVectorAllZeros(InVec.getNode()))
54431 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
54433 if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
54434 if (VT.getScalarType() == MVT::i1)
54435 return DAG.getConstant(1, SDLoc(N), VT);
    return getOnesVector(VT, DAG, SDLoc(N));
  }
54439 if (InVec.getOpcode() == ISD::BUILD_VECTOR)
54440 return DAG.getBuildVector(VT, SDLoc(N),
54441 InVec->ops().slice(IdxVal, NumSubElts));
54443 // If we are extracting from an insert into a larger vector, replace with a
54444 // smaller insert if we don't access less than the original subvector. Don't
54445 // do this for i1 vectors.
54446 // TODO: Relax the matching indices requirement?
54447 if (VT.getVectorElementType() != MVT::i1 &&
54448 InVec.getOpcode() == ISD::INSERT_SUBVECTOR && InVec.hasOneUse() &&
54449 IdxVal == InVec.getConstantOperandVal(2) &&
      InVec.getOperand(1).getValueSizeInBits() <= SizeInBits) {
    SDLoc DL(N);
    SDValue NewExt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT,
54453 InVec.getOperand(0), N->getOperand(1));
54454 unsigned NewIdxVal = InVec.getConstantOperandVal(2) - IdxVal;
54455 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, NewExt,
54456 InVec.getOperand(1),
                       DAG.getVectorIdxConstant(NewIdxVal, DL));
  }
54460 // If we're extracting an upper subvector from a broadcast we should just
54461 // extract the lowest subvector instead which should allow
54462 // SimplifyDemandedVectorElts do more simplifications.
54463 if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
54464 InVec.getOpcode() == X86ISD::VBROADCAST_LOAD ||
54465 DAG.isSplatValue(InVec, /*AllowUndefs*/ false)))
54466 return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
54468 // If we're extracting a broadcasted subvector, just use the lowest subvector.
54469 if (IdxVal != 0 && InVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
54470 cast<MemIntrinsicSDNode>(InVec)->getMemoryVT() == VT)
54471 return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
54473 // Attempt to extract from the source of a shuffle vector.
54474 if ((InSizeInBits % SizeInBits) == 0 && (IdxVal % NumSubElts) == 0) {
54475 SmallVector<int, 32> ShuffleMask;
54476 SmallVector<int, 32> ScaledMask;
54477 SmallVector<SDValue, 2> ShuffleInputs;
54478 unsigned NumSubVecs = InSizeInBits / SizeInBits;
54479 // Decode the shuffle mask and scale it so its shuffling subvectors.
54480 if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
54481 scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
54482 unsigned SubVecIdx = IdxVal / NumSubElts;
54483 if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
54484 return DAG.getUNDEF(VT);
54485 if (ScaledMask[SubVecIdx] == SM_SentinelZero)
54486 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
54487 SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
54488 if (Src.getValueSizeInBits() == InSizeInBits) {
54489 unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
54490 unsigned SrcEltIdx = SrcSubVecIdx * NumSubElts;
54491 return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
                                 SDLoc(N), SizeInBits);
      }
    }
  }
54497 // If we're extracting the lowest subvector and we're the only user,
54498 // we may be able to perform this with a smaller vector width.
54499 unsigned InOpcode = InVec.getOpcode();
54500 if (InVec.hasOneUse()) {
54501 if (IdxVal == 0 && VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
54502 // v2f64 CVTDQ2PD(v4i32).
54503 if (InOpcode == ISD::SINT_TO_FP &&
54504 InVec.getOperand(0).getValueType() == MVT::v4i32) {
        return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
      }
      // v2f64 CVTUDQ2PD(v4i32).
54508 if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
54509 InVec.getOperand(0).getValueType() == MVT::v4i32) {
        return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
      }
      // v2f64 CVTPS2PD(v4f32).
54513 if (InOpcode == ISD::FP_EXTEND &&
54514 InVec.getOperand(0).getValueType() == MVT::v4f32) {
        return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
      }
    }
    if (IdxVal == 0 &&
        (InOpcode == ISD::ANY_EXTEND ||
54520 InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
54521 InOpcode == ISD::ZERO_EXTEND ||
54522 InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
54523 InOpcode == ISD::SIGN_EXTEND ||
54524 InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
54525 (SizeInBits == 128 || SizeInBits == 256) &&
        InVec.getOperand(0).getValueSizeInBits() >= SizeInBits) {
      SDLoc DL(N);
      SDValue Ext = InVec.getOperand(0);
54529 if (Ext.getValueSizeInBits() > SizeInBits)
54530 Ext = extractSubVector(Ext, 0, DAG, DL, SizeInBits);
54531 unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
      return DAG.getNode(ExtOp, DL, VT, Ext);
    }
    if (IdxVal == 0 && InOpcode == ISD::VSELECT &&
54535 InVec.getOperand(0).getValueType().is256BitVector() &&
54536 InVec.getOperand(1).getValueType().is256BitVector() &&
        InVec.getOperand(2).getValueType().is256BitVector()) {
      SDLoc DL(N);
      SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
54540 SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
54541 SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
      return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
    }
    if (IdxVal == 0 && InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() &&
        (VT.is128BitVector() || VT.is256BitVector())) {
      SDLoc DL(N);
      SDValue InVecSrc = InVec.getOperand(0);
54548 unsigned Scale = InVecSrc.getValueSizeInBits() / InSizeInBits;
54549 SDValue Ext = extractSubVector(InVecSrc, 0, DAG, DL, Scale * SizeInBits);
      return DAG.getNode(InOpcode, DL, VT, Ext);
    }
    if (InOpcode == X86ISD::MOVDDUP &&
        (VT.is128BitVector() || VT.is256BitVector())) {
      SDLoc DL(N);
      SDValue Ext0 =
          extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
      return DAG.getNode(InOpcode, DL, VT, Ext0);
    }
  }
54561 // Always split vXi64 logical shifts where we're extracting the upper 32-bits
54562 // as this is very likely to fold into a shuffle/truncation.
54563 if ((InOpcode == X86ISD::VSHLI || InOpcode == X86ISD::VSRLI) &&
54564 InVecVT.getScalarSizeInBits() == 64 &&
      InVec.getConstantOperandAPInt(1) == 32) {
    SDLoc DL(N);
    SDValue Ext =
        extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
    return DAG.getNode(InOpcode, DL, VT, Ext, InVec.getOperand(1));
  }

  return SDValue();
}
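// Combine SCALAR_TO_VECTOR: strip redundant masking for v1i1 sources,
// narrow v2i64/v2f64 scalars that only define the low 32 bits, and reuse
// an existing broadcast of the same scalar value.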
54575 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
54576 EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  SDLoc DL(N);
54580 // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
54581 // This occurs frequently in our masked scalar intrinsic code and our
54582 // floating point select lowering with AVX512.
54583 // TODO: SimplifyDemandedBits instead?
54584 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
54585 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
54586 if (C->getAPIntValue().isOne())
54587 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
54588 Src.getOperand(0));
54590 // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
54591 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
54592 Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
54593 Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
      if (C->isZero())
        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
                           Src.getOperand(1));
54599 // Reduce v2i64 to v4i32 if we don't need the upper bits.
54600 // TODO: Move to DAGCombine/SimplifyDemandedBits?
54601 if (VT == MVT::v2i64 || VT == MVT::v2f64) {
54602 auto IsAnyExt64 = [](SDValue Op) {
      if (Op.getValueType() != MVT::i64 || !Op.hasOneUse())
        return SDValue();
      if (Op.getOpcode() == ISD::ANY_EXTEND &&
          Op.getOperand(0).getScalarValueSizeInBits() <= 32)
        return Op.getOperand(0);
      if (auto *Ld = dyn_cast<LoadSDNode>(Op))
        if (Ld->getExtensionType() == ISD::EXTLOAD &&
            Ld->getMemoryVT().getScalarSizeInBits() <= 32)
          return Op;
      return SDValue();
    };
    if (SDValue ExtSrc = IsAnyExt64(peekThroughOneUseBitcasts(Src)))
54615 return DAG.getBitcast(
54616 VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
                          DAG.getAnyExtOrTrunc(ExtSrc, DL, MVT::i32)));
  }
54620 // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
54621 if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
54622 Src.getOperand(0).getValueType() == MVT::x86mmx)
54623 return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));
54625 // See if we're broadcasting the scalar value, in which case just reuse that.
54626 // Ensure the same SDValue from the SDNode use is being used.
54627 if (VT.getScalarType() == Src.getValueType())
54628 for (SDNode *User : Src->uses())
54629 if (User->getOpcode() == X86ISD::VBROADCAST &&
54630 Src == User->getOperand(0)) {
54631 unsigned SizeInBits = VT.getFixedSizeInBits();
54632 unsigned BroadcastSizeInBits =
54633 User->getValueSizeInBits(0).getFixedSize();
54634 if (BroadcastSizeInBits == SizeInBits)
54635 return SDValue(User, 0);
54636 if (BroadcastSizeInBits > SizeInBits)
          return extractSubVector(SDValue(User, 0), 0, DAG, DL, SizeInBits);
        // TODO: Handle BroadcastSizeInBits < SizeInBits when we have test
        // coverage.
      }

  return SDValue();
}
54645 // Simplify PMULDQ and PMULUDQ operations.
54646 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
54647 TargetLowering::DAGCombinerInfo &DCI,
54648 const X86Subtarget &Subtarget) {
54649 SDValue LHS = N->getOperand(0);
54650 SDValue RHS = N->getOperand(1);
54652 // Canonicalize constant to RHS.
54653 if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
54654 !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
54655 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
54657 // Multiply by zero.
54658 // Don't return RHS as it may contain UNDEFs.
54659 if (ISD::isBuildVectorAllZeros(RHS.getNode()))
54660 return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
54662 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
54663 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54664 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(64), DCI))
54665 return SDValue(N, 0);
54667 // If the input is an extend_invec and the SimplifyDemandedBits call didn't
54668 // convert it to any_extend_invec, due to the LegalOperations check, do the
54669 // conversion directly to a vector shuffle manually. This exposes combine
54670 // opportunities missed by combineEXTEND_VECTOR_INREG not calling
54671 // combineX86ShufflesRecursively on SSE4.1 targets.
54672 // FIXME: This is basically a hack around several other issues related to
54673 // ANY_EXTEND_VECTOR_INREG.
54674 if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
54675 (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
54676 LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
      LHS.getOperand(0).getValueType() == MVT::v4i32) {
    SDLoc dl(N);
    LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
54680 LHS.getOperand(0), { 0, -1, 1, -1 });
54681 LHS = DAG.getBitcast(MVT::v2i64, LHS);
    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
  }
  if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
54685 (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
54686 RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
      RHS.getOperand(0).getValueType() == MVT::v4i32) {
    SDLoc dl(N);
    RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
54690 RHS.getOperand(0), { 0, -1, 1, -1 });
54691 RHS = DAG.getBitcast(MVT::v2i64, RHS);
    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
  }

  return SDValue();
}
54698 // Simplify VPMADDUBSW/VPMADDWD operations.
54699 static SDValue combineVPMADD(SDNode *N, SelectionDAG &DAG,
54700 TargetLowering::DAGCombinerInfo &DCI) {
54701 EVT VT = N->getValueType(0);
54702 SDValue LHS = N->getOperand(0);
54703 SDValue RHS = N->getOperand(1);
54705 // Multiply by zero.
54706 // Don't return LHS/RHS as it may contain UNDEFs.
54707 if (ISD::isBuildVectorAllZeros(LHS.getNode()) ||
54708 ISD::isBuildVectorAllZeros(RHS.getNode()))
54709 return DAG.getConstant(0, SDLoc(N), VT);
54711 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54712 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
54713 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
    return SDValue(N, 0);

  return SDValue();
}
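// Combine ANY/SIGN/ZERO_EXTEND_VECTOR_INREG: merge extending loads,
// collapse nested extends, and lower to shuffles on SSE4.1+ targets.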
54719 static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
54720 TargetLowering::DAGCombinerInfo &DCI,
54721 const X86Subtarget &Subtarget) {
54722 EVT VT = N->getValueType(0);
54723 SDValue In = N->getOperand(0);
54724 unsigned Opcode = N->getOpcode();
54725 unsigned InOpcode = In.getOpcode();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc DL(N);
54729 // Try to merge vector loads and extend_inreg to an extload.
  if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
      In.hasOneUse()) {
    auto *Ld = cast<LoadSDNode>(In);
54733 if (Ld->isSimple()) {
54734 MVT SVT = In.getSimpleValueType().getVectorElementType();
      ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
                                 ? ISD::SEXTLOAD
                                 : ISD::ZEXTLOAD;
      EVT MemVT = VT.changeVectorElementType(SVT);
54739 if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
54740 SDValue Load = DAG.getExtLoad(
54741 Ext, DL, VT, Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
54742 MemVT, Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags());
        DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
        return Load;
      }
    }
  }
54749 // Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X).
54750 if (Opcode == InOpcode)
54751 return DAG.getNode(Opcode, DL, VT, In.getOperand(0));
54753 // Fold EXTEND_VECTOR_INREG(EXTRACT_SUBVECTOR(EXTEND(X),0))
54754 // -> EXTEND_VECTOR_INREG(X).
54755 // TODO: Handle non-zero subvector indices.
54756 if (InOpcode == ISD::EXTRACT_SUBVECTOR && In.getConstantOperandVal(1) == 0 &&
54757 In.getOperand(0).getOpcode() == getOpcode_EXTEND(Opcode) &&
54758 In.getOperand(0).getOperand(0).getValueSizeInBits() ==
54759 In.getValueSizeInBits())
54760 return DAG.getNode(Opcode, DL, VT, In.getOperand(0).getOperand(0));
54762 // Fold EXTEND_VECTOR_INREG(BUILD_VECTOR(X,Y,?,?)) -> BUILD_VECTOR(X,0,Y,0).
54763 // TODO: Move to DAGCombine?
54764 if (!DCI.isBeforeLegalizeOps() && Opcode == ISD::ZERO_EXTEND_VECTOR_INREG &&
54765 In.getOpcode() == ISD::BUILD_VECTOR && In.hasOneUse() &&
54766 In.getValueSizeInBits() == VT.getSizeInBits()) {
54767 unsigned NumElts = VT.getVectorNumElements();
54768 unsigned Scale = VT.getScalarSizeInBits() / In.getScalarValueSizeInBits();
54769 EVT EltVT = In.getOperand(0).getValueType();
54770 SmallVector<SDValue> Elts(Scale * NumElts, DAG.getConstant(0, DL, EltVT));
54771 for (unsigned I = 0; I != NumElts; ++I)
54772 Elts[I * Scale] = In.getOperand(I);
    return DAG.getBitcast(VT, DAG.getBuildVector(In.getValueType(), DL, Elts));
  }
54776 // Attempt to combine as a shuffle on SSE41+ targets.
54777 if ((Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
54778 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG) &&
      Subtarget.hasSSE41()) {
    SDValue Op(N, 0);
    if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
      if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
        return Res;
  }

  return SDValue();
}
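// Combine KSHIFTL/KSHIFTR: shifting an all-zeros mask yields all-zeros;
// otherwise only the demanded mask elements need to be computed.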
54789 static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
54790 TargetLowering::DAGCombinerInfo &DCI) {
54791 EVT VT = N->getValueType(0);
54793 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
54794 return DAG.getConstant(0, SDLoc(N), VT);
54796 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54797 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
54798 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
    return SDValue(N, 0);

  return SDValue();
}
54804 // Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
54805 // Done as a combine because the lowering for fp16_to_fp and fp_to_fp16 produce
54806 // extra instructions between the conversion due to going to scalar and back.
54807 static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
54808 const X86Subtarget &Subtarget) {
  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
    return SDValue();

  if (N->getValueType(0) != MVT::f32 ||
      N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
    return SDValue();

  SDLoc dl(N);
54820 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
54821 N->getOperand(0).getOperand(0));
54822 Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
54823 DAG.getTargetConstant(4, dl, MVT::i32));
54824 Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
54825 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                     DAG.getIntPtrConstant(0, dl));
}
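// Without AVX512FP16, vXf16 -> vXf32/vXf64 FP_EXTEND is lowered via
// CVTPH2PS on F16C targets, widening narrow inputs up to v8i16 first and
// fp-extending the f32 result again for f64 destinations.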
54829 static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
54830 const X86Subtarget &Subtarget) {
  if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
    return SDValue();

  if (Subtarget.hasFP16())
    return SDValue();
54837 bool IsStrict = N->isStrictFPOpcode();
54838 EVT VT = N->getValueType(0);
54839 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
54840 EVT SrcVT = Src.getValueType();
  if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
    return SDValue();
54845 if (VT.getVectorElementType() != MVT::f32 &&
      VT.getVectorElementType() != MVT::f64)
    return SDValue();
54849 unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 1 || !isPowerOf2_32(NumElts))
    return SDValue();

  SDLoc dl(N);

  // Convert the input to vXi16.
54856 EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
54857 Src = DAG.getBitcast(IntVT, Src);
  // Widen to at least 8 input elements.
  if (NumElts < 8) {
    unsigned NumConcats = 8 / NumElts;
    SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
                                : DAG.getConstant(0, dl, IntVT);
    SmallVector<SDValue, 4> Ops(NumConcats, Fill);
    Ops[0] = Src;
    Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
  }
54869 // Destination is vXf32 with at least 4 elements.
54870 EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
54871 std::max(4U, NumElts));
  SDValue Cvt, Chain;
  if (IsStrict) {
    Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
                      {N->getOperand(0), Src});
    Chain = Cvt.getValue(1);
  } else {
    Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
  }

  if (NumElts < 4) {
    assert(NumElts == 2 && "Unexpected size");
    Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
                      DAG.getIntPtrConstant(0, dl));
  }
  if (IsStrict) {
    // Extend to the original VT if necessary.
    if (Cvt.getValueType() != VT) {
      Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
                        {Chain, Cvt});
      Chain = Cvt.getValue(1);
    }
    return DAG.getMergeValues({Cvt, Chain}, dl);
  }

  // Extend to the original VT if necessary.
  return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
}
54901 // Try to find a larger VBROADCAST_LOAD/SUBV_BROADCAST_LOAD that we can extract
54902 // from. Limit this to cases where the loads have the same input chain and the
54903 // output chains are unused. This avoids any memory ordering issues.
54904 static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
54905 TargetLowering::DAGCombinerInfo &DCI) {
54906 assert((N->getOpcode() == X86ISD::VBROADCAST_LOAD ||
54907 N->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) &&
54908 "Unknown broadcast load type");
54910 // Only do this if the chain result is unused.
  if (N->hasAnyUseOfValue(1))
    return SDValue();
54914 auto *MemIntrin = cast<MemIntrinsicSDNode>(N);
54916 SDValue Ptr = MemIntrin->getBasePtr();
54917 SDValue Chain = MemIntrin->getChain();
54918 EVT VT = N->getSimpleValueType(0);
54919 EVT MemVT = MemIntrin->getMemoryVT();
54921 // Look at other users of our base pointer and try to find a wider broadcast.
54922 // The input chain and the size of the memory VT must match.
54923 for (SDNode *User : Ptr->uses())
54924 if (User != N && User->getOpcode() == N->getOpcode() &&
54925 cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
54926 cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
54927 cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
54928 MemVT.getSizeInBits() &&
54929 !User->hasAnyUseOfValue(1) &&
54930 User->getValueSizeInBits(0).getFixedSize() > VT.getFixedSizeInBits()) {
54931 SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
54932 VT.getSizeInBits());
54933 Extract = DAG.getBitcast(VT, Extract);
      return DCI.CombineTo(N, Extract, SDValue(User, 1));
    }

  return SDValue();
}
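// Without AVX512FP16, vXf32 -> vXf16 FP_ROUND is lowered via CVTPS2PH on
// F16C targets; the immediate 4 selects the current MXCSR rounding mode,
// and the widened result is trimmed back to the original element count.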
54940 static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
54941 const X86Subtarget &Subtarget) {
  if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
    return SDValue();

  if (Subtarget.hasFP16())
    return SDValue();
54948 bool IsStrict = N->isStrictFPOpcode();
54949 EVT VT = N->getValueType(0);
54950 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
54951 EVT SrcVT = Src.getValueType();
54953 if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
      SrcVT.getVectorElementType() != MVT::f32)
    return SDValue();
54957 unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 1 || !isPowerOf2_32(NumElts))
    return SDValue();

  SDLoc dl(N);

  // Widen to at least 4 input elements.
  if (NumElts < 4)
    Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
54966 DAG.getConstantFP(0.0, dl, SrcVT));
54968 // Destination is v8i16 with at least 8 elements.
54969 EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54970 std::max(8U, NumElts));
54971 SDValue Cvt, Chain;
  SDValue Rnd = DAG.getTargetConstant(4, dl, MVT::i32);
  if (IsStrict) {
    Cvt = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {CvtVT, MVT::Other},
                      {N->getOperand(0), Src, Rnd});
    Chain = Cvt.getValue(1);
  } else {
    Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src, Rnd);
  }

  // Extract down to real number of elements.
  if (NumElts < 8) {
    EVT IntVT = VT.changeVectorElementTypeToInteger();
    Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
                      DAG.getIntPtrConstant(0, dl));
  }

  Cvt = DAG.getBitcast(VT, Cvt);

  if (IsStrict)
    return DAG.getMergeValues({Cvt, Chain}, dl);

  return Cvt;
}
54996 static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
54997 SDValue Src = N->getOperand(0);
54999 // Turn MOVDQ2Q+simple_load into an mmx load.
  if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
    LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());

    if (LN->isSimple()) {
      SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
                                  LN->getBasePtr(),
                                  LN->getPointerInfo(),
                                  LN->getOriginalAlign(),
                                  LN->getMemOperand()->getFlags());
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
      return NewLd;
    }
  }

  return SDValue();
}
55017 static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
55018 TargetLowering::DAGCombinerInfo &DCI) {
55019 unsigned NumBits = N->getSimpleValueType(0).getSizeInBits();
55020 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55021 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBits), DCI))
    return SDValue(N, 0);

  return SDValue();
}
55027 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
55028 DAGCombinerInfo &DCI) const {
55029 SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
55032 case ISD::SCALAR_TO_VECTOR:
55033 return combineScalarToVector(N, DAG);
55034 case ISD::EXTRACT_VECTOR_ELT:
55035 case X86ISD::PEXTRW:
55036 case X86ISD::PEXTRB:
55037 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
55038 case ISD::CONCAT_VECTORS:
55039 return combineCONCAT_VECTORS(N, DAG, DCI, Subtarget);
55040 case ISD::INSERT_SUBVECTOR:
55041 return combineINSERT_SUBVECTOR(N, DAG, DCI, Subtarget);
55042 case ISD::EXTRACT_SUBVECTOR:
55043 return combineEXTRACT_SUBVECTOR(N, DAG, DCI, Subtarget);
  case ISD::VSELECT:
  case ISD::SELECT:
  case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
55047 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
55048 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
55049 case X86ISD::CMP: return combineCMP(N, DAG);
55050 case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
55051 case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
  case X86ISD::ADD:
  case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
55054 case X86ISD::SBB: return combineSBB(N, DAG);
55055 case X86ISD::ADC: return combineADC(N, DAG, DCI);
55056 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
55057 case ISD::SHL: return combineShiftLeft(N, DAG);
55058 case ISD::SRA: return combineShiftRightArithmetic(N, DAG, Subtarget);
55059 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI, Subtarget);
55060 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
55061 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
55062 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
55063 case X86ISD::BEXTR:
55064 case X86ISD::BEXTRI: return combineBEXTR(N, DAG, DCI, Subtarget);
55065 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
55066 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
55067 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
55068 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
55069 case X86ISD::VEXTRACT_STORE:
55070 return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
55071 case ISD::SINT_TO_FP:
55072 case ISD::STRICT_SINT_TO_FP:
55073 return combineSIntToFP(N, DAG, DCI, Subtarget);
55074 case ISD::UINT_TO_FP:
55075 case ISD::STRICT_UINT_TO_FP:
55076 return combineUIntToFP(N, DAG, Subtarget);
  case ISD::FADD:
  case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
55079 case X86ISD::VFCMULC:
55080 case X86ISD::VFMULC: return combineFMulcFCMulc(N, DAG, Subtarget);
55081 case ISD::FNEG: return combineFneg(N, DAG, DCI, Subtarget);
55082 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
55083 case X86ISD::VTRUNC: return combineVTRUNC(N, DAG, DCI);
55084 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
55085 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
55086 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR: return combineFOr(N, DAG, DCI, Subtarget);
  case X86ISD::FMIN:
  case X86ISD::FMAX: return combineFMinFMax(N, DAG);
  case ISD::FMINNUM:
  case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
55093 case X86ISD::CVTSI2P:
55094 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
55095 case X86ISD::CVTP2SI:
55096 case X86ISD::CVTP2UI:
55097 case X86ISD::STRICT_CVTTP2SI:
55098 case X86ISD::CVTTP2SI:
55099 case X86ISD::STRICT_CVTTP2UI:
55100 case X86ISD::CVTTP2UI:
55101 return combineCVTP2I_CVTTP2I(N, DAG, DCI);
55102 case X86ISD::STRICT_CVTPH2PS:
55103 case X86ISD::CVTPH2PS: return combineCVTPH2PS(N, DAG, DCI);
55104 case X86ISD::BT: return combineBT(N, DAG, DCI);
55105 case ISD::ANY_EXTEND:
55106 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
55107 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
55108 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
55109 case ISD::ANY_EXTEND_VECTOR_INREG:
55110 case ISD::SIGN_EXTEND_VECTOR_INREG:
55111 case ISD::ZERO_EXTEND_VECTOR_INREG:
55112 return combineEXTEND_VECTOR_INREG(N, DAG, DCI, Subtarget);
55113 case ISD::SETCC: return combineSetCC(N, DAG, DCI, Subtarget);
55114 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
55115 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
  case X86ISD::PACKSS:
  case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
  case X86ISD::HADD:
  case X86ISD::HSUB:
  case X86ISD::FHADD:
  case X86ISD::FHSUB: return combineVectorHADDSUB(N, DAG, DCI, Subtarget);
  case X86ISD::VSHL:
  case X86ISD::VSRA:
  case X86ISD::VSRL:
    return combineVectorShiftVar(N, DAG, DCI, Subtarget);
55126 case X86ISD::VSHLI:
55127 case X86ISD::VSRAI:
55128 case X86ISD::VSRLI:
55129 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
55130 case ISD::INSERT_VECTOR_ELT:
55131 case X86ISD::PINSRB:
55132 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
55133 case X86ISD::SHUFP: // Handle all target specific shuffles
55134 case X86ISD::INSERTPS:
55135 case X86ISD::EXTRQI:
55136 case X86ISD::INSERTQI:
55137 case X86ISD::VALIGN:
55138 case X86ISD::PALIGNR:
55139 case X86ISD::VSHLDQ:
55140 case X86ISD::VSRLDQ:
55141 case X86ISD::BLENDI:
55142 case X86ISD::UNPCKH:
55143 case X86ISD::UNPCKL:
55144 case X86ISD::MOVHLPS:
55145 case X86ISD::MOVLHPS:
55146 case X86ISD::PSHUFB:
55147 case X86ISD::PSHUFD:
55148 case X86ISD::PSHUFHW:
55149 case X86ISD::PSHUFLW:
55150 case X86ISD::MOVSHDUP:
55151 case X86ISD::MOVSLDUP:
55152 case X86ISD::MOVDDUP:
55153 case X86ISD::MOVSS:
55154 case X86ISD::MOVSD:
55155 case X86ISD::MOVSH:
55156 case X86ISD::VBROADCAST:
55157 case X86ISD::VPPERM:
55158 case X86ISD::VPERMI:
55159 case X86ISD::VPERMV:
55160 case X86ISD::VPERMV3:
55161 case X86ISD::VPERMIL2:
55162 case X86ISD::VPERMILPI:
55163 case X86ISD::VPERMILPV:
55164 case X86ISD::VPERM2X128:
55165 case X86ISD::SHUF128:
55166 case X86ISD::VZEXT_MOVL:
  case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
55168 case X86ISD::FMADD_RND:
55169 case X86ISD::FMSUB:
55170 case X86ISD::STRICT_FMSUB:
55171 case X86ISD::FMSUB_RND:
55172 case X86ISD::FNMADD:
55173 case X86ISD::STRICT_FNMADD:
55174 case X86ISD::FNMADD_RND:
55175 case X86ISD::FNMSUB:
55176 case X86ISD::STRICT_FNMSUB:
55177 case X86ISD::FNMSUB_RND:
  case ISD::FMA:
  case ISD::STRICT_FMA: return combineFMA(N, DAG, DCI, Subtarget);
55180 case X86ISD::FMADDSUB_RND:
55181 case X86ISD::FMSUBADD_RND:
55182 case X86ISD::FMADDSUB:
55183 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, DCI);
55184 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
55185 case X86ISD::MGATHER:
55186 case X86ISD::MSCATTER:
55187 return combineX86GatherScatter(N, DAG, DCI, Subtarget);
  case ISD::MGATHER:
  case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
55190 case X86ISD::PCMPEQ:
55191 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
55192 case X86ISD::PMULDQ:
55193 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
55194 case X86ISD::VPMADDUBSW:
55195 case X86ISD::VPMADDWD: return combineVPMADD(N, DAG, DCI);
55196 case X86ISD::KSHIFTL:
55197 case X86ISD::KSHIFTR: return combineKSHIFT(N, DAG, DCI);
55198 case ISD::FP16_TO_FP: return combineFP16_TO_FP(N, DAG, Subtarget);
55199 case ISD::STRICT_FP_EXTEND:
55200 case ISD::FP_EXTEND: return combineFP_EXTEND(N, DAG, Subtarget);
55201 case ISD::STRICT_FP_ROUND:
55202 case ISD::FP_ROUND: return combineFP_ROUND(N, DAG, Subtarget);
55203 case X86ISD::VBROADCAST_LOAD:
55204 case X86ISD::SUBV_BROADCAST_LOAD: return combineBROADCAST_LOAD(N, DAG, DCI);
55205 case X86ISD::MOVDQ2Q: return combineMOVDQ2Q(N, DAG);
55206 case X86ISD::PDEP: return combinePDEP(N, DAG, DCI);
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;

  // There are no vXi8 shifts.
  if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
    return false;

  // TODO: Almost no 8-bit ops are desirable because they have no actual
  // size/speed advantages vs. 32-bit ops, but they do have a major
  // potential disadvantage by causing partial register stalls.
  //
  // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
  // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
  // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
  // check for a constant operand to the multiply.
  if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
    return false;

  // i16 instruction encodings are longer and some i16 instructions are slow,
  // so those are not desirable.
  if (VT == MVT::i16) {
    switch (Opc) {
    default:
      return true;
    case ISD::LOAD:
    case ISD::SIGN_EXTEND: case ISD::ZERO_EXTEND: case ISD::ANY_EXTEND:
    case ISD::SHL: case ISD::SRA: case ISD::SRL:
    case ISD::ADD: case ISD::SUB: case ISD::MUL:
    case ISD::AND: case ISD::OR: case ISD::XOR:
      return false;
    }
  }

  // Any legal type not explicitly accounted for above here is desirable.
  return true;
}

SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc &dl,
                                                  SDValue Value, SDValue Addr,
                                                  SelectionDAG &DAG) const {
  const Module *M = DAG.getMachineFunction().getMMI().getModule();
  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
  if (IsCFProtectionSupported) {
    // If control-flow branch protection is enabled, we need to add the
    // notrack prefix to the indirect branch. To do that, we create the
    // NT_BRIND SDNode; during instruction selection the pattern converts it
    // into a jmp with the NoTrack prefix.
    return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
  }

  return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
}
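
// Illustrative note: with -fcf-protection=branch the module flag above is
// set, and a switch lowered through a jump table dispatches with, e.g.:
//   notrack jmpq *%rax
// The NOTRACK prefix exempts this one branch from CET indirect-branch
// tracking, so the jump-table targets do not each need an ENDBR64 landing
// pad.
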
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
                             isa<ConstantSDNode>(Op.getOperand(1));

  // i16 is legal, but undesirable since i16 instruction encodings are longer
  // and some i16 instructions are slow.
  // 8-bit multiply-by-constant can usually be expanded to something cheaper
  // using LEA and/or other ALU ops.
  if (VT != MVT::i16 && !Is8BitMulByConstant)
    return false;

  auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
    if (!Op.hasOneUse())
      return false;
    SDNode *User = *Op->use_begin();
    if (!ISD::isNormalStore(User))
      return false;
    auto *Ld = cast<LoadSDNode>(Load);
    auto *St = cast<StoreSDNode>(User);
    return Ld->getBasePtr() == St->getBasePtr();
  };

  auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
    if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
      return false;
    if (!Op.hasOneUse())
      return false;
    SDNode *User = *Op->use_begin();
    if (User->getOpcode() != ISD::ATOMIC_STORE)
      return false;
    auto *Ld = cast<AtomicSDNode>(Load);
    auto *St = cast<AtomicSDNode>(User);
    return Ld->getBasePtr() == St->getBasePtr();
  };

  bool Commute = false;
  switch (Op.getOpcode()) {
  default: return false;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (X86::mayFoldLoad(N0, Subtarget) && IsFoldableRMW(N0, Op))
      return false;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    LLVM_FALLTHROUGH;
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    // Avoid disabling potential load folding opportunities.
    if (X86::mayFoldLoad(N1, Subtarget) &&
        (!Commute || !isa<ConstantSDNode>(N0) ||
         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
      return false;
    if (X86::mayFoldLoad(N0, Subtarget) &&
        ((Commute && !isa<ConstantSDNode>(N1)) ||
         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
      return false;
    if (IsFoldableAtomicRMW(N0, Op) ||
        (Commute && IsFoldableAtomicRMW(N1, Op)))
      return false;
  }
  }

  PVT = MVT::i32;
  return true;
}
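
// Illustrative encoding comparison (why i16 ops are promoted to i32): the
// 16-bit form needs a 0x66 operand-size prefix, e.g.
//   66 83 c0 01    addw $1, %ax
//   83 c0 01       addl $1, %eax
// so promoting to i32 saves a byte and avoids partial-register updates,
// unless doing so would break one of the load/store folding opportunities
// checked above.
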
//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

// Helper to match a string separated by whitespace.
static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
  S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.

  for (StringRef Piece : Pieces) {
    if (!S.startswith(Piece)) // Check if the piece matches.
      return false;

    S = S.substr(Piece.size());
    StringRef::size_type Pos = S.find_first_not_of(" \t");
    if (Pos == 0) // We matched a prefix.
      return false;

    S = S.substr(Pos);
  }

  return S.empty();
}
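
// For example (illustrative):
//   matchAsm("  bswap   %eax ", {"bswap", "%eax"})  -> true
//   matchAsm("bswapl %eax",     {"bswap", "%eax"})  -> false
// The second case fails because "bswap" only matches a prefix of "bswapl";
// each piece must be followed by whitespace or the end of the string.
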
static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
    if (llvm::is_contained(AsmPieces, "~{cc}") &&
        llvm::is_contained(AsmPieces, "~{flags}") &&
        llvm::is_contained(AsmPieces, "~{fpsr}")) {
      if (AsmPieces.size() == 3)
        return true;
      else if (llvm::is_contained(AsmPieces, "~{dirflag}"))
        return true;
    }
  }
  return false;
}
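
// Illustrative constraint strings that satisfy this check (the pieces are
// the clobbers that follow the "=r,0," prefix):
//   "=r,0,~{cc},~{fpsr},~{flags}"             (three clobbers)
//   "=r,0,~{cc},~{dirflag},~{fpsr},~{flags}"  (four, including ~{dirflag})
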
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());

  const std::string &AsmStr = IA->getAsmString();

  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    // FIXME: this should verify that we are targeting a 486 or better. If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm. For now, we don't support 486 or
    // lower so don't worry about this.
    // bswap $0
    if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
        matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
        matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      return IntrinsicLowering::LowerToByteSwap(CI);
    }

    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
         matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
      AsmPieces.clear();
      StringRef ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
        matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
        matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
      AsmPieces.clear();
      StringRef ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
        if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
            matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
            matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}
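
// For illustration, the single-instruction case above rewrites
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// into
//   %r = call i32 @llvm.bswap.i32(i32 %x)
// so the byte swap can participate in normal dataflow optimizations instead
// of being an opaque asm blob.
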
static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
  X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
                           .Case("{@cca}", X86::COND_A)
                           .Case("{@ccae}", X86::COND_AE)
                           .Case("{@ccb}", X86::COND_B)
                           .Case("{@ccbe}", X86::COND_BE)
                           .Case("{@ccc}", X86::COND_B)
                           .Case("{@cce}", X86::COND_E)
                           .Case("{@ccz}", X86::COND_E)
                           .Case("{@ccg}", X86::COND_G)
                           .Case("{@ccge}", X86::COND_GE)
                           .Case("{@ccl}", X86::COND_L)
                           .Case("{@ccle}", X86::COND_LE)
                           .Case("{@ccna}", X86::COND_BE)
                           .Case("{@ccnae}", X86::COND_B)
                           .Case("{@ccnb}", X86::COND_AE)
                           .Case("{@ccnbe}", X86::COND_A)
                           .Case("{@ccnc}", X86::COND_AE)
                           .Case("{@ccne}", X86::COND_NE)
                           .Case("{@ccnz}", X86::COND_NE)
                           .Case("{@ccng}", X86::COND_LE)
                           .Case("{@ccnge}", X86::COND_L)
                           .Case("{@ccnl}", X86::COND_GE)
                           .Case("{@ccnle}", X86::COND_G)
                           .Case("{@ccno}", X86::COND_NO)
                           .Case("{@ccnp}", X86::COND_NP)
                           .Case("{@ccns}", X86::COND_NS)
                           .Case("{@cco}", X86::COND_O)
                           .Case("{@ccp}", X86::COND_P)
                           .Case("{@ccs}", X86::COND_S)
                           .Default(X86::COND_INVALID);
  return Cond;
}
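
// These match GCC's flag output operands, e.g. in C (illustrative):
//   int lt;
//   asm("cmpq %2, %1" : "=@ccl"(lt) : "r"(a), "r"(b));
// where "=@ccl" yields 1 iff the signed less-than condition (SF != OF)
// holds after the asm executes, i.e. lt = (a < b).
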
/// Given a constraint letter, return the type of constraint for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R': case 'q': case 'Q': case 'f': case 't': case 'u':
    case 'y': case 'x': case 'v': case 'l':
    case 'k': // AVX512 masking registers.
      return C_RegisterClass;
    case 'a': case 'b': case 'c': case 'd': case 'S': case 'D': case 'A':
      return C_Register;
    case 'I': case 'J': case 'K': case 'N': case 'G': case 'L': case 'M':
      return C_Immediate;
    case 'C': case 'e': case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  else if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    default:
      break;
    case 'Y':
      switch (Constraint[1]) {
      default:
        break;
      case 'z':
        return C_Register;
      case 'i': case 'm': case 'k': case 't': case '2':
        return C_RegisterClass;
      }
    }
  } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
    return C_Other;
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    LLVM_FALLTHROUGH;
  case 'R': case 'q': case 'Q': case 'a': case 'b': case 'c': case 'd':
  case 'S': case 'D': case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f': case 't': case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget.hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'Y':
    if (StringRef(constraint).size() != 2)
      break;
    switch (constraint[1]) {
    default:
      return CW_Invalid;
    // XMM0
    case 'z':
      if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
          ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
          ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
        return CW_SpecificReg;
      return CW_Invalid;
    // Conditional OpMask regs (AVX512)
    case 'k':
      if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
        return CW_Register;
      return CW_Invalid;
    // Any MMX reg
    case 'm':
      if (type->isX86_MMXTy() && Subtarget.hasMMX())
        return weight;
      return CW_Invalid;
    // Any SSE reg when ISA >= SSE2, same as 'x'
    case 'i': case 't': case '2':
      if (!Subtarget.hasSSE2())
        return CW_Invalid;
      break;
    }
    break;
  case 'v':
    if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
      weight = CW_Register;
    LLVM_FALLTHROUGH;
  case 'x':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
      weight = CW_Register;
    break;
  case 'k':
    // Enable conditional vector operations using %k<#> registers.
    if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
      weight = CW_Register;
    break;
  case 'I':
    if (auto *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (isa<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand.
const char *X86TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget.hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

// Lower @cc targets via setcc.
SDValue X86TargetLowering::LowerAsmOutputForConstraint(
    SDValue &Chain, SDValue &Flag, const SDLoc &DL,
    const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
  X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
  if (Cond == X86::COND_INVALID)
    return SDValue();
  // Check that the return type is valid.
  if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
      OpInfo.ConstraintVT.getSizeInBits() < 8)
    report_fatal_error("Flag output operand is of invalid type");

  // Get EFLAGS register. Only update the chain when the copyfrom is glued.
  if (Flag.getNode()) {
    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
    Chain = Flag.getValue(1);
  } else
    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
  // Extract the CC code.
  SDValue CC = getSETCC(Cond, Flag, DL, DAG);
  // Extend to the constraint's integer type.
  SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);

  return Result;
}
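
// The resulting DAG for an "=@cc*" output is, schematically:
//   (zext (x86setcc cond, (CopyFromReg EFLAGS)))
// i.e. the condition is materialized with a SETcc and then widened to the
// constraint's integer type.
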
/// Lower the specified operand into the Ops vector.
/// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (auto *CST = dyn_cast<ConstantSDNode>(Op)) {
      bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
      BooleanContent BCont = getBooleanContents(MVT::i64);
      ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
                                    : ISD::SIGN_EXTEND;
      int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
                                                  : CST->getSExtValue();
      Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates. BlockAddresses are fine though.
    if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
        !isa<BlockAddressSDNode>(Op))
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
      // If we require an extra load to get this address, as in PIC mode, we
      // can't accept it.
      if (isGlobalStubReference(
              Subtarget.classifyGlobalReference(GA->getGlobal())))
        return;
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
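
// For example (illustrative), a user writing
//   asm("shrdl %2, %1, %0" : "+r"(lo) : "r"(hi), "I"(12));
// reaches the 'I' case above: 12 <= 31, so the operand is rebuilt as a
// target constant. An out-of-range value falls through without adding
// anything to Ops, which makes the operand invalid.
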
/// Check if \p RC is a general purpose register class.
/// I.e., GR* or one of their variants.
static bool isGRClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::GR8RegClass) ||
         RC.hasSuperClassEq(&X86::GR16RegClass) ||
         RC.hasSuperClassEq(&X86::GR32RegClass) ||
         RC.hasSuperClassEq(&X86::GR64RegClass) ||
         RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
}

/// Check if \p RC is a vector register class.
/// I.e., FR* / VR* or one of their variants.
static bool isFRClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::FR16XRegClass) ||
         RC.hasSuperClassEq(&X86::FR32XRegClass) ||
         RC.hasSuperClassEq(&X86::FR64XRegClass) ||
         RC.hasSuperClassEq(&X86::VR128XRegClass) ||
         RC.hasSuperClassEq(&X86::VR256XRegClass) ||
         RC.hasSuperClassEq(&X86::VR512RegClass);
}

/// Check if \p RC is a mask register class.
/// I.e., VK* or one of their variants.
static bool isVKClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::VK1RegClass) ||
         RC.hasSuperClassEq(&X86::VK2RegClass) ||
         RC.hasSuperClassEq(&X86::VK4RegClass) ||
         RC.hasSuperClassEq(&X86::VK8RegClass) ||
         RC.hasSuperClassEq(&X86::VK16RegClass) ||
         RC.hasSuperClassEq(&X86::VK32RegClass) ||
         RC.hasSuperClassEq(&X86::VK64RegClass);
}

std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // 'A' means [ER]AX + [ER]DX.
    case 'A':
      if (Subtarget.is64Bit())
        return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
      assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
             "Expecting 64, 32 or 16 bit subtarget");
      return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);

      // TODO: Slight differences here in allocation order and leaving
      // RIP in the class. Do they matter any more here than they do
      // in the normal allocation?
    case 'k':
      if (Subtarget.hasAVX512()) {
        if (VT == MVT::i1)
          return std::make_pair(0U, &X86::VK1RegClass);
        if (VT == MVT::i8)
          return std::make_pair(0U, &X86::VK8RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16RegClass);
      }
      if (Subtarget.hasBWI()) {
        if (VT == MVT::i32)
          return std::make_pair(0U, &X86::VK32RegClass);
        if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64RegClass);
      }
      break;
    case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget.is64Bit()) {
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT != MVT::f80 && !VT.isVector())
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      LLVM_FALLTHROUGH;
      // 32-bit fallthrough
    case 'Q': // Q_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i32 || VT == MVT::f32 ||
          (!VT.isVector() && !Subtarget.is64Bit()))
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT != MVT::f80 && !VT.isVector())
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r': // GENERAL_REGS
    case 'l': // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 ||
          (!VT.isVector() && !Subtarget.is64Bit()))
        return std::make_pair(0U, &X86::GR32RegClass);
      if (VT != MVT::f80 && !VT.isVector())
        return std::make_pair(0U, &X86::GR64RegClass);
      break;
    case 'R': // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || VT == MVT::f32 ||
          (!VT.isVector() && !Subtarget.is64Bit()))
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      if (VT != MVT::f80 && !VT.isVector())
        return std::make_pair(0U, &X86::GR64_NOREXRegClass);
      break;
    case 'f': // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
        return std::make_pair(0U, &X86::RFP80RegClass);
      break;
    case 'y': // MMX_REGS if MMX allowed.
      if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'v':
    case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget.hasSSE1()) break;
      bool VConstraint = (Constraint[0] == 'v');

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f16:
        if (VConstraint && Subtarget.hasFP16())
          return std::make_pair(0U, &X86::FR16XRegClass);
        break;
      case MVT::f32:
      case MVT::i32:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::FR32XRegClass);
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::FR64XRegClass);
        return std::make_pair(0U, &X86::FR64RegClass);
      case MVT::i128:
        if (Subtarget.is64Bit()) {
          if (VConstraint && Subtarget.hasVLX())
            return std::make_pair(0U, &X86::VR128XRegClass);
          return std::make_pair(0U, &X86::VR128RegClass);
        }
        break;
      // Vector types and fp128.
      case MVT::v8f16:
        if (!Subtarget.hasFP16())
          break;
        LLVM_FALLTHROUGH;
      case MVT::f128:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::VR128XRegClass);
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v16f16:
        if (!Subtarget.hasFP16())
          break;
        LLVM_FALLTHROUGH;
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::VR256XRegClass);
        if (Subtarget.hasAVX())
          return std::make_pair(0U, &X86::VR256RegClass);
        break;
      case MVT::v32f16:
        if (!Subtarget.hasFP16())
          break;
        LLVM_FALLTHROUGH;
      case MVT::v64i8:
      case MVT::v32i16:
      case MVT::v16i32:
      case MVT::v8i64:
      case MVT::v16f32:
      case MVT::v8f64:
        if (!Subtarget.hasAVX512()) break;
        if (VConstraint)
          return std::make_pair(0U, &X86::VR512RegClass);
        return std::make_pair(0U, &X86::VR512_0_15RegClass);
      }
      break;
    }
  } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
    switch (Constraint[1]) {
    default:
      break;
    case 'i':
    case 't':
    case '2':
      return getRegForInlineAsmConstraint(TRI, "x", VT);
    case 'm':
      if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'z':
      if (!Subtarget.hasSSE1()) break;
      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f16:
        if (!Subtarget.hasFP16())
          break;
        return std::make_pair(X86::XMM0, &X86::FR16XRegClass);
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(X86::XMM0, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(X86::XMM0, &X86::FR64RegClass);
      case MVT::v8f16:
        if (!Subtarget.hasFP16())
          break;
        LLVM_FALLTHROUGH;
      case MVT::f128:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(X86::XMM0, &X86::VR128RegClass);
      // AVX types.
      case MVT::v16f16:
        if (!Subtarget.hasFP16())
          break;
        LLVM_FALLTHROUGH;
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        if (Subtarget.hasAVX())
          return std::make_pair(X86::YMM0, &X86::VR256RegClass);
        break;
      case MVT::v32f16:
        if (!Subtarget.hasFP16())
          break;
        LLVM_FALLTHROUGH;
      case MVT::v64i8:
      case MVT::v32i16:
      case MVT::v16i32:
      case MVT::v8i64:
      case MVT::v16f32:
      case MVT::v8f64:
        if (Subtarget.hasAVX512())
          return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
        break;
      }
      break;
    case 'k':
      // This register class doesn't allocate k0 for masked vector operation.
      if (Subtarget.hasAVX512()) {
        if (VT == MVT::i1)
          return std::make_pair(0U, &X86::VK1WMRegClass);
        if (VT == MVT::i8)
          return std::make_pair(0U, &X86::VK8WMRegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16WMRegClass);
      }
      if (Subtarget.hasBWI()) {
        if (VT == MVT::i32)
          return std::make_pair(0U, &X86::VK32WMRegClass);
        if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64WMRegClass);
      }
      break;
    }
  }

  if (parseConstraintCode(Constraint) != X86::COND_INVALID)
    return std::make_pair(0U, &X86::GR32RegClass);

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<Register, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Only match x87 registers if the VT is one SelectionDAGBuilder can convert
    // to/from.
    if (VT == MVT::Other || VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80) {
      // Map st(0) -> st(7) -> ST0
      if (Constraint.size() == 7 && Constraint[0] == '{' &&
          tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
          Constraint[3] == '(' &&
          (Constraint[4] >= '0' && Constraint[4] <= '7') &&
          Constraint[5] == ')' && Constraint[6] == '}') {
        // st(7) is not allocatable and thus not a member of RFP80. Return
        // singleton class in cases where we have a reference to it.
        if (Constraint[4] == '7')
          return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
        return std::make_pair(X86::FP0 + Constraint[4] - '0',
                              &X86::RFP80RegClass);
      }

      // GCC allows "st(0)" to be called just plain "st".
      if (StringRef("{st}").equals_insensitive(Constraint))
        return std::make_pair(X86::FP0, &X86::RFP80RegClass);
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_insensitive(Constraint))
      return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);

    // dirflag -> DF
    // Only allow for clobber.
    if (StringRef("{dirflag}").equals_insensitive(Constraint) &&
        VT == MVT::Other)
      return std::make_pair(X86::DF, &X86::DFCCRRegClass);

    // fpsr -> FPSW
    if (StringRef("{fpsr}").equals_insensitive(Constraint))
      return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);

    return std::make_pair(0U, nullptr);
  }

  // Make sure it isn't a register that requires 64-bit mode.
  if (!Subtarget.is64Bit() &&
      (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
      TRI->getEncodingValue(Res.first) >= 8) {
    // Register requires REX prefix, but we're in 32-bit mode.
    return std::make_pair(0, nullptr);
  }

  // Make sure it isn't a register that requires AVX512.
  if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
      TRI->getEncodingValue(Res.first) & 0x10) {
    // Register requires EVEX prefix.
    return std::make_pair(0, nullptr);
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  // MVT::Other is used to specify clobber names.
  if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
    return Res; // Correct type already, nothing to do.

  // Get a matching integer of the correct size. I.e. "ax" with MVT::i32 should
  // return "eax". This should even work for things like getting 64-bit integer
  // registers when given an f64 type.
  const TargetRegisterClass *Class = Res.second;
  // The generic code will match the first register class that contains the
  // given register. Thus, based on the ordering of the tablegened file,
  // the "plain" GR classes might not come first.
  // Therefore, use a helper method.
  if (isGRClass(*Class)) {
    unsigned Size = VT.getSizeInBits();
    if (Size == 1) Size = 8;
    Register DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
    if (DestReg.isValid()) {
      bool is64Bit = Subtarget.is64Bit();
      const TargetRegisterClass *RC =
          Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
        : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
        : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
        : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
        : nullptr;
      if (Size == 64 && !is64Bit) {
        // Model GCC's behavior here and select a fixed pair of 32-bit
        // registers.
        switch (DestReg) {
        case X86::RAX:
          return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
        case X86::RDX:
          return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
        case X86::RCX:
          return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
        case X86::RBX:
          return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
        case X86::RSI:
          return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
        case X86::RDI:
          return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
        case X86::RBP:
          return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
        default:
          return std::make_pair(0, nullptr);
        }
      }
      if (RC && RC->contains(DestReg))
        return std::make_pair(DestReg, RC);
    }

    // No register found/type mismatch.
    return std::make_pair(0, nullptr);
  } else if (isFRClass(*Class)) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
    if (VT == MVT::f16)
      Res.second = &X86::FR16XRegClass;
    else if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32XRegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
      Res.second = &X86::VR128XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
      Res.second = &X86::VR256XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
      Res.second = &X86::VR512RegClass;
    else {
      // Type mismatch and not a clobber: return an error.
      Res.first = 0;
      Res.second = nullptr;
    }
  } else if (isVKClass(*Class)) {
    if (VT == MVT::i1)
      Res.second = &X86::VK1RegClass;
    else if (VT == MVT::i8)
      Res.second = &X86::VK8RegClass;
    else if (VT == MVT::i16)
      Res.second = &X86::VK16RegClass;
    else if (VT == MVT::i32)
      Res.second = &X86::VK32RegClass;
    else if (VT == MVT::i64)
      Res.second = &X86::VK64RegClass;
    else {
      // Type mismatch and not a clobber: return an error.
      Res.first = 0;
      Res.second = nullptr;
    }
  }

  return Res;
}
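
// Example of the remapping performed above (illustrative): for
//   asm("..." : "={ax}"(v))
// with a 32-bit operand, the generic matcher first returns AX in GR16; the
// isGRClass path then rewrites that to EAX in GR32 so the register matches
// the operand's value type.
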
InstructionCost X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
                                                        const AddrMode &AM,
                                                        Type *Ty,
                                                        unsigned AS) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // requires two allocations (one for the load, one for the computation),
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // requires just one allocation, i.e., freeing allocations for other
  // operations and having fewer micro-operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(DL, AM, Ty, AS))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}

bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
  // Integer division on x86 is expensive. However, when aggressively optimizing
  // for code size, we prefer to use a div instruction, as it is usually smaller
  // than the alternative sequence.
  // The exception to this is vector division. Since x86 doesn't have vector
  // integer division, leaving the division as-is is a loss even in terms of
  // size, because it will have to be scalarized, while the alternative code
  // sequence can be performed in vector form.
  bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
  return OptSize && !VT.isVector();
}
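
// For example, under minsize an unsigned 'x / 10' keeps the divl
// instruction, whereas it would otherwise be expanded into a multiply by
// the magic constant 0xCCCCCCCD plus shifts, which is faster but larger.
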
void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (!Subtarget.is64Bit())
    return;

  // Update IsSplitCSR in X86MachineFunctionInfo.
  X86MachineFunctionInfo *AFI =
      Entry->getParent()->getInfo<X86MachineFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

void X86TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (X86::GR64RegClass.contains(*I))
      RC = &X86::GR64RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(
        Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
        "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

bool X86TargetLowering::supportSwiftError() const {
  return Subtarget.is64Bit();
}

/// Returns true if stack probing through a function call is requested.
bool X86TargetLowering::hasStackProbeSymbol(MachineFunction &MF) const {
  return !getStackProbeSymbolName(MF).empty();
}

/// Returns true if stack probing through inline assembly is requested.
bool X86TargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
  // No inline stack probe for Windows, they have their own mechanism.
  if (Subtarget.isOSWindows() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return false;

  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";

  return false;
}
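
// A function opts in to inline probing via an attribute (illustrative IR):
//   define void @f() "probe-stack"="inline-asm" { ... }
// Any other attribute value names a probe function to call instead; see
// getStackProbeSymbolName below.
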
/// Returns the name of the symbol used to emit stack probes, or the empty
/// string if not applicable.
StringRef
X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
  // Inline stack probes disable the stack probe call.
  if (hasInlineStackProbe(MF))
    return "";

  // If the function specifically requests stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();

  // Generally, if we aren't on Windows, the platform ABI does not include
  // support for stack probes, so don't emit them.
  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return "";

  // We need a stack probe to conform to the Windows ABI. Choose the right
  // symbol.
  if (Subtarget.is64Bit())
    return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
}

unsigned
X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return StackProbeSize;
}
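
// The threshold can be tuned per function (illustrative IR):
//   define void @f() "stack-probe-size"="8192" { ... }
// which overrides the default 4096-byte probe interval for @f.
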
Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  if (ML->isInnermost() &&
      ExperimentalPrefInnermostLoopAlignment.getNumOccurrences())
    return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
  return TargetLowering::getPrefLoopAlignment();
}
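
// For example, passing -x86-experimental-pref-innermost-loop-alignment=5
// aligns innermost loop headers to 1 << 5 = 32 bytes, overriding the
// alignment chosen by x86-experimental-pref-loop-alignment for those loops.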