//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUISelLowering.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUMachineFunction.h"
#include "GCNSubtarget.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#include "AMDGPUGenCallingConv.inc"
static cl::opt<bool> AMDGPUBypassSlowDiv(
    "amdgpu-bypass-slow-div",
    cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
    cl::init(true));
// Find a larger type to do a load / store of a vector with.
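// For example, a 64-bit access such as v4i16 maps to v2i32, while an access
// of 32 bits or less maps to a plain integer of the same width.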
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
  return DAG.computeKnownBits(Op).countMaxActiveBits();
}

unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
  // In order for this to be a signed 24-bit value, bit 23 must
  // be a sign bit.
  return DAG.ComputeMaxSignificantBits(Op);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::LOAD, MVT::v6f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v6f32, MVT::v6i32);

  setOperationAction(ISD::LOAD, MVT::v7f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v7f32, MVT::v7i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v3i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v3i64, MVT::v6i32);

  setOperationAction(ISD::LOAD, MVT::v4i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4i64, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v3f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v3f64, MVT::v6i32);

  setOperationAction(ISD::LOAD, MVT::v4f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f64, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v8i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8i64, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::v8f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f64, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::v16i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16i64, MVT::v32i32);

  setOperationAction(ISD::LOAD, MVT::v16f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f64, MVT::v32i32);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i64, VT,
                     Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) {
      setLoadExtAction(Op, VT, MVT::i1, Promote);
      setLoadExtAction(Op, VT, MVT::i8, Legal);
      setLoadExtAction(Op, VT, MVT::i16, Legal);
      setLoadExtAction(Op, VT, MVT::i32, Expand);
    }
  }

  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
    for (MVT MemVT :
         {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16})
      setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MemVT,
                       Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v3f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v5f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::STORE, MVT::v6f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v6f32, MVT::v6i32);

  setOperationAction(ISD::STORE, MVT::v7f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v7f32, MVT::v7i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::v32f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v3i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v3i64, MVT::v6i32);

  setOperationAction(ISD::STORE, MVT::v3f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v3f64, MVT::v6i32);

  setOperationAction(ISD::STORE, MVT::v4i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4i64, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v4f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f64, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v8i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8i64, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::v8f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f64, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::v16i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16i64, MVT::v32i32);

  setOperationAction(ISD::STORE, MVT::v16f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f64, MVT::v32i32);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
  setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
  setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
  setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
  setTruncStoreAction(MVT::v3f64, MVT::v3f32, Expand);
  setTruncStoreAction(MVT::v3f64, MVT::v3f16, Expand);

  setTruncStoreAction(MVT::v4i64, MVT::v4i32, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::v16f64, MVT::v16f32, Expand);
  setTruncStoreAction(MVT::v16f64, MVT::v16f16, Expand);
  setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v16i64, MVT::v16i1, Expand);

  setOperationAction(ISD::Constant, {MVT::i32, MVT::i64}, Legal);
  setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal);

  setOperationAction({ISD::BR_JT, ISD::BRIND}, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction({ISD::FCEIL, ISD::FEXP2, ISD::FPOW, ISD::FLOG2, ISD::FABS,
                      ISD::FFLOOR, ISD::FRINT, ISD::FTRUNC, ISD::FMINNUM,
                      ISD::FMAXNUM},
                     MVT::f32, Legal);

  setOperationAction(ISD::FROUND, {MVT::f32, MVT::f64}, Custom);

  setOperationAction({ISD::FLOG, ISD::FLOG10, ISD::FEXP}, MVT::f32, Custom);

  setOperationAction(ISD::FNEARBYINT, {MVT::f32, MVT::f64}, Custom);

  setOperationAction(ISD::FREM, {MVT::f16, MVT::f32, MVT::f64}, Custom);

  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS,
                     {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32,
                      MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32,
                      MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32},
                     Custom);
  setOperationAction(
      ISD::EXTRACT_SUBVECTOR,
      {MVT::v2f16, MVT::v2i16, MVT::v4f16, MVT::v4i16, MVT::v2f32,
       MVT::v2i32, MVT::v3f32, MVT::v3i32, MVT::v4f32, MVT::v4i32,
       MVT::v5f32, MVT::v5i32, MVT::v6f32, MVT::v6i32, MVT::v7f32,
       MVT::v7i32, MVT::v8f32, MVT::v8i32, MVT::v16f16, MVT::v16i16,
       MVT::v16f32, MVT::v16i32, MVT::v32f32, MVT::v32i32, MVT::v2f64,
       MVT::v2i64, MVT::v3f64, MVT::v3i64, MVT::v4f64, MVT::v4i64,
       MVT::v8f64, MVT::v8i64, MVT::v16f64, MVT::v16i64},
      Custom);

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, {MVT::f64, MVT::f32}, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand
    setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, VT,
                       Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);

    setOperationAction({ISD::BSWAP, ISD::CTTZ, ISD::CTLZ}, VT, Expand);

    // AMDGPU uses ADDC/SUBC/ADDE/SUBE
    setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, Legal);
  }

  // The hardware supports 32-bit FSHR, but not FSHL.
  setOperationAction(ISD::FSHR, MVT::i32, Legal);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, {MVT::i32, MVT::i64}, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::i16, Expand);

  setOperationAction({ISD::MUL, ISD::MULHU, ISD::MULHS}, MVT::i64, Expand);
  setOperationAction(
      {ISD::UINT_TO_FP, ISD::SINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
      MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i32,
                     Legal);

  setOperationAction(
      {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
      MVT::i32, Custom);

  static const MVT::SimpleValueType VectorIntTypes[] = {
      MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32};

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction({ISD::ADD, ISD::AND, ISD::FP_TO_SINT,
                        ISD::FP_TO_UINT, ISD::MUL, ISD::MULHU,
                        ISD::MULHS, ISD::OR, ISD::SHL,
                        ISD::SRA, ISD::SRL, ISD::ROTL,
                        ISD::ROTR, ISD::SUB, ISD::SINT_TO_FP,
                        ISD::UINT_TO_FP, ISD::SDIV, ISD::UDIV,
                        ISD::SREM, ISD::UREM, ISD::SMUL_LOHI,
                        ISD::UMUL_LOHI, ISD::SDIVREM, ISD::UDIVREM,
                        ISD::SELECT, ISD::VSELECT, ISD::SELECT_CC,
                        ISD::XOR, ISD::BSWAP, ISD::CTPOP,
                        ISD::CTTZ, ISD::CTLZ, ISD::VECTOR_SHUFFLE,
                        ISD::SETCC},
                       VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
      MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32};

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(
        {ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM, ISD::FADD,
         ISD::FCEIL, ISD::FCOS, ISD::FDIV, ISD::FEXP2,
         ISD::FEXP, ISD::FLOG2, ISD::FREM, ISD::FLOG,
         ISD::FLOG10, ISD::FPOW, ISD::FFLOOR, ISD::FTRUNC,
         ISD::FMUL, ISD::FMA, ISD::FRINT, ISD::FNEARBYINT,
         ISD::FSQRT, ISD::FSIN, ISD::FSUB, ISD::FNEG,
         ISD::VSELECT, ISD::SELECT_CC, ISD::FCOPYSIGN, ISD::VECTOR_SHUFFLE,
         ISD::SETCC, ISD::FCANONICALIZE},
        VT, Expand);
  }

  // This causes using an unrolled select operation rather than expansion with
  // bit operations. This is in general better, but the alternative using BFI
  // instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::SELECT, MVT::v6f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v6f32, MVT::v6i32);

  setOperationAction(ISD::SELECT, MVT::v7f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v7f32, MVT::v7i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For now,
  // we don't have a way of knowing during instruction selection if a condition
  // will be uniform and we always use vector compares. Assume we are using
  // vector compares until that is fixed.
  setHasMultipleConditionRegisters(true);

  setMinCmpXchgSizeInBits(32);
  setSupportsUnalignedAtomics(false);

  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
  // about these during lowering.
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;

  // The expansion for 64-bit division is enormous.
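  // addBypassSlowDiv(64, 32) inserts a run-time check: when both 64-bit
  // operands happen to fit in 32 bits, a much cheaper 32-bit divide is used.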
  if (AMDGPUBypassSlowDiv)
    addBypassSlowDiv(64, 32);

  setTargetDAGCombine({ISD::BITCAST, ISD::SHL,
                       ISD::SRA, ISD::SRL,
                       ISD::TRUNCATE, ISD::MUL,
                       ISD::SMUL_LOHI, ISD::UMUL_LOHI,
                       ISD::MULHU, ISD::MULHS,
                       ISD::SELECT, ISD::SELECT_CC,
                       ISD::STORE, ISD::FADD,
                       ISD::FSUB, ISD::FNEG,
                       ISD::FABS, ISD::AssertZext,
                       ISD::AssertSext, ISD::INTRINSIC_WO_CHAIN});
}

bool AMDGPUTargetLowering::mayIgnoreSignedZero(SDValue Op) const {
  if (getTargetMachine().Options.NoSignedZerosFPMath)
    return true;

  const auto Flags = Op.getNode()->getFlags();
  if (Flags.hasNoSignedZeros())
    return true;

  return false;
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FSIN:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FCANONICALIZE:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMED3:
    // TODO: handle llvm.amdgcn.fma.legacy
    return true;
  default:
    return false;
  }
}

/// \returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}

// Most FP instructions support source modifiers, but this could be refined
// slightly.
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case ISD::CopyToReg:
  case ISD::SELECT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::INLINEASM:
  case ISD::INLINEASM_BR:
  case AMDGPUISD::DIV_SCALE:
  case ISD::INTRINSIC_W_CHAIN:

  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
    case Intrinsic::amdgcn_interp_p1:
    case Intrinsic::amdgcn_interp_p2:
    case Intrinsic::amdgcn_interp_mov:
    case Intrinsic::amdgcn_interp_p1_f16:
    case Intrinsic::amdgcn_interp_p2_f16:
      return false;
    default:
      return true;
    }
  }
  default:
    return true;
  }
}

bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // it is truly free to use a source modifier in all cases. If there are
  // multiple users, and each use requires a VOP3 encoding, there will be a
  // code size increase. Try to avoid increasing code size unless we know it
  // will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit number of uses to check?
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}

EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                              ISD::NodeType ExtendKind) const {
  assert(!VT.isVector() && "only scalar expected");

  // Round to the next multiple of 32-bits.
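  // e.g. i1 and i24 round up to i32, and i40 rounds up to i64.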
  unsigned Size = VT.getSizeInBits();
  if (Size <= 32)
    return MVT::i32;
  return EVT::getIntegerVT(Context, 32 * ((Size + 31) / 32));
}

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType ExtTy,
                                                 EVT NewVT) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
    return false;

  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load or a smaller multi-dword load,
  // this is always better.
  if (NewSize >= 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  MemSDNode *MN = cast<MemSDNode>(N);
  unsigned AS = MN->getAddressSpace();
  // Do not shrink an aligned scalar load to sub-dword.
  // Scalar engine cannot do sub-dword loads.
  if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
      (AS == AMDGPUAS::CONSTANT_ADDRESS ||
       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
       (isa<LoadSDNode>(N) && AS == AMDGPUAS::GLOBAL_ADDRESS &&
        MN->isInvariant())) &&
      AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
    return false;

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // hurt anything.

  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
                                                   const SelectionDAG &DAG,
                                                   const MachineMemOperand &MMO) const {
  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
    return false;

  bool Fast = false;
  return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                        CastTy, MMO, &Fast) &&
         Fast;
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
    return false;
  }
  case ISD::LOAD:
    if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
        AMDGPUAS::CONSTANT_ADDRESS_32BIT)
      return true;
    return false;
  case AMDGPUISD::SETCC: // ballot-style instruction
    return true;
  }
  return false;
}

SDValue AMDGPUTargetLowering::getNegatedExpression(
    SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize,
    NegatibleCost &Cost, unsigned Depth) const {
  switch (Op.getOpcode()) {
  case ISD::FMA:
  case ISD::FMAD: {
    // Negating a fma is not free if it has users without source mods.
    if (!allUsesHaveSourceMods(Op.getNode()))
      return SDValue();
    break;
  }
  default:
    break;
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
                                              ForCodeSize, Cost, Depth);
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  // Report this based on the end legalized type.
  VT = VT.getScalarType();
  return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16;
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // local register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
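  // e.g. i64 -> i32 is free (it just reads the low subregister), while
  // i32 -> i16 is not, since 16 is not a multiple of 32.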

  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return CC_AMDGPU_Func;
  case CallingConv::AMDGPU_Gfx:
    return CC_SI_Gfx;
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  default:
    report_fatal_error("Unsupported calling convention for call");
  }
}

CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    llvm_unreachable("kernels should not be handled here");
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return RetCC_SI_Shader;
  case CallingConv::AMDGPU_Gfx:
    return RetCC_SI_Gfx;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original type sizes
/// from the LLVM IR Function and fix up the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments()
///
/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
/// the argument before it was split. From this, we deduce that the memory type
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
    CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const {
  const MachineFunction &MF = State.getMachineFunction();
  const Function &Fn = MF.getFunction();
  LLVMContext &Ctx = Fn.getParent()->getContext();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
  const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
  CallingConv::ID CC = Fn.getCallingConv();

  Align MaxAlign = Align(1);
  uint64_t ExplicitArgOffset = 0;
  const DataLayout &DL = Fn.getParent()->getDataLayout();

  unsigned InIndex = 0;

  for (const Argument &Arg : Fn.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *BaseArgTy = Arg.getType();
    Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
    Align Alignment = DL.getValueOrABITypeAlignment(
        IsByRef ? Arg.getParamAlign() : None, MemArgTy);
    MaxAlign = std::max(Alignment, MaxAlign);
    uint64_t AllocSize = DL.getTypeAllocSize(MemArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;
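    // e.g. an i8 argument at running offset 0 followed by an i32: the i32 is
    // aligned up to offset 4 and the running offset then advances to 8.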

    // We're basically throwing away everything passed into us and starting over
    // to get accurate in-memory offsets. The "PartOffset" is completely useless
    // to us as computed in Ins.
    //
    // We also need to figure out what type legalization is trying to do to get
    // the correct memory offsets.

    SmallVector<EVT, 16> ValueVTs;
    SmallVector<uint64_t, 16> Offsets;
    ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);

    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      uint64_t BasePartOffset = Offsets[Value];

      EVT ArgVT = ValueVTs[Value];
      EVT MemVT = ArgVT;
      MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
      unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);

      if (NumRegs == 1) {
        // This argument is not split, so the IR type is the memory type.
        if (ArgVT.isExtended()) {
          // We have an extended type, like i24, so we should just use the
          // register type.
          MemVT = RegisterVT;
        } else {
          MemVT = ArgVT;
        }
      } else if (ArgVT.isVector() && RegisterVT.isVector() &&
                 ArgVT.getScalarType() == RegisterVT.getScalarType()) {
        assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
        // We have a vector value which has been split into a vector with
        // the same scalar type, but fewer elements. This should handle
        // all the floating-point vector types.
        MemVT = RegisterVT;
      } else if (ArgVT.isVector() &&
                 ArgVT.getVectorNumElements() == NumRegs) {
        // This arg has been split so that each element is stored in a separate
        // register.
        MemVT = ArgVT.getScalarType();
      } else if (ArgVT.isExtended()) {
        // We have an extended type, like i65.
        MemVT = RegisterVT;
      } else {
        unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
        assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
        if (RegisterVT.isInteger()) {
          MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
        } else if (RegisterVT.isVector()) {
          assert(!RegisterVT.getScalarType().isFloatingPoint());
          unsigned NumElements = RegisterVT.getVectorNumElements();
          assert(MemoryBits % NumElements == 0);
          // This vector type has been split into another vector type with
          // a different elements size.
          EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                           MemoryBits / NumElements);
          MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
        } else {
          llvm_unreachable("cannot deduce memory type.");
        }
      }

      // Convert one element vectors to scalar.
      if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
        MemVT = MemVT.getScalarType();

      // Round up vec3/vec5 argument.
      if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
        assert(MemVT.getVectorNumElements() == 3 ||
               MemVT.getVectorNumElements() == 5);
        MemVT = MemVT.getPow2VectorType(State.getContext());
      } else if (!MemVT.isSimple() && !MemVT.isVector()) {
        MemVT = MemVT.getRoundIntegerType(State.getContext());
      }

      unsigned PartOffset = 0;
      for (unsigned i = 0; i != NumRegs; ++i) {
        State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
                                               BasePartOffset + PartOffset,
                                               MemVT.getSimpleVT(),
                                               CCValAssign::Full));
        PartOffset += MemVT.getStoreSize();
      }
    }
  }
}

SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  // "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}

SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
                                                  SelectionDAG &DAG,
                                                  MachineFrameInfo &MFI,
                                                  int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument corresponding to the clobbered
  // area.
  for (SDNode *U : DAG.getEntryNode().getNode()->uses()) {
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) {
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }
      }
    }
  }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
                                                 SmallVectorImpl<SDValue> &InVals,
                                                 StringRef Reason) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, Reason + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::FLOG:
    return LowerFLOG(Op, DAG, numbers::ln2f);
  case ISD::FLOG10:
    return LowerFLOG(Op, DAG, numbers::ln2f / numbers::ln10f);
  case ISD::FEXP:
    return lowerFEXP(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return LowerFP_TO_INT(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ_CTTZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
      G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
    if (!MFI->isModuleEntryFunction() &&
        !GV->getName().equals("llvm.amdgcn.module.lds")) {
      SDLoc DL(Op);
      const Function &Fn = DAG.getMachineFunction().getFunction();
      DiagnosticInfoUnsupported BadLDSDecl(
          Fn, "local memory global used by non-kernel function",
          DL.getDebugLoc(), DS_Warning);
      DAG.getContext()->diagnose(BadLDSDecl);

      // We currently don't have a way to correctly allocate LDS objects that
      // aren't directly associated with a kernel. We do force inlining of
      // functions that use local objects. However, if these dead functions are
      // not eliminated, we don't want a compile time error. Just emit a warning
      // and a trap, since there should be no callable path here.
      SDValue Trap = DAG.getNode(ISD::TRAP, DL, MVT::Other, DAG.getEntryNode());
      SDValue OutputChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                                        Trap, DAG.getRoot());
      DAG.setRoot(OutputChain);
      return DAG.getUNDEF(Op.getValueType());
    }

    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    // We ignore the initializer for now and legalize it to allow selection.
    // The initializer will anyway get errored out during assembly emission.
    unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV));
    return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
  }

  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  EVT VT = Op.getValueType();
  if (VT == MVT::v4i16 || VT == MVT::v4f16) {
    SDLoc SL(Op);
    SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
    SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));

    SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
    return DAG.getNode(ISD::BITCAST, SL, VT, BV);
  }

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  EVT SrcVT = Op.getOperand(0).getValueType();

  // For these types, we have some TableGen patterns except if the index is 1
  if (((SrcVT == MVT::v4f16 && VT == MVT::v2f16) ||
       (SrcVT == MVT::v4i16 && VT == MVT::v2i16)) &&
      Start != 1)
    return Op;

  if (((SrcVT == MVT::v8f16 && VT == MVT::v4f16) ||
       (SrcVT == MVT::v8i16 && VT == MVT::v4i16)) &&
      (Start == 0 || Start == 4))
    return Op;

  if (((SrcVT == MVT::v16f16 && VT == MVT::v8f16) ||
       (SrcVT == MVT::v16i16 && VT == MVT::v8i16)) &&
      (Start == 0 || Start == 8))
    return Op;

  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

/// Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGT:
  case ISD::SETOGE: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

// Split a vector type into two parts. The first part is a power of two vector.
// The second part is whatever is left over, and is a scalar if it would
// otherwise be a 1-vector.
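// e.g. v7i32 splits into v4i32 + v3i32, and v3f32 splits into v2f32 + f32.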
std::pair<EVT, EVT>
AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
  EVT LoVT, HiVT;
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
  LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
  HiVT = NumElts - LoNumElts == 1
             ? EltVT
             : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
  return std::make_pair(LoVT, HiVT);
}

// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
// a scalar.
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
                                  const EVT &LoVT, const EVT &HiVT,
                                  SelectionDAG &DAG) const {
  assert(LoVT.getVectorNumElements() +
                 (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
                           DAG.getVectorIdxConstant(0, DL));
  SDValue Hi = DAG.getNode(
      HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
      HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();
  SDLoc SL(Op);

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
    return DAG.getMergeValues(Ops, SL);
  }

  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
  std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
  std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);

  unsigned Size = LoMemVT.getStoreSize();
  Align BaseAlign = Load->getAlign();
  Align HiAlign = commonAlignment(BaseAlign, Size);

  SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                  Load->getChain(), BasePtr, SrcValue, LoMemVT,
                                  BaseAlign, Load->getMemOperand()->getFlags());
  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Size));
  SDValue HiLoad =
      DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
                     HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, HiAlign, Load->getMemOperand()->getFlags());

  SDValue Join;
  if (LoVT == HiVT) {
    // This is the case that the vector is power of two so was evenly split.
    Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
  } else {
    Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
                       DAG.getVectorIdxConstant(0, SL));
    Join = DAG.getNode(
        HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL,
        VT, Join, HiLoad,
        DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL));
  }

  SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                     LoLoad.getValue(1), HiLoad.getValue(1))};

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::WidenOrSplitVectorLoad(SDValue Op,
                                                     SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();
  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);
  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
  Align BaseAlign = Load->getAlign();
  unsigned NumElements = MemVT.getVectorNumElements();

  // Widen from vec3 to vec4 when the load is at least 8-byte aligned
  // or 16-byte fully dereferenceable. Otherwise, split the vector load.
  if (NumElements != 3 ||
      (BaseAlign < Align(8) &&
       !SrcValue.isDereferenceable(16, *DAG.getContext(), DAG.getDataLayout())))
    return SplitVectorLoad(Op, DAG);

  assert(NumElements == 3);

  EVT WideVT =
      EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
  EVT WideMemVT =
      EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
  SDValue WideLoad = DAG.getExtLoad(
      Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
      WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
  return DAG.getMergeValues(
      {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
                   DAG.getVectorIdxConstant(0, SL)),
       WideLoad.getValue(1)},
      SL);
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorStore(Store, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
  std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
  std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);

  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  Align BaseAlign = Store->getAlign();
  unsigned Size = LoMemVT.getStoreSize();
  Align HiAlign = commonAlignment(BaseAlign, Size);

  SDValue LoStore =
      DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
                        Store->getMemOperand()->getFlags());
  SDValue HiStore =
      DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
                        HiMemVT, HiAlign, Store->getMemOperand()->getFlags());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit signed integer.
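// Concretely: an f32 significand holds 24 bits, so any integer with at most
// 24 significant bits (at least 9 sign bits in an i32) converts to f32 and
// back without rounding; that is what the sign-bit checks below enforce.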
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
                                            bool Sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
  if (LHSSignBits < 9)
    return SDValue();

  unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
  if (RHSSignBits < 9)
    return SDValue();

  unsigned BitSize = VT.getSizeInBits();
  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = BitSize - SignBits;
  if (Sign)
    ++DivBits;

  ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (Sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
  }

  // int ia = (int)LHS;
  SDValue ia = LHS;

  // int ib = (int)RHS;
  SDValue ib = RHS;

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

  // float fr = mad(fqneg, fb, fa);
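  // Pick the multiply-add opcode: targets without v_mad_f32/v_mac_f32 fall
  // back to FMA; otherwise use FMAD when f32 denormals are disabled, or
  // FMAD_FTZ (a mad that flushes denormals to zero) when they are enabled.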
1637 unsigned OpCode = !Subtarget->hasMadMacF32Insts() ?
1638 (unsigned)ISD::FMA :
1639 !MFI->getMode().allFP32Denormals() ?
1640 (unsigned)ISD::FMAD :
1641 (unsigned)AMDGPUISD::FMAD_FTZ;
1642 SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1644 // int iq = (int)fq;
1645 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1648 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1651 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1653 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1655 // int cv = fr >= fb;
1656 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1658 // jq = (cv ? jq : 0);
1659 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1662 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1664 // Rem needs compensation; it's easier to recompute it:
1665 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1666 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1668 // Truncate to the number of bits the divide actually produces.
1669 if (Sign) {
1670 SDValue InRegSize
1671 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1672 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1673 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1674 } else {
1675 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1676 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1677 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1678 }
1680 return DAG.getMergeValues({ Div, Rem }, DL);
1683 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1684 SelectionDAG &DAG,
1685 SmallVectorImpl<SDValue> &Results) const {
1686 SDLoc DL(Op);
1687 EVT VT = Op.getValueType();
1689 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1691 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1693 SDValue One = DAG.getConstant(1, DL, HalfVT);
1694 SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1697 SDValue LHS = Op.getOperand(0);
1698 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1699 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);
1701 SDValue RHS = Op.getOperand(1);
1702 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1703 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);
1705 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1706 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1708 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1709 LHS_Lo, RHS_Lo);
1711 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1712 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1714 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1715 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1716 return;
1717 }
1719 if (isTypeLegal(MVT::i64)) {
1720 // The algorithm here is based on ideas from "Software Integer Division",
1721 // Tom Rodeheffer, August 2008.
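// Sketch of the idea: with z ~= 2^64 / d as a fixed-point reciprocal
// estimate, one Newton-Raphson step is
//
//   e = -d * z;            // error term, computed modulo 2^64
//   z = z + mulhu(z, e);   // refine the estimate
//
// The two rounds below sharpen the f32-derived seed enough that
// q = mulhu(n, z) is at most 2 below the true quotient, which the final
// selects correct.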
1723 MachineFunction &MF = DAG.getMachineFunction();
1724 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1726 // Compute denominator reciprocal.
1727 unsigned FMAD = !Subtarget->hasMadMacF32Insts() ?
1728 (unsigned)ISD::FMA :
1729 !MFI->getMode().allFP32Denormals() ?
1730 (unsigned)ISD::FMAD :
1731 (unsigned)AMDGPUISD::FMAD_FTZ;
1733 SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1734 SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1735 SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1736 DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1737 Cvt_Lo);
1738 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1739 SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1740 DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1741 SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1742 DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1743 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1744 SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1745 DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1746 Mul1);
1747 SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1748 SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1749 SDValue Rcp64 = DAG.getBitcast(VT,
1750 DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
1752 SDValue Zero64 = DAG.getConstant(0, DL, VT);
1753 SDValue One64 = DAG.getConstant(1, DL, VT);
1754 SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1755 SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1757 // First round of UNR (Unsigned integer Newton-Raphson).
1758 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1759 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1760 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1761 SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1762 Zero);
1763 SDValue Mulhi1_Hi =
1764 DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1, One);
1765 SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
1766 Mulhi1_Lo, Zero1);
1767 SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
1768 Mulhi1_Hi, Add1_Lo.getValue(1));
1769 SDValue Add1 = DAG.getBitcast(VT,
1770 DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1772 // Second round of UNR.
1773 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1774 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1775 SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1776 Zero);
1777 SDValue Mulhi2_Hi =
1778 DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2, One);
1779 SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
1780 Mulhi2_Lo, Zero1);
1781 SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Hi,
1782 Mulhi2_Hi, Add2_Lo.getValue(1));
1783 SDValue Add2 = DAG.getBitcast(VT,
1784 DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
1786 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
1788 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
1790 SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
1791 SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
1792 SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
1793 Mul3_Lo, Zero1);
1794 SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
1795 Mul3_Hi, Sub1_Lo.getValue(1));
1796 SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
1797 SDValue Sub1 = DAG.getBitcast(VT,
1798 DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1800 SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1801 SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1802 ISD::SETUGE);
1803 SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1804 ISD::SETUGE);
1805 SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1807 // TODO: Here and below portions of the code can be enclosed into if/endif.
1808 // Currently control flow is unconditional and we have 4 selects after
1809 // potential endif to substitute PHIs.
1812 SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1813 RHS_Lo, Zero1);
1814 SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1815 RHS_Hi, Sub1_Lo.getValue(1));
1816 SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1817 Zero, Sub2_Lo.getValue(1));
1818 SDValue Sub2 = DAG.getBitcast(VT,
1819 DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1821 SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1823 SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1824 ISD::SETUGE);
1825 SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1826 ISD::SETUGE);
1827 SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1830 SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1832 SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1833 RHS_Lo, Zero1);
1834 SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1835 RHS_Hi, Sub2_Lo.getValue(1));
1836 SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1837 Zero, Sub3_Lo.getValue(1));
1838 SDValue Sub3 = DAG.getBitcast(VT,
1839 DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1844 SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1845 SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1847 SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1848 SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1850 Results.push_back(Div);
1851 Results.push_back(Rem);
1852 return;
1853 }
1857 // Get speculative values.
1858 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1859 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1861 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1862 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1863 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1865 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1866 SDValue DIV_Lo = Zero;
1868 const unsigned halfBitWidth = HalfVT.getSizeInBits();
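// The loop below is classic restoring long division on the low half, one
// bit per iteration; in scalar terms (a sketch):
//
//   for (int i = 31; i >= 0; --i) {
//     rem = (rem << 1) | ((lhs_lo >> i) & 1);
//     if (rem >= rhs) { div_lo |= 1u << i; rem -= rhs; }
//   }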
1870 for (unsigned i = 0; i < halfBitWidth; ++i) {
1871 const unsigned bitPos = halfBitWidth - i - 1;
1872 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1873 // Get value of high bit
1874 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1875 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1876 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1879 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1881 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1883 SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1884 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1886 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1889 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1890 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1893 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1894 DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1895 Results.push_back(DIV);
1896 Results.push_back(REM);
1899 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1900 SelectionDAG &DAG) const {
1902 EVT VT = Op.getValueType();
1904 if (VT == MVT::i64) {
1905 SmallVector<SDValue, 2> Results;
1906 LowerUDIVREM64(Op, DAG, Results);
1907 return DAG.getMergeValues(Results, DL);
1910 if (VT == MVT::i32) {
1911 if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1912 return Res;
1913 }
1915 SDValue X = Op.getOperand(0);
1916 SDValue Y = Op.getOperand(1);
1918 // See AMDGPUCodeGenPrepare::expandDivRem32 for a description of the
1919 // algorithm used here.
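// In scalar terms the expansion below is roughly (a sketch):
//
//   z = urecip(y);             // initial estimate of 2^32 / y
//   z += mulhu(z, -y * z);     // one Newton-Raphson refinement
//   q = mulhu(x, z);           // quotient estimate, at most 2 too small
//   r = x - q * y;
//   if (r >= y) { ++q; r -= y; }   // first correction
//   if (r >= y) { ++q; r -= y; }   // second correction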
1921 // Initial estimate of inv(y).
1922 SDValue Z = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Y);
1924 // One round of UNR.
1925 SDValue NegY = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Y);
1926 SDValue NegYZ = DAG.getNode(ISD::MUL, DL, VT, NegY, Z);
1927 Z = DAG.getNode(ISD::ADD, DL, VT, Z,
1928 DAG.getNode(ISD::MULHU, DL, VT, Z, NegYZ));
1930 // Quotient/remainder estimate.
1931 SDValue Q = DAG.getNode(ISD::MULHU, DL, VT, X, Z);
1932 SDValue R =
1933 DAG.getNode(ISD::SUB, DL, VT, X, DAG.getNode(ISD::MUL, DL, VT, Q, Y));
1935 // First quotient/remainder refinement.
1936 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1937 SDValue One = DAG.getConstant(1, DL, VT);
1938 SDValue Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
1939 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
1940 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
1941 R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
1942 DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
1944 // Second quotient/remainder refinement.
1945 Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
1946 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
1947 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
1948 R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
1949 DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
1951 return DAG.getMergeValues({Q, R}, DL);
1954 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
1955 SelectionDAG &DAG) const {
1957 EVT VT = Op.getValueType();
1959 SDValue LHS = Op.getOperand(0);
1960 SDValue RHS = Op.getOperand(1);
1962 SDValue Zero = DAG.getConstant(0, DL, VT);
1963 SDValue NegOne = DAG.getConstant(-1, DL, VT);
1965 if (VT == MVT::i32) {
1966 if (SDValue Res = LowerDIVREM24(Op, DAG, true))
1967 return Res;
1968 }
1970 if (VT == MVT::i64 &&
1971 DAG.ComputeNumSignBits(LHS) > 32 &&
1972 DAG.ComputeNumSignBits(RHS) > 32) {
1973 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1976 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1977 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1978 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1979 LHS_Lo, RHS_Lo);
1980 SDValue Res[2] = {
1981 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
1982 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
1983 };
1984 return DAG.getMergeValues(Res, DL);
1985 }
1987 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
1988 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
1989 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
1990 SDValue RSign = LHSign; // Remainder sign is the same as LHS
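// The standard branchless identities are used below, where each sign mask
// s is either 0 or -1 (all ones):
//
//   abs(x)          == (x + s) ^ s,  with s = x >> 63
//   negate-if(s, u) == (u ^ s) - s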
1992 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
1993 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
1995 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
1996 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
1998 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
1999 SDValue Rem = Div.getValue(1);
2001 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2002 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2004 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2005 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2007 SDValue Res[2] = {
2008 Div,
2009 Rem
2010 };
2011 return DAG.getMergeValues(Res, DL);
2014 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
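// e.g. frem(5.5, 2.0): fdiv gives 2.75, ftrunc gives 2.0, and
// fma(-2.0, 2.0, 5.5) leaves the remainder 1.5.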
2015 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2017 EVT VT = Op.getValueType();
2018 auto Flags = Op->getFlags();
2019 SDValue X = Op.getOperand(0);
2020 SDValue Y = Op.getOperand(1);
2022 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y, Flags);
2023 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div, Flags);
2024 SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Trunc, Flags);
2025 // TODO: For f32 use FMAD instead if !hasFastFMA32?
2026 return DAG.getNode(ISD::FMA, SL, VT, Neg, Y, X, Flags);
2029 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2031 SDValue Src = Op.getOperand(0);
2033 // result = trunc(src)
2034 // if (src > 0.0 && src != result)
2035 // result += 1.0
2037 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2039 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2040 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2043 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2045 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2046 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2047 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2049 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2050 // TODO: Should this propagate fast-math-flags?
2051 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2054 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2055 SelectionDAG &DAG) {
2056 const unsigned FractBits = 52;
2057 const unsigned ExpBits = 11;
2059 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2060 Hi,
2061 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2062 DAG.getConstant(ExpBits, SL, MVT::i32));
2063 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2064 DAG.getConstant(1023, SL, MVT::i32));
2066 return Exp;
2067 }
2069 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2071 SDValue Src = Op.getOperand(0);
2073 assert(Op.getValueType() == MVT::f64);
2075 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2077 // Extract the upper half, since this is where we will find the sign and
2078 // exponent.
2079 SDValue Hi = getHiHalf64(Src, DAG);
2081 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2083 const unsigned FractBits = 52;
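// With the unbiased exponent Exp in hand, truncation is pure bit math on
// the i64 pattern (a sketch of the selects built below):
//   Exp < 0   -> |Src| < 1.0, so the result is just the sign bit;
//   Exp > 51  -> no fraction bits remain, Src is already integral;
//   otherwise -> clear the low (52 - Exp) fraction bits.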
2085 // Extract the sign bit.
2086 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2087 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2089 // Extend back to 64-bits.
2090 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2091 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2093 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2094 const SDValue FractMask
2095 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2097 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2098 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2099 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2102 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2104 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2106 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2107 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2109 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2110 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2112 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2115 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2117 SDValue Src = Op.getOperand(0);
2119 assert(Op.getValueType() == MVT::f64);
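// The trick used below: every f64 in [2^52, 2^53) is an exact integer, so
// for |Src| < 2^52, (Src + copysign(2^52, Src)) - copysign(2^52, Src)
// rounds Src to the nearest integer in the current (nearest-even) mode.
// Inputs already large enough to be integral pass through the final
// select unchanged.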
2121 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2122 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2123 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
2125 // TODO: Should this propagate fast-math-flags?
2127 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2128 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2130 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2132 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2133 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2136 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2137 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2139 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2142 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2143 // FNEARBYINT and FRINT are the same, except in their handling of FP
2144 // exceptions. Those aren't really meaningful for us, and OpenCL only has
2145 // rint, so just treat them as equivalent.
2146 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2149 // XXX - May require not supporting f32 denormals?
2151 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2152 // compare and vselect end up producing worse code than scalarizing the whole
2153 // operation.
2154 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2156 SDValue X = Op.getOperand(0);
2157 EVT VT = Op.getValueType();
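// Computed as T + Sel, where T = trunc(X) and Sel is copysign(1.0, X)
// when |X - T| >= 0.5 and 0.0 otherwise.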
2159 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2161 // TODO: Should this propagate fast-math-flags?
2163 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2165 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2167 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2168 const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2169 const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2171 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2174 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2176 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2178 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2180 return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2183 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2185 SDValue Src = Op.getOperand(0);
2187 // result = trunc(src);
2188 // if (src < 0.0 && src != result)
2189 // result += -1.0
2191 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2193 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2194 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2197 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2199 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2200 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2201 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2203 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2204 // TODO: Should this propagate fast-math-flags?
2205 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2208 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2209 double Log2BaseInverted) const {
2210 EVT VT = Op.getValueType();
2213 SDValue Operand = Op.getOperand(0);
2214 SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2215 SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2217 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2218 }
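// The identity used above is log_b(x) = log2(x) / log2(b); e.g. for log10,
// Log2BaseInverted is 1 / log2(10) ~= 0.30103.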
2220 // exp2(M_LOG2E_F * f);
2221 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2222 EVT VT = Op.getValueType();
2224 SDValue Src = Op.getOperand(0);
2226 const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT);
2227 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
2228 return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
2231 static bool isCtlzOpc(unsigned Opc) {
2232 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2235 static bool isCttzOpc(unsigned Opc) {
2236 return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2239 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2241 SDValue Src = Op.getOperand(0);
2243 assert(isCtlzOpc(Op.getOpcode()) || isCttzOpc(Op.getOpcode()));
2244 bool Ctlz = isCtlzOpc(Op.getOpcode());
2245 unsigned NewOpc = Ctlz ? AMDGPUISD::FFBH_U32 : AMDGPUISD::FFBL_B32;
2247 bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF ||
2248 Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF;
2250 if (Src.getValueType() == MVT::i32) {
2251 // (ctlz src) -> (umin (ffbh src), 32)
2252 // (cttz src) -> (umin (ffbl src), 32)
2253 // (ctlz_zero_undef src) -> (ffbh src)
2254 // (cttz_zero_undef src) -> (ffbl src)
2255 SDValue NewOpr = DAG.getNode(NewOpc, SL, MVT::i32, Src);
2256 if (!ZeroUndef) {
2257 const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32);
2258 NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const32);
2259 }
2260 return NewOpr;
2261 }
2264 std::tie(Lo, Hi) = split64BitValue(Src, DAG);
2266 SDValue OprLo = DAG.getNode(NewOpc, SL, MVT::i32, Lo);
2267 SDValue OprHi = DAG.getNode(NewOpc, SL, MVT::i32, Hi);
2269 // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64)
2270 // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64)
2271 // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32))
2272 // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo))
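// e.g. ctlz(i64 0x80000000): Hi = 0, Lo = 0x80000000, so ffbh(Hi) is all
// ones (~0u), uaddsat(ffbh(Lo), 32) = 0 + 32 = 32, and
// umin3(~0u, 32, 64) = 32, as expected.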
2274 unsigned AddOpc = ZeroUndef ? ISD::ADD : ISD::UADDSAT;
2275 const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32);
2276 if (Ctlz)
2277 OprLo = DAG.getNode(AddOpc, SL, MVT::i32, OprLo, Const32);
2278 else
2279 OprHi = DAG.getNode(AddOpc, SL, MVT::i32, OprHi, Const32);
2281 SDValue NewOpr;
2282 NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, OprLo, OprHi);
2283 if (!ZeroUndef) {
2284 const SDValue Const64 = DAG.getConstant(64, SL, MVT::i32);
2285 NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const64);
2286 }
2288 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2291 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2292 bool Signed) const {
2293 // The regular method of converting a 64-bit integer to a float roughly consists of
2294 // 2 steps: normalization and rounding. In fact, after normalization, the
2295 // conversion from a 64-bit integer to a float is essentially the same as the
2296 // one from a 32-bit integer. The only difference is that it has more
2297 // trailing bits to be rounded. To leverage the native 32-bit conversion, a
2298 // 64-bit integer can be preprocessed to fit into a 32-bit integer and then
2299 // converted into the correct float value. The basic steps for the unsigned
2300 // conversion are illustrated in the following pseudo code:
2302 // f32 uitofp(i64 u) {
2303 // i32 hi, lo = split(u);
2304 // // Only count the leading zeros in hi as we have native support of the
2305 // // conversion from i32 to f32. If hi is all 0s, the conversion is
2306 // // reduced to a 32-bit one automatically.
2307 // i32 shamt = clz(hi); // Return 32 if hi is all 0s.
2308 // u <<= shamt;
2309 // hi, lo = split(u);
2310 // hi |= (lo != 0) ? 1 : 0; // Adjust rounding bit in hi based on lo.
2311 // // convert it as a 32-bit integer and scale the result back.
2312 // return uitofp(hi) * 2^(32 - shamt);
2315 // The signed one follows the same principle but uses 'ffbh_i32' to count its
2316 // sign bits instead. If 'ffbh_i32' is not available, its absolute value is
2317 // converted instead, followed by negation based on its sign bit.
2320 SDValue Src = Op.getOperand(0);
2323 std::tie(Lo, Hi) = split64BitValue(Src, DAG);
2324 SDValue Sign;
2325 SDValue ShAmt;
2326 if (Signed && Subtarget->isGCN()) {
2327 // We also need to consider the sign bit in Lo if Hi has just sign bits,
2328 // i.e. Hi is 0 or -1. However, that only needs to take the MSB into
2329 // account. That is, the maximal shift is
2330 // - 32 if Lo and Hi have opposite signs;
2331 // - 33 if Lo and Hi have the same sign.
2333 // Or, MaxShAmt = 33 + OppositeSign, where
2335 // OppositeSign is defined as ((Lo ^ Hi) >> 31), which is
2336 // - -1 if Lo and Hi have opposite signs; and
2337 // -  0 otherwise.
2339 // All in all, ShAmt is calculated as
2341 // umin(sffbh(Hi), 33 + (Lo^Hi)>>31) - 1.
2343 // or
2345 // umin(sffbh(Hi) - 1, 32 + (Lo^Hi)>>31).
2347 // to reduce the critical path.
2348 SDValue OppositeSign = DAG.getNode(
2349 ISD::SRA, SL, MVT::i32, DAG.getNode(ISD::XOR, SL, MVT::i32, Lo, Hi),
2350 DAG.getConstant(31, SL, MVT::i32));
2351 SDValue MaxShAmt =
2352 DAG.getNode(ISD::ADD, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
2353 OppositeSign);
2354 // Count the leading sign bits.
2355 ShAmt = DAG.getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32, Hi);
2356 // Different from unsigned conversion, the shift should be one bit less to
2357 // preserve the sign bit.
2358 ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, ShAmt,
2359 DAG.getConstant(1, SL, MVT::i32));
2360 ShAmt = DAG.getNode(ISD::UMIN, SL, MVT::i32, ShAmt, MaxShAmt);
2361 } else {
2363 // Without 'ffbh_i32', only leading zeros could be counted. Take the
2364 // absolute value first.
2365 Sign = DAG.getNode(ISD::SRA, SL, MVT::i64, Src,
2366 DAG.getConstant(63, SL, MVT::i64));
2367 SDValue Abs =
2368 DAG.getNode(ISD::XOR, SL, MVT::i64,
2369 DAG.getNode(ISD::ADD, SL, MVT::i64, Src, Sign), Sign);
2370 std::tie(Lo, Hi) = split64BitValue(Abs, DAG);
2372 // Count the leading zeros.
2373 ShAmt = DAG.getNode(ISD::CTLZ, SL, MVT::i32, Hi);
2374 // The shift amount for signed integers is [0, 32].
2375 }
2376 // Normalize the given 64-bit integer.
2377 SDValue Norm = DAG.getNode(ISD::SHL, SL, MVT::i64, Src, ShAmt);
2379 std::tie(Lo, Hi) = split64BitValue(Norm, DAG);
2380 // Calculate the adjust bit for rounding.
2381 // (lo != 0) ? 1 : 0 => (lo >= 1) ? 1 : 0 => umin(1, lo)
2382 SDValue Adjust = DAG.getNode(ISD::UMIN, SL, MVT::i32,
2383 DAG.getConstant(1, SL, MVT::i32), Lo);
2384 // Get the 32-bit normalized integer.
2385 Norm = DAG.getNode(ISD::OR, SL, MVT::i32, Hi, Adjust);
2386 // Convert the normalized 32-bit integer into f32.
2387 unsigned Opc =
2388 (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
2389 SDValue FVal = DAG.getNode(Opc, SL, MVT::f32, Norm);
2391 // Finally, need to scale back the converted floating number as the original
2392 // 64-bit integer is converted as a 32-bit one.
2393 ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
2394 ShAmt);
2395 // On GCN, use LDEXP directly.
2396 if (Subtarget->isGCN())
2397 return DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f32, FVal, ShAmt);
2399 // Otherwise, align 'ShAmt' to the exponent part and add it into the exponent
2400 // part directly to emulate the multiplication of 2^ShAmt. That 8-bit
2401 // exponent is enough to avoid overflowing into the sign bit.
2402 SDValue Exp = DAG.getNode(ISD::SHL, SL, MVT::i32, ShAmt,
2403 DAG.getConstant(23, SL, MVT::i32));
2404 SDValue IVal =
2405 DAG.getNode(ISD::ADD, SL, MVT::i32,
2406 DAG.getNode(ISD::BITCAST, SL, MVT::i32, FVal), Exp);
2407 if (Signed) {
2408 // Set the sign bit.
2409 Sign = DAG.getNode(ISD::SHL, SL, MVT::i32,
2410 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Sign),
2411 DAG.getConstant(31, SL, MVT::i32));
2412 IVal = DAG.getNode(ISD::OR, SL, MVT::i32, IVal, Sign);
2413 }
2414 return DAG.getNode(ISD::BITCAST, SL, MVT::f32, IVal);
2417 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2418 bool Signed) const {
2420 SDValue Src = Op.getOperand(0);
2423 std::tie(Lo, Hi) = split64BitValue(Src, DAG);
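// The conversion is exact piecewise:
//   (f64)x == ldexp((f64)hi_32(x), 32) + (f64)lo_32(x)
// where only the high half carries the signedness and the low half is
// always treated as unsigned.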
2425 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2426 SL, MVT::f64, Hi);
2428 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2430 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2431 DAG.getConstant(32, SL, MVT::i32));
2432 // TODO: Should this propagate fast-math-flags?
2433 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2436 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2437 SelectionDAG &DAG) const {
2438 // TODO: Factor out code common with LowerSINT_TO_FP.
2439 EVT DestVT = Op.getValueType();
2440 SDValue Src = Op.getOperand(0);
2441 EVT SrcVT = Src.getValueType();
2443 if (SrcVT == MVT::i16) {
2444 if (DestVT == MVT::f16)
2445 return Op;
2447 SDLoc DL(Op);
2448 // Promote src to i32
2449 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src);
2450 return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext);
2453 assert(SrcVT == MVT::i64 && "operation should be legal");
2455 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2456 SDLoc DL(Op);
2458 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2459 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2460 SDValue FPRound =
2461 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2463 return FPRound;
2464 }
2466 if (DestVT == MVT::f32)
2467 return LowerINT_TO_FP32(Op, DAG, false);
2469 assert(DestVT == MVT::f64);
2470 return LowerINT_TO_FP64(Op, DAG, false);
2473 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2474 SelectionDAG &DAG) const {
2475 EVT DestVT = Op.getValueType();
2477 SDValue Src = Op.getOperand(0);
2478 EVT SrcVT = Src.getValueType();
2480 if (SrcVT == MVT::i16) {
2481 if (DestVT == MVT::f16)
2482 return Op;
2484 SDLoc DL(Op);
2485 // Promote src to i32
2486 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src);
2487 return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext);
2490 assert(SrcVT == MVT::i64 && "operation should be legal");
2492 // TODO: Factor out code common with LowerUINT_TO_FP.
2494 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2495 SDLoc DL(Op);
2496 SDValue Src = Op.getOperand(0);
2498 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2499 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2500 SDValue FPRound =
2501 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2503 return FPRound;
2504 }
2506 if (DestVT == MVT::f32)
2507 return LowerINT_TO_FP32(Op, DAG, true);
2509 assert(DestVT == MVT::f64);
2510 return LowerINT_TO_FP64(Op, DAG, true);
2513 SDValue AMDGPUTargetLowering::LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG,
2514 bool Signed) const {
2517 SDValue Src = Op.getOperand(0);
2518 EVT SrcVT = Src.getValueType();
2520 assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
2522 // The basic idea of converting a floating point number into a pair of 32-bit
2523 // integers is illustrated as follows:
2525 // tf := trunc(val);
2526 // hif := floor(tf * 2^-32);
2527 // lof := tf - hif * 2^32; // lof is always positive due to floor.
2528 // hi := fptoi(hif);
2529 // lo := fptoi(lof);
2531 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, SrcVT, Src);
2532 SDValue Sign;
2533 if (Signed && SrcVT == MVT::f32) {
2534 // However, a 32-bit floating point number has only 23 bits mantissa and
2535 // it's not enough to hold all the significant bits of `lof` if val is
2536 // negative. To avoid the loss of precision, we need to take the absolute
2537 // value after truncating and flip the result back based on the original
2538 // sign.
2539 Sign = DAG.getNode(ISD::SRA, SL, MVT::i32,
2540 DAG.getNode(ISD::BITCAST, SL, MVT::i32, Trunc),
2541 DAG.getConstant(31, SL, MVT::i32));
2542 Trunc = DAG.getNode(ISD::FABS, SL, SrcVT, Trunc);
2543 }
2545 SDValue K0, K1;
2546 if (SrcVT == MVT::f64) {
2547 K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(/*2^-32*/ 0x3df0000000000000)),
2548 SL, SrcVT);
2549 K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(/*-2^32*/ 0xc1f0000000000000)),
2550 SL, SrcVT);
2551 } else {
2552 K0 = DAG.getConstantFP(BitsToFloat(UINT32_C(/*2^-32*/ 0x2f800000)), SL,
2553 SrcVT);
2554 K1 = DAG.getConstantFP(BitsToFloat(UINT32_C(/*-2^32*/ 0xcf800000)), SL,
2555 SrcVT);
2556 }
2557 // TODO: Should this propagate fast-math-flags?
2558 SDValue Mul = DAG.getNode(ISD::FMUL, SL, SrcVT, Trunc, K0);
2560 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, SrcVT, Mul);
2562 SDValue Fma = DAG.getNode(ISD::FMA, SL, SrcVT, FloorMul, K1, Trunc);
2564 SDValue Hi = DAG.getNode((Signed && SrcVT == MVT::f64) ? ISD::FP_TO_SINT
2565 : ISD::FP_TO_UINT,
2566 SL, MVT::i32, FloorMul);
2567 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2569 SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
2570 DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}));
2572 if (Signed && SrcVT == MVT::f32) {
2574 // Flip the result based on the signedness, which is either all 0s or all 1s.
2575 Sign = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
2576 DAG.getBuildVector(MVT::v2i32, SL, {Sign, Sign}));
2577 // r := xor(r, sign) - sign;
2578 Result =
2579 DAG.getNode(ISD::SUB, SL, MVT::i64,
2580 DAG.getNode(ISD::XOR, SL, MVT::i64, Result, Sign), Sign);
2581 }
2583 return Result;
2584 }
2586 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2588 SDValue N0 = Op.getOperand(0);
2590 // Convert to target node to get known bits
2591 if (N0.getValueType() == MVT::f32)
2592 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2594 if (getTargetMachine().Options.UnsafeFPMath) {
2595 // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2596 return SDValue();
2597 }
2599 assert(N0.getSimpleValueType() == MVT::f64);
2601 // f64 -> f16 conversion using round-to-nearest-even rounding mode.
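// In outline, the bit manipulation below proceeds as follows (a sketch):
//   1. Split the f64 into sign, 11-bit exponent E, and mantissa.
//   2. Rebias E by (15 - 1023) and keep the top mantissa bits, OR'ing a
//      sticky bit in from the discarded low bits.
//   3. For E < 1, shift into the f16 denormal range (B = clamp(1-E, 0, 13)),
//      again preserving a sticky bit.
//   4. Round to nearest even on the two guard bits, clamp overflow to
//      infinity (0x7c00), and re-attach the sign.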
2602 const unsigned ExpMask = 0x7ff;
2603 const unsigned ExpBiasf64 = 1023;
2604 const unsigned ExpBiasf16 = 15;
2605 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2606 SDValue One = DAG.getConstant(1, DL, MVT::i32);
2607 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2608 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2609 DAG.getConstant(32, DL, MVT::i64));
2610 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2611 U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2612 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2613 DAG.getConstant(20, DL, MVT::i64));
2614 E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2615 DAG.getConstant(ExpMask, DL, MVT::i32));
2616 // Subtract the fp64 exponent bias (1023) to get the real exponent and
2617 // add the f16 bias (15) to get the biased exponent for the f16 format.
2618 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2619 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2621 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2622 DAG.getConstant(8, DL, MVT::i32));
2623 M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2624 DAG.getConstant(0xffe, DL, MVT::i32));
2626 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2627 DAG.getConstant(0x1ff, DL, MVT::i32));
2628 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2630 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2631 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2633 // (M != 0 ? 0x0200 : 0) | 0x7c00;
2634 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2635 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2636 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2638 // N = M | (E << 12);
2639 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2640 DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2641 DAG.getConstant(12, DL, MVT::i32)));
2643 // B = clamp(1-E, 0, 13);
2644 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2645 One, E);
2646 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2647 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2648 DAG.getConstant(13, DL, MVT::i32));
2650 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2651 DAG.getConstant(0x1000, DL, MVT::i32));
2653 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2654 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2655 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2656 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2658 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2659 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2660 DAG.getConstant(0x7, DL, MVT::i32));
2661 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2662 DAG.getConstant(2, DL, MVT::i32));
2663 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2664 One, Zero, ISD::SETEQ);
2665 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2666 One, Zero, ISD::SETGT);
2667 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2668 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2670 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2671 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2672 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2673 I, V, ISD::SETEQ);
2675 // Extract the sign bit.
2676 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2677 DAG.getConstant(16, DL, MVT::i32));
2678 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2679 DAG.getConstant(0x8000, DL, MVT::i32));
2681 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2682 return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2685 SDValue AMDGPUTargetLowering::LowerFP_TO_INT(SDValue Op,
2686 SelectionDAG &DAG) const {
2687 SDValue Src = Op.getOperand(0);
2688 unsigned OpOpcode = Op.getOpcode();
2689 EVT SrcVT = Src.getValueType();
2690 EVT DestVT = Op.getValueType();
2692 // Will be selected natively
2693 if (SrcVT == MVT::f16 && DestVT == MVT::i16)
2696 // Promote i16 to i32
2697 if (DestVT == MVT::i16 && (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
2700 SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
2701 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToInt32);
2704 if (SrcVT == MVT::f16 ||
2705 (SrcVT == MVT::f32 && Src.getOpcode() == ISD::FP16_TO_FP)) {
2706 SDLoc DL(Op);
2708 SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
2709 unsigned Ext =
2710 OpOpcode == ISD::FP_TO_SINT ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2711 return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
2714 if (DestVT == MVT::i64 && (SrcVT == MVT::f32 || SrcVT == MVT::f64))
2715 return LowerFP_TO_INT64(Op, DAG, OpOpcode == ISD::FP_TO_SINT);
2720 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2721 SelectionDAG &DAG) const {
2722 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2723 MVT VT = Op.getSimpleValueType();
2724 MVT ScalarVT = VT.getScalarType();
2726 assert(VT.isVector());
2728 SDValue Src = Op.getOperand(0);
2731 // TODO: Don't scalarize on Evergreen?
2732 unsigned NElts = VT.getVectorNumElements();
2733 SmallVector<SDValue, 8> Args;
2734 DAG.ExtractVectorElements(Src, Args, 0, NElts);
2736 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2737 for (unsigned I = 0; I < NElts; ++I)
2738 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2740 return DAG.getBuildVector(VT, DL, Args);
2743 //===----------------------------------------------------------------------===//
2744 // Custom DAG optimizations
2745 //===----------------------------------------------------------------------===//
2747 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2748 return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
2751 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2752 EVT VT = Op.getValueType();
2753 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2754 // as unsigned 24-bit values.
2755 AMDGPUTargetLowering::numBitsSigned(Op, DAG) <= 24;
2758 static SDValue simplifyMul24(SDNode *Node24,
2759 TargetLowering::DAGCombinerInfo &DCI) {
2760 SelectionDAG &DAG = DCI.DAG;
2761 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2762 bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
2764 SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
2765 SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
2766 unsigned NewOpcode = Node24->getOpcode();
2767 if (IsIntrin) {
2768 unsigned IID = cast<ConstantSDNode>(Node24->getOperand(0))->getZExtValue();
2769 switch (IID) {
2770 case Intrinsic::amdgcn_mul_i24:
2771 NewOpcode = AMDGPUISD::MUL_I24;
2772 break;
2773 case Intrinsic::amdgcn_mul_u24:
2774 NewOpcode = AMDGPUISD::MUL_U24;
2775 break;
2776 case Intrinsic::amdgcn_mulhi_i24:
2777 NewOpcode = AMDGPUISD::MULHI_I24;
2778 break;
2779 case Intrinsic::amdgcn_mulhi_u24:
2780 NewOpcode = AMDGPUISD::MULHI_U24;
2781 break;
2782 default:
2783 llvm_unreachable("Expected 24-bit mul intrinsic");
2784 }
2785 }
2787 APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
2789 // First try to simplify using SimplifyMultipleUseDemandedBits which allows
2790 // the operands to have other uses, but will only perform simplifications that
2791 // involve bypassing some nodes for this user.
2792 SDValue DemandedLHS = TLI.SimplifyMultipleUseDemandedBits(LHS, Demanded, DAG);
2793 SDValue DemandedRHS = TLI.SimplifyMultipleUseDemandedBits(RHS, Demanded, DAG);
2794 if (DemandedLHS || DemandedRHS)
2795 return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
2796 DemandedLHS ? DemandedLHS : LHS,
2797 DemandedRHS ? DemandedRHS : RHS);
2799 // Now try SimplifyDemandedBits which can simplify the nodes used by our
2800 // operands if this node is the only user.
2801 if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
2802 return SDValue(Node24, 0);
2803 if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
2804 return SDValue(Node24, 0);
2809 template <typename IntTy>
2810 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2811 uint32_t Width, const SDLoc &DL) {
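// Constant-folds a bitfield extract; e.g. Src0 = 0xABCD1234, Offset = 8,
// Width = 8 yields 0x12 (sign- or zero-extended according to IntTy).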
2812 if (Width + Offset < 32) {
2813 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2814 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2815 return DAG.getConstant(Result, DL, MVT::i32);
2818 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2821 static bool hasVolatileUser(SDNode *Val) {
2822 for (SDNode *U : Val->uses()) {
2823 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2824 if (M->isVolatile())
2832 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2833 // i32 vectors are the canonical memory type.
2834 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2835 return false;
2837 if (!VT.isByteSized())
2838 return false;
2840 unsigned Size = VT.getStoreSize();
2842 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2843 return false;
2845 if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2846 return false;
2848 return true;
2849 }
2851 // Replace a load of an illegal type with a load of a bitcast to a friendlier
2852 // type.
2853 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2854 DAGCombinerInfo &DCI) const {
2855 if (!DCI.isBeforeLegalize())
2858 LoadSDNode *LN = cast<LoadSDNode>(N);
2859 if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2863 SelectionDAG &DAG = DCI.DAG;
2864 EVT VT = LN->getMemoryVT();
2866 unsigned Size = VT.getStoreSize();
2867 Align Alignment = LN->getAlign();
2868 if (Alignment < Size && isTypeLegal(VT)) {
2870 unsigned AS = LN->getAddressSpace();
2872 // Expand unaligned loads earlier than legalization. Due to visitation order
2873 // problems during legalization, the emitted instructions to pack and unpack
2874 // the bytes again are not eliminated in the case of an unaligned copy.
2875 if (!allowsMisalignedMemoryAccesses(
2876 VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
2878 return SplitVectorLoad(SDValue(LN, 0), DAG);
2881 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2883 return DAG.getMergeValues(Ops, SDLoc(N));
2890 if (!shouldCombineMemoryType(VT))
2893 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2896 = DAG.getLoad(NewVT, SL, LN->getChain(),
2897 LN->getBasePtr(), LN->getMemOperand());
2899 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2900 DCI.CombineTo(N, BC, NewLoad.getValue(1));
2901 return SDValue(N, 0);
2904 // Replace a store of an illegal type with a store of a bitcast to a friendlier
2905 // type.
2906 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2907 DAGCombinerInfo &DCI) const {
2908 if (!DCI.isBeforeLegalize())
2911 StoreSDNode *SN = cast<StoreSDNode>(N);
2912 if (!SN->isSimple() || !ISD::isNormalStore(SN))
2915 EVT VT = SN->getMemoryVT();
2916 unsigned Size = VT.getStoreSize();
2919 SelectionDAG &DAG = DCI.DAG;
2920 Align Alignment = SN->getAlign();
2921 if (Alignment < Size && isTypeLegal(VT)) {
2923 unsigned AS = SN->getAddressSpace();
2925 // Expand unaligned stores earlier than legalization. Due to visitation
2926 // order problems during legalization, the emitted instructions to pack and
2927 // unpack the bytes again are not eliminated in the case of an unaligned
2929 if (!allowsMisalignedMemoryAccesses(
2930 VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
2932 return SplitVectorStore(SDValue(SN, 0), DAG);
2934 return expandUnalignedStore(SN, DAG);
2941 if (!shouldCombineMemoryType(VT))
2944 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2945 SDValue Val = SN->getValue();
2947 //DCI.AddToWorklist(Val.getNode());
2949 bool OtherUses = !Val.hasOneUse();
2950 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2952 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2953 DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2956 return DAG.getStore(SN->getChain(), SL, CastVal,
2957 SN->getBasePtr(), SN->getMemOperand());
2960 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
2961 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
2962 // difference.
2963 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
2964 DAGCombinerInfo &DCI) const {
2965 SelectionDAG &DAG = DCI.DAG;
2966 SDValue N0 = N->getOperand(0);
2968 // (vt2 (assertzext (truncate vt0:x), vt1)) ->
2969 // (vt2 (truncate (assertzext vt0:x, vt1)))
2970 if (N0.getOpcode() == ISD::TRUNCATE) {
2971 SDValue N1 = N->getOperand(1);
2972 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
2975 SDValue Src = N0.getOperand(0);
2976 EVT SrcVT = Src.getValueType();
2977 if (SrcVT.bitsGE(ExtVT)) {
2978 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
2979 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
2986 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
2987 SDNode *N, DAGCombinerInfo &DCI) const {
2988 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2990 case Intrinsic::amdgcn_mul_i24:
2991 case Intrinsic::amdgcn_mul_u24:
2992 case Intrinsic::amdgcn_mulhi_i24:
2993 case Intrinsic::amdgcn_mulhi_u24:
2994 return simplifyMul24(N, DCI);
2995 case Intrinsic::amdgcn_fract:
2996 case Intrinsic::amdgcn_rsq:
2997 case Intrinsic::amdgcn_rcp_legacy:
2998 case Intrinsic::amdgcn_rsq_legacy:
2999 case Intrinsic::amdgcn_rsq_clamp:
3000 case Intrinsic::amdgcn_ldexp: {
3001 // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
3002 SDValue Src = N->getOperand(1);
3003 return Src.isUndef() ? Src : SDValue();
3010 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3011 /// binary operation \p Opc to it with the corresponding constant operands.
3012 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3013 DAGCombinerInfo &DCI, const SDLoc &SL,
3014 unsigned Opc, SDValue LHS,
3015 uint32_t ValLo, uint32_t ValHi) const {
3016 SelectionDAG &DAG = DCI.DAG;
3018 std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3020 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3021 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3023 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3024 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3026 // Re-visit the ands. It's possible we eliminated one of them and it could
3027 // simplify the vector.
3028 DCI.AddToWorklist(Lo.getNode());
3029 DCI.AddToWorklist(Hi.getNode());
3031 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3032 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3035 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3036 DAGCombinerInfo &DCI) const {
3037 EVT VT = N->getValueType(0);
3039 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3040 if (!RHS)
3041 return SDValue();
3043 SDValue LHS = N->getOperand(0);
3044 unsigned RHSVal = RHS->getZExtValue();
3049 SelectionDAG &DAG = DCI.DAG;
3051 switch (LHS->getOpcode()) {
3054 case ISD::ZERO_EXTEND:
3055 case ISD::SIGN_EXTEND:
3056 case ISD::ANY_EXTEND: {
3057 SDValue X = LHS->getOperand(0);
3059 if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3060 isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3061 // Prefer build_vector as the canonical form if packed types are legal.
3062 // (shl ([asz]ext i16:x), 16) -> (build_vector 0, x)
3063 SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3064 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3065 return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3068 // shl (ext x) => zext (shl x), if shift does not overflow int
3069 if (VT != MVT::i64)
3070 break;
3071 KnownBits Known = DAG.computeKnownBits(X);
3072 unsigned LZ = Known.countMinLeadingZeros();
3073 if (LZ < RHSVal)
3074 break;
3075 EVT XVT = X.getValueType();
3076 SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3077 return DAG.getZExtOrTrunc(Shl, SL, VT);
3078 }
3079 }
3081 if (VT != MVT::i64)
3082 return SDValue();
3084 // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))
3086 // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3087 // common case, splitting this into a move and a 32-bit shift is faster and
3088 // the same code size.
3089 if (RHSVal < 32)
3090 return SDValue();
3092 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3094 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3095 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3097 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3099 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3100 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3103 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3104 DAGCombinerInfo &DCI) const {
3105 if (N->getValueType(0) != MVT::i64)
3108 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3112 SelectionDAG &DAG = DCI.DAG;
3114 unsigned RHSVal = RHS->getZExtValue();
3116 // (sra i64:x, 32) -> build_pair hi_32(x), (sra hi_32(x), 31)
3117 if (RHSVal == 32) {
3118 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3119 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3120 DAG.getConstant(31, SL, MVT::i32));
3122 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3123 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3124 }
3126 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3127 if (RHSVal == 63) {
3128 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3129 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3130 DAG.getConstant(31, SL, MVT::i32));
3131 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3132 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3133 }
3135 return SDValue();
3136 }
3138 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3139 DAGCombinerInfo &DCI) const {
3140 auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3144 EVT VT = N->getValueType(0);
3145 SDValue LHS = N->getOperand(0);
3146 unsigned ShiftAmt = RHS->getZExtValue();
3147 SelectionDAG &DAG = DCI.DAG;
3148 SDLoc SL(N);
3150 // fold (srl (and x, (c1 << c2)), c2) -> (and (srl x, c2), c1)
3151 // This improves the ability to match BFE patterns in isel.
3152 if (LHS.getOpcode() == ISD::AND) {
3153 if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3154 unsigned MaskIdx, MaskLen;
3155 if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
3156 MaskIdx == ShiftAmt) {
3157 return DAG.getNode(
3158 ISD::AND, SL, VT,
3159 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3160 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3161 }
3162 }
3163 }
3165 if (ShiftAmt < 32)
3166 return SDValue();
3171 // srl i64:x, C for C >= 32
3172 //   =>
3173 // build_pair (srl hi_32(x), C - 32), 0
3174 SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3176 SDValue Hi = getHiHalf64(LHS, DAG);
3178 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3179 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3181 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3183 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3186 SDValue AMDGPUTargetLowering::performTruncateCombine(
3187 SDNode *N, DAGCombinerInfo &DCI) const {
3189 SelectionDAG &DAG = DCI.DAG;
3190 EVT VT = N->getValueType(0);
3191 SDValue Src = N->getOperand(0);
3193 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3194 if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3195 SDValue Vec = Src.getOperand(0);
3196 if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3197 SDValue Elt0 = Vec.getOperand(0);
3198 EVT EltVT = Elt0.getValueType();
3199 if (VT.getFixedSizeInBits() <= EltVT.getFixedSizeInBits()) {
3200 if (EltVT.isFloatingPoint()) {
3201 Elt0 = DAG.getNode(ISD::BITCAST, SL,
3202 EltVT.changeTypeToInteger(), Elt0);
3205 return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3210 // Equivalent of above for accessing the high element of a vector as an
3211 // integer operation.
3212 // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
3213 if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3214 if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3215 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3216 SDValue BV = stripBitcast(Src.getOperand(0));
3217 if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3218 BV.getValueType().getVectorNumElements() == 2) {
3219 SDValue SrcElt = BV.getOperand(1);
3220 EVT SrcEltVT = SrcElt.getValueType();
3221 if (SrcEltVT.isFloatingPoint()) {
3222 SrcElt = DAG.getNode(ISD::BITCAST, SL,
3223 SrcEltVT.changeTypeToInteger(), SrcElt);
3226 return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3232 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3234 // i16 (trunc (srl i64:x, K)), K <= 16 ->
3235 // i16 (trunc (srl (i32 (trunc x), K)))
3236 if (VT.getScalarSizeInBits() < 32) {
3237 EVT SrcVT = Src.getValueType();
3238 if (SrcVT.getScalarSizeInBits() > 32 &&
3239 (Src.getOpcode() == ISD::SRL ||
3240 Src.getOpcode() == ISD::SRA ||
3241 Src.getOpcode() == ISD::SHL)) {
3242 SDValue Amt = Src.getOperand(1);
3243 KnownBits Known = DAG.computeKnownBits(Amt);
3244 unsigned Size = VT.getScalarSizeInBits();
3245 if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3246 (Known.countMaxActiveBits() <= Log2_32(Size))) {
3247 EVT MidVT = VT.isVector() ?
3248 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3249 VT.getVectorNumElements()) : MVT::i32;
3251 EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3252 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3253 Src.getOperand(0));
3254 DCI.AddToWorklist(Trunc.getNode());
3256 if (Amt.getValueType() != NewShiftVT) {
3257 Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3258 DCI.AddToWorklist(Amt.getNode());
3261 SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3262 Trunc, Amt);
3263 return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3271 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3272 // instructions. If we only match on the legalized i64 mul expansion,
3273 // SimplifyDemandedBits will be unable to remove them because there will be
3274 // multiple uses due to the separate mul + mulh[su].
3275 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3276 SDValue N0, SDValue N1, unsigned Size, bool Signed) {
3277 if (Size <= 32) {
3278 unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3279 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3280 }
3282 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3283 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
3285 SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3286 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3288 return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, MulLo, MulHi);
3291 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3292 DAGCombinerInfo &DCI) const {
3293 EVT VT = N->getValueType(0);
3295 // Don't generate 24-bit multiplies on values that are in SGPRs, since
3296 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
3297 // unnecessarily). isDivergent() is used as an approximation of whether the
3298 // value is in an SGPR.
3299 if (!N->isDivergent())
3302 unsigned Size = VT.getSizeInBits();
3303 if (VT.isVector() || Size > 64)
3306 // There are i16 integer mul/mad.
3307 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3310 SelectionDAG &DAG = DCI.DAG;
3313 SDValue N0 = N->getOperand(0);
3314 SDValue N1 = N->getOperand(1);
3316 // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3317 // in the source into any_extends if the result of the mul is truncated. Since
3318 // we can assume the high bits are whatever we want, use the underlying value
3319 // to avoid the unknown high bits from interfering.
3320 if (N0.getOpcode() == ISD::ANY_EXTEND)
3321 N0 = N0.getOperand(0);
3323 if (N1.getOpcode() == ISD::ANY_EXTEND)
3324 N1 = N1.getOperand(0);
3326 SDValue Mul;
3328 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3329 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3330 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3331 Mul = getMul24(DAG, DL, N0, N1, Size, false);
3332 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3333 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3334 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3335 Mul = getMul24(DAG, DL, N0, N1, Size, true);

  // We need to use sext even for MUL_U24, because MUL_U24 is used
  // for signed multiply of 8 and 16-bit types.
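  // The low Size bits of the product depend only on the low bits of the
  // operands, so for a narrow result it does not matter whether the inputs
  // were treated as signed or unsigned 24-bit values.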
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}

SDValue
AMDGPUTargetLowering::performMulLoHiCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
  // in the source into any_extends if the result of the mul is truncated. Since
  // we can assume the high bits are whatever we want, use the underlying value
  // to keep the unknown high bits from interfering.
  if (N0.getOpcode() == ISD::ANY_EXTEND)
    N0 = N0.getOperand(0);
  if (N1.getOpcode() == ISD::ANY_EXTEND)
    N1 = N1.getOperand(0);

  // Try to use two fast 24-bit multiplies (one for each half of the result)
  // instead of one slow extending multiply.
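  // E.g. (i32 (umul_lohi x, y)) with 24-bit x and y becomes
  //   lo = (MUL_U24 x, y), hi = (MULHI_U24 x, y)
  // and both results of the original node are replaced at once via CombineTo.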
  unsigned LoOpcode, HiOpcode;
  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
    LoOpcode = AMDGPUISD::MUL_U24;
    HiOpcode = AMDGPUISD::MULHI_U24;
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    LoOpcode = AMDGPUISD::MUL_I24;
    HiOpcode = AMDGPUISD::MULHI_I24;
  } else {
    return SDValue();
  }

  SDValue Lo = DAG.getNode(LoOpcode, DL, MVT::i32, N0, N1);
  SDValue Hi = DAG.getNode(HiOpcode, DL, MVT::i32, N0, N1);
  DCI.CombineTo(N, Lo, Hi);
  return SDValue(N, 0);
}

SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (!Subtarget->hasMulI24() || VT.isVector())
    return SDValue();

  // Don't generate 24-bit multiplies on values that are in SGPRs, since
  // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
  // unnecessarily). isDivergent() is used as an approximation of whether the
  // value is in an SGPR.
  // This doesn't apply if no s_mul_hi is available (since we'll end up with a
  // valu op anyway).
  if (Subtarget->hasSMulHi() && !N->isDivergent())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (!isI24(N0, DAG) || !isI24(N1, DAG))
    return SDValue();

  N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
  N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);

  SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
  DCI.AddToWorklist(Mulhi.getNode());
  return DAG.getSExtOrTrunc(Mulhi, DL, VT);
}

SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  // Don't generate 24-bit multiplies on values that are in SGPRs, since
  // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
  // unnecessarily). isDivergent() is used as an approximation of whether the
  // value is in an SGPR.
  // This doesn't apply if no s_mul_hi is available (since we'll end up with a
  // valu op anyway).
  if (Subtarget->hasSMulHi() && !N->isDivergent())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (!isU24(N0, DAG) || !isU24(N1, DAG))
    return SDValue();

  N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
  N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);

  SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
  DCI.AddToWorklist(Mulhi.getNode());
  return DAG.getZExtOrTrunc(Mulhi, DL, VT);
}

static bool isNegativeOne(SDValue Val) {
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
    return C->isAllOnes();
  return false;
}

SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
                                          SDValue Op,
                                          const SDLoc &DL,
                                          unsigned Opc) const {
  EVT VT = Op.getValueType();
  EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
  if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
                              LegalVT != MVT::i16))
    return SDValue();

  if (VT != MVT::i32)
    Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);

  SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
  if (VT != MVT::i32)
    FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);

  return FFBX;
}

// The native instructions return -1 on 0 input. Optimize out a select that
// produces -1 on 0.
//
// TODO: If zero is not undef, we could also do this if the output is compared
// against the bitwidth.
//
// TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
                                                      SDValue LHS, SDValue RHS,
                                                      DAGCombinerInfo &DCI) const {
  ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
  if (!CmpRhs || !CmpRhs->isZero())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
  SDValue CmpLHS = Cond.getOperand(0);

  // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
  // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
  if (CCOpcode == ISD::SETEQ &&
      (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
      RHS.getOperand(0) == CmpLHS && isNegativeOne(LHS)) {
    unsigned Opc =
        isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  }

  // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
  // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
  if (CCOpcode == ISD::SETNE &&
      (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
      LHS.getOperand(0) == CmpLHS && isNegativeOne(RHS)) {
    unsigned Opc =
        isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;

    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  }

  return SDValue();
}

static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
                                         unsigned Op,
                                         const SDLoc &SL,
                                         SDValue Cond,
                                         SDValue N1,
                                         SDValue N2) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N1.getValueType();

  SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
                                  N1.getOperand(0), N2.getOperand(0));
  DCI.AddToWorklist(NewSelect.getNode());
  return DAG.getNode(Op, SL, VT, NewSelect);
}

// Pull a free FP operation out of a select so it may fold into uses.
//
// select c, (fneg x), (fneg y) -> fneg (select c, x, y)
// select c, (fneg x), k -> fneg (select c, x, (fneg k))
//
// select c, (fabs x), (fabs y) -> fabs (select c, x, y)
// select c, (fabs x), +k -> fabs (select c, x, k)
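//
// The false operand of the fabs form must be non-negative: e.g.
// select c, (fabs x), -2.0 cannot become fabs (select c, x, -2.0), since the
// pulled-out fabs would turn -2.0 into +2.0 on the false path.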
static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
                                    SDValue N) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Cond = N.getOperand(0);
  SDValue LHS = N.getOperand(1);
  SDValue RHS = N.getOperand(2);

  EVT VT = N.getValueType();
  if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
      (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
    return distributeOpThroughSelect(DCI, LHS.getOpcode(),
                                     SDLoc(N), Cond, LHS, RHS);
  }

  bool Inv = false;
  if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
    std::swap(LHS, RHS);
    Inv = true;
  }

  // TODO: Support vector constants.
  ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
  if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
    SDLoc SL(N);
    // If one side is an fneg/fabs and the other is a constant, we can push the
    // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
    SDValue NewLHS = LHS.getOperand(0);
    SDValue NewRHS = RHS;

    // Careful: if the neg can be folded up, don't try to pull it back down.
    bool ShouldFoldNeg = true;

    if (NewLHS.hasOneUse()) {
      unsigned Opc = NewLHS.getOpcode();
      if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
        ShouldFoldNeg = false;
      if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
        ShouldFoldNeg = false;
    }

    if (ShouldFoldNeg) {
      if (LHS.getOpcode() == ISD::FNEG)
        NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
      else if (CRHS->isNegative())
        return SDValue();

      if (Inv)
        std::swap(NewLHS, NewRHS);

      SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
                                      Cond, NewLHS, NewRHS);
      DCI.AddToWorklist(NewSelect.getNode());
      return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
    }
  }

  return SDValue();
}

SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
    return Folded;

  SDValue Cond = N->getOperand(0);
  if (Cond.getOpcode() != ISD::SETCC)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue LHS = Cond.getOperand(0);
  SDValue RHS = Cond.getOperand(1);
  SDValue CC = Cond.getOperand(2);

  SDValue True = N->getOperand(1);
  SDValue False = N->getOperand(2);

  if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
    SelectionDAG &DAG = DCI.DAG;
    if (DAG.isConstantValueOfAnyType(True) &&
        !DAG.isConstantValueOfAnyType(False)) {
      // Swap cmp + select pair to move constant to false input.
      // This will allow using VOPC cndmasks more often.
      // select (setcc x, y), k, x -> select (setccinv x, y), x, k

      SDLoc SL(N);
      ISD::CondCode NewCC =
          getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());

      SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
      return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
    }

    if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
      SDValue MinMax
        = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
      // Revisit this node so we can catch min3/max3/med3 patterns.
      //DCI.AddToWorklist(MinMax.getNode());
      return MinMax;
    }
  }

  // There's no reason not to do this if the condition has other uses.
  return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
}
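
// Bit patterns of 1.0 / (2.0 * pi) (~0.15915494) in half, single, and double
// precision.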
static bool isInv2Pi(const APFloat &APF) {
  static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
  static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
  static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));

  return APF.bitwiseIsEqual(KF16) ||
         APF.bitwiseIsEqual(KF32) ||
         APF.bitwiseIsEqual(KF64);
}

// The negated forms of 0 and 1.0 / (2.0 * pi) do not have inline immediates,
// so there is an additional cost to negate them.
bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
  if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
    if (C->isZero() && !C->isNegative())
      return true;

    if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
      return true;
  }

  return false;
}

static unsigned inverseMinMax(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return ISD::FMINNUM;
  case ISD::FMINNUM:
    return ISD::FMAXNUM;
  case ISD::FMAXNUM_IEEE:
    return ISD::FMINNUM_IEEE;
  case ISD::FMINNUM_IEEE:
    return ISD::FMAXNUM_IEEE;
  case AMDGPUISD::FMAX_LEGACY:
    return AMDGPUISD::FMIN_LEGACY;
  case AMDGPUISD::FMIN_LEGACY:
    return AMDGPUISD::FMAX_LEGACY;
  default:
    llvm_unreachable("invalid min/max opcode");
  }
}

SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  unsigned Opc = N0.getOpcode();

  // If the input has multiple uses and we can either fold the negate down, or
  // the other uses cannot, give up. This both prevents unprofitable
  // transformations and infinite loops: we won't repeatedly try to fold around
  // a negate that has no 'good' form.
  if (N0.hasOneUse()) {
    // This may be able to fold into the source, but at a code size cost. Don't
    // fold if the fold into the user is free.
    if (allUsesHaveSourceMods(N, 0))
      return SDValue();
  } else {
    if (fnegFoldsIntoOp(Opc) &&
        (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
      return SDValue();
  }

  SDLoc SL(N);
  switch (Opc) {
  case ISD::FADD: {
    if (!mayIgnoreSignedZero(N0))
      return SDValue();
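    // Distributing the fneg is only safe if signed zeros do not matter: with
    // x == -y, fneg (fadd x, y) is -0.0, but fadd (fneg x), (fneg y) is +0.0.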
    // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    if (LHS.getOpcode() != ISD::FNEG)
      LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
    else
      LHS = LHS.getOperand(0);

    if (RHS.getOpcode() != ISD::FNEG)
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    else
      RHS = RHS.getOperand(0);

    SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
    if (Res.getOpcode() != ISD::FADD)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMUL:
  case AMDGPUISD::FMUL_LEGACY: {
    // (fneg (fmul x, y)) -> (fmul x, (fneg y))
    // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    if (LHS.getOpcode() == ISD::FNEG)
      LHS = LHS.getOperand(0);
    else if (RHS.getOpcode() == ISD::FNEG)
      RHS = RHS.getOperand(0);
    else
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

    SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
    if (Res.getOpcode() != Opc)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMA:
  case ISD::FMAD: {
    // TODO: handle llvm.amdgcn.fma.legacy
    if (!mayIgnoreSignedZero(N0))
      return SDValue();

    // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
    SDValue LHS = N0.getOperand(0);
    SDValue MHS = N0.getOperand(1);
    SDValue RHS = N0.getOperand(2);

    if (LHS.getOpcode() == ISD::FNEG)
      LHS = LHS.getOperand(0);
    else if (MHS.getOpcode() == ISD::FNEG)
      MHS = MHS.getOperand(0);
    else
      MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);

    if (RHS.getOpcode() != ISD::FNEG)
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    else
      RHS = RHS.getOperand(0);

    SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
    if (Res.getOpcode() != Opc)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINNUM_IEEE:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMIN_LEGACY: {
    // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
    // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
    // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
    // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)

    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    // 0 doesn't have a negated inline immediate.
    // TODO: This constant check should be generalized to other operations.
    if (isConstantCostlierToNegate(RHS))
      return SDValue();

    SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
    SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    unsigned Opposite = inverseMinMax(Opc);

    SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
    if (Res.getOpcode() != Opposite)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case AMDGPUISD::FMED3: {
    SDValue Ops[3];
    for (unsigned I = 0; I < 3; ++I)
      Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());

    SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
    if (Res.getOpcode() != AMDGPUISD::FMED3)
      return SDValue(); // Op got folded away.

    if (!N0.hasOneUse()) {
      SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Res);
      DAG.ReplaceAllUsesWith(N0, Neg);

      for (SDNode *U : Neg->uses())
        DCI.AddToWorklist(U);
    }

    return Res;
  }
  case ISD::FP_EXTEND:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT: // XXX - Should fround be handled?
  case ISD::FSIN:
  case ISD::FCANONICALIZE:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW: {
    SDValue CvtSrc = N0.getOperand(0);
    if (CvtSrc.getOpcode() == ISD::FNEG) {
      // (fneg (fp_extend (fneg x))) -> (fp_extend x)
      // (fneg (rcp (fneg x))) -> (rcp x)
      return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
    }

    if (!N0.hasOneUse())
      return SDValue();

    // (fneg (fp_extend x)) -> (fp_extend (fneg x))
    // (fneg (rcp x)) -> (rcp (fneg x))
    SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
    return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
  }
  case ISD::FP_ROUND: {
    SDValue CvtSrc = N0.getOperand(0);

    if (CvtSrc.getOpcode() == ISD::FNEG) {
      // (fneg (fp_round (fneg x))) -> (fp_round x)
      return DAG.getNode(ISD::FP_ROUND, SL, VT,
                         CvtSrc.getOperand(0), N0.getOperand(1));
    }

    if (!N0.hasOneUse())
      return SDValue();

    // (fneg (fp_round x)) -> (fp_round (fneg x))
    SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
    return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
  }
  case ISD::FP16_TO_FP: {
    // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
    // f16, but legalization of f16 fneg ends up pulling it out of the source.
    // Put the fneg back as a legal source operation that can be matched later.
    SDValue Src = N0.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
    SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
                                  DAG.getConstant(0x8000, SL, SrcVT));
    return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
  }
  default:
    return SDValue();
  }
}

SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);

  if (!N0.hasOneUse())
    return SDValue();

  switch (N0.getOpcode()) {
  case ISD::FP16_TO_FP: {
    assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
    SDLoc SL(N);
    SDValue Src = N0.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
    SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
                                  DAG.getConstant(0x7fff, SL, SrcVT));
    return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
  }
  default:
    return SDValue();
  }
}

SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
  if (!CFP)
    return SDValue();

  // XXX - Should this flush denormals?
  const APFloat &Val = CFP->getValueAPF();
  APFloat One(Val.getSemantics(), "1.0");
  return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST: {
    EVT DestVT = N->getValueType(0);

    // Push casts through vector builds. This helps avoid emitting a large
    // number of copies when materializing floating point vector constants.
    //
    // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
    //   vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
    if (DestVT.isVector()) {
      SDValue Src = N->getOperand(0);
      if (Src.getOpcode() == ISD::BUILD_VECTOR) {
        EVT SrcVT = Src.getValueType();
        unsigned NElts = DestVT.getVectorNumElements();

        if (SrcVT.getVectorNumElements() == NElts) {
          EVT DestEltVT = DestVT.getVectorElementType();

          SmallVector<SDValue, 8> CastedElts;
          SDLoc SL(N);
          for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
            SDValue Elt = Src.getOperand(I);
            CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
          }

          return DAG.getBuildVector(DestVT, SL, CastedElts);
        }
      }
    }

    if (DestVT.getSizeInBits() != 64 || !DestVT.isVector())
      break;

    // Fold bitcasts of constants.
    //
    // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
    // TODO: Generalize and move to DAGCombiner
    SDValue Src = N->getOperand(0);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
      SDLoc SL(N);
      uint64_t CVal = C->getZExtValue();
      SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                               DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
                               DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
      return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
    }

    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
      const APInt &Val = C->getValueAPF().bitcastToAPInt();
      SDLoc SL(N);
      uint64_t CVal = Val.getZExtValue();
      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
                                DAG.getConstant(Hi_32(CVal), SL, MVT::i32));

      return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
    }

    break;
  }
  case ISD::SHL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performShlCombine(N, DCI);
  }
  case ISD::SRL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSrlCombine(N, DCI);
  }
  case ISD::SRA: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSraCombine(N, DCI);
  }
  case ISD::TRUNCATE:
    return performTruncateCombine(N, DCI);
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    return performMulLoHiCombine(N, DCI);
  case ISD::MULHS:
    return performMulhsCombine(N, DCI);
  case ISD::MULHU:
    return performMulhuCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MULHI_I24:
  case AMDGPUISD::MULHI_U24:
    return simplifyMul24(N, DCI);
  case ISD::SELECT:
    return performSelectCombine(N, DCI);
  case ISD::FNEG:
    return performFNegCombine(N, DCI);
  case ISD::FABS:
    return performFAbsCombine(N, DCI);
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, DL, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of
        // existing DAG Combines. If not eliminated, we will match back to BFE
        // during selection.

        // TODO: The sext_inreg of extended types ends up here, although we
        // could handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        CVal->getSExtValue(),
                                        OffsetVal,
                                        WidthVal,
                                        DL);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       CVal->getZExtValue(),
                                       OffsetVal,
                                       WidthVal,
                                       DL);
    }

    if ((OffsetVal + WidthVal) >= 32 &&
        !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    if (BitsFrom.hasOneUse()) {
      APInt Demanded = APInt::getBitsSet(32,
                                         OffsetVal,
                                         OffsetVal + WidthVal);

      KnownBits Known;
      TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                            !DCI.isBeforeLegalizeOps());
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
          TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);
      }
    }

    break;
  }
  case ISD::LOAD:
    return performLoadCombine(N, DCI);
  case ISD::STORE:
    return performStoreCombine(N, DCI);
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_IFLAG:
    return performRcpCombine(N, DCI);
  case ISD::AssertZext:
  case ISD::AssertSext:
    return performAssertSZExtCombine(N, DCI);
  case ISD::INTRINSIC_WO_CHAIN:
    return performIntrinsicWOChainCombine(N, DCI);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   Register Reg, EVT VT,
                                                   const SDLoc &SL,
                                                   bool RawReg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register VReg;

  if (!MRI.isLiveIn(Reg)) {
    VReg = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VReg);
  } else {
    VReg = MRI.getLiveInVirtReg(Reg);
  }

  if (RawReg)
    return DAG.getRegister(VReg, VT);

  return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
}

// This may be called multiple times, and nothing prevents creating multiple
// objects at the same offset. See if we already defined this object.
static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
                                       int64_t Offset) {
  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
    if (MFI.getObjectOffset(I) == Offset) {
      assert(MFI.getObjectSize(I) == Size);
      return I;
    }
  }

  return MFI.CreateFixedObject(Size, Offset, true);
}

SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
                                                  EVT VT,
                                                  const SDLoc &SL,
                                                  int64_t Offset) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);

  auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
  SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);

  return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, Align(4),
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   SDValue ArgVal,
                                                   int64_t Offset) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
  // Stores to the argument stack area are relative to the stack pointer.
  SDValue SP =
      DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32);
  Ptr = DAG.getNode(ISD::ADD, SL, MVT::i32, SP, Ptr);
  SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, Align(4),
                               MachineMemOperand::MODereferenceable);
  return Store;
}

SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
                                             const TargetRegisterClass *RC,
                                             EVT VT, const SDLoc &SL,
                                             const ArgDescriptor &Arg) const {
  assert(Arg && "Attempting to load missing argument");

  SDValue V = Arg.isRegister() ?
    CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
    loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());

  if (!Arg.isMasked())
    return V;
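
  // Some inputs share a register, e.g. on targets that pack the workitem IDs
  // into one VGPR (x in bits 0-9, y in bits 10-19, z in bits 20-29). For the
  // y component, Mask would be 0x3ff << 10, so V is shifted right by 10 and
  // then masked with 0x3ff.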
  unsigned Mask = Arg.getMask();
  unsigned Shift = countTrailingZeros<unsigned>(Mask);
  V = DAG.getNode(ISD::SRL, SL, VT, V,
                  DAG.getShiftAmountConstant(Shift, VT, SL));
  return DAG.getNode(ISD::AND, SL, VT, V,
                     DAG.getConstant(Mask >> Shift, SL, VT));
}

uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
    const MachineFunction &MF, const ImplicitParameter Param) const {
  const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
  unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
  const Align Alignment = ST.getAlignmentForImplicitArgPtr();
  uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
                       ExplicitArgOffset;
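  // E.g. (illustrative values) with 36 bytes of explicit kernel arguments, an
  // 8-byte implicit-argument alignment, and a zero explicit-argument base
  // offset, the implicit parameters start at alignTo(36, 8) + 0 == 40 bytes.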
  switch (Param) {
  case FIRST_IMPLICIT:
    return ArgOffset;
  case PRIVATE_BASE:
    return ArgOffset + AMDGPU::ImplicitArg::PRIVATE_BASE_OFFSET;
  case SHARED_BASE:
    return ArgOffset + AMDGPU::ImplicitArg::SHARED_BASE_OFFSET;
  case QUEUE_PTR:
    return ArgOffset + AMDGPU::ImplicitArg::QUEUE_PTR_OFFSET;
  }
  llvm_unreachable("unexpected implicit parameter type");
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((AMDGPUISD::NodeType)Opcode) {
  case AMDGPUISD::FIRST_NUMBER: break;
  // AMDIL DAG nodes
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(IF)
  NODE_NAME_CASE(ELSE)
  NODE_NAME_CASE(LOOP)
  NODE_NAME_CASE(CALL)
  NODE_NAME_CASE(TC_RETURN)
  NODE_NAME_CASE(TRAP)
  NODE_NAME_CASE(RET_FLAG)
  NODE_NAME_CASE(RETURN_TO_EPILOG)
  NODE_NAME_CASE(ENDPGM)
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(SETCC)
  NODE_NAME_CASE(SETREG)
  NODE_NAME_CASE(DENORM_MODE)
  NODE_NAME_CASE(FMA_W_CHAIN)
  NODE_NAME_CASE(FMUL_W_CHAIN)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(COS_HW)
  NODE_NAME_CASE(SIN_HW)
  NODE_NAME_CASE(FMAX_LEGACY)
  NODE_NAME_CASE(FMIN_LEGACY)
  NODE_NAME_CASE(FMAX3)
  NODE_NAME_CASE(SMAX3)
  NODE_NAME_CASE(UMAX3)
  NODE_NAME_CASE(FMIN3)
  NODE_NAME_CASE(SMIN3)
  NODE_NAME_CASE(UMIN3)
  NODE_NAME_CASE(FMED3)
  NODE_NAME_CASE(SMED3)
  NODE_NAME_CASE(UMED3)
  NODE_NAME_CASE(FDOT2)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(FMAD_FTZ)
  NODE_NAME_CASE(RCP)
  NODE_NAME_CASE(RSQ)
  NODE_NAME_CASE(RCP_LEGACY)
  NODE_NAME_CASE(RCP_IFLAG)
  NODE_NAME_CASE(FMUL_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMP)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(FP_CLASS)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(CARRY)
  NODE_NAME_CASE(BORROW)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(FFBH_U32)
  NODE_NAME_CASE(FFBH_I32)
  NODE_NAME_CASE(FFBL_B32)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MULHI_U24)
  NODE_NAME_CASE(MULHI_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(MAD_I64_I32)
  NODE_NAME_CASE(MAD_U64_U32)
  NODE_NAME_CASE(PERM)
  NODE_NAME_CASE(TEXTURE_FETCH)
  NODE_NAME_CASE(R600_EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
  NODE_NAME_CASE(CVT_PKNORM_I16_F32)
  NODE_NAME_CASE(CVT_PKNORM_U16_F32)
  NODE_NAME_CASE(CVT_PK_I16_I32)
  NODE_NAME_CASE(CVT_PK_U16_U32)
  NODE_NAME_CASE(FP_TO_FP16)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  NODE_NAME_CASE(PC_ADD_REL_OFFSET)
  NODE_NAME_CASE(LDS)
  NODE_NAME_CASE(FPTRUNC_ROUND_UPWARD)
  NODE_NAME_CASE(FPTRUNC_ROUND_DOWNWARD)
  NODE_NAME_CASE(DUMMY_CHAIN)
  case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
  NODE_NAME_CASE(LOAD_D16_HI)
  NODE_NAME_CASE(LOAD_D16_LO)
  NODE_NAME_CASE(LOAD_D16_HI_I8)
  NODE_NAME_CASE(LOAD_D16_HI_U8)
  NODE_NAME_CASE(LOAD_D16_LO_I8)
  NODE_NAME_CASE(LOAD_D16_LO_U8)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
  NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
  NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
  NODE_NAME_CASE(DS_ORDERED_COUNT)
  NODE_NAME_CASE(ATOMIC_CMP_SWAP)
  NODE_NAME_CASE(ATOMIC_INC)
  NODE_NAME_CASE(ATOMIC_DEC)
  NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
  NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
  NODE_NAME_CASE(BUFFER_LOAD)
  NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
  NODE_NAME_CASE(BUFFER_LOAD_USHORT)
  NODE_NAME_CASE(BUFFER_LOAD_BYTE)
  NODE_NAME_CASE(BUFFER_LOAD_SHORT)
  NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
  NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
  NODE_NAME_CASE(SBUFFER_LOAD)
  NODE_NAME_CASE(BUFFER_STORE)
  NODE_NAME_CASE(BUFFER_STORE_BYTE)
  NODE_NAME_CASE(BUFFER_STORE_SHORT)
  NODE_NAME_CASE(BUFFER_STORE_FORMAT)
  NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
  NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
  NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
  NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
  NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
  NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
  NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
  NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
  NODE_NAME_CASE(BUFFER_ATOMIC_AND)
  NODE_NAME_CASE(BUFFER_ATOMIC_OR)
  NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
  NODE_NAME_CASE(BUFFER_ATOMIC_INC)
  NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
  NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
  NODE_NAME_CASE(BUFFER_ATOMIC_CSUB)
  NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
  NODE_NAME_CASE(BUFFER_ATOMIC_FMIN)
  NODE_NAME_CASE(BUFFER_ATOMIC_FMAX)

  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
  }
  return nullptr;
}

SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
                                              SelectionDAG &DAG, int Enabled,
                                              int &RefinementSteps,
                                              bool &UseOneConstNR,
                                              bool Reciprocal) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also f64 rsq instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
                                               SelectionDAG &DAG, int Enabled,
                                               int &RefinementSteps) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson step performed with two fused multiply-adds (FMAs).

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

static unsigned workitemIntrinsicDim(unsigned ID) {
  switch (ID) {
  case Intrinsic::amdgcn_workitem_id_x:
    return 0;
  case Intrinsic::amdgcn_workitem_id_y:
    return 1;
  case Intrinsic::amdgcn_workitem_id_z:
    return 2;
  default:
    llvm_unreachable("not a workitem intrinsic");
  }
}

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known,
    const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {

  Known.resetAll(); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
    Known.Zero = APInt::getHighBitsSet(32, 31);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    uint32_t Width = CWidth->getZExtValue() & 0x1f;

    if (Opc == AMDGPUISD::BFE_U32)
      Known.Zero = APInt::getHighBitsSet(32, 32 - Width);

    break;
  }
  case AMDGPUISD::FP_TO_FP16: {
    unsigned BitWidth = Known.getBitWidth();

    // High bits are zero.
    Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
    break;
  }
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MUL_I24: {
    KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
                      RHSKnown.countMinTrailingZeros();
    Known.Zero.setLowBits(std::min(TrailZ, 32u));
    // Skip extra check if all bits are known zeros.
    if (TrailZ >= 32)
      break;

    // Truncate to 24 bits.
    LHSKnown = LHSKnown.trunc(24);
    RHSKnown = RHSKnown.trunc(24);
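
    // E.g. if each operand is known to need at most 10 bits, the product
    // needs at most 20, so the remaining high bits of the 32-bit result are
    // known: zero for MUL_U24, and all equal to the sign for MUL_I24 (when
    // the operand signs themselves are known).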
    if (Opc == AMDGPUISD::MUL_I24) {
      unsigned LHSValBits = LHSKnown.countMaxSignificantBits();
      unsigned RHSValBits = RHSKnown.countMaxSignificantBits();
      unsigned MaxValBits = LHSValBits + RHSValBits;
      if (MaxValBits > 32)
        break;
      unsigned SignBits = 32 - MaxValBits + 1;
      bool LHSNegative = LHSKnown.isNegative();
      bool LHSNonNegative = LHSKnown.isNonNegative();
      bool LHSPositive = LHSKnown.isStrictlyPositive();
      bool RHSNegative = RHSKnown.isNegative();
      bool RHSNonNegative = RHSKnown.isNonNegative();
      bool RHSPositive = RHSKnown.isStrictlyPositive();

      if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
        Known.Zero.setHighBits(SignBits);
      else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
        Known.One.setHighBits(SignBits);
    } else {
      unsigned LHSValBits = LHSKnown.countMaxActiveBits();
      unsigned RHSValBits = RHSKnown.countMaxActiveBits();
      unsigned MaxValBits = LHSValBits + RHSValBits;
      if (MaxValBits >= 32)
        break;
      Known.Zero.setBitsFrom(MaxValBits);
    }
    break;
  }
  case AMDGPUISD::PERM: {
    ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CMask)
      return;

    KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    unsigned Sel = CMask->getZExtValue();
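
    // Each selector byte of the mask picks one result byte: values 0-3 take a
    // byte from the second operand, 4-6 take a byte from the first, 0x0c
    // produces 0x00, and values above 0x0c produce 0xff; other selector
    // values are left unknown here.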
    for (unsigned I = 0; I < 32; I += 8) {
      unsigned SelBits = Sel & 0xff;
      if (SelBits < 4) {
        SelBits *= 8;
        Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits < 7) {
        SelBits = (SelBits & 3) * 8;
        Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits == 0x0c) {
        Known.Zero |= 0xFFull << I;
      } else if (SelBits > 0x0c) {
        Known.One |= 0xFFull << I;
      }
      Sel >>= 8;
    }
    break;
  }
  case AMDGPUISD::BUFFER_LOAD_UBYTE: {
    Known.Zero.setHighBits(24);
    break;
  }
  case AMDGPUISD::BUFFER_LOAD_USHORT: {
    Known.Zero.setHighBits(16);
    break;
  }
  case AMDGPUISD::LDS: {
    auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
    Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout());

    Known.Zero.setHighBits(16);
    Known.Zero.setLowBits(Log2(Alignment));
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_mbcnt_lo:
    case Intrinsic::amdgcn_mbcnt_hi: {
      const GCNSubtarget &ST =
          DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
      // These return at most the wavefront size - 1.
      unsigned Size = Op.getValueType().getSizeInBits();
      Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2());
      break;
    }
    case Intrinsic::amdgcn_workitem_id_x:
    case Intrinsic::amdgcn_workitem_id_y:
    case Intrinsic::amdgcn_workitem_id_z: {
      unsigned MaxValue = Subtarget->getMaxWorkitemID(
          DAG.getMachineFunction().getFunction(), workitemIntrinsicDim(IID));
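      // E.g. a maximum workitem ID of 1023 (10 bits) leaves the high 22 bits
      // of an i32 known zero.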
      Known.Zero.setHighBits(countLeadingZeros(MaxValue));
      break;
    }
    default:
      break;
    }
    break;
  }
  }
}

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
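    // E.g. a width of 24 zero-fills the top 8 bits, so 8 sign bits are known.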
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
    return 31;
  case AMDGPUISD::BUFFER_LOAD_BYTE:
    return 25;
  case AMDGPUISD::BUFFER_LOAD_SHORT:
    return 17;
  case AMDGPUISD::BUFFER_LOAD_UBYTE:
    return 24;
  case AMDGPUISD::BUFFER_LOAD_USHORT:
    return 16;
  case AMDGPUISD::FP_TO_FP16:
    return 16;
  default:
    return 1;
  }
}

unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
  GISelKnownBits &Analysis, Register R,
  const APInt &DemandedElts, const MachineRegisterInfo &MRI,
  unsigned Depth) const {
  const MachineInstr *MI = MRI.getVRegDef(R);
  if (!MI)
    return 1;

  // TODO: Check range metadata on MMO.
  switch (MI->getOpcode()) {
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
    return 25;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
    return 17;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
    return 24;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
    return 16;
  default:
    return 1;
  }
}

bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                        const SelectionDAG &DAG,
                                                        bool SNaN,
                                                        unsigned Depth) const {
  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (SNaN)
      return true;

    // TODO: Can check no nans on one of the operands for each one, but which
    // one?
    return false;
  }
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::CVT_PKRTZ_F16_F32: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMAD_FTZ: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  }
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return true;

  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_CLAMP: {
    if (SNaN)
      return true;

    // TODO: Need is known positive check.
    return false;
  }
  case AMDGPUISD::LDEXP:
  case AMDGPUISD::FRACT: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case AMDGPUISD::DIV_SCALE:
  case AMDGPUISD::DIV_FMAS:
  case AMDGPUISD::DIV_FIXUP:
    // TODO: Refine on operands.
    return SNaN;
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::COS_HW: {
    // TODO: Need check for infinity
    return SNaN;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID
      = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    // TODO: Handle more intrinsics
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_cubeid:
      return true;

    case Intrinsic::amdgcn_frexp_mant: {
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
    }
    case Intrinsic::amdgcn_cvt_pkrtz: {
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
             DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
    }
    case Intrinsic::amdgcn_rcp:
    case Intrinsic::amdgcn_rsq:
    case Intrinsic::amdgcn_rcp_legacy:
    case Intrinsic::amdgcn_rsq_legacy:
    case Intrinsic::amdgcn_rsq_clamp: {
      if (SNaN)
        return true;

      // TODO: Need is known positive check.
      return false;
    }
    case Intrinsic::amdgcn_trig_preop:
    case Intrinsic::amdgcn_fdot2:
      // TODO: Refine on operand
      return SNaN;
    case Intrinsic::amdgcn_fma_legacy:
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
             DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1) &&
             DAG.isKnownNeverNaN(Op.getOperand(3), SNaN, Depth + 1);
    default:
      return false;
    }
  }
  default:
    return false;
  }
}

TargetLowering::AtomicExpansionKind
AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  switch (RMW->getOperation()) {
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMin:
    return AtomicExpansionKind::CmpXChg;
  default:
    return AtomicExpansionKind::None;
  }
}

bool AMDGPUTargetLowering::isConstantUnsignedBitfieldExtractLegal(
    unsigned Opc, LLT Ty1, LLT Ty2) const {
  return (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64)) &&
         Ty2 == LLT::scalar(32);
}