1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the interfaces that Sparc uses to lower LLVM code into a
12 //===----------------------------------------------------------------------===//
14 #include "SparcISelLowering.h"
15 #include "MCTargetDesc/SparcMCExpr.h"
16 #include "SparcMachineFunctionInfo.h"
17 #include "SparcRegisterInfo.h"
18 #include "SparcTargetMachine.h"
19 #include "SparcTargetObjectFile.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringSwitch.h"
22 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Module.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/KnownBits.h"
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
42 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
43 ISD::ArgFlagsTy &ArgFlags, CCState &State)
45 assert (ArgFlags.isSRet());
47 // Assign SRet argument.
48 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
55 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
56 ISD::ArgFlagsTy &ArgFlags, CCState &State)
58 static const MCPhysReg RegList[] = {
59 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
61 // Try to get first reg.
62 if (Register Reg = State.AllocateReg(RegList)) {
63 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
65 // Assign whole thing in stack.
66 State.addLoc(CCValAssign::getCustomMem(
67 ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
71 // Try to get second reg.
72 if (Register Reg = State.AllocateReg(RegList))
73 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
75 State.addLoc(CCValAssign::getCustomMem(
76 ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
80 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
81 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
82 ISD::ArgFlagsTy &ArgFlags, CCState &State)
84 static const MCPhysReg RegList[] = {
85 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
88 // Try to get first reg.
89 if (Register Reg = State.AllocateReg(RegList))
90 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
94 // Try to get second reg.
95 if (Register Reg = State.AllocateReg(RegList))
96 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
103 // Allocate a full-sized argument for the 64-bit ABI.
104 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
105 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
106 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
107 assert((LocVT == MVT::f32 || LocVT == MVT::f128
108 || LocVT.getSizeInBits() == 64) &&
109 "Can't handle non-64 bits locations");
111 // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
112 unsigned size = (LocVT == MVT::f128) ? 16 : 8;
113 Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
114 unsigned Offset = State.AllocateStack(size, alignment);
117 if (LocVT == MVT::i64 && Offset < 6*8)
118 // Promote integers to %i0-%i5.
119 Reg = SP::I0 + Offset/8;
120 else if (LocVT == MVT::f64 && Offset < 16*8)
121 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
122 Reg = SP::D0 + Offset/8;
123 else if (LocVT == MVT::f32 && Offset < 16*8)
124 // Promote floats to %f1, %f3, ...
125 Reg = SP::F1 + Offset/4;
126 else if (LocVT == MVT::f128 && Offset < 16*8)
127 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
128 Reg = SP::Q0 + Offset/16;
130 // Promote to register when possible, otherwise use the stack slot.
132 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
136 // This argument goes on the stack in an 8-byte slot.
137 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
138 // the right-aligned float. The first 4 bytes of the stack slot are undefined.
139 if (LocVT == MVT::f32)
142 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
146 // Allocate a half-sized argument for the 64-bit ABI.
148 // This is used when passing { float, int } structs by value in registers.
149 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
150 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
151 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
152 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
153 unsigned Offset = State.AllocateStack(4, Align(4));
155 if (LocVT == MVT::f32 && Offset < 16*8) {
156 // Promote floats to %f0-%f31.
157 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
162 if (LocVT == MVT::i32 && Offset < 6*8) {
163 // Promote integers to %i0-%i5, using half the register.
164 unsigned Reg = SP::I0 + Offset/8;
166 LocInfo = CCValAssign::AExt;
168 // Set the Custom bit if this i32 goes in the high bits of a register.
170 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
173 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
177 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
181 #include "SparcGenCallingConv.inc"
183 // The calling conventions in SparcCallingConv.td are described in terms of the
184 // callee's register window. This function translates registers to the
185 // corresponding caller window %o register.
186 static unsigned toCallerWindow(unsigned Reg) {
187 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
189 if (Reg >= SP::I0 && Reg <= SP::I7)
190 return Reg - SP::I0 + SP::O0;
195 SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
197 const SmallVectorImpl<ISD::OutputArg> &Outs,
198 const SmallVectorImpl<SDValue> &OutVals,
199 const SDLoc &DL, SelectionDAG &DAG) const {
200 if (Subtarget->is64Bit())
201 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
202 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
206 SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
208 const SmallVectorImpl<ISD::OutputArg> &Outs,
209 const SmallVectorImpl<SDValue> &OutVals,
210 const SDLoc &DL, SelectionDAG &DAG) const {
211 MachineFunction &MF = DAG.getMachineFunction();
213 // CCValAssign - represent the assignment of the return value to locations.
214 SmallVector<CCValAssign, 16> RVLocs;
216 // CCState - Info about the registers and stack slot.
217 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
220 // Analyze return values.
221 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
224 SmallVector<SDValue, 4> RetOps(1, Chain);
225 // Make room for the return address offset.
226 RetOps.push_back(SDValue());
228 // Copy the result values into the output registers.
229 for (unsigned i = 0, realRVLocIdx = 0;
231 ++i, ++realRVLocIdx) {
232 CCValAssign &VA = RVLocs[i];
233 assert(VA.isRegLoc() && "Can only return in registers!");
235 SDValue Arg = OutVals[realRVLocIdx];
237 if (VA.needsCustom()) {
238 assert(VA.getLocVT() == MVT::v2i32);
239 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
240 // happen by default if this wasn't a legal type)
242 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
244 DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
245 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
247 DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
249 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
250 Flag = Chain.getValue(1);
251 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
252 VA = RVLocs[++i]; // skip ahead to next loc
253 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
256 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
258 // Guarantee that all emitted copies are stuck together with flags.
259 Flag = Chain.getValue(1);
260 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
263 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
264 // If the function returns a struct, copy the SRetReturnReg to I0
265 if (MF.getFunction().hasStructRetAttr()) {
266 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
267 Register Reg = SFI->getSRetReturnReg();
269 llvm_unreachable("sret virtual register not created in the entry block");
270 auto PtrVT = getPointerTy(DAG.getDataLayout());
271 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
272 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
273 Flag = Chain.getValue(1);
274 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
275 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
278 RetOps[0] = Chain; // Update chain.
279 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
281 // Add the flag if we have it.
283 RetOps.push_back(Flag);
285 return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
288 // Lower return values for the 64-bit ABI.
289 // Return values are passed the exactly the same way as function arguments.
291 SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
293 const SmallVectorImpl<ISD::OutputArg> &Outs,
294 const SmallVectorImpl<SDValue> &OutVals,
295 const SDLoc &DL, SelectionDAG &DAG) const {
296 // CCValAssign - represent the assignment of the return value to locations.
297 SmallVector<CCValAssign, 16> RVLocs;
299 // CCState - Info about the registers and stack slot.
300 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
303 // Analyze return values.
304 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
307 SmallVector<SDValue, 4> RetOps(1, Chain);
309 // The second operand on the return instruction is the return address offset.
310 // The return address is always %i7+8 with the 64-bit ABI.
311 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
313 // Copy the result values into the output registers.
314 for (unsigned i = 0; i != RVLocs.size(); ++i) {
315 CCValAssign &VA = RVLocs[i];
316 assert(VA.isRegLoc() && "Can only return in registers!");
317 SDValue OutVal = OutVals[i];
319 // Integer return values must be sign or zero extended by the callee.
320 switch (VA.getLocInfo()) {
321 case CCValAssign::Full: break;
322 case CCValAssign::SExt:
323 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
325 case CCValAssign::ZExt:
326 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
328 case CCValAssign::AExt:
329 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
332 llvm_unreachable("Unknown loc info!");
335 // The custom bit on an i32 return value indicates that it should be passed
336 // in the high bits of the register.
337 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
338 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
339 DAG.getConstant(32, DL, MVT::i32));
341 // The next value may go in the low bits of the same register.
342 // Handle both at once.
343 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
344 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
345 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
346 // Skip the next value, it's already done.
351 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
353 // Guarantee that all emitted copies are stuck together with flags.
354 Flag = Chain.getValue(1);
355 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
358 RetOps[0] = Chain; // Update chain.
360 // Add the flag if we have it.
362 RetOps.push_back(Flag);
364 return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
367 SDValue SparcTargetLowering::LowerFormalArguments(
368 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
369 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
370 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
371 if (Subtarget->is64Bit())
372 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
374 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
378 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
379 /// passed in either one or two GPRs, including FP values. TODO: we should
380 /// pass FP values in FP registers for fastcc functions.
381 SDValue SparcTargetLowering::LowerFormalArguments_32(
382 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
383 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
384 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
385 MachineFunction &MF = DAG.getMachineFunction();
386 MachineRegisterInfo &RegInfo = MF.getRegInfo();
387 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
389 // Assign locations to all of the incoming arguments.
390 SmallVector<CCValAssign, 16> ArgLocs;
391 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
393 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
395 const unsigned StackOffset = 92;
396 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
399 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
400 CCValAssign &VA = ArgLocs[i];
402 if (Ins[InIdx].Flags.isSRet()) {
404 report_fatal_error("sparc only supports sret on the first parameter");
405 // Get SRet from [%fp+64].
406 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
407 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
409 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
410 InVals.push_back(Arg);
415 if (VA.needsCustom()) {
416 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
418 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
419 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
420 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
423 CCValAssign &NextVA = ArgLocs[++i];
426 if (NextVA.isMemLoc()) {
427 int FrameIdx = MF.getFrameInfo().
428 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
429 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
430 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
432 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
433 &SP::IntRegsRegClass);
434 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
438 std::swap(LoVal, HiVal);
441 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
442 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
443 InVals.push_back(WholeValue);
446 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
447 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
448 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
449 if (VA.getLocVT() == MVT::f32)
450 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
451 else if (VA.getLocVT() != MVT::i32) {
452 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
453 DAG.getValueType(VA.getLocVT()));
454 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
456 InVals.push_back(Arg);
460 assert(VA.isMemLoc());
462 unsigned Offset = VA.getLocMemOffset()+StackOffset;
463 auto PtrVT = getPointerTy(DAG.getDataLayout());
465 if (VA.needsCustom()) {
466 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
467 // If it is double-word aligned, just load.
468 if (Offset % 8 == 0) {
469 int FI = MF.getFrameInfo().CreateFixedObject(8,
472 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
474 DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
475 InVals.push_back(Load);
479 int FI = MF.getFrameInfo().CreateFixedObject(4,
482 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
484 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
485 int FI2 = MF.getFrameInfo().CreateFixedObject(4,
488 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
491 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
494 std::swap(LoVal, HiVal);
497 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
498 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
499 InVals.push_back(WholeValue);
503 int FI = MF.getFrameInfo().CreateFixedObject(4,
506 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
508 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
509 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
510 } else if (VA.getValVT() == MVT::f128) {
511 report_fatal_error("SPARCv8 does not handle f128 in calls; "
514 // We shouldn't see any other value types here.
515 llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
517 InVals.push_back(Load);
520 if (MF.getFunction().hasStructRetAttr()) {
521 // Copy the SRet Argument to SRetReturnReg.
522 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
523 Register Reg = SFI->getSRetReturnReg();
525 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
526 SFI->setSRetReturnReg(Reg);
528 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
529 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
532 // Store remaining ArgRegs to the stack if this is a varargs function.
534 static const MCPhysReg ArgRegs[] = {
535 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
537 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
538 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
539 unsigned ArgOffset = CCInfo.getNextStackOffset();
540 if (NumAllocated == 6)
541 ArgOffset += StackOffset;
544 ArgOffset = 68+4*NumAllocated;
547 // Remember the vararg offset for the va_start implementation.
548 FuncInfo->setVarArgsFrameOffset(ArgOffset);
550 std::vector<SDValue> OutChains;
552 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
553 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
554 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
555 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
557 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
559 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
562 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
566 if (!OutChains.empty()) {
567 OutChains.push_back(Chain);
568 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
575 // Lower formal arguments for the 64 bit ABI.
576 SDValue SparcTargetLowering::LowerFormalArguments_64(
577 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
578 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
579 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
580 MachineFunction &MF = DAG.getMachineFunction();
582 // Analyze arguments according to CC_Sparc64.
583 SmallVector<CCValAssign, 16> ArgLocs;
584 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
586 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
588 // The argument array begins at %fp+BIAS+128, after the register save area.
589 const unsigned ArgArea = 128;
591 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
592 CCValAssign &VA = ArgLocs[i];
594 // This argument is passed in a register.
595 // All integer register arguments are promoted by the caller to i64.
597 // Create a virtual register for the promoted live-in value.
598 Register VReg = MF.addLiveIn(VA.getLocReg(),
599 getRegClassFor(VA.getLocVT()));
600 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
602 // Get the high bits for i32 struct elements.
603 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
604 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
605 DAG.getConstant(32, DL, MVT::i32));
607 // The caller promoted the argument, so insert an Assert?ext SDNode so we
608 // won't promote the value again in this function.
609 switch (VA.getLocInfo()) {
610 case CCValAssign::SExt:
611 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
612 DAG.getValueType(VA.getValVT()));
614 case CCValAssign::ZExt:
615 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
616 DAG.getValueType(VA.getValVT()));
622 // Truncate the register down to the argument type.
624 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
626 InVals.push_back(Arg);
630 // The registers are exhausted. This argument was passed on the stack.
631 assert(VA.isMemLoc());
632 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
633 // beginning of the arguments area at %fp+BIAS+128.
634 unsigned Offset = VA.getLocMemOffset() + ArgArea;
635 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
636 // Adjust offset for extended arguments, SPARC is big-endian.
637 // The caller will have written the full slot with extended bytes, but we
638 // prefer our own extending loads.
640 Offset += 8 - ValSize;
641 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
643 DAG.getLoad(VA.getValVT(), DL, Chain,
644 DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
645 MachinePointerInfo::getFixedStack(MF, FI)));
651 // This function takes variable arguments, some of which may have been passed
652 // in registers %i0-%i5. Variable floating point arguments are never passed
653 // in floating point registers. They go on %i0-%i5 or on the stack like
654 // integer arguments.
656 // The va_start intrinsic needs to know the offset to the first variable
658 unsigned ArgOffset = CCInfo.getNextStackOffset();
659 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
660 // Skip the 128 bytes of register save area.
661 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
662 Subtarget->getStackPointerBias());
664 // Save the variable arguments that were passed in registers.
665 // The caller is required to reserve stack space for 6 arguments regardless
666 // of how many arguments were actually passed.
667 SmallVector<SDValue, 8> OutChains;
668 for (; ArgOffset < 6*8; ArgOffset += 8) {
669 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
670 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
671 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
672 auto PtrVT = getPointerTy(MF.getDataLayout());
674 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
675 MachinePointerInfo::getFixedStack(MF, FI)));
678 if (!OutChains.empty())
679 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
685 SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
686 SmallVectorImpl<SDValue> &InVals) const {
687 if (Subtarget->is64Bit())
688 return LowerCall_64(CLI, InVals);
689 return LowerCall_32(CLI, InVals);
692 static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
693 const CallBase *Call) {
695 return Call->hasFnAttr(Attribute::ReturnsTwice);
697 const Function *CalleeFn = nullptr;
698 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
699 CalleeFn = dyn_cast<Function>(G->getGlobal());
700 } else if (ExternalSymbolSDNode *E =
701 dyn_cast<ExternalSymbolSDNode>(Callee)) {
702 const Function &Fn = DAG.getMachineFunction().getFunction();
703 const Module *M = Fn.getParent();
704 const char *CalleeName = E->getSymbol();
705 CalleeFn = M->getFunction(CalleeName);
710 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
713 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
714 /// for tail call optimization.
715 bool SparcTargetLowering::IsEligibleForTailCallOptimization(
716 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
718 auto &Outs = CLI.Outs;
719 auto &Caller = MF.getFunction();
721 // Do not tail call opt functions with "disable-tail-calls" attribute.
722 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
725 // Do not tail call opt if the stack is used to pass parameters.
726 if (CCInfo.getNextStackOffset() != 0)
729 // Do not tail call opt if either the callee or caller returns
730 // a struct and the other does not.
731 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
734 // Byval parameters hand the function a pointer directly into the stack area
735 // we want to reuse during a tail call.
736 for (auto &Arg : Outs)
737 if (Arg.Flags.isByVal())
743 // Lower a call for the 32-bit ABI.
745 SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
746 SmallVectorImpl<SDValue> &InVals) const {
747 SelectionDAG &DAG = CLI.DAG;
749 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
750 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
751 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
752 SDValue Chain = CLI.Chain;
753 SDValue Callee = CLI.Callee;
754 bool &isTailCall = CLI.IsTailCall;
755 CallingConv::ID CallConv = CLI.CallConv;
756 bool isVarArg = CLI.IsVarArg;
758 // Analyze operands of the call, assigning locations to each operand.
759 SmallVector<CCValAssign, 16> ArgLocs;
760 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
762 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
764 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
765 CCInfo, CLI, DAG.getMachineFunction());
767 // Get the size of the outgoing arguments stack space requirement.
768 unsigned ArgsSize = CCInfo.getNextStackOffset();
770 // Keep stack frames 8-byte aligned.
771 ArgsSize = (ArgsSize+7) & ~7;
773 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
775 // Create local copies for byval args.
776 SmallVector<SDValue, 8> ByValArgs;
777 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
778 ISD::ArgFlagsTy Flags = Outs[i].Flags;
779 if (!Flags.isByVal())
782 SDValue Arg = OutVals[i];
783 unsigned Size = Flags.getByValSize();
784 Align Alignment = Flags.getNonZeroByValAlign();
787 int FI = MFI.CreateStackObject(Size, Alignment, false);
788 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
789 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
791 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
792 false, // isVolatile,
793 (Size <= 32), // AlwaysInline if size <= 32,
795 MachinePointerInfo(), MachinePointerInfo());
796 ByValArgs.push_back(FIPtr);
800 ByValArgs.push_back(nullVal);
804 assert(!isTailCall || ArgsSize == 0);
807 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
809 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
810 SmallVector<SDValue, 8> MemOpChains;
812 const unsigned StackOffset = 92;
813 bool hasStructRetAttr = false;
814 unsigned SRetArgSize = 0;
815 // Walk the register/memloc assignments, inserting copies/loads.
816 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
819 CCValAssign &VA = ArgLocs[i];
820 SDValue Arg = OutVals[realArgIdx];
822 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
824 // Use local copy if it is a byval arg.
825 if (Flags.isByVal()) {
826 Arg = ByValArgs[byvalArgIdx++];
832 // Promote the value if needed.
833 switch (VA.getLocInfo()) {
834 default: llvm_unreachable("Unknown loc info!");
835 case CCValAssign::Full: break;
836 case CCValAssign::SExt:
837 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
839 case CCValAssign::ZExt:
840 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
842 case CCValAssign::AExt:
843 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
845 case CCValAssign::BCvt:
846 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
850 if (Flags.isSRet()) {
851 assert(VA.needsCustom());
856 // store SRet argument in %sp+64
857 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
858 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
859 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
860 MemOpChains.push_back(
861 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
862 hasStructRetAttr = true;
863 // sret only allowed on first argument
864 assert(Outs[realArgIdx].OrigArgIndex == 0);
866 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
870 if (VA.needsCustom()) {
871 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
874 unsigned Offset = VA.getLocMemOffset() + StackOffset;
875 // if it is double-word aligned, just store.
876 if (Offset % 8 == 0) {
877 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
878 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
879 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
880 MemOpChains.push_back(
881 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
886 if (VA.getLocVT() == MVT::f64) {
887 // Move from the float value from float registers into the
888 // integer registers.
889 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
890 Arg = bitcastConstantFPToInt(C, dl, DAG);
892 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
895 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
897 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
898 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
900 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
903 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
905 CCValAssign &NextVA = ArgLocs[++i];
906 if (NextVA.isRegLoc()) {
907 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
909 // Store the second part in stack.
910 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
911 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
912 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
913 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
914 MemOpChains.push_back(
915 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
918 unsigned Offset = VA.getLocMemOffset() + StackOffset;
919 // Store the first part.
920 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
921 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
922 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
923 MemOpChains.push_back(
924 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
925 // Store the second part.
926 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
927 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
928 MemOpChains.push_back(
929 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
934 // Arguments that can be passed on register must be kept at
937 if (VA.getLocVT() != MVT::f32) {
938 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
941 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
942 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
946 assert(VA.isMemLoc());
948 // Create a store off the stack pointer for this argument.
949 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
950 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
952 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
953 MemOpChains.push_back(
954 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
958 // Emit all stores, make sure the occur before any copies into physregs.
959 if (!MemOpChains.empty())
960 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
962 // Build a sequence of copy-to-reg nodes chained together with token
963 // chain and flag operands which copy the outgoing args into registers.
964 // The InFlag in necessary since all emitted instructions must be
967 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
968 Register Reg = RegsToPass[i].first;
970 Reg = toCallerWindow(Reg);
971 Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
972 InFlag = Chain.getValue(1);
975 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
977 // If the callee is a GlobalAddress node (quite common, every direct call is)
978 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
979 // Likewise ExternalSymbol -> TargetExternalSymbol.
980 unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
981 : SparcMCExpr::VK_Sparc_WDISP30;
982 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
983 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
984 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
985 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
987 // Returns a chain & a flag for retval copy to use
988 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
989 SmallVector<SDValue, 8> Ops;
990 Ops.push_back(Chain);
991 Ops.push_back(Callee);
992 if (hasStructRetAttr)
993 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
994 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
995 Register Reg = RegsToPass[i].first;
997 Reg = toCallerWindow(Reg);
998 Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
1001 // Add a register mask operand representing the call-preserved registers.
1002 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1003 const uint32_t *Mask =
1005 ? TRI->getRTCallPreservedMask(CallConv)
1006 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1007 assert(Mask && "Missing call preserved mask for calling convention");
1008 Ops.push_back(DAG.getRegisterMask(Mask));
1010 if (InFlag.getNode())
1011 Ops.push_back(InFlag);
1014 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
1015 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1018 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1019 InFlag = Chain.getValue(1);
1021 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
1022 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
1023 InFlag = Chain.getValue(1);
1025 // Assign locations to each value returned by this call.
1026 SmallVector<CCValAssign, 16> RVLocs;
1027 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1030 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1032 // Copy all of the result registers out of their specified physreg.
1033 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1034 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1035 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1036 SDValue Lo = DAG.getCopyFromReg(
1037 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
1038 Chain = Lo.getValue(1);
1039 InFlag = Lo.getValue(2);
1040 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1041 DAG.getConstant(0, dl, MVT::i32));
1042 SDValue Hi = DAG.getCopyFromReg(
1043 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
1044 Chain = Hi.getValue(1);
1045 InFlag = Hi.getValue(2);
1046 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1047 DAG.getConstant(1, dl, MVT::i32));
1048 InVals.push_back(Vec);
1051 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1052 RVLocs[i].getValVT(), InFlag)
1054 InFlag = Chain.getValue(2);
1055 InVals.push_back(Chain.getValue(0));
1062 // FIXME? Maybe this could be a TableGen attribute on some registers and
1063 // this table could be generated automatically from RegInfo.
1064 Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
1065 const MachineFunction &MF) const {
1066 Register Reg = StringSwitch<Register>(RegName)
1067 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1068 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1069 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1070 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1071 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1072 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1073 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1074 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1080 report_fatal_error("Invalid register name global variable");
1083 // Fixup floating point arguments in the ... part of a varargs call.
1085 // The SPARC v9 ABI requires that floating point arguments are treated the same
1086 // as integers when calling a varargs function. This does not apply to the
1087 // fixed arguments that are part of the function's prototype.
1089 // This function post-processes a CCValAssign array created by
1090 // AnalyzeCallOperands().
1091 static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
1092 ArrayRef<ISD::OutputArg> Outs) {
1093 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1094 const CCValAssign &VA = ArgLocs[i];
1095 MVT ValTy = VA.getLocVT();
1096 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1097 // varargs functions.
1098 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1100 // The fixed arguments to a varargs function still go in FP registers.
1101 if (Outs[VA.getValNo()].IsFixed)
1104 // This floating point argument should be reassigned.
1107 // Determine the offset into the argument array.
1108 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1109 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1110 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1111 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1114 // This argument should go in %i0-%i5.
1115 unsigned IReg = SP::I0 + Offset/8;
1116 if (ValTy == MVT::f64)
1117 // Full register, just bitconvert into i64.
1118 NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1119 IReg, MVT::i64, CCValAssign::BCvt);
1121 assert(ValTy == MVT::f128 && "Unexpected type!");
1122 // Full register, just bitconvert into i128 -- We will lower this into
1123 // two i64s in LowerCall_64.
1124 NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1125 IReg, MVT::i128, CCValAssign::BCvt);
1128 // This needs to go to memory, we're out of integer registers.
1129 NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1130 Offset, VA.getLocVT(), VA.getLocInfo());
1136 // Lower a call for the 64-bit ABI.
1138 SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
1139 SmallVectorImpl<SDValue> &InVals) const {
1140 SelectionDAG &DAG = CLI.DAG;
1142 SDValue Chain = CLI.Chain;
1143 auto PtrVT = getPointerTy(DAG.getDataLayout());
1145 // Sparc target does not yet support tail call optimization.
1146 CLI.IsTailCall = false;
1148 // Analyze operands of the call, assigning locations to each operand.
1149 SmallVector<CCValAssign, 16> ArgLocs;
1150 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1152 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1154 // Get the size of the outgoing arguments stack space requirement.
1155 // The stack offset computed by CC_Sparc64 includes all arguments.
1156 // Called functions expect 6 argument words to exist in the stack frame, used
1158 unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
1160 // Keep stack frames 16-byte aligned.
1161 ArgsSize = alignTo(ArgsSize, 16);
1163 // Varargs calls require special treatment.
1165 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1167 // Adjust the stack pointer to make room for the arguments.
1168 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1169 // with more than 6 arguments.
1170 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1172 // Collect the set of registers to pass to the function and their values.
1173 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1175 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
1177 // Collect chains from all the memory opeations that copy arguments to the
1178 // stack. They must follow the stack pointer adjustment above and precede the
1179 // call instruction itself.
1180 SmallVector<SDValue, 8> MemOpChains;
1182 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1183 const CCValAssign &VA = ArgLocs[i];
1184 SDValue Arg = CLI.OutVals[i];
1186 // Promote the value if needed.
1187 switch (VA.getLocInfo()) {
1189 llvm_unreachable("Unknown location info!");
1190 case CCValAssign::Full:
1192 case CCValAssign::SExt:
1193 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1195 case CCValAssign::ZExt:
1196 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1198 case CCValAssign::AExt:
1199 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1201 case CCValAssign::BCvt:
1202 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1203 // SPARC does not support i128 natively. Lower it into two i64, see below.
1204 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1205 || VA.getLocVT() != MVT::i128)
1206 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1210 if (VA.isRegLoc()) {
1211 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1212 && VA.getLocVT() == MVT::i128) {
1213 // Store and reload into the integer register reg and reg+1.
1214 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1215 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1216 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1217 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1218 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1219 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1220 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1222 // Store to %sp+BIAS+128+Offset
1224 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1225 // Load into Reg and Reg+1
1227 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1229 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1230 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
1232 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
1237 // The custom bit on an i32 return value indicates that it should be
1238 // passed in the high bits of the register.
1239 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1240 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1241 DAG.getConstant(32, DL, MVT::i32));
1243 // The next value may go in the low bits of the same register.
1244 // Handle both at once.
1245 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1246 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1247 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1249 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1250 // Skip the next value, it's already done.
1254 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
1258 assert(VA.isMemLoc());
1260 // Create a store off the stack pointer for this argument.
1261 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1262 // The argument area starts at %fp+BIAS+128 in the callee frame,
1263 // %sp+BIAS+128 in ours.
1264 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1265 Subtarget->getStackPointerBias() +
1267 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1268 MemOpChains.push_back(
1269 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1272 // Emit all stores, make sure they occur before the call.
1273 if (!MemOpChains.empty())
1274 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1276 // Build a sequence of CopyToReg nodes glued together with token chain and
1277 // glue operands which copy the outgoing args into registers. The InGlue is
1278 // necessary since all emitted instructions must be stuck together in order
1279 // to pass the live physical registers.
1281 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1282 Chain = DAG.getCopyToReg(Chain, DL,
1283 RegsToPass[i].first, RegsToPass[i].second, InGlue);
1284 InGlue = Chain.getValue(1);
1287 // If the callee is a GlobalAddress node (quite common, every direct call is)
1288 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1289 // Likewise ExternalSymbol -> TargetExternalSymbol.
1290 SDValue Callee = CLI.Callee;
1291 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1292 unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
1293 : SparcMCExpr::VK_Sparc_WDISP30;
1294 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1295 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1296 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1297 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1299 // Build the operands for the call instruction itself.
1300 SmallVector<SDValue, 8> Ops;
1301 Ops.push_back(Chain);
1302 Ops.push_back(Callee);
1303 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1304 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1305 RegsToPass[i].second.getValueType()));
1307 // Add a register mask operand representing the call-preserved registers.
1308 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1309 const uint32_t *Mask =
1310 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1311 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1313 assert(Mask && "Missing call preserved mask for calling convention");
1314 Ops.push_back(DAG.getRegisterMask(Mask));
1316 // Make sure the CopyToReg nodes are glued to the call instruction which
1317 // consumes the registers.
1318 if (InGlue.getNode())
1319 Ops.push_back(InGlue);
1321 // Now the call itself.
1322 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1323 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1324 InGlue = Chain.getValue(1);
1326 // Revert the stack pointer immediately after the call.
1327 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
1328 DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
1329 InGlue = Chain.getValue(1);
1331 // Now extract the return values. This is more or less the same as
1332 // LowerFormalArguments_64.
1334 // Assign locations to each value returned by this call.
1335 SmallVector<CCValAssign, 16> RVLocs;
1336 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1339 // Set inreg flag manually for codegen generated library calls that
1341 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1342 CLI.Ins[0].Flags.setInReg();
1344 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1346 // Copy all of the result registers out of their specified physreg.
1347 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1348 CCValAssign &VA = RVLocs[i];
1349 unsigned Reg = toCallerWindow(VA.getLocReg());
1351 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1352 // reside in the same register in the high and low bits. Reuse the
1353 // CopyFromReg previous node to avoid duplicate copies.
1355 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1356 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1357 RV = Chain.getValue(0);
1359 // But usually we'll create a new CopyFromReg for a different register.
1360 if (!RV.getNode()) {
1361 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1362 Chain = RV.getValue(1);
1363 InGlue = Chain.getValue(2);
1366 // Get the high bits for i32 struct elements.
1367 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1368 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1369 DAG.getConstant(32, DL, MVT::i32));
1371 // The callee promoted the return value, so insert an Assert?ext SDNode so
1372 // we won't promote the value again in this function.
1373 switch (VA.getLocInfo()) {
1374 case CCValAssign::SExt:
1375 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1376 DAG.getValueType(VA.getValVT()));
1378 case CCValAssign::ZExt:
1379 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1380 DAG.getValueType(VA.getValVT()));
1386 // Truncate the register down to the return value type.
1387 if (VA.isExtInLoc())
1388 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1390 InVals.push_back(RV);
1396 //===----------------------------------------------------------------------===//
1397 // TargetLowering Implementation
1398 //===----------------------------------------------------------------------===//
1400 TargetLowering::AtomicExpansionKind SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
1401 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1402 AI->getType()->getPrimitiveSizeInBits() == 32)
1403 return AtomicExpansionKind::None; // Uses xchg instruction
1405 return AtomicExpansionKind::CmpXChg;
1408 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1410 static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
1412 default: llvm_unreachable("Unknown integer condition code!");
1413 case ISD::SETEQ: return SPCC::ICC_E;
1414 case ISD::SETNE: return SPCC::ICC_NE;
1415 case ISD::SETLT: return SPCC::ICC_L;
1416 case ISD::SETGT: return SPCC::ICC_G;
1417 case ISD::SETLE: return SPCC::ICC_LE;
1418 case ISD::SETGE: return SPCC::ICC_GE;
1419 case ISD::SETULT: return SPCC::ICC_CS;
1420 case ISD::SETULE: return SPCC::ICC_LEU;
1421 case ISD::SETUGT: return SPCC::ICC_GU;
1422 case ISD::SETUGE: return SPCC::ICC_CC;
1426 /// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
1428 static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
1430 default: llvm_unreachable("Unknown fp condition code!");
1432 case ISD::SETOEQ: return SPCC::FCC_E;
1434 case ISD::SETUNE: return SPCC::FCC_NE;
1436 case ISD::SETOLT: return SPCC::FCC_L;
1438 case ISD::SETOGT: return SPCC::FCC_G;
1440 case ISD::SETOLE: return SPCC::FCC_LE;
1442 case ISD::SETOGE: return SPCC::FCC_GE;
1443 case ISD::SETULT: return SPCC::FCC_UL;
1444 case ISD::SETULE: return SPCC::FCC_ULE;
1445 case ISD::SETUGT: return SPCC::FCC_UG;
1446 case ISD::SETUGE: return SPCC::FCC_UGE;
1447 case ISD::SETUO: return SPCC::FCC_U;
1448 case ISD::SETO: return SPCC::FCC_O;
1449 case ISD::SETONE: return SPCC::FCC_LG;
1450 case ISD::SETUEQ: return SPCC::FCC_UE;
1454 SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
1455 const SparcSubtarget &STI)
1456 : TargetLowering(TM), Subtarget(&STI) {
1457 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1459 // Instructions which use registers as conditionals examine all the
1460 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1461 // matters much whether it's ZeroOrOneBooleanContent, or
1462 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1464 setBooleanContents(ZeroOrOneBooleanContent);
1465 setBooleanVectorContents(ZeroOrOneBooleanContent);
1467 // Set up the register classes.
1468 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1469 if (!Subtarget->useSoftFloat()) {
1470 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1471 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1472 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1474 if (Subtarget->is64Bit()) {
1475 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1477 // On 32bit sparc, we define a double-register 32bit register
1478 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1479 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1481 // ...but almost all operations must be expanded, so set that as
1483 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1484 setOperationAction(Op, MVT::v2i32, Expand);
1486 // Truncating/extending stores/loads are also not supported.
1487 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
1488 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1489 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1490 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1492 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1493 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1494 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1496 setTruncStoreAction(VT, MVT::v2i32, Expand);
1497 setTruncStoreAction(MVT::v2i32, VT, Expand);
1499 // However, load and store *are* legal.
1500 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1501 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1502 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
1503 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);
1505 // And we need to promote i64 loads/stores into vector load/store
1506 setOperationAction(ISD::LOAD, MVT::i64, Custom);
1507 setOperationAction(ISD::STORE, MVT::i64, Custom);
1509 // Sadly, this doesn't work:
1510 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1511 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1514 // Turn FP extload into load/fpextend
1515 for (MVT VT : MVT::fp_valuetypes()) {
1516 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1517 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1518 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1521 // Sparc doesn't have i1 sign extending load
1522 for (MVT VT : MVT::integer_valuetypes())
1523 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1525 // Turn FP truncstore into trunc + store.
1526 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1527 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1528 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1529 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1530 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1531 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1533 // Custom legalize GlobalAddress nodes into LO/HI parts.
1534 setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
1535 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
1536 setOperationAction(ISD::ConstantPool, PtrVT, Custom);
1537 setOperationAction(ISD::BlockAddress, PtrVT, Custom);
1539 // Sparc doesn't have sext_inreg, replace them with shl/sra
1540 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1541 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1542 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1544 // Sparc has no REM or DIVREM operations.
1545 setOperationAction(ISD::UREM, MVT::i32, Expand);
1546 setOperationAction(ISD::SREM, MVT::i32, Expand);
1547 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1548 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1550 // ... nor does SparcV9.
1551 if (Subtarget->is64Bit()) {
1552 setOperationAction(ISD::UREM, MVT::i64, Expand);
1553 setOperationAction(ISD::SREM, MVT::i64, Expand);
1554 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1555 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1558 // Custom expand fp<->sint
1559 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1560 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1561 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1562 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1564 // Custom Expand fp<->uint
1565 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1566 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1567 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1568 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1570 // Lower f16 conversion operations into library calls
1571 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1572 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1573 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1574 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1575 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
1576 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
1578 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
1579 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
1581 // Sparc has no select or setcc: expand to SELECT_CC.
1582 setOperationAction(ISD::SELECT, MVT::i32, Expand);
1583 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1584 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1585 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1587 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1588 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1589 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1590 setOperationAction(ISD::SETCC, MVT::f128, Expand);
1592 // Sparc doesn't have BRCOND either, it has BR_CC.
1593 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1594 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1595 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1596 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1597 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1598 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1599 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1601 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1602 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1603 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1604 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1606 setOperationAction(ISD::ADDC, MVT::i32, Custom);
1607 setOperationAction(ISD::ADDE, MVT::i32, Custom);
1608 setOperationAction(ISD::SUBC, MVT::i32, Custom);
1609 setOperationAction(ISD::SUBE, MVT::i32, Custom);
1611 if (Subtarget->is64Bit()) {
1612 setOperationAction(ISD::ADDC, MVT::i64, Custom);
1613 setOperationAction(ISD::ADDE, MVT::i64, Custom);
1614 setOperationAction(ISD::SUBC, MVT::i64, Custom);
1615 setOperationAction(ISD::SUBE, MVT::i64, Custom);
1616 setOperationAction(ISD::BITCAST, MVT::f64, Expand);
1617 setOperationAction(ISD::BITCAST, MVT::i64, Expand);
1618 setOperationAction(ISD::SELECT, MVT::i64, Expand);
1619 setOperationAction(ISD::SETCC, MVT::i64, Expand);
1620 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1621 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1623 setOperationAction(ISD::CTPOP, MVT::i64,
1624 Subtarget->usePopc() ? Legal : Expand);
1625 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1626 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1627 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1628 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1629 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1630 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
1634 // Atomics are supported on SparcV9. 32-bit atomics are also
1635 // supported by some Leon SparcV8 variants. Otherwise, atomics
1637 if (Subtarget->isV9())
1638 setMaxAtomicSizeInBitsSupported(64);
1639 else if (Subtarget->hasLeonCasa())
1640 setMaxAtomicSizeInBitsSupported(32);
1642 setMaxAtomicSizeInBitsSupported(0);
1644 setMinCmpXchgSizeInBits(32);
1646 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1648 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1650 // Custom Lower Atomic LOAD/STORE
1651 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1652 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1654 if (Subtarget->is64Bit()) {
1655 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1656 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1657 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1658 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1661 if (!Subtarget->is64Bit()) {
1662 // These libcalls are not available in 32-bit.
1663 setLibcallName(RTLIB::MULO_I64, nullptr);
1664 setLibcallName(RTLIB::SHL_I128, nullptr);
1665 setLibcallName(RTLIB::SRL_I128, nullptr);
1666 setLibcallName(RTLIB::SRA_I128, nullptr);
1669 setLibcallName(RTLIB::MULO_I128, nullptr);
1671 if (!Subtarget->isV9()) {
1672 // SparcV8 does not have FNEGD and FABSD.
1673 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1674 setOperationAction(ISD::FABS, MVT::f64, Custom);
1677 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1678 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1679 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1680 setOperationAction(ISD::FREM , MVT::f128, Expand);
1681 setOperationAction(ISD::FMA , MVT::f128, Expand);
1682 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1683 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1684 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1685 setOperationAction(ISD::FREM , MVT::f64, Expand);
1686 setOperationAction(ISD::FMA , MVT::f64, Expand);
1687 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1688 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1689 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1690 setOperationAction(ISD::FREM , MVT::f32, Expand);
1691 setOperationAction(ISD::FMA , MVT::f32, Expand);
1692 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1693 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1694 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1695 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1696 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1697 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1698 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1699 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1700 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1701 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1702 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1704 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1705 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1706 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1708 // Expands to [SU]MUL_LOHI.
1709 setOperationAction(ISD::MULHU, MVT::i32, Expand);
1710 setOperationAction(ISD::MULHS, MVT::i32, Expand);
1711 setOperationAction(ISD::MUL, MVT::i32, Expand);
1713 if (Subtarget->useSoftMulDiv()) {
1714 // .umul works for both signed and unsigned
1715 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1716 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1717 setLibcallName(RTLIB::MUL_I32, ".umul");
1719 setOperationAction(ISD::SDIV, MVT::i32, Expand);
1720 setLibcallName(RTLIB::SDIV_I32, ".div");
1722 setOperationAction(ISD::UDIV, MVT::i32, Expand);
1723 setLibcallName(RTLIB::UDIV_I32, ".udiv");
1725 setLibcallName(RTLIB::SREM_I32, ".rem");
1726 setLibcallName(RTLIB::UREM_I32, ".urem");
1729 if (Subtarget->is64Bit()) {
1730 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1731 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1732 setOperationAction(ISD::MULHU, MVT::i64, Expand);
1733 setOperationAction(ISD::MULHS, MVT::i64, Expand);
1735 setOperationAction(ISD::UMULO, MVT::i64, Custom);
1736 setOperationAction(ISD::SMULO, MVT::i64, Custom);
1738 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
1739 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
1740 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
1743 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1744 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1745 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1746 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1748 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1749 setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);
1751 // Use the default implementation.
1752 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1753 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1754 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1755 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1756 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1758 setStackPointerRegisterToSaveRestore(SP::O6);
1760 setOperationAction(ISD::CTPOP, MVT::i32,
1761 Subtarget->usePopc() ? Legal : Expand);
1763 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1764 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1765 setOperationAction(ISD::STORE, MVT::f128, Legal);
1767 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1768 setOperationAction(ISD::STORE, MVT::f128, Custom);
1771 if (Subtarget->hasHardQuad()) {
1772 setOperationAction(ISD::FADD, MVT::f128, Legal);
1773 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1774 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1775 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1776 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1777 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1778 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1779 if (Subtarget->isV9()) {
1780 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1781 setOperationAction(ISD::FABS, MVT::f128, Legal);
1783 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1784 setOperationAction(ISD::FABS, MVT::f128, Custom);
1787 if (!Subtarget->is64Bit()) {
1788 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1789 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1790 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1791 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1795 // Custom legalize f128 operations.
1797 setOperationAction(ISD::FADD, MVT::f128, Custom);
1798 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1799 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1800 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1801 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1802 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1803 setOperationAction(ISD::FABS, MVT::f128, Custom);
1805 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1806 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1807 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1809 // Setup Runtime library names.
1810 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1811 setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1812 setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1813 setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1814 setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1815 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1816 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1817 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1818 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1819 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1820 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1821 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1822 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1823 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1824 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1825 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1826 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1827 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1828 } else if (!Subtarget->useSoftFloat()) {
1829 setLibcallName(RTLIB::ADD_F128, "_Q_add");
1830 setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1831 setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1832 setLibcallName(RTLIB::DIV_F128, "_Q_div");
1833 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1834 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1835 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1836 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1837 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1838 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1839 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1840 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1841 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1842 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1843 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1844 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1845 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1849 if (Subtarget->fixAllFDIVSQRT()) {
1850 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1851 // the former instructions generate errata on LEON processors.
1852 setOperationAction(ISD::FDIV, MVT::f32, Promote);
1853 setOperationAction(ISD::FSQRT, MVT::f32, Promote);
1856 if (Subtarget->hasNoFMULS()) {
1857 setOperationAction(ISD::FMUL, MVT::f32, Promote);
1860 // Custom combine bitcast between f64 and v2i32
1861 if (!Subtarget->is64Bit())
1862 setTargetDAGCombine(ISD::BITCAST);
1864 if (Subtarget->hasLeonCycleCounter())
1865 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1867 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1869 setMinFunctionAlignment(Align(4));
1871 computeRegisterProperties(Subtarget->getRegisterInfo());
1874 bool SparcTargetLowering::useSoftFloat() const {
1875 return Subtarget->useSoftFloat();
1878 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1879 switch ((SPISD::NodeType)Opcode) {
1880 case SPISD::FIRST_NUMBER: break;
1881 case SPISD::CMPICC: return "SPISD::CMPICC";
1882 case SPISD::CMPFCC: return "SPISD::CMPFCC";
1883 case SPISD::BRICC: return "SPISD::BRICC";
1884 case SPISD::BRXCC: return "SPISD::BRXCC";
1885 case SPISD::BRFCC: return "SPISD::BRFCC";
1886 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1887 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1888 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1889 case SPISD::Hi: return "SPISD::Hi";
1890 case SPISD::Lo: return "SPISD::Lo";
1891 case SPISD::FTOI: return "SPISD::FTOI";
1892 case SPISD::ITOF: return "SPISD::ITOF";
1893 case SPISD::FTOX: return "SPISD::FTOX";
1894 case SPISD::XTOF: return "SPISD::XTOF";
1895 case SPISD::CALL: return "SPISD::CALL";
1896 case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1897 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1898 case SPISD::FLUSHW: return "SPISD::FLUSHW";
1899 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1900 case SPISD::TLS_LD: return "SPISD::TLS_LD";
1901 case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1902 case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
1903 case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
1908 EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
1912 return VT.changeVectorElementTypeToInteger();
1915 /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
1916 /// be zero. Op is expected to be a target specific node. Used by DAG
1918 void SparcTargetLowering::computeKnownBitsForTargetNode
1921 const APInt &DemandedElts,
1922 const SelectionDAG &DAG,
1923 unsigned Depth) const {
1927 switch (Op.getOpcode()) {
1929 case SPISD::SELECT_ICC:
1930 case SPISD::SELECT_XCC:
1931 case SPISD::SELECT_FCC:
1932 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
1933 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
1935 // Only known if known in both the LHS and RHS.
1936 Known = KnownBits::commonBits(Known, Known2);
1941 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
1942 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
1943 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1944 ISD::CondCode CC, unsigned &SPCC) {
1945 if (isNullConstant(RHS) &&
1947 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1948 LHS.getOpcode() == SPISD::SELECT_XCC) &&
1949 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1950 (LHS.getOpcode() == SPISD::SELECT_FCC &&
1951 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1952 isOneConstant(LHS.getOperand(0)) &&
1953 isNullConstant(LHS.getOperand(1))) {
1954 SDValue CMPCC = LHS.getOperand(3);
1955 SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1956 LHS = CMPCC.getOperand(0);
1957 RHS = CMPCC.getOperand(1);
1961 // Convert to a target node and set target flags.
1962 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
1963 SelectionDAG &DAG) const {
1964 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1965 return DAG.getTargetGlobalAddress(GA->getGlobal(),
1967 GA->getValueType(0),
1968 GA->getOffset(), TF);
1970 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
1971 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
1972 CP->getAlign(), CP->getOffset(), TF);
1974 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
1975 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
1980 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
1981 return DAG.getTargetExternalSymbol(ES->getSymbol(),
1982 ES->getValueType(0), TF);
1984 llvm_unreachable("Unhandled address SDNode");
1987 // Split Op into high and low parts according to HiTF and LoTF.
1988 // Return an ADD node combining the parts.
1989 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
1990 unsigned HiTF, unsigned LoTF,
1991 SelectionDAG &DAG) const {
1993 EVT VT = Op.getValueType();
1994 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1995 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
1996 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1999 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2000 // or ExternalSymbol SDNode.
2001 SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2003 EVT VT = getPointerTy(DAG.getDataLayout());
2005 // Handle PIC mode first. SPARC needs a got load for every variable!
2006 if (isPositionIndependent()) {
2007 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2008 PICLevel::Level picLevel = M->getPICLevel();
2011 if (picLevel == PICLevel::SmallPIC) {
2012 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2013 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2014 withTargetFlags(Op, SparcMCExpr::VK_Sparc_GOT13, DAG));
2016 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2017 Idx = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
2018 SparcMCExpr::VK_Sparc_GOT10, DAG);
2021 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2022 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2023 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2024 // function has calls.
2025 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2026 MFI.setHasCalls(true);
2027 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2028 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2031 // This is one of the absolute code models.
2032 switch(getTargetMachine().getCodeModel()) {
2034 llvm_unreachable("Unsupported absolute code model");
2035 case CodeModel::Small:
2037 return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
2038 SparcMCExpr::VK_Sparc_LO, DAG);
2039 case CodeModel::Medium: {
2041 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
2042 SparcMCExpr::VK_Sparc_M44, DAG);
2043 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2044 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
2045 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2046 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2048 case CodeModel::Large: {
2050 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
2051 SparcMCExpr::VK_Sparc_HM, DAG);
2052 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2053 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
2054 SparcMCExpr::VK_Sparc_LO, DAG);
2055 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2060 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2061 SelectionDAG &DAG) const {
2062 return makeAddress(Op, DAG);
2065 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2066 SelectionDAG &DAG) const {
2067 return makeAddress(Op, DAG);
2070 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2071 SelectionDAG &DAG) const {
2072 return makeAddress(Op, DAG);
2075 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2076 SelectionDAG &DAG) const {
2078 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2079 if (DAG.getTarget().useEmulatedTLS())
2080 return LowerToTLSEmulatedModel(GA, DAG);
2083 const GlobalValue *GV = GA->getGlobal();
2084 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2086 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2088 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2089 unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2090 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
2091 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
2092 unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2093 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
2094 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
2095 unsigned addTF = ((model == TLSModel::GeneralDynamic)
2096 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
2097 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
2098 unsigned callTF = ((model == TLSModel::GeneralDynamic)
2099 ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
2100 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);
2102 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2103 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2104 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2105 withTargetFlags(Op, addTF, DAG));
2107 SDValue Chain = DAG.getEntryNode();
2110 Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2111 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2112 InFlag = Chain.getValue(1);
2113 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2114 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2116 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2117 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2118 DAG.getMachineFunction(), CallingConv::C);
2119 assert(Mask && "Missing call preserved mask for calling convention");
2120 SDValue Ops[] = {Chain,
2123 DAG.getRegister(SP::O0, PtrVT),
2124 DAG.getRegisterMask(Mask),
2126 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2127 InFlag = Chain.getValue(1);
2128 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2129 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2130 InFlag = Chain.getValue(1);
2131 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2133 if (model != TLSModel::LocalDynamic)
2136 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2137 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
2138 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2139 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
2140 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2141 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2142 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
2145 if (model == TLSModel::InitialExec) {
2146 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2147 : SparcMCExpr::VK_Sparc_TLS_IE_LD);
2149 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2151 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2152 // function has calls.
2153 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2154 MFI.setHasCalls(true);
2156 SDValue TGA = makeHiLoPair(Op,
2157 SparcMCExpr::VK_Sparc_TLS_IE_HI22,
2158 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
2159 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2160 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2162 withTargetFlags(Op, ldTF, DAG));
2163 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2164 DAG.getRegister(SP::G7, PtrVT), Offset,
2166 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
2169 assert(model == TLSModel::LocalExec);
2170 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2171 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
2172 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2173 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
2174 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2176 return DAG.getNode(ISD::ADD, DL, PtrVT,
2177 DAG.getRegister(SP::G7, PtrVT), Offset);
2180 SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2181 ArgListTy &Args, SDValue Arg,
2183 SelectionDAG &DAG) const {
2184 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2185 EVT ArgVT = Arg.getValueType();
2186 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2192 if (ArgTy->isFP128Ty()) {
2193 // Create a stack object and pass the pointer to the library function.
2194 int FI = MFI.CreateStackObject(16, Align(8), false);
2195 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2196 Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2200 Entry.Ty = PointerType::getUnqual(ArgTy);
2202 Args.push_back(Entry);
2207 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2208 const char *LibFuncName,
2209 unsigned numArgs) const {
2213 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2214 auto PtrVT = getPointerTy(DAG.getDataLayout());
2216 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2217 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2218 Type *RetTyABI = RetTy;
2219 SDValue Chain = DAG.getEntryNode();
2222 if (RetTy->isFP128Ty()) {
2223 // Create a Stack Object to receive the return value of type f128.
2225 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2226 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2227 Entry.Node = RetPtr;
2228 Entry.Ty = PointerType::getUnqual(RetTy);
2229 if (!Subtarget->is64Bit()) {
2230 Entry.IsSRet = true;
2231 Entry.IndirectType = RetTy;
2233 Entry.IsReturned = false;
2234 Args.push_back(Entry);
2235 RetTyABI = Type::getVoidTy(*DAG.getContext());
2238 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2239 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2240 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2242 TargetLowering::CallLoweringInfo CLI(DAG);
2243 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2244 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2246 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2248 // chain is in second result.
2249 if (RetTyABI == RetTy)
2250 return CallInfo.first;
2252 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2254 Chain = CallInfo.second;
2256 // Load RetPtr to get the return value.
2257 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2258 MachinePointerInfo(), Align(8));
2261 SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2262 unsigned &SPCC, const SDLoc &DL,
2263 SelectionDAG &DAG) const {
2265 const char *LibCall = nullptr;
2266 bool is64Bit = Subtarget->is64Bit();
2268 default: llvm_unreachable("Unhandled conditional code!");
2269 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2270 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2271 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2272 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2273 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2274 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2282 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2285 auto PtrVT = getPointerTy(DAG.getDataLayout());
2286 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2287 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2289 SDValue Chain = DAG.getEntryNode();
2290 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2291 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2293 TargetLowering::CallLoweringInfo CLI(DAG);
2294 CLI.setDebugLoc(DL).setChain(Chain)
2295 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2297 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2299 // result is in first, and chain is in second result.
2300 SDValue Result = CallInfo.first;
2304 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2305 SPCC = SPCC::ICC_NE;
2306 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2308 case SPCC::FCC_UL : {
2309 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2310 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2311 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2312 SPCC = SPCC::ICC_NE;
2313 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2315 case SPCC::FCC_ULE: {
2316 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2317 SPCC = SPCC::ICC_NE;
2318 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2320 case SPCC::FCC_UG : {
2321 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2323 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2325 case SPCC::FCC_UGE: {
2326 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2327 SPCC = SPCC::ICC_NE;
2328 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2331 case SPCC::FCC_U : {
2332 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2334 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2336 case SPCC::FCC_O : {
2337 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2338 SPCC = SPCC::ICC_NE;
2339 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2341 case SPCC::FCC_LG : {
2342 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2343 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2344 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2345 SPCC = SPCC::ICC_NE;
2346 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2348 case SPCC::FCC_UE : {
2349 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2350 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2351 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2353 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2359 LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2360 const SparcTargetLowering &TLI) {
2362 if (Op.getOperand(0).getValueType() == MVT::f64)
2363 return TLI.LowerF128Op(Op, DAG,
2364 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2366 if (Op.getOperand(0).getValueType() == MVT::f32)
2367 return TLI.LowerF128Op(Op, DAG,
2368 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2370 llvm_unreachable("fpextend with non-float operand!");
2375 LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2376 const SparcTargetLowering &TLI) {
2377 // FP_ROUND on f64 and f32 are legal.
2378 if (Op.getOperand(0).getValueType() != MVT::f128)
2381 if (Op.getValueType() == MVT::f64)
2382 return TLI.LowerF128Op(Op, DAG,
2383 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2384 if (Op.getValueType() == MVT::f32)
2385 return TLI.LowerF128Op(Op, DAG,
2386 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2388 llvm_unreachable("fpround to non-float!");
2392 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2393 const SparcTargetLowering &TLI,
2396 EVT VT = Op.getValueType();
2397 assert(VT == MVT::i32 || VT == MVT::i64);
2399 // Expand f128 operations to fp128 abi calls.
2400 if (Op.getOperand(0).getValueType() == MVT::f128
2401 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2402 const char *libName = TLI.getLibcallName(VT == MVT::i32
2403 ? RTLIB::FPTOSINT_F128_I32
2404 : RTLIB::FPTOSINT_F128_I64);
2405 return TLI.LowerF128Op(Op, DAG, libName, 1);
2408 // Expand if the resulting type is illegal.
2409 if (!TLI.isTypeLegal(VT))
2412 // Otherwise, Convert the fp value to integer in an FP register.
2414 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2416 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2418 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2421 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2422 const SparcTargetLowering &TLI,
2425 EVT OpVT = Op.getOperand(0).getValueType();
2426 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2428 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2430 // Expand f128 operations to fp128 ABI calls.
2431 if (Op.getValueType() == MVT::f128
2432 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2433 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2434 ? RTLIB::SINTTOFP_I32_F128
2435 : RTLIB::SINTTOFP_I64_F128);
2436 return TLI.LowerF128Op(Op, DAG, libName, 1);
2439 // Expand if the operand type is illegal.
2440 if (!TLI.isTypeLegal(OpVT))
2443 // Otherwise, Convert the int value to FP in an FP register.
2444 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2445 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2446 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2449 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2450 const SparcTargetLowering &TLI,
2453 EVT VT = Op.getValueType();
2455 // Expand if it does not involve f128 or the target has support for
2456 // quad floating point instructions and the resulting type is legal.
2457 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2458 (hasHardQuad && TLI.isTypeLegal(VT)))
2461 assert(VT == MVT::i32 || VT == MVT::i64);
2463 return TLI.LowerF128Op(Op, DAG,
2464 TLI.getLibcallName(VT == MVT::i32
2465 ? RTLIB::FPTOUINT_F128_I32
2466 : RTLIB::FPTOUINT_F128_I64),
2470 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2471 const SparcTargetLowering &TLI,
2474 EVT OpVT = Op.getOperand(0).getValueType();
2475 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2477 // Expand if it does not involve f128 or the target has support for
2478 // quad floating point instructions and the operand type is legal.
2479 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2482 return TLI.LowerF128Op(Op, DAG,
2483 TLI.getLibcallName(OpVT == MVT::i32
2484 ? RTLIB::UINTTOFP_I32_F128
2485 : RTLIB::UINTTOFP_I64_F128),
2489 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2490 const SparcTargetLowering &TLI,
2492 SDValue Chain = Op.getOperand(0);
2493 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2494 SDValue LHS = Op.getOperand(2);
2495 SDValue RHS = Op.getOperand(3);
2496 SDValue Dest = Op.getOperand(4);
2498 unsigned Opc, SPCC = ~0U;
2500 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2501 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2502 LookThroughSetCC(LHS, RHS, CC, SPCC);
2504 // Get the condition flag.
2505 SDValue CompareFlag;
2506 if (LHS.getValueType().isInteger()) {
2507 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2508 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2509 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2510 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
2512 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2513 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2514 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2517 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2518 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2522 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2523 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2526 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2527 const SparcTargetLowering &TLI,
2529 SDValue LHS = Op.getOperand(0);
2530 SDValue RHS = Op.getOperand(1);
2531 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2532 SDValue TrueVal = Op.getOperand(2);
2533 SDValue FalseVal = Op.getOperand(3);
2535 unsigned Opc, SPCC = ~0U;
2537 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2538 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2539 LookThroughSetCC(LHS, RHS, CC, SPCC);
2541 SDValue CompareFlag;
2542 if (LHS.getValueType().isInteger()) {
2543 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2544 Opc = LHS.getValueType() == MVT::i32 ?
2545 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2546 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2548 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2549 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2550 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2551 Opc = SPISD::SELECT_ICC;
2553 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2554 Opc = SPISD::SELECT_FCC;
2555 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2558 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2559 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2562 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2563 const SparcTargetLowering &TLI) {
2564 MachineFunction &MF = DAG.getMachineFunction();
2565 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2566 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2568 // Need frame address to find the address of VarArgsFrameIndex.
2569 MF.getFrameInfo().setFrameAddressIsTaken(true);
2571 // vastart just stores the address of the VarArgsFrameIndex slot into the
2572 // memory location argument.
2575 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2576 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2577 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2578 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2579 MachinePointerInfo(SV));
2582 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2583 SDNode *Node = Op.getNode();
2584 EVT VT = Node->getValueType(0);
2585 SDValue InChain = Node->getOperand(0);
2586 SDValue VAListPtr = Node->getOperand(1);
2587 EVT PtrVT = VAListPtr.getValueType();
2588 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2591 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2592 // Increment the pointer, VAList, to the next vaarg.
2593 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2594 DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2596 // Store the incremented VAList to the legalized pointer.
2597 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2598 MachinePointerInfo(SV));
2599 // Load the actual argument out of the pointer VAList.
2600 // We can't count on greater alignment than the word size.
2602 VT, DL, InChain, VAList, MachinePointerInfo(),
2603 std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
2606 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2607 const SparcSubtarget *Subtarget) {
2608 SDValue Chain = Op.getOperand(0); // Legalize the chain.
2609 SDValue Size = Op.getOperand(1); // Legalize the size.
2610 MaybeAlign Alignment =
2611 cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
2612 Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
2613 EVT VT = Size->getValueType(0);
2616 // TODO: implement over-aligned alloca. (Note: also implies
2617 // supporting support for overaligned function frames + dynamic
2618 // allocations, at all, which currently isn't supported)
2619 if (Alignment && *Alignment > StackAlign) {
2620 const MachineFunction &MF = DAG.getMachineFunction();
2621 report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2622 "over-aligned dynamic alloca not supported.");
2625 // The resultant pointer needs to be above the register spill area
2626 // at the bottom of the stack.
2627 unsigned regSpillArea;
2628 if (Subtarget->is64Bit()) {
2631 // On Sparc32, the size of the spill area is 92. Unfortunately,
2632 // that's only 4-byte aligned, not 8-byte aligned (the stack
2633 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2634 // aligned dynamic allocation, we actually need to add 96 to the
2635 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2637 // That also means adding 4 to the size of the allocation --
2638 // before applying the 8-byte rounding. Unfortunately, we the
2639 // value we get here has already had rounding applied. So, we need
2640 // to add 8, instead, wasting a bit more memory.
2642 // Further, this only actually needs to be done if the required
2643 // alignment is > 4, but, we've lost that info by this point, too,
2644 // so we always apply it.
2646 // (An alternative approach would be to always reserve 96 bytes
2647 // instead of the required 92, but then we'd waste 4 extra bytes
2648 // in every frame, not just those with dynamic stack allocations)
2650 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2652 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2653 DAG.getConstant(8, dl, VT));
2657 unsigned SPReg = SP::O6;
2658 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2659 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2660 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2662 regSpillArea += Subtarget->getStackPointerBias();
2664 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2665 DAG.getConstant(regSpillArea, dl, VT));
2666 SDValue Ops[2] = { NewVal, Chain };
2667 return DAG.getMergeValues(Ops, dl);
2671 static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2673 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2674 dl, MVT::Other, DAG.getEntryNode());
2678 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2679 const SparcSubtarget *Subtarget,
2680 bool AlwaysFlush = false) {
2681 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2682 MFI.setFrameAddressIsTaken(true);
2684 EVT VT = Op.getValueType();
2686 unsigned FrameReg = SP::I6;
2687 unsigned stackBias = Subtarget->getStackPointerBias();
2692 // flush first to make sure the windowed registers' values are in stack
2693 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2695 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2697 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2700 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2701 DAG.getIntPtrConstant(Offset, dl));
2702 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2704 if (Subtarget->is64Bit())
2705 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2706 DAG.getIntPtrConstant(stackBias, dl));
2711 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2712 const SparcSubtarget *Subtarget) {
2714 uint64_t depth = Op.getConstantOperandVal(0);
2716 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2720 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2721 const SparcTargetLowering &TLI,
2722 const SparcSubtarget *Subtarget) {
2723 MachineFunction &MF = DAG.getMachineFunction();
2724 MachineFrameInfo &MFI = MF.getFrameInfo();
2725 MFI.setReturnAddressIsTaken(true);
2727 if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
2730 EVT VT = Op.getValueType();
2732 uint64_t depth = Op.getConstantOperandVal(0);
2736 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2737 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2738 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2742 // Need frame address to find return address of the caller.
2743 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2745 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2746 SDValue Ptr = DAG.getNode(ISD::ADD,
2749 DAG.getIntPtrConstant(Offset, dl));
2750 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2755 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2757 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2758 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2760 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2761 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2762 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2764 // Note: in little-endian, the floating-point value is stored in the
2765 // registers are in the opposite order, so the subreg with the sign
2766 // bit is the highest-numbered (odd), rather than the
2767 // lowest-numbered (even).
2769 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2771 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2774 if (DAG.getDataLayout().isLittleEndian())
2775 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2777 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2779 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2781 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2783 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2788 // Lower a f128 load into two f64 loads.
2789 static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2792 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2793 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2795 Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2798 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2799 LdNode->getPointerInfo(), Alignment);
2800 EVT addrVT = LdNode->getBasePtr().getValueType();
2801 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2802 LdNode->getBasePtr(),
2803 DAG.getConstant(8, dl, addrVT));
2804 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2805 LdNode->getPointerInfo().getWithOffset(8),
2808 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2809 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2811 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2813 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2815 SDValue(InFP128, 0),
2818 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2820 SDValue(InFP128, 0),
2823 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2824 SDValue(Lo64.getNode(), 1) };
2825 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2826 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2827 return DAG.getMergeValues(Ops, dl);
2830 static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2832 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2834 EVT MemVT = LdNode->getMemoryVT();
2835 if (MemVT == MVT::f128)
2836 return LowerF128Load(Op, DAG);
2841 // Lower a f128 store into two f64 stores.
2842 static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2844 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2845 assert(StNode->getOffset().isUndef() && "Unexpected node type");
2847 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2848 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2850 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2855 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2861 Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
2863 SDValue OutChains[2];
2865 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2866 StNode->getBasePtr(), StNode->getPointerInfo(),
2868 EVT addrVT = StNode->getBasePtr().getValueType();
2869 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2870 StNode->getBasePtr(),
2871 DAG.getConstant(8, dl, addrVT));
2872 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2873 StNode->getPointerInfo().getWithOffset(8),
2875 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2878 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
2881 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2883 EVT MemVT = St->getMemoryVT();
2884 if (MemVT == MVT::f128)
2885 return LowerF128Store(Op, DAG);
2887 if (MemVT == MVT::i64) {
2888 // Custom handling for i64 stores: turn it into a bitcast and a
2890 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2891 SDValue Chain = DAG.getStore(
2892 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2893 St->getOriginalAlign(), St->getMemOperand()->getFlags(),
2901 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2902 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2903 && "invalid opcode");
2907 if (Op.getValueType() == MVT::f64)
2908 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2909 if (Op.getValueType() != MVT::f128)
2912 // Lower fabs/fneg on f128 to fabs/fneg on f64
2913 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2914 // (As with LowerF64Op, on little-endian, we need to negate the odd
2917 SDValue SrcReg128 = Op.getOperand(0);
2918 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2920 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2923 if (DAG.getDataLayout().isLittleEndian()) {
2925 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
2927 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
2930 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2932 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
2935 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2937 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2939 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2944 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
2946 if (Op.getValueType() != MVT::i64)
2950 SDValue Src1 = Op.getOperand(0);
2951 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2952 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2953 DAG.getConstant(32, dl, MVT::i64));
2954 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2956 SDValue Src2 = Op.getOperand(1);
2957 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2958 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2959 DAG.getConstant(32, dl, MVT::i64));
2960 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2963 bool hasChain = false;
2964 unsigned hiOpc = Op.getOpcode();
2965 switch (Op.getOpcode()) {
2966 default: llvm_unreachable("Invalid opcode");
2967 case ISD::ADDC: hiOpc = ISD::ADDE; break;
2968 case ISD::ADDE: hasChain = true; break;
2969 case ISD::SUBC: hiOpc = ISD::SUBE; break;
2970 case ISD::SUBE: hasChain = true; break;
2973 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
2975 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
2978 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
2980 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
2981 SDValue Carry = Hi.getValue(1);
2983 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
2984 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
2985 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
2986 DAG.getConstant(32, dl, MVT::i64));
2988 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
2989 SDValue Ops[2] = { Dst, Carry };
2990 return DAG.getMergeValues(Ops, dl);
2993 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2994 // in LegalizeDAG.cpp except the order of arguments to the library function.
2995 static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
2996 const SparcTargetLowering &TLI)
2998 unsigned opcode = Op.getOpcode();
2999 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
3001 bool isSigned = (opcode == ISD::SMULO);
3003 EVT WideVT = MVT::i128;
3005 SDValue LHS = Op.getOperand(0);
3007 if (LHS.getValueType() != VT)
3010 SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3012 SDValue RHS = Op.getOperand(1);
3013 SDValue HiLHS, HiRHS;
3015 HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3016 HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
3018 HiLHS = DAG.getConstant(0, dl, VT);
3019 HiRHS = DAG.getConstant(0, dl, MVT::i64);
3022 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3024 TargetLowering::MakeLibCallOptions CallOptions;
3025 CallOptions.setSExt(isSigned);
3026 SDValue MulResult = TLI.makeLibCall(DAG,
3027 RTLIB::MUL_I128, WideVT,
3028 Args, CallOptions, dl).first;
3029 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3030 MulResult, DAG.getIntPtrConstant(0, dl));
3031 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3032 MulResult, DAG.getIntPtrConstant(1, dl));
3034 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3035 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3037 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3040 // MulResult is a node with an illegal type. Because such things are not
3041 // generally permitted during this phase of legalization, ensure that
3042 // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3044 assert(MulResult->use_empty() && "Illegally typed node still in use!");
3046 SDValue Ops[2] = { BottomHalf, TopHalf } ;
3047 return DAG.getMergeValues(Ops, dl);
3050 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
3051 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3052 // Expand with a fence.
3056 // Monotonic load/stores are legal.
3060 SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3061 SelectionDAG &DAG) const {
3062 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3065 default: return SDValue(); // Don't custom lower most intrinsics.
3066 case Intrinsic::thread_pointer: {
3067 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3068 return DAG.getRegister(SP::G7, PtrVT);
3073 SDValue SparcTargetLowering::
3074 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3076 bool hasHardQuad = Subtarget->hasHardQuad();
3077 bool isV9 = Subtarget->isV9();
3079 switch (Op.getOpcode()) {
3080 default: llvm_unreachable("Should not custom lower this!");
3082 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3084 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3086 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3087 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3088 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3089 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3090 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3092 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3094 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3096 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3098 case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
3100 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
3102 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3103 case ISD::VAARG: return LowerVAARG(Op, DAG);
3104 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3107 case ISD::LOAD: return LowerLOAD(Op, DAG);
3108 case ISD::STORE: return LowerSTORE(Op, DAG);
3109 case ISD::FADD: return LowerF128Op(Op, DAG,
3110 getLibcallName(RTLIB::ADD_F128), 2);
3111 case ISD::FSUB: return LowerF128Op(Op, DAG,
3112 getLibcallName(RTLIB::SUB_F128), 2);
3113 case ISD::FMUL: return LowerF128Op(Op, DAG,
3114 getLibcallName(RTLIB::MUL_F128), 2);
3115 case ISD::FDIV: return LowerF128Op(Op, DAG,
3116 getLibcallName(RTLIB::DIV_F128), 2);
3117 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3118 getLibcallName(RTLIB::SQRT_F128),1);
3120 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3121 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3122 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3126 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3128 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3129 case ISD::ATOMIC_LOAD:
3130 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3131 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3135 SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
3137 SelectionDAG &DAG) const {
3138 APInt V = C->getValueAPF().bitcastToAPInt();
3139 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3140 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3141 if (DAG.getDataLayout().isLittleEndian())
3143 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3146 SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
3147 DAGCombinerInfo &DCI) const {
3149 SDValue Src = N->getOperand(0);
3151 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3152 Src.getSimpleValueType() == MVT::f64)
3153 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3158 SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
3159 DAGCombinerInfo &DCI) const {
3160 switch (N->getOpcode()) {
3164 return PerformBITCASTCombine(N, DCI);
3170 SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
3171 MachineBasicBlock *BB) const {
3172 switch (MI.getOpcode()) {
3173 default: llvm_unreachable("Unknown SELECT_CC!");
3174 case SP::SELECT_CC_Int_ICC:
3175 case SP::SELECT_CC_FP_ICC:
3176 case SP::SELECT_CC_DFP_ICC:
3177 case SP::SELECT_CC_QFP_ICC:
3178 return expandSelectCC(MI, BB, SP::BCOND);
3179 case SP::SELECT_CC_Int_XCC:
3180 case SP::SELECT_CC_FP_XCC:
3181 case SP::SELECT_CC_DFP_XCC:
3182 case SP::SELECT_CC_QFP_XCC:
3183 return expandSelectCC(MI, BB, SP::BPXCC);
3184 case SP::SELECT_CC_Int_FCC:
3185 case SP::SELECT_CC_FP_FCC:
3186 case SP::SELECT_CC_DFP_FCC:
3187 case SP::SELECT_CC_QFP_FCC:
3188 return expandSelectCC(MI, BB, SP::FBCOND);
3193 SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
3194 unsigned BROpcode) const {
3195 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3196 DebugLoc dl = MI.getDebugLoc();
3197 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3199 // To "insert" a SELECT_CC instruction, we actually have to insert the
3200 // triangle control-flow pattern. The incoming instruction knows the
3201 // destination vreg to set, the condition code register to branch on, the
3202 // true/false values to select between, and the condition code for the branch.
3204 // We produce the following control flow:
3210 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3211 MachineFunction::iterator It = ++BB->getIterator();
3213 MachineBasicBlock *ThisMBB = BB;
3214 MachineFunction *F = BB->getParent();
3215 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3216 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3217 F->insert(It, IfFalseMBB);
3218 F->insert(It, SinkMBB);
3220 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3221 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3222 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3223 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3225 // Set the new successors for ThisMBB.
3226 ThisMBB->addSuccessor(IfFalseMBB);
3227 ThisMBB->addSuccessor(SinkMBB);
3229 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3233 // IfFalseMBB just falls through to SinkMBB.
3234 IfFalseMBB->addSuccessor(SinkMBB);
3236 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3237 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3238 MI.getOperand(0).getReg())
3239 .addReg(MI.getOperand(1).getReg())
3241 .addReg(MI.getOperand(2).getReg())
3242 .addMBB(IfFalseMBB);
3244 MI.eraseFromParent(); // The pseudo instruction is gone now.
3248 //===----------------------------------------------------------------------===//
3249 // Sparc Inline Assembly Support
3250 //===----------------------------------------------------------------------===//
3252 /// getConstraintType - Given a constraint letter, return the type of
3253 /// constraint it is for this target.
3254 SparcTargetLowering::ConstraintType
3255 SparcTargetLowering::getConstraintType(StringRef Constraint) const {
3256 if (Constraint.size() == 1) {
3257 switch (Constraint[0]) {
3262 return C_RegisterClass;
3268 return TargetLowering::getConstraintType(Constraint);
3271 TargetLowering::ConstraintWeight SparcTargetLowering::
3272 getSingleConstraintMatchWeight(AsmOperandInfo &info,
3273 const char *constraint) const {
3274 ConstraintWeight weight = CW_Invalid;
3275 Value *CallOperandVal = info.CallOperandVal;
3276 // If we don't have a value, we can't do a match,
3277 // but allow it at the lowest weight.
3278 if (!CallOperandVal)
3281 // Look at the constraint type.
3282 switch (*constraint) {
3284 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3287 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3288 if (isInt<13>(C->getSExtValue()))
3289 weight = CW_Constant;
3296 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3297 /// vector. If it is invalid, don't add anything to Ops.
3298 void SparcTargetLowering::
3299 LowerAsmOperandForConstraint(SDValue Op,
3300 std::string &Constraint,
3301 std::vector<SDValue> &Ops,
3302 SelectionDAG &DAG) const {
3305 // Only support length 1 constraints for now.
3306 if (Constraint.length() > 1)
3309 char ConstraintLetter = Constraint[0];
3310 switch (ConstraintLetter) {
3313 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3314 if (isInt<13>(C->getSExtValue())) {
3315 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3323 if (Result.getNode()) {
3324 Ops.push_back(Result);
3327 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3330 std::pair<unsigned, const TargetRegisterClass *>
3331 SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3332 StringRef Constraint,
3334 if (Constraint.empty())
3335 return std::make_pair(0U, nullptr);
3337 if (Constraint.size() == 1) {
3338 switch (Constraint[0]) {
3340 if (VT == MVT::v2i32)
3341 return std::make_pair(0U, &SP::IntPairRegClass);
3342 else if (Subtarget->is64Bit())
3343 return std::make_pair(0U, &SP::I64RegsRegClass);
3345 return std::make_pair(0U, &SP::IntRegsRegClass);
3347 if (VT == MVT::f32 || VT == MVT::i32)
3348 return std::make_pair(0U, &SP::FPRegsRegClass);
3349 else if (VT == MVT::f64 || VT == MVT::i64)
3350 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3351 else if (VT == MVT::f128)
3352 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3353 // This will generate an error message
3354 return std::make_pair(0U, nullptr);
3356 if (VT == MVT::f32 || VT == MVT::i32)
3357 return std::make_pair(0U, &SP::FPRegsRegClass);
3358 else if (VT == MVT::f64 || VT == MVT::i64 )
3359 return std::make_pair(0U, &SP::DFPRegsRegClass);
3360 else if (VT == MVT::f128)
3361 return std::make_pair(0U, &SP::QFPRegsRegClass);
3362 // This will generate an error message
3363 return std::make_pair(0U, nullptr);
3367 if (Constraint.front() != '{')
3368 return std::make_pair(0U, nullptr);
3370 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3371 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3372 if (RegName.empty())
3373 return std::make_pair(0U, nullptr);
3375 unsigned long long RegNo;
3376 // Handle numbered register aliases.
3377 if (RegName[0] == 'r' &&
3378 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3384 return std::make_pair(0U, nullptr);
3385 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3386 char RegType = RegTypes[RegNo / 8];
3387 char RegIndex = '0' + (RegNo % 8);
3388 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3389 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3392 // Rewrite the fN constraint according to the value type if needed.
3393 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3394 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3395 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3396 return getRegForInlineAsmConstraint(
3397 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3398 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3399 return getRegForInlineAsmConstraint(
3400 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3402 return std::make_pair(0U, nullptr);
3407 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3408 if (!ResultPair.second)
3409 return std::make_pair(0U, nullptr);
3411 // Force the use of I64Regs over IntRegs for 64-bit values.
3412 if (Subtarget->is64Bit() && VT == MVT::i64) {
3413 assert(ResultPair.second == &SP::IntRegsRegClass &&
3414 "Unexpected register class");
3415 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3422 SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3423 // The Sparc target isn't yet aware of offsets.
3427 void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
3428 SmallVectorImpl<SDValue>& Results,
3429 SelectionDAG &DAG) const {
3433 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3435 switch (N->getOpcode()) {
3437 llvm_unreachable("Do not know how to custom type legalize this operation!");
3439 case ISD::FP_TO_SINT:
3440 case ISD::FP_TO_UINT:
3441 // Custom lower only if it involves f128 or i64.
3442 if (N->getOperand(0).getValueType() != MVT::f128
3443 || N->getValueType(0) != MVT::i64)
3445 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3446 ? RTLIB::FPTOSINT_F128_I64
3447 : RTLIB::FPTOUINT_F128_I64);
3449 Results.push_back(LowerF128Op(SDValue(N, 0),
3451 getLibcallName(libCall),
3454 case ISD::READCYCLECOUNTER: {
3455 assert(Subtarget->hasLeonCycleCounter());
3456 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3457 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3458 SDValue Ops[] = { Lo, Hi };
3459 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3460 Results.push_back(Pair);
3461 Results.push_back(N->getOperand(0));
3464 case ISD::SINT_TO_FP:
3465 case ISD::UINT_TO_FP:
3466 // Custom lower only if it involves f128 or i64.
3467 if (N->getValueType(0) != MVT::f128
3468 || N->getOperand(0).getValueType() != MVT::i64)
3471 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3472 ? RTLIB::SINTTOFP_I64_F128
3473 : RTLIB::UINTTOFP_I64_F128);
3475 Results.push_back(LowerF128Op(SDValue(N, 0),
3477 getLibcallName(libCall),
3481 LoadSDNode *Ld = cast<LoadSDNode>(N);
3482 // Custom handling only for i64: turn i64 load into a v2i32 load,
3484 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3488 SDValue LoadRes = DAG.getExtLoad(
3489 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3490 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
3491 Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
3494 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3495 Results.push_back(Res);
3496 Results.push_back(LoadRes.getValue(1));
3502 // Override to enable LOAD_STACK_GUARD lowering on Linux.
3503 bool SparcTargetLowering::useLoadStackGuardNode() const {
3504 if (!Subtarget->isTargetLinux())
3505 return TargetLowering::useLoadStackGuardNode();
3509 // Override to disable global variable loading on Linux.
3510 void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
3511 if (!Subtarget->isTargetLinux())
3512 return TargetLowering::insertSSPDeclarations(M);