//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert (ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}
// Allocate a full-sized argument for the 64-bit ABI.
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
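
// Illustrative sketch (hypothetical prototype, using only the offset
// arithmetic above): for `void f(long a, double b, float c)` the allocator
// hands out 8-byte slots at offsets 0, 8 and 16, so `a` is assigned %i0
// (I0 + 0/8), `b` the second double register (D0 + 8/8 = D1), and `c` the odd
// single-precision register F1 + 16/4 = F5, i.e. the right-aligned half of
// the third 8-byte slot.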
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
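
// Illustrative sketch (assumed struct layout): for a by-value
// `struct { float f; int i; }` the two 4-byte halves share one 8-byte slot.
// The float at offset 0 is assigned F0 + 0/4 = %f0, and the int at offset 4
// maps to I0 + 4/8 = %i0 without the Custom bit, i.e. the low 32 bits of
// %i0; an i32 at offset 0 would get the Custom bit and travel in the high
// 32 bits instead.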
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

#include "SparcGenCallingConv.inc"
// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}
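
// For illustration: a location described as %i2 in SparcCallingConv.td (the
// callee's view) is materialized by the caller in %o2, before the callee's
// `save` shifts the register window, so toCallerWindow(SP::I2) == SP::O2.
// Registers outside %i0-%i7 (e.g. %g1 or %f0) are returned unchanged.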
bool SparcTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
                                                       : RetCC_Sparc32);
}
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}
SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
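
// Note on the two return-address offsets above (SPARC V8 struct-return
// convention, as understood here): the caller of a function returning a
// struct via the hidden %sp+64 pointer follows the call's delay slot with an
// UNIMP instruction encoding the struct size, so the callee must return to
// %i7+12 rather than the usual %i7+8.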
// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
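
// Worked example (illustrative): returning `struct { int a; int b; }` under
// the 64-bit ABI packs both fields into %i0. The first i32 is shifted into
// the high word (SHL by 32) and the second is zero-extended and OR'ed in,
// so the caller sees `a` in bits 63..32 and `b` in bits 31..0, matching the
// struct's big-endian in-memory layout.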
SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}
425 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
426 /// passed in either one or two GPRs, including FP values. TODO: we should
427 /// pass FP values in FP registers for fastcc functions.
428 SDValue SparcTargetLowering::LowerFormalArguments_32(
429 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
430 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
431 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
432 MachineFunction &MF = DAG.getMachineFunction();
433 MachineRegisterInfo &RegInfo = MF.getRegInfo();
434 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
436 // Assign locations to all of the incoming arguments.
437 SmallVector<CCValAssign, 16> ArgLocs;
438 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
440 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
442 const unsigned StackOffset = 92;
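// A breakdown of the 92-byte figure, following the usual V8 frame layout:
// %sp+0..63 is the 16-word register window save area, %sp+64 holds the
// hidden struct-return pointer, and %sp+68..91 are the six words reserved
// for spilling register arguments, so incoming stack arguments start at
// %fp+92.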
443 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
446 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
447 CCValAssign &VA = ArgLocs[i];
449 if (Ins[InIdx].Flags.isSRet()) {
451 report_fatal_error("sparc only supports sret on the first parameter");
452 // Get SRet from [%fp+64].
453 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
454 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
456 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
457 InVals.push_back(Arg);
462 if (VA.needsCustom()) {
463 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
465 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
466 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
467 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
470 CCValAssign &NextVA = ArgLocs[++i];
473 if (NextVA.isMemLoc()) {
474 int FrameIdx = MF.getFrameInfo().
475 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
476 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
477 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
479 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
480 &SP::IntRegsRegClass);
481 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
485 std::swap(LoVal, HiVal);
488 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
489 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
490 InVals.push_back(WholeValue);
493 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
494 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
495 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
496 if (VA.getLocVT() == MVT::f32)
497 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
498 else if (VA.getLocVT() != MVT::i32) {
499 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
500 DAG.getValueType(VA.getLocVT()));
501 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
503 InVals.push_back(Arg);
507 assert(VA.isMemLoc());
509 unsigned Offset = VA.getLocMemOffset()+StackOffset;
510 auto PtrVT = getPointerTy(DAG.getDataLayout());
512 if (VA.needsCustom()) {
513 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
514 // If it is double-word aligned, just load.
515 if (Offset % 8 == 0) {
516 int FI = MF.getFrameInfo().CreateFixedObject(8,
519 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
521 DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
522 InVals.push_back(Load);
526 int FI = MF.getFrameInfo().CreateFixedObject(4,
529 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
531 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
532 int FI2 = MF.getFrameInfo().CreateFixedObject(4,
535 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
538 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
541 std::swap(LoVal, HiVal);
544 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
545 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
546 InVals.push_back(WholeValue);
550 int FI = MF.getFrameInfo().CreateFixedObject(4,
553 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
555 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
556 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
557 } else if (VA.getValVT() == MVT::f128) {
558 report_fatal_error("SPARCv8 does not handle f128 in calls; "
561 // We shouldn't see any other value types here.
562 llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
564 InVals.push_back(Load);
567 if (MF.getFunction().hasStructRetAttr()) {
568 // Copy the SRet Argument to SRetReturnReg.
569 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
570 Register Reg = SFI->getSRetReturnReg();
572 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
573 SFI->setSRetReturnReg(Reg);
575 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
576 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
579 // Store remaining ArgRegs to the stack if this is a varargs function.
581 static const MCPhysReg ArgRegs[] = {
582 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
584 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
585 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
586 unsigned ArgOffset = CCInfo.getNextStackOffset();
587 if (NumAllocated == 6)
588 ArgOffset += StackOffset;
591 ArgOffset = 68+4*NumAllocated;
594 // Remember the vararg offset for the va_start implementation.
595 FuncInfo->setVarArgsFrameOffset(ArgOffset);
597 std::vector<SDValue> OutChains;
599 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
600 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
601 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
602 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
604 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
606 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
609 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
613 if (!OutChains.empty()) {
614 OutChains.push_back(Chain);
615 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
622 // Lower formal arguments for the 64 bit ABI.
623 SDValue SparcTargetLowering::LowerFormalArguments_64(
624 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
625 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
626 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
627 MachineFunction &MF = DAG.getMachineFunction();
629 // Analyze arguments according to CC_Sparc64.
630 SmallVector<CCValAssign, 16> ArgLocs;
631 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
633 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
635 // The argument array begins at %fp+BIAS+128, after the register save area.
636 const unsigned ArgArea = 128;
638 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
639 CCValAssign &VA = ArgLocs[i];
641 // This argument is passed in a register.
642 // All integer register arguments are promoted by the caller to i64.
644 // Create a virtual register for the promoted live-in value.
645 Register VReg = MF.addLiveIn(VA.getLocReg(),
646 getRegClassFor(VA.getLocVT()));
647 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
649 // Get the high bits for i32 struct elements.
650 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
651 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
652 DAG.getConstant(32, DL, MVT::i32));
654 // The caller promoted the argument, so insert an Assert?ext SDNode so we
655 // won't promote the value again in this function.
656 switch (VA.getLocInfo()) {
657 case CCValAssign::SExt:
658 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
659 DAG.getValueType(VA.getValVT()));
661 case CCValAssign::ZExt:
662 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
663 DAG.getValueType(VA.getValVT()));
669 // Truncate the register down to the argument type.
671 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
673 InVals.push_back(Arg);
677 // The registers are exhausted. This argument was passed on the stack.
678 assert(VA.isMemLoc());
679 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
680 // beginning of the arguments area at %fp+BIAS+128.
681 unsigned Offset = VA.getLocMemOffset() + ArgArea;
682 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
683 // Adjust offset for extended arguments, SPARC is big-endian.
684 // The caller will have written the full slot with extended bytes, but we
685 // prefer our own extending loads.
687 Offset += 8 - ValSize;
688 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
690 DAG.getLoad(VA.getValVT(), DL, Chain,
691 DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
692 MachinePointerInfo::getFixedStack(MF, FI)));
698 // This function takes variable arguments, some of which may have been passed
699 // in registers %i0-%i5. Variable floating point arguments are never passed
700 // in floating point registers. They go on %i0-%i5 or on the stack like
701 // integer arguments.
703 // The va_start intrinsic needs to know the offset to the first variable
705 unsigned ArgOffset = CCInfo.getNextStackOffset();
706 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
707 // Skip the 128 bytes of register save area.
708 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
709 Subtarget->getStackPointerBias());
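// For example (assuming the usual v9 stack bias of 2047): if the fixed
// arguments occupy the first three 8-byte slots, getNextStackOffset() is 24
// and the first variadic argument lives at %fp + 2047 + 128 + 24, which is
// the offset va_start records for va_arg to walk from.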
711 // Save the variable arguments that were passed in registers.
712 // The caller is required to reserve stack space for 6 arguments regardless
713 // of how many arguments were actually passed.
714 SmallVector<SDValue, 8> OutChains;
715 for (; ArgOffset < 6*8; ArgOffset += 8) {
716 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
717 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
718 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
719 auto PtrVT = getPointerTy(MF.getDataLayout());
721 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
722 MachinePointerInfo::getFixedStack(MF, FI)));
725 if (!OutChains.empty())
726 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
732 SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
733 SmallVectorImpl<SDValue> &InVals) const {
734 if (Subtarget->is64Bit())
735 return LowerCall_64(CLI, InVals);
736 return LowerCall_32(CLI, InVals);
739 static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
740 const CallBase *Call) {
742 return Call->hasFnAttr(Attribute::ReturnsTwice);
744 const Function *CalleeFn = nullptr;
745 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
746 CalleeFn = dyn_cast<Function>(G->getGlobal());
747 } else if (ExternalSymbolSDNode *E =
748 dyn_cast<ExternalSymbolSDNode>(Callee)) {
749 const Function &Fn = DAG.getMachineFunction().getFunction();
750 const Module *M = Fn.getParent();
751 const char *CalleeName = E->getSymbol();
752 CalleeFn = M->getFunction(CalleeName);
757 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
760 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
761 /// for tail call optimization.
762 bool SparcTargetLowering::IsEligibleForTailCallOptimization(
763 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
765 auto &Outs = CLI.Outs;
766 auto &Caller = MF.getFunction();
768 // Do not tail call opt functions with "disable-tail-calls" attribute.
769 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
772 // Do not tail call opt if the stack is used to pass parameters.
773 if (CCInfo.getNextStackOffset() != 0)
776 // Do not tail call opt if either the callee or caller returns
777 // a struct and the other does not.
778 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
781 // Byval parameters hand the function a pointer directly into the stack area
782 // we want to reuse during a tail call.
783 for (auto &Arg : Outs)
784 if (Arg.Flags.isByVal())
790 // Lower a call for the 32-bit ABI.
792 SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
793 SmallVectorImpl<SDValue> &InVals) const {
794 SelectionDAG &DAG = CLI.DAG;
796 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
797 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
798 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
799 SDValue Chain = CLI.Chain;
800 SDValue Callee = CLI.Callee;
801 bool &isTailCall = CLI.IsTailCall;
802 CallingConv::ID CallConv = CLI.CallConv;
803 bool isVarArg = CLI.IsVarArg;
805 // Analyze operands of the call, assigning locations to each operand.
806 SmallVector<CCValAssign, 16> ArgLocs;
807 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
809 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
811 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
812 CCInfo, CLI, DAG.getMachineFunction());
814 // Get the size of the outgoing arguments stack space requirement.
815 unsigned ArgsSize = CCInfo.getNextStackOffset();
817 // Keep stack frames 8-byte aligned.
818 ArgsSize = (ArgsSize+7) & ~7;
820 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
822 // Create local copies for byval args.
823 SmallVector<SDValue, 8> ByValArgs;
824 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
825 ISD::ArgFlagsTy Flags = Outs[i].Flags;
826 if (!Flags.isByVal())
829 SDValue Arg = OutVals[i];
830 unsigned Size = Flags.getByValSize();
831 Align Alignment = Flags.getNonZeroByValAlign();
834 int FI = MFI.CreateStackObject(Size, Alignment, false);
835 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
836 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
838 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
839 false, // isVolatile,
840 (Size <= 32), // AlwaysInline if size <= 32,
842 MachinePointerInfo(), MachinePointerInfo());
843 ByValArgs.push_back(FIPtr);
847 ByValArgs.push_back(nullVal);
851 assert(!isTailCall || ArgsSize == 0);
854 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
856 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
857 SmallVector<SDValue, 8> MemOpChains;
859 const unsigned StackOffset = 92;
860 bool hasStructRetAttr = false;
861 unsigned SRetArgSize = 0;
862 // Walk the register/memloc assignments, inserting copies/loads.
863 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
866 CCValAssign &VA = ArgLocs[i];
867 SDValue Arg = OutVals[realArgIdx];
869 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
871 // Use local copy if it is a byval arg.
872 if (Flags.isByVal()) {
873 Arg = ByValArgs[byvalArgIdx++];
879 // Promote the value if needed.
880 switch (VA.getLocInfo()) {
881 default: llvm_unreachable("Unknown loc info!");
882 case CCValAssign::Full: break;
883 case CCValAssign::SExt:
884 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
886 case CCValAssign::ZExt:
887 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
889 case CCValAssign::AExt:
890 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
892 case CCValAssign::BCvt:
893 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
897 if (Flags.isSRet()) {
898 assert(VA.needsCustom());
903 // store SRet argument in %sp+64
904 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
905 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
906 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
907 MemOpChains.push_back(
908 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
909 hasStructRetAttr = true;
910 // sret only allowed on first argument
911 assert(Outs[realArgIdx].OrigArgIndex == 0);
913 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
917 if (VA.needsCustom()) {
918 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
921 unsigned Offset = VA.getLocMemOffset() + StackOffset;
922 // if it is double-word aligned, just store.
923 if (Offset % 8 == 0) {
924 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
925 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
926 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
927 MemOpChains.push_back(
928 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
933 if (VA.getLocVT() == MVT::f64) {
// Move the float value from the float registers into the
// integer registers.
936 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
937 Arg = bitcastConstantFPToInt(C, dl, DAG);
939 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
942 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
944 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
945 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
947 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
950 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
952 CCValAssign &NextVA = ArgLocs[++i];
953 if (NextVA.isRegLoc()) {
954 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
956 // Store the second part in stack.
957 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
958 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
959 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
960 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
961 MemOpChains.push_back(
962 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
965 unsigned Offset = VA.getLocMemOffset() + StackOffset;
966 // Store the first part.
967 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
968 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
969 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
970 MemOpChains.push_back(
971 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
972 // Store the second part.
973 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
974 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
975 MemOpChains.push_back(
976 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
981 // Arguments that can be passed on register must be kept at
984 if (VA.getLocVT() != MVT::f32) {
985 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
988 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
989 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
993 assert(VA.isMemLoc());
995 // Create a store off the stack pointer for this argument.
996 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
997 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
999 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1000 MemOpChains.push_back(
1001 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
// Emit all stores, make sure they occur before any copies into physregs.
1006 if (!MemOpChains.empty())
1007 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1009 // Build a sequence of copy-to-reg nodes chained together with token
1010 // chain and flag operands which copy the outgoing args into registers.
// The InFlag is necessary since all emitted instructions must be
1014 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1015 Register Reg = RegsToPass[i].first;
1017 Reg = toCallerWindow(Reg);
1018 Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
1019 InFlag = Chain.getValue(1);
1022 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1024 // If the callee is a GlobalAddress node (quite common, every direct call is)
1025 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1026 // Likewise ExternalSymbol -> TargetExternalSymbol.
1027 unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
1028 : SparcMCExpr::VK_Sparc_WDISP30;
1029 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1030 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
1031 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1032 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
1034 // Returns a chain & a flag for retval copy to use
1035 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1036 SmallVector<SDValue, 8> Ops;
1037 Ops.push_back(Chain);
1038 Ops.push_back(Callee);
1039 if (hasStructRetAttr)
1040 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1041 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1042 Register Reg = RegsToPass[i].first;
1044 Reg = toCallerWindow(Reg);
1045 Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
1048 // Add a register mask operand representing the call-preserved registers.
1049 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1050 const uint32_t *Mask =
1052 ? TRI->getRTCallPreservedMask(CallConv)
1053 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1054 assert(Mask && "Missing call preserved mask for calling convention");
1055 Ops.push_back(DAG.getRegisterMask(Mask));
1057 if (InFlag.getNode())
1058 Ops.push_back(InFlag);
1061 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
1062 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1065 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1066 InFlag = Chain.getValue(1);
1068 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
1069 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
1070 InFlag = Chain.getValue(1);
1072 // Assign locations to each value returned by this call.
1073 SmallVector<CCValAssign, 16> RVLocs;
1074 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1077 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1079 // Copy all of the result registers out of their specified physreg.
1080 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1081 assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1082 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1083 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1084 SDValue Lo = DAG.getCopyFromReg(
1085 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
1086 Chain = Lo.getValue(1);
1087 InFlag = Lo.getValue(2);
1088 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1089 DAG.getConstant(0, dl, MVT::i32));
1090 SDValue Hi = DAG.getCopyFromReg(
1091 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
1092 Chain = Hi.getValue(1);
1093 InFlag = Hi.getValue(2);
1094 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1095 DAG.getConstant(1, dl, MVT::i32));
1096 InVals.push_back(Vec);
1099 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1100 RVLocs[i].getValVT(), InFlag)
1102 InFlag = Chain.getValue(2);
1103 InVals.push_back(Chain.getValue(0));
1110 // FIXME? Maybe this could be a TableGen attribute on some registers and
1111 // this table could be generated automatically from RegInfo.
1112 Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
1113 const MachineFunction &MF) const {
1114 Register Reg = StringSwitch<Register>(RegName)
1115 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1116 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1117 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1118 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1119 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1120 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1121 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1122 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1128 report_fatal_error("Invalid register name global variable");
1131 // Fixup floating point arguments in the ... part of a varargs call.
1133 // The SPARC v9 ABI requires that floating point arguments are treated the same
1134 // as integers when calling a varargs function. This does not apply to the
1135 // fixed arguments that are part of the function's prototype.
1137 // This function post-processes a CCValAssign array created by
1138 // AnalyzeCallOperands().
1139 static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
1140 ArrayRef<ISD::OutputArg> Outs) {
1141 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1142 const CCValAssign &VA = ArgLocs[i];
1143 MVT ValTy = VA.getLocVT();
1144 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1145 // varargs functions.
1146 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1148 // The fixed arguments to a varargs function still go in FP registers.
1149 if (Outs[VA.getValNo()].IsFixed)
1152 // This floating point argument should be reassigned.
1155 // Determine the offset into the argument array.
1156 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1157 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1158 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1159 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1162 // This argument should go in %i0-%i5.
1163 unsigned IReg = SP::I0 + Offset/8;
1164 if (ValTy == MVT::f64)
1165 // Full register, just bitconvert into i64.
1166 NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1167 IReg, MVT::i64, CCValAssign::BCvt);
1169 assert(ValTy == MVT::f128 && "Unexpected type!");
1170 // Full register, just bitconvert into i128 -- We will lower this into
1171 // two i64s in LowerCall_64.
1172 NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1173 IReg, MVT::i128, CCValAssign::BCvt);
1176 // This needs to go to memory, we're out of integer registers.
1177 NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1178 Offset, VA.getLocVT(), VA.getLocInfo());
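// Concrete illustration (hypothetical call): in `printf("%f\n", x)` with x a
// double, x is variadic, so the f64 location chosen above is reassigned
// here: the value is bitcast to i64 and passed in the next free integer
// argument register (or an 8-byte stack slot), exactly as an i64 would be.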
1184 // Lower a call for the 64-bit ABI.
1186 SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
1187 SmallVectorImpl<SDValue> &InVals) const {
1188 SelectionDAG &DAG = CLI.DAG;
1190 SDValue Chain = CLI.Chain;
1191 auto PtrVT = getPointerTy(DAG.getDataLayout());
1193 // Sparc target does not yet support tail call optimization.
1194 CLI.IsTailCall = false;
1196 // Analyze operands of the call, assigning locations to each operand.
1197 SmallVector<CCValAssign, 16> ArgLocs;
1198 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1200 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1202 // Get the size of the outgoing arguments stack space requirement.
1203 // The stack offset computed by CC_Sparc64 includes all arguments.
1204 // Called functions expect 6 argument words to exist in the stack frame, used
1206 unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
1208 // Keep stack frames 16-byte aligned.
1209 ArgsSize = alignTo(ArgsSize, 16);
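// Arithmetic sketch: a call passing two i64 arguments needs only 16 bytes of
// argument space, but the callee may still spill all six argument registers,
// so ArgsSize is raised to 6*8 = 48 and stays 48 after rounding to the
// 16-byte stack alignment; a call with eight i64 arguments needs 64 bytes,
// which is already aligned.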
1211 // Varargs calls require special treatment.
1213 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1215 // Adjust the stack pointer to make room for the arguments.
1216 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1217 // with more than 6 arguments.
1218 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1220 // Collect the set of registers to pass to the function and their values.
1221 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1223 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
// Collect chains from all the memory operations that copy arguments to the
1226 // stack. They must follow the stack pointer adjustment above and precede the
1227 // call instruction itself.
1228 SmallVector<SDValue, 8> MemOpChains;
1230 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1231 const CCValAssign &VA = ArgLocs[i];
1232 SDValue Arg = CLI.OutVals[i];
1234 // Promote the value if needed.
1235 switch (VA.getLocInfo()) {
1237 llvm_unreachable("Unknown location info!");
1238 case CCValAssign::Full:
1240 case CCValAssign::SExt:
1241 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1243 case CCValAssign::ZExt:
1244 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1246 case CCValAssign::AExt:
1247 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1249 case CCValAssign::BCvt:
1250 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1251 // SPARC does not support i128 natively. Lower it into two i64, see below.
1252 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1253 || VA.getLocVT() != MVT::i128)
1254 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1258 if (VA.isRegLoc()) {
1259 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1260 && VA.getLocVT() == MVT::i128) {
1261 // Store and reload into the integer register reg and reg+1.
1262 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1263 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1264 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1265 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1266 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1267 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1268 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1270 // Store to %sp+BIAS+128+Offset
1272 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1273 // Load into Reg and Reg+1
1275 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1277 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1278 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
1280 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
1285 // The custom bit on an i32 return value indicates that it should be
1286 // passed in the high bits of the register.
1287 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1288 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1289 DAG.getConstant(32, DL, MVT::i32));
1291 // The next value may go in the low bits of the same register.
1292 // Handle both at once.
1293 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1294 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1295 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1297 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1298 // Skip the next value, it's already done.
1302 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
1306 assert(VA.isMemLoc());
1308 // Create a store off the stack pointer for this argument.
1309 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1310 // The argument area starts at %fp+BIAS+128 in the callee frame,
1311 // %sp+BIAS+128 in ours.
1312 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1313 Subtarget->getStackPointerBias() +
1315 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1316 MemOpChains.push_back(
1317 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1320 // Emit all stores, make sure they occur before the call.
1321 if (!MemOpChains.empty())
1322 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1324 // Build a sequence of CopyToReg nodes glued together with token chain and
1325 // glue operands which copy the outgoing args into registers. The InGlue is
1326 // necessary since all emitted instructions must be stuck together in order
1327 // to pass the live physical registers.
1329 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1330 Chain = DAG.getCopyToReg(Chain, DL,
1331 RegsToPass[i].first, RegsToPass[i].second, InGlue);
1332 InGlue = Chain.getValue(1);
1335 // If the callee is a GlobalAddress node (quite common, every direct call is)
1336 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1337 // Likewise ExternalSymbol -> TargetExternalSymbol.
1338 SDValue Callee = CLI.Callee;
1339 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1340 unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
1341 : SparcMCExpr::VK_Sparc_WDISP30;
1342 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1343 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1344 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1345 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1347 // Build the operands for the call instruction itself.
1348 SmallVector<SDValue, 8> Ops;
1349 Ops.push_back(Chain);
1350 Ops.push_back(Callee);
1351 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1352 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1353 RegsToPass[i].second.getValueType()));
1355 // Add a register mask operand representing the call-preserved registers.
1356 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1357 const uint32_t *Mask =
1358 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1359 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1361 assert(Mask && "Missing call preserved mask for calling convention");
1362 Ops.push_back(DAG.getRegisterMask(Mask));
1364 // Make sure the CopyToReg nodes are glued to the call instruction which
1365 // consumes the registers.
1366 if (InGlue.getNode())
1367 Ops.push_back(InGlue);
1369 // Now the call itself.
1370 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1371 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1372 InGlue = Chain.getValue(1);
1374 // Revert the stack pointer immediately after the call.
1375 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
1376 DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
1377 InGlue = Chain.getValue(1);
1379 // Now extract the return values. This is more or less the same as
1380 // LowerFormalArguments_64.
1382 // Assign locations to each value returned by this call.
1383 SmallVector<CCValAssign, 16> RVLocs;
1384 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1387 // Set inreg flag manually for codegen generated library calls that
1389 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1390 CLI.Ins[0].Flags.setInReg();
1392 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1394 // Copy all of the result registers out of their specified physreg.
1395 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1396 CCValAssign &VA = RVLocs[i];
1397 assert(VA.isRegLoc() && "Can only return in registers!");
1398 unsigned Reg = toCallerWindow(VA.getLocReg());
1400 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1401 // reside in the same register in the high and low bits. Reuse the
1402 // CopyFromReg previous node to avoid duplicate copies.
1404 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1405 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1406 RV = Chain.getValue(0);
1408 // But usually we'll create a new CopyFromReg for a different register.
1409 if (!RV.getNode()) {
1410 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1411 Chain = RV.getValue(1);
1412 InGlue = Chain.getValue(2);
1415 // Get the high bits for i32 struct elements.
1416 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1417 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1418 DAG.getConstant(32, DL, MVT::i32));
1420 // The callee promoted the return value, so insert an Assert?ext SDNode so
1421 // we won't promote the value again in this function.
1422 switch (VA.getLocInfo()) {
1423 case CCValAssign::SExt:
1424 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1425 DAG.getValueType(VA.getValVT()));
1427 case CCValAssign::ZExt:
1428 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1429 DAG.getValueType(VA.getValVT()));
1435 // Truncate the register down to the return value type.
1436 if (VA.isExtInLoc())
1437 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1439 InVals.push_back(RV);
1445 //===----------------------------------------------------------------------===//
1446 // TargetLowering Implementation
1447 //===----------------------------------------------------------------------===//
1449 TargetLowering::AtomicExpansionKind SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
1450 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1451 AI->getType()->getPrimitiveSizeInBits() == 32)
1452 return AtomicExpansionKind::None; // Uses xchg instruction
1454 return AtomicExpansionKind::CmpXChg;
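// In effect (as read from the checks above): an `atomicrmw xchg` on i32 can
// be selected to the native swap instruction, while every other RMW
// operation and every other width is expanded by the generic AtomicExpand
// pass into a compare-and-swap loop.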
1457 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1459 static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
1461 default: llvm_unreachable("Unknown integer condition code!");
1462 case ISD::SETEQ: return SPCC::ICC_E;
1463 case ISD::SETNE: return SPCC::ICC_NE;
1464 case ISD::SETLT: return SPCC::ICC_L;
1465 case ISD::SETGT: return SPCC::ICC_G;
1466 case ISD::SETLE: return SPCC::ICC_LE;
1467 case ISD::SETGE: return SPCC::ICC_GE;
1468 case ISD::SETULT: return SPCC::ICC_CS;
1469 case ISD::SETULE: return SPCC::ICC_LEU;
1470 case ISD::SETUGT: return SPCC::ICC_GU;
1471 case ISD::SETUGE: return SPCC::ICC_CC;
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1477 static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
1479 default: llvm_unreachable("Unknown fp condition code!");
1481 case ISD::SETOEQ: return SPCC::FCC_E;
1483 case ISD::SETUNE: return SPCC::FCC_NE;
1485 case ISD::SETOLT: return SPCC::FCC_L;
1487 case ISD::SETOGT: return SPCC::FCC_G;
1489 case ISD::SETOLE: return SPCC::FCC_LE;
1491 case ISD::SETOGE: return SPCC::FCC_GE;
1492 case ISD::SETULT: return SPCC::FCC_UL;
1493 case ISD::SETULE: return SPCC::FCC_ULE;
1494 case ISD::SETUGT: return SPCC::FCC_UG;
1495 case ISD::SETUGE: return SPCC::FCC_UGE;
1496 case ISD::SETUO: return SPCC::FCC_U;
1497 case ISD::SETO: return SPCC::FCC_O;
1498 case ISD::SETONE: return SPCC::FCC_LG;
1499 case ISD::SETUEQ: return SPCC::FCC_UE;
1503 SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
1504 const SparcSubtarget &STI)
1505 : TargetLowering(TM), Subtarget(&STI) {
1506 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1508 // Instructions which use registers as conditionals examine all the
1509 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1510 // matters much whether it's ZeroOrOneBooleanContent, or
1511 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1513 setBooleanContents(ZeroOrOneBooleanContent);
1514 setBooleanVectorContents(ZeroOrOneBooleanContent);
1516 // Set up the register classes.
1517 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1518 if (!Subtarget->useSoftFloat()) {
1519 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1520 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1521 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1523 if (Subtarget->is64Bit()) {
1524 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1526 // On 32bit sparc, we define a double-register 32bit register
1527 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1528 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1530 // ...but almost all operations must be expanded, so set that as
1532 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1533 setOperationAction(Op, MVT::v2i32, Expand);
1535 // Truncating/extending stores/loads are also not supported.
1536 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
1537 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1538 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1539 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1541 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1542 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1543 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1545 setTruncStoreAction(VT, MVT::v2i32, Expand);
1546 setTruncStoreAction(MVT::v2i32, VT, Expand);
1548 // However, load and store *are* legal.
1549 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1550 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1551 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
1552 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);
1554 // And we need to promote i64 loads/stores into vector load/store
1555 setOperationAction(ISD::LOAD, MVT::i64, Custom);
1556 setOperationAction(ISD::STORE, MVT::i64, Custom);
1558 // Sadly, this doesn't work:
1559 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1560 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1563 // Turn FP extload into load/fpextend
1564 for (MVT VT : MVT::fp_valuetypes()) {
1565 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1566 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1567 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1570 // Sparc doesn't have i1 sign extending load
1571 for (MVT VT : MVT::integer_valuetypes())
1572 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1574 // Turn FP truncstore into trunc + store.
1575 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1576 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1577 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1578 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1579 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1580 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1582 // Custom legalize GlobalAddress nodes into LO/HI parts.
1583 setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
1584 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
1585 setOperationAction(ISD::ConstantPool, PtrVT, Custom);
1586 setOperationAction(ISD::BlockAddress, PtrVT, Custom);
1588 // Sparc doesn't have sext_inreg, replace them with shl/sra
1589 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1590 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1591 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1593 // Sparc has no REM or DIVREM operations.
1594 setOperationAction(ISD::UREM, MVT::i32, Expand);
1595 setOperationAction(ISD::SREM, MVT::i32, Expand);
1596 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1597 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1599 // ... nor does SparcV9.
1600 if (Subtarget->is64Bit()) {
1601 setOperationAction(ISD::UREM, MVT::i64, Expand);
1602 setOperationAction(ISD::SREM, MVT::i64, Expand);
1603 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1604 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1607 // Custom expand fp<->sint
1608 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1609 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1610 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1611 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1613 // Custom Expand fp<->uint
1614 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1615 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1616 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1617 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1619 // Lower f16 conversion operations into library calls
1620 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1621 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1622 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1623 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1624 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
1625 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
1627 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
1628 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
1630 // Sparc has no select or setcc: expand to SELECT_CC.
1631 setOperationAction(ISD::SELECT, MVT::i32, Expand);
1632 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1633 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1634 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1636 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1637 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1638 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1639 setOperationAction(ISD::SETCC, MVT::f128, Expand);
1641 // Sparc doesn't have BRCOND either, it has BR_CC.
1642 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1643 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1644 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1645 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1646 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1647 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1648 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1650 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1651 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1652 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1653 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1655 setOperationAction(ISD::ADDC, MVT::i32, Custom);
1656 setOperationAction(ISD::ADDE, MVT::i32, Custom);
1657 setOperationAction(ISD::SUBC, MVT::i32, Custom);
1658 setOperationAction(ISD::SUBE, MVT::i32, Custom);
1660 if (Subtarget->is64Bit()) {
1661 setOperationAction(ISD::ADDC, MVT::i64, Custom);
1662 setOperationAction(ISD::ADDE, MVT::i64, Custom);
1663 setOperationAction(ISD::SUBC, MVT::i64, Custom);
1664 setOperationAction(ISD::SUBE, MVT::i64, Custom);
1665 setOperationAction(ISD::BITCAST, MVT::f64, Expand);
1666 setOperationAction(ISD::BITCAST, MVT::i64, Expand);
1667 setOperationAction(ISD::SELECT, MVT::i64, Expand);
1668 setOperationAction(ISD::SETCC, MVT::i64, Expand);
1669 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1670 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1672 setOperationAction(ISD::CTPOP, MVT::i64,
1673 Subtarget->usePopc() ? Legal : Expand);
1674 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1675 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1676 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1677 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1678 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1679 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
}

1683 // Atomics are supported on SparcV9. 32-bit atomics are also
1684 // supported by some Leon SparcV8 variants. Otherwise, atomics
// are unsupported.
1686 if (Subtarget->isV9())
1687 setMaxAtomicSizeInBitsSupported(64);
1688 else if (Subtarget->hasLeonCasa())
1689 setMaxAtomicSizeInBitsSupported(32);
else
1691 setMaxAtomicSizeInBitsSupported(0);
1693 setMinCmpXchgSizeInBits(32);
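// With a 32-bit minimum cmpxchg width, AtomicExpandPass is expected to widen
// i8/i16 compare-and-swap operations into 32-bit cmpxchg loops with the
// appropriate masking.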
1695 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1697 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1699 // Custom Lower Atomic LOAD/STORE
1700 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1701 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1703 if (Subtarget->is64Bit()) {
1704 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1705 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1706 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1707 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1710 if (!Subtarget->is64Bit()) {
1711 // These libcalls are not available in 32-bit.
1712 setLibcallName(RTLIB::MULO_I64, nullptr);
1713 setLibcallName(RTLIB::SHL_I128, nullptr);
1714 setLibcallName(RTLIB::SRL_I128, nullptr);
1715 setLibcallName(RTLIB::SRA_I128, nullptr);
1718 setLibcallName(RTLIB::MULO_I128, nullptr);
1720 if (!Subtarget->isV9()) {
1721 // SparcV8 does not have FNEGD and FABSD.
1722 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1723 setOperationAction(ISD::FABS, MVT::f64, Custom);
1726 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1727 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1728 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1729 setOperationAction(ISD::FREM , MVT::f128, Expand);
1730 setOperationAction(ISD::FMA , MVT::f128, Expand);
1731 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1732 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1733 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1734 setOperationAction(ISD::FREM , MVT::f64, Expand);
1735 setOperationAction(ISD::FMA , MVT::f64, Expand);
1736 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1737 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1738 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1739 setOperationAction(ISD::FREM , MVT::f32, Expand);
1740 setOperationAction(ISD::FMA , MVT::f32, Expand);
1741 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1742 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1743 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1744 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1745 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1746 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1747 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1748 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1749 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1750 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1751 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1753 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1754 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1755 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1757 // Expands to [SU]MUL_LOHI.
1758 setOperationAction(ISD::MULHU, MVT::i32, Expand);
1759 setOperationAction(ISD::MULHS, MVT::i32, Expand);
1760 setOperationAction(ISD::MUL, MVT::i32, Expand);
1762 if (Subtarget->useSoftMulDiv()) {
1763 // .umul works for both signed and unsigned
1764 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1765 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1766 setLibcallName(RTLIB::MUL_I32, ".umul");
1768 setOperationAction(ISD::SDIV, MVT::i32, Expand);
1769 setLibcallName(RTLIB::SDIV_I32, ".div");
1771 setOperationAction(ISD::UDIV, MVT::i32, Expand);
1772 setLibcallName(RTLIB::UDIV_I32, ".udiv");
1774 setLibcallName(RTLIB::SREM_I32, ".rem");
1775 setLibcallName(RTLIB::UREM_I32, ".urem");
}
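// Note: .umul, .div, .udiv, .rem and .urem above are the traditional SPARC V8
// software multiply/divide helpers provided by the system libraries.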
1778 if (Subtarget->is64Bit()) {
1779 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1780 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1781 setOperationAction(ISD::MULHU, MVT::i64, Expand);
1782 setOperationAction(ISD::MULHS, MVT::i64, Expand);
1784 setOperationAction(ISD::UMULO, MVT::i64, Custom);
1785 setOperationAction(ISD::SMULO, MVT::i64, Custom);
1787 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
1788 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
1789 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
}
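// UMULO/SMULO are handled in LowerUMULO_SMULO below by calling the 128-bit
// multiply libcall and checking the high half of the product for overflow.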
1792 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1793 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1794 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1795 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1797 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1798 setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);
1800 // Use the default implementation.
1801 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1802 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1803 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1804 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1805 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1807 setStackPointerRegisterToSaveRestore(SP::O6);
1809 setOperationAction(ISD::CTPOP, MVT::i32,
1810 Subtarget->usePopc() ? Legal : Expand);
1812 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1813 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1814 setOperationAction(ISD::STORE, MVT::f128, Legal);
} else {
1816 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1817 setOperationAction(ISD::STORE, MVT::f128, Custom);
}
1820 if (Subtarget->hasHardQuad()) {
1821 setOperationAction(ISD::FADD, MVT::f128, Legal);
1822 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1823 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1824 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1825 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1826 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1827 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1828 if (Subtarget->isV9()) {
1829 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1830 setOperationAction(ISD::FABS, MVT::f128, Legal);
} else {
1832 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1833 setOperationAction(ISD::FABS, MVT::f128, Custom);
}
1836 if (!Subtarget->is64Bit()) {
1837 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1838 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1839 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1840 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
}
} else {
1844 // Custom legalize f128 operations.
1846 setOperationAction(ISD::FADD, MVT::f128, Custom);
1847 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1848 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1849 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1850 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1851 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1852 setOperationAction(ISD::FABS, MVT::f128, Custom);
1854 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1855 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1856 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1858 // Setup Runtime library names.
1859 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1860 setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1861 setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1862 setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1863 setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1864 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1865 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1866 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1867 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1868 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1869 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1870 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1871 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1872 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1873 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1874 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1875 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1876 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1877 } else if (!Subtarget->useSoftFloat()) {
1878 setLibcallName(RTLIB::ADD_F128, "_Q_add");
1879 setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1880 setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1881 setLibcallName(RTLIB::DIV_F128, "_Q_div");
1882 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1883 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1884 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1885 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1886 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1887 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1888 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1889 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1890 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1891 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1892 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1893 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1894 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1898 if (Subtarget->fixAllFDIVSQRT()) {
1899 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1900 // the former instructions generate errata on LEON processors.
1901 setOperationAction(ISD::FDIV, MVT::f32, Promote);
1902 setOperationAction(ISD::FSQRT, MVT::f32, Promote);
1905 if (Subtarget->hasNoFMULS()) {
1906 setOperationAction(ISD::FMUL, MVT::f32, Promote);
1909 // Custom combine bitcast between f64 and v2i32
1910 if (!Subtarget->is64Bit())
1911 setTargetDAGCombine(ISD::BITCAST);
1913 if (Subtarget->hasLeonCycleCounter())
1914 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1916 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1918 setMinFunctionAlignment(Align(4));
1920 computeRegisterProperties(Subtarget->getRegisterInfo());
1923 bool SparcTargetLowering::useSoftFloat() const {
1924 return Subtarget->useSoftFloat();
1927 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1928 switch ((SPISD::NodeType)Opcode) {
1929 case SPISD::FIRST_NUMBER: break;
1930 case SPISD::CMPICC: return "SPISD::CMPICC";
1931 case SPISD::CMPFCC: return "SPISD::CMPFCC";
1932 case SPISD::BRICC: return "SPISD::BRICC";
1933 case SPISD::BRXCC: return "SPISD::BRXCC";
1934 case SPISD::BRFCC: return "SPISD::BRFCC";
1935 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1936 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1937 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1938 case SPISD::Hi: return "SPISD::Hi";
1939 case SPISD::Lo: return "SPISD::Lo";
1940 case SPISD::FTOI: return "SPISD::FTOI";
1941 case SPISD::ITOF: return "SPISD::ITOF";
1942 case SPISD::FTOX: return "SPISD::FTOX";
1943 case SPISD::XTOF: return "SPISD::XTOF";
1944 case SPISD::CALL: return "SPISD::CALL";
1945 case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1946 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1947 case SPISD::FLUSHW: return "SPISD::FLUSHW";
1948 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1949 case SPISD::TLS_LD: return "SPISD::TLS_LD";
1950 case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1951 case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
1952 case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
}
return nullptr;
}
1957 EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
EVT VT) const {
if (!VT.isVector())
return MVT::i32;
1961 return VT.changeVectorElementTypeToInteger();
}
1964 /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
1965 /// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
1967 void SparcTargetLowering::computeKnownBitsForTargetNode
1970 const APInt &DemandedElts,
1971 const SelectionDAG &DAG,
1972 unsigned Depth) const {
1976 switch (Op.getOpcode()) {
1978 case SPISD::SELECT_ICC:
1979 case SPISD::SELECT_XCC:
1980 case SPISD::SELECT_FCC:
1981 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
1982 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
1984 // Only known if known in both the LHS and RHS.
1985 Known = KnownBits::commonBits(Known, Known2);
1990 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
1991 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
1992 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1993 ISD::CondCode CC, unsigned &SPCC) {
1994 if (isNullConstant(RHS) &&
CC == ISD::SETNE &&
1996 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1997 LHS.getOpcode() == SPISD::SELECT_XCC) &&
1998 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1999 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2000 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
2001 isOneConstant(LHS.getOperand(0)) &&
2002 isNullConstant(LHS.getOperand(1))) {
2003 SDValue CMPCC = LHS.getOperand(3);
2004 SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
2005 LHS = CMPCC.getOperand(0);
2006 RHS = CMPCC.getOperand(1);
2010 // Convert to a target node and set target flags.
2011 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
2012 SelectionDAG &DAG) const {
2013 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2014 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2016 GA->getValueType(0),
2017 GA->getOffset(), TF);
2019 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2020 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2021 CP->getAlign(), CP->getOffset(), TF);
2023 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2024 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2029 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2030 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2031 ES->getValueType(0), TF);
2033 llvm_unreachable("Unhandled address SDNode");
2036 // Split Op into high and low parts according to HiTF and LoTF.
2037 // Return an ADD node combining the parts.
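// For example, with VK_Sparc_HI/VK_Sparc_LO this models the classic
// "sethi %hi(sym), %r; add %r, %lo(sym), %r" sequence: %hi carries the upper
// 22 bits and %lo the remaining low 10 bits of the address.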
2038 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
2039 unsigned HiTF, unsigned LoTF,
2040 SelectionDAG &DAG) const {
2042 EVT VT = Op.getValueType();
2043 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2044 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2045 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2048 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2049 // or ExternalSymbol SDNode.
2050 SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2052 EVT VT = getPointerTy(DAG.getDataLayout());
2054 // Handle PIC mode first. SPARC needs a got load for every variable!
2055 if (isPositionIndependent()) {
2056 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2057 PICLevel::Level picLevel = M->getPICLevel();
2060 if (picLevel == PICLevel::SmallPIC) {
2061 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2062 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2063 withTargetFlags(Op, SparcMCExpr::VK_Sparc_GOT13, DAG));
} else {
2065 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2066 Idx = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
2067 SparcMCExpr::VK_Sparc_GOT10, DAG);
2070 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2071 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2072 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2073 // function has calls.
2074 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2075 MFI.setHasCalls(true);
2076 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2077 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2080 // This is one of the absolute code models.
2081 switch(getTargetMachine().getCodeModel()) {
default:
2083 llvm_unreachable("Unsupported absolute code model");
2084 case CodeModel::Small:
2086 return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
2087 SparcMCExpr::VK_Sparc_LO, DAG);
2088 case CodeModel::Medium: {
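// abs44: the address is assumed to fit in 44 bits. %h44/%m44 form the upper
// 32 bits (shifted left by 12 below) and %l44 supplies the low 12 bits.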
2090 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
2091 SparcMCExpr::VK_Sparc_M44, DAG);
2092 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2093 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
2094 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2095 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2097 case CodeModel::Large: {
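// abs64: %hh/%hm build the upper 32 bits of the address (shifted left by 32
// below) and a normal %hi/%lo pair supplies the lower 32 bits.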
2099 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
2100 SparcMCExpr::VK_Sparc_HM, DAG);
2101 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2102 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
2103 SparcMCExpr::VK_Sparc_LO, DAG);
2104 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2109 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2110 SelectionDAG &DAG) const {
2111 return makeAddress(Op, DAG);
2114 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2115 SelectionDAG &DAG) const {
2116 return makeAddress(Op, DAG);
2119 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2120 SelectionDAG &DAG) const {
2121 return makeAddress(Op, DAG);
2124 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2125 SelectionDAG &DAG) const {
2127 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2128 if (DAG.getTarget().useEmulatedTLS())
2129 return LowerToTLSEmulatedModel(GA, DAG);
2132 const GlobalValue *GV = GA->getGlobal();
2133 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2135 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
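// The four TLS models are handled below: General/Local Dynamic go through a
// __tls_get_addr call, Initial Exec loads the offset from the GOT, and Local
// Exec folds the offset directly against the thread pointer in %g7.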
2137 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2138 unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2139 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
2140 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
2141 unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2142 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
2143 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
2144 unsigned addTF = ((model == TLSModel::GeneralDynamic)
2145 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
2146 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
2147 unsigned callTF = ((model == TLSModel::GeneralDynamic)
2148 ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
2149 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);
2151 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2152 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2153 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2154 withTargetFlags(Op, addTF, DAG));
2156 SDValue Chain = DAG.getEntryNode();
2159 Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2160 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2161 InFlag = Chain.getValue(1);
2162 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2163 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2165 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2166 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2167 DAG.getMachineFunction(), CallingConv::C);
2168 assert(Mask && "Missing call preserved mask for calling convention");
2169 SDValue Ops[] = {Chain,
Callee,
Symbol,
2172 DAG.getRegister(SP::O0, PtrVT),
2173 DAG.getRegisterMask(Mask),
InFlag};
2175 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2176 InFlag = Chain.getValue(1);
2177 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2178 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2179 InFlag = Chain.getValue(1);
2180 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2182 if (model != TLSModel::LocalDynamic)
return Ret;
2185 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2186 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
2187 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2188 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
2189 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2190 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2191 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
2194 if (model == TLSModel::InitialExec) {
2195 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2196 : SparcMCExpr::VK_Sparc_TLS_IE_LD);
2198 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2200 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2201 // function has calls.
2202 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2203 MFI.setHasCalls(true);
2205 SDValue TGA = makeHiLoPair(Op,
2206 SparcMCExpr::VK_Sparc_TLS_IE_HI22,
2207 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
2208 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2209 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2211 withTargetFlags(Op, ldTF, DAG));
2212 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2213 DAG.getRegister(SP::G7, PtrVT), Offset,
2215 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
2218 assert(model == TLSModel::LocalExec);
2219 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2220 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
2221 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2222 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
2223 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2225 return DAG.getNode(ISD::ADD, DL, PtrVT,
2226 DAG.getRegister(SP::G7, PtrVT), Offset);
2229 SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2230 ArgListTy &Args, SDValue Arg,
2232 SelectionDAG &DAG) const {
2233 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2234 EVT ArgVT = Arg.getValueType();
2235 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2241 if (ArgTy->isFP128Ty()) {
2242 // Create a stack object and pass the pointer to the library function.
2243 int FI = MFI.CreateStackObject(16, Align(8), false);
2244 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2245 Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2249 Entry.Ty = PointerType::getUnqual(ArgTy);
2251 Args.push_back(Entry);
return Chain;
}

SDValue
2256 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2257 const char *LibFuncName,
2258 unsigned numArgs) const {
2262 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2263 auto PtrVT = getPointerTy(DAG.getDataLayout());
2265 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2266 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2267 Type *RetTyABI = RetTy;
2268 SDValue Chain = DAG.getEntryNode();
2271 if (RetTy->isFP128Ty()) {
2272 // Create a Stack Object to receive the return value of type f128.
2274 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2275 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2276 Entry.Node = RetPtr;
2277 Entry.Ty = PointerType::getUnqual(RetTy);
2278 if (!Subtarget->is64Bit()) {
2279 Entry.IsSRet = true;
2280 Entry.IndirectType = RetTy;
2282 Entry.IsReturned = false;
2283 Args.push_back(Entry);
2284 RetTyABI = Type::getVoidTy(*DAG.getContext());
2287 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2288 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2289 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2291 TargetLowering::CallLoweringInfo CLI(DAG);
2292 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2293 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2295 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2297 // chain is in second result.
2298 if (RetTyABI == RetTy)
2299 return CallInfo.first;
2301 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2303 Chain = CallInfo.second;
2305 // Load RetPtr to get the return value.
2306 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2307 MachinePointerInfo(), Align(8));
2310 SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2311 unsigned &SPCC, const SDLoc &DL,
2312 SelectionDAG &DAG) const {
2314 const char *LibCall = nullptr;
2315 bool is64Bit = Subtarget->is64Bit();
switch(SPCC) {
2317 default: llvm_unreachable("Unhandled conditional code!");
2318 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2319 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2320 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2321 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2322 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2323 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
case SPCC::FCC_UL : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
case SPCC::FCC_ULE: LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
case SPCC::FCC_UG : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
case SPCC::FCC_UGE: LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
case SPCC::FCC_U  : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
case SPCC::FCC_O  : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
case SPCC::FCC_LG : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2331 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
}
2334 auto PtrVT = getPointerTy(DAG.getDataLayout());
2335 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2336 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2338 SDValue Chain = DAG.getEntryNode();
2339 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2340 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2342 TargetLowering::CallLoweringInfo CLI(DAG);
2343 CLI.setDebugLoc(DL).setChain(Chain)
2344 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2346 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2348 // result is in first, and chain is in second result.
2349 SDValue Result = CallInfo.first;
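// The _Q_cmp/_Qp_cmp helpers are assumed to return 0, 1, 2 or 3 for "equal",
// "less", "greater" and "unordered" respectively; the constants below test
// that encoding with integer condition codes.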
switch(SPCC) {
default: {
2353 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2354 SPCC = SPCC::ICC_NE;
2355 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2357 case SPCC::FCC_UL : {
2358 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2359 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2360 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2361 SPCC = SPCC::ICC_NE;
2362 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2364 case SPCC::FCC_ULE: {
2365 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2366 SPCC = SPCC::ICC_NE;
2367 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2369 case SPCC::FCC_UG : {
2370 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
SPCC = SPCC::ICC_G;
2372 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2374 case SPCC::FCC_UGE: {
2375 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2376 SPCC = SPCC::ICC_NE;
2377 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2380 case SPCC::FCC_U : {
2381 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2383 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2385 case SPCC::FCC_O : {
2386 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2387 SPCC = SPCC::ICC_NE;
2388 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2390 case SPCC::FCC_LG : {
2391 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2392 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2393 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2394 SPCC = SPCC::ICC_NE;
2395 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2397 case SPCC::FCC_UE : {
2398 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2399 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2400 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2402 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2408 LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2409 const SparcTargetLowering &TLI) {
2411 if (Op.getOperand(0).getValueType() == MVT::f64)
2412 return TLI.LowerF128Op(Op, DAG,
2413 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2415 if (Op.getOperand(0).getValueType() == MVT::f32)
2416 return TLI.LowerF128Op(Op, DAG,
2417 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2419 llvm_unreachable("fpextend with non-float operand!");
2424 LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2425 const SparcTargetLowering &TLI) {
2426 // FP_ROUND on f64 and f32 are legal.
2427 if (Op.getOperand(0).getValueType() != MVT::f128)
return Op;

2430 if (Op.getValueType() == MVT::f64)
2431 return TLI.LowerF128Op(Op, DAG,
2432 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2433 if (Op.getValueType() == MVT::f32)
2434 return TLI.LowerF128Op(Op, DAG,
2435 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2437 llvm_unreachable("fpround to non-float!");
2441 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2442 const SparcTargetLowering &TLI,
2445 EVT VT = Op.getValueType();
2446 assert(VT == MVT::i32 || VT == MVT::i64);
2448 // Expand f128 operations to fp128 abi calls.
2449 if (Op.getOperand(0).getValueType() == MVT::f128
2450 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2451 const char *libName = TLI.getLibcallName(VT == MVT::i32
2452 ? RTLIB::FPTOSINT_F128_I32
2453 : RTLIB::FPTOSINT_F128_I64);
2454 return TLI.LowerF128Op(Op, DAG, libName, 1);
2457 // Expand if the resulting type is illegal.
2458 if (!TLI.isTypeLegal(VT))
return SDValue();

2461 // Otherwise, convert the fp value to integer in an FP register.
if (VT == MVT::i32)
2463 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
else
2465 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

2467 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
2470 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2471 const SparcTargetLowering &TLI,
2474 EVT OpVT = Op.getOperand(0).getValueType();
2475 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2477 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2479 // Expand f128 operations to fp128 ABI calls.
2480 if (Op.getValueType() == MVT::f128
2481 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2482 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2483 ? RTLIB::SINTTOFP_I32_F128
2484 : RTLIB::SINTTOFP_I64_F128);
2485 return TLI.LowerF128Op(Op, DAG, libName, 1);
2488 // Expand if the operand type is illegal.
2489 if (!TLI.isTypeLegal(OpVT))
return SDValue();
2492 // Otherwise, Convert the int value to FP in an FP register.
2493 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2494 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2495 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2498 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2499 const SparcTargetLowering &TLI,
2502 EVT VT = Op.getValueType();
2504 // Expand if it does not involve f128 or the target has support for
2505 // quad floating point instructions and the resulting type is legal.
2506 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2507 (hasHardQuad && TLI.isTypeLegal(VT)))
return SDValue();
2510 assert(VT == MVT::i32 || VT == MVT::i64);
2512 return TLI.LowerF128Op(Op, DAG,
2513 TLI.getLibcallName(VT == MVT::i32
2514 ? RTLIB::FPTOUINT_F128_I32
2515 : RTLIB::FPTOUINT_F128_I64),
2519 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2520 const SparcTargetLowering &TLI,
2523 EVT OpVT = Op.getOperand(0).getValueType();
2524 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2526 // Expand if it does not involve f128 or the target has support for
2527 // quad floating point instructions and the operand type is legal.
2528 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
return SDValue();
2531 return TLI.LowerF128Op(Op, DAG,
2532 TLI.getLibcallName(OpVT == MVT::i32
2533 ? RTLIB::UINTTOFP_I32_F128
2534 : RTLIB::UINTTOFP_I64_F128),
2538 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2539 const SparcTargetLowering &TLI,
2541 SDValue Chain = Op.getOperand(0);
2542 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2543 SDValue LHS = Op.getOperand(2);
2544 SDValue RHS = Op.getOperand(3);
2545 SDValue Dest = Op.getOperand(4);
2547 unsigned Opc, SPCC = ~0U;
2549 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2550 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2551 LookThroughSetCC(LHS, RHS, CC, SPCC);
2553 // Get the condition flag.
2554 SDValue CompareFlag;
2555 if (LHS.getValueType().isInteger()) {
2556 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2557 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2558 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2559 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
} else {
2561 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2562 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2563 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
Opc = SPISD::BRICC;
} else {
2566 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2567 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
Opc = SPISD::BRFCC;
}
}
2571 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2572 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2575 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2576 const SparcTargetLowering &TLI,
2578 SDValue LHS = Op.getOperand(0);
2579 SDValue RHS = Op.getOperand(1);
2580 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2581 SDValue TrueVal = Op.getOperand(2);
2582 SDValue FalseVal = Op.getOperand(3);
2584 unsigned Opc, SPCC = ~0U;
2586 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2587 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2588 LookThroughSetCC(LHS, RHS, CC, SPCC);
2590 SDValue CompareFlag;
2591 if (LHS.getValueType().isInteger()) {
2592 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2593 Opc = LHS.getValueType() == MVT::i32 ?
2594 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2595 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
} else {
2597 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2598 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2599 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2600 Opc = SPISD::SELECT_ICC;
2602 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2603 Opc = SPISD::SELECT_FCC;
2604 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2607 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2608 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2611 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2612 const SparcTargetLowering &TLI) {
2613 MachineFunction &MF = DAG.getMachineFunction();
2614 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2615 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2617 // Need frame address to find the address of VarArgsFrameIndex.
2618 MF.getFrameInfo().setFrameAddressIsTaken(true);
2620 // vastart just stores the address of the VarArgsFrameIndex slot into the
2621 // memory location argument.
SDLoc DL(Op);
SDValue Offset =
2624 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2625 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2626 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2627 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2628 MachinePointerInfo(SV));
2631 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2632 SDNode *Node = Op.getNode();
2633 EVT VT = Node->getValueType(0);
2634 SDValue InChain = Node->getOperand(0);
2635 SDValue VAListPtr = Node->getOperand(1);
2636 EVT PtrVT = VAListPtr.getValueType();
2637 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc DL(Node);
SDValue VAList =
2640 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2641 // Increment the pointer, VAList, to the next vaarg.
2642 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2643 DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2645 // Store the incremented VAList to the legalized pointer.
2646 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2647 MachinePointerInfo(SV));
2648 // Load the actual argument out of the pointer VAList.
2649 // We can't count on greater alignment than the word size.
return DAG.getLoad(
2651 VT, DL, InChain, VAList, MachinePointerInfo(),
2652 std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
2655 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2656 const SparcSubtarget *Subtarget) {
2657 SDValue Chain = Op.getOperand(0); // Legalize the chain.
2658 SDValue Size = Op.getOperand(1); // Legalize the size.
2659 MaybeAlign Alignment =
2660 cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
2661 Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
2662 EVT VT = Size->getValueType(0);
2665 // TODO: implement over-aligned alloca. (Note: also implies
2666 // supporting overaligned function frames + dynamic
2667 // allocations at all, which currently isn't supported)
2668 if (Alignment && *Alignment > StackAlign) {
2669 const MachineFunction &MF = DAG.getMachineFunction();
2670 report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2671 "over-aligned dynamic alloca not supported.");
2674 // The resultant pointer needs to be above the register spill area
2675 // at the bottom of the stack.
2676 unsigned regSpillArea;
2677 if (Subtarget->is64Bit()) {
regSpillArea = 128;
} else {
2680 // On Sparc32, the size of the spill area is 92. Unfortunately,
2681 // that's only 4-byte aligned, not 8-byte aligned (the stack
2682 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2683 // aligned dynamic allocation, we actually need to add 96 to the
2684 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2686 // That also means adding 4 to the size of the allocation --
2687 // before applying the 8-byte rounding. Unfortunately, the
2688 // value we get here has already had rounding applied. So, we need
2689 // to add 8, instead, wasting a bit more memory.
2691 // Further, this only actually needs to be done if the required
2692 // alignment is > 4, but, we've lost that info by this point, too,
2693 // so we always apply it.
2695 // (An alternative approach would be to always reserve 96 bytes
2696 // instead of the required 92, but then we'd waste 4 extra bytes
2697 // in every frame, not just those with dynamic stack allocations)
2699 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2701 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2702 DAG.getConstant(8, dl, VT));
regSpillArea = 96;
}
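// Worked example (hypothetical 8-byte-aligned alloca of 16 bytes on Sparc32):
// the rounded size of 16 becomes 24 after the +8 above, the stack pointer
// drops by 24, and the returned pointer is new-SP + 96 so that it stays
// 8-byte aligned above the 92-byte spill area.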
2706 unsigned SPReg = SP::O6;
2707 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2708 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2709 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2711 regSpillArea += Subtarget->getStackPointerBias();
2713 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2714 DAG.getConstant(regSpillArea, dl, VT));
2715 SDValue Ops[2] = { NewVal, Chain };
2716 return DAG.getMergeValues(Ops, dl);
2720 static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2722 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2723 dl, MVT::Other, DAG.getEntryNode());
2727 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2728 const SparcSubtarget *Subtarget,
2729 bool AlwaysFlush = false) {
2730 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2731 MFI.setFrameAddressIsTaken(true);
2733 EVT VT = Op.getValueType();
2735 unsigned FrameReg = SP::I6;
2736 unsigned stackBias = Subtarget->getStackPointerBias();
2741 // flush first to make sure the windowed registers' values are in stack
2742 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2744 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2746 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2749 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2750 DAG.getIntPtrConstant(Offset, dl));
2751 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2753 if (Subtarget->is64Bit())
2754 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2755 DAG.getIntPtrConstant(stackBias, dl));
2760 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2761 const SparcSubtarget *Subtarget) {
2763 uint64_t depth = Op.getConstantOperandVal(0);
2765 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2769 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2770 const SparcTargetLowering &TLI,
2771 const SparcSubtarget *Subtarget) {
2772 MachineFunction &MF = DAG.getMachineFunction();
2773 MachineFrameInfo &MFI = MF.getFrameInfo();
2774 MFI.setReturnAddressIsTaken(true);
2776 if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
return SDValue();
2779 EVT VT = Op.getValueType();
SDLoc dl(Op);
2781 uint64_t depth = Op.getConstantOperandVal(0);

SDValue RetAddr;
if (depth == 0) {
2785 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2786 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2787 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
return RetAddr;
}
2791 // Need frame address to find return address of the caller.
2792 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2794 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2795 SDValue Ptr = DAG.getNode(ISD::ADD,
2798 DAG.getIntPtrConstant(Offset, dl));
2799 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2804 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2806 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2807 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2809 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2810 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2811 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2813 // Note: in little-endian, the halves of the floating-point value are
2814 // stored in the registers in the opposite order, so the subreg with the sign
2815 // bit is the highest-numbered (odd), rather than the
2816 // lowest-numbered (even).
2818 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2820 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2823 if (DAG.getDataLayout().isLittleEndian())
2824 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
else
2826 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2828 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2830 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2832 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2837 // Lower a f128 load into two f64 loads.
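// The two f64 halves loaded below are reassembled into a single f128 value
// with INSERT_SUBREG on the sub_even64/sub_odd64 subregisters of the quad
// register.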
2838 static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2841 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2842 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2844 Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2847 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2848 LdNode->getPointerInfo(), Alignment);
2849 EVT addrVT = LdNode->getBasePtr().getValueType();
2850 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2851 LdNode->getBasePtr(),
2852 DAG.getConstant(8, dl, addrVT));
2853 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2854 LdNode->getPointerInfo().getWithOffset(8),
2857 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2858 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2860 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2862 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2864 SDValue(InFP128, 0),
2867 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2869 SDValue(InFP128, 0),
2872 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2873 SDValue(Lo64.getNode(), 1) };
2874 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2875 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2876 return DAG.getMergeValues(Ops, dl);
2879 static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2881 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2883 EVT MemVT = LdNode->getMemoryVT();
2884 if (MemVT == MVT::f128)
2885 return LowerF128Load(Op, DAG);
2890 // Lower a f128 store into two f64 stores.
2891 static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2893 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2894 assert(StNode->getOffset().isUndef() && "Unexpected node type");
2896 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2897 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2899 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2904 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2910 Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
2912 SDValue OutChains[2];
2914 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2915 StNode->getBasePtr(), StNode->getPointerInfo(),
2917 EVT addrVT = StNode->getBasePtr().getValueType();
2918 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2919 StNode->getBasePtr(),
2920 DAG.getConstant(8, dl, addrVT));
2921 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2922 StNode->getPointerInfo().getWithOffset(8),
2924 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2927 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
2930 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2932 EVT MemVT = St->getMemoryVT();
2933 if (MemVT == MVT::f128)
2934 return LowerF128Store(Op, DAG);
2936 if (MemVT == MVT::i64) {
2937 // Custom handling for i64 stores: turn it into a bitcast and a
// v2i32 store.
2939 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2940 SDValue Chain = DAG.getStore(
2941 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2942 St->getOriginalAlign(), St->getMemOperand()->getFlags(),
St->getAAInfo());
return Chain;
}

return SDValue();
}
2950 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2951 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2952 && "invalid opcode");
2956 if (Op.getValueType() == MVT::f64)
2957 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2958 if (Op.getValueType() != MVT::f128)
return Op;
2961 // Lower fabs/fneg on f128 to fabs/fneg on f64
2962 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2963 // (As with LowerF64Op, on little-endian, we need to negate the odd
2966 SDValue SrcReg128 = Op.getOperand(0);
2967 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2969 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2972 if (DAG.getDataLayout().isLittleEndian()) {
if (isV9)
2974 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
else
2976 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
} else {
if (isV9)
2979 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
else
2981 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
}
2984 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
dl, MVT::f128), 0);
2986 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
DstReg128, Hi64);
2988 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
DstReg128, Lo64);
return DstReg128;
}
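// Lower 64-bit ADDC/ADDE/SUBC/SUBE on 32-bit SPARC by splitting both operands
// into 32-bit halves, performing the operation on each half, and threading
// the carry between them through the glue result.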
2993 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
2995 if (Op.getValueType() != MVT::i64)
2999 SDValue Src1 = Op.getOperand(0);
3000 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
3001 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
3002 DAG.getConstant(32, dl, MVT::i64));
3003 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
3005 SDValue Src2 = Op.getOperand(1);
3006 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
3007 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
3008 DAG.getConstant(32, dl, MVT::i64));
3009 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
3012 bool hasChain = false;
3013 unsigned hiOpc = Op.getOpcode();
3014 switch (Op.getOpcode()) {
3015 default: llvm_unreachable("Invalid opcode");
3016 case ISD::ADDC: hiOpc = ISD::ADDE; break;
3017 case ISD::ADDE: hasChain = true; break;
3018 case ISD::SUBC: hiOpc = ISD::SUBE; break;
3019 case ISD::SUBE: hasChain = true; break;
3022 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
3024 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
3027 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
3029 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
3030 SDValue Carry = Hi.getValue(1);
3032 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
3033 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
3034 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
3035 DAG.getConstant(32, dl, MVT::i64));
3037 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
3038 SDValue Ops[2] = { Dst, Carry };
3039 return DAG.getMergeValues(Ops, dl);
3042 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
3043 // in LegalizeDAG.cpp except the order of arguments to the library function.
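// The halves are passed high-half-first ({HiLHS, LHS, HiRHS, RHS} below);
// this pairing is assumed to match how the SPARC runtime expects an i128
// argument to be split, which is the argument-order difference noted above.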
3044 static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
3045 const SparcTargetLowering &TLI)
3047 unsigned opcode = Op.getOpcode();
3048 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
3050 bool isSigned = (opcode == ISD::SMULO);
3052 EVT WideVT = MVT::i128;
3054 SDValue LHS = Op.getOperand(0);
3056 if (LHS.getValueType() != VT)
3059 SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3061 SDValue RHS = Op.getOperand(1);
3062 SDValue HiLHS, HiRHS;
if (isSigned) {
3064 HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3065 HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
} else {
3067 HiLHS = DAG.getConstant(0, dl, VT);
3068 HiRHS = DAG.getConstant(0, dl, MVT::i64);
}
3071 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3073 TargetLowering::MakeLibCallOptions CallOptions;
3074 CallOptions.setSExt(isSigned);
3075 SDValue MulResult = TLI.makeLibCall(DAG,
3076 RTLIB::MUL_I128, WideVT,
3077 Args, CallOptions, dl).first;
3078 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3079 MulResult, DAG.getIntPtrConstant(0, dl));
3080 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3081 MulResult, DAG.getIntPtrConstant(1, dl));
3083 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3084 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3086 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3089 // MulResult is a node with an illegal type. Because such things are not
3090 // generally permitted during this phase of legalization, ensure that
3091 // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3093 assert(MulResult->use_empty() && "Illegally typed node still in use!");
3095 SDValue Ops[2] = { BottomHalf, TopHalf } ;
3096 return DAG.getMergeValues(Ops, dl);
3099 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
3100 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3101 // Expand with a fence.
return SDValue();
}

3105 // Monotonic load/stores are legal.
return Op;
}
3109 SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3110 SelectionDAG &DAG) const {
3111 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3114 default: return SDValue(); // Don't custom lower most intrinsics.
3115 case Intrinsic::thread_pointer: {
3116 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3117 return DAG.getRegister(SP::G7, PtrVT);
3122 SDValue SparcTargetLowering::
3123 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3125 bool hasHardQuad = Subtarget->hasHardQuad();
3126 bool isV9 = Subtarget->isV9();
3128 switch (Op.getOpcode()) {
3129 default: llvm_unreachable("Should not custom lower this!");
3131 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3133 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3135 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3136 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3137 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3138 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3139 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3141 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3143 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3145 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3147 case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
3149 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
3151 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3152 case ISD::VAARG: return LowerVAARG(Op, DAG);
3153 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3156 case ISD::LOAD: return LowerLOAD(Op, DAG);
3157 case ISD::STORE: return LowerSTORE(Op, DAG);
3158 case ISD::FADD: return LowerF128Op(Op, DAG,
3159 getLibcallName(RTLIB::ADD_F128), 2);
3160 case ISD::FSUB: return LowerF128Op(Op, DAG,
3161 getLibcallName(RTLIB::SUB_F128), 2);
3162 case ISD::FMUL: return LowerF128Op(Op, DAG,
3163 getLibcallName(RTLIB::MUL_F128), 2);
3164 case ISD::FDIV: return LowerF128Op(Op, DAG,
3165 getLibcallName(RTLIB::DIV_F128), 2);
3166 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3167 getLibcallName(RTLIB::SQRT_F128),1);
case ISD::FABS:
3169 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3170 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3171 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
case ISD::ADDC:
case ISD::ADDE:
case ISD::SUBC:
3175 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
case ISD::UMULO:
3177 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3178 case ISD::ATOMIC_LOAD:
3179 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3180 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3184 SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
3186 SelectionDAG &DAG) const {
3187 APInt V = C->getValueAPF().bitcastToAPInt();
3188 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3189 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3190 if (DAG.getDataLayout().isLittleEndian())
3192 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3195 SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
3196 DAGCombinerInfo &DCI) const {
3198 SDValue Src = N->getOperand(0);
3200 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3201 Src.getSimpleValueType() == MVT::f64)
3202 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3207 SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
3208 DAGCombinerInfo &DCI) const {
3209 switch (N->getOpcode()) {
3213 return PerformBITCASTCombine(N, DCI);
MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_XCC:
  case SP::SELECT_CC_FP_XCC:
  case SP::SELECT_CC_DFP_XCC:
  case SP::SELECT_CC_QFP_XCC:
    return expandSelectCC(MI, BB, SP::BPXCC);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    return expandSelectCC(MI, BB, SP::FBCOND);
  }
}

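// Expand a SELECT_CC pseudo into an explicit conditional branch plus a PHI.
// Roughly, for the integer condition-code case (BROpcode == SP::BCOND) the
// emitted MachineIR has the shape:
//
//   ThisMBB:
//     BCOND %SinkMBB, <cc>       ! branch to SinkMBB when <cc> holds
//   IfFalseMBB:                  ! otherwise fall through
//   SinkMBB:
//     %dst = PHI %TrueVal, %ThisMBB, %FalseVal, %IfFalseMBB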
MachineBasicBlock *
SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // triangle control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and the condition code for the branch.
  //
  // We produce the following control flow:
  //     ThisMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    SinkMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, IfFalseMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Set the new successors for ThisMBB.
  ThisMBB->addSuccessor(IfFalseMBB);
  ThisMBB->addSuccessor(SinkMBB);

  BuildMI(ThisMBB, dl, TII.get(BROpcode))
    .addMBB(SinkMBB)
    .addImm(CC);

  // IfFalseMBB just falls through to SinkMBB.
  IfFalseMBB->addSuccessor(SinkMBB);

  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(ThisMBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent();   // The pseudo instruction is gone now.
  return SinkMBB;
}

//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'r':
    case 'f':
    case 'e':
      return C_RegisterClass;
    case 'I': // SIMM13
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void SparcTargetLowering::
LowerAsmOperandForConstraint(SDValue Op,
                             std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
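  // 'I' accepts an immediate that fits in a 13-bit signed field (simm13),
  // i.e. values in the range [-4096, 4095].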
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.empty())
    return std::make_pair(0U, nullptr);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else if (Subtarget->is64Bit())
        return std::make_pair(0U, &SP::I64RegsRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
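    // 'f' maps f64/f128 operands to the Low{D,Q}FPRegs classes (registers
    // that alias %f0-%f31), while 'e' uses the full {D,Q}FPRegs classes,
    // which also include the upper V9 registers.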
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'e':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64 )
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::QFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  }

  if (Constraint.front() != '{')
    return std::make_pair(0U, nullptr);

  assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
  StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
  if (RegName.empty())
    return std::make_pair(0U, nullptr);

  unsigned long long RegNo;
  // Handle numbered register aliases.
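  // For example, "{r10}" is rewritten to "{o2}" below and then resolved
  // through a recursive call.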
  if (RegName[0] == 'r' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    // r0-r7   -> g0-g7
    // r8-r15  -> o0-o7
    // r16-r23 -> l0-l7
    // r24-r31 -> i0-i7
    if (RegNo > 31)
      return std::make_pair(0U, nullptr);
    const char RegTypes[] = {'g', 'o', 'l', 'i'};
    char RegType = RegTypes[RegNo / 8];
    char RegIndex = '0' + (RegNo % 8);
    char Tmp[] = {'{', RegType, RegIndex, '}', 0};
    return getRegForInlineAsmConstraint(TRI, Tmp, VT);
  }

  // Rewrite the fN constraint according to the value type if needed.
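  // For example, "{f4}" resolves to the overlapping %d2 when VT is f64, and
  // to %q1 when VT is f128.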
  if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    if (VT == MVT::f64 && (RegNo % 2 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
    } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
    } else {
      return std::make_pair(0U, nullptr);
    }
  }

  auto ResultPair =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  if (!ResultPair.second)
    return std::make_pair(0U, nullptr);

  // Force the use of I64Regs over IntRegs for 64-bit values.
  if (Subtarget->is64Bit() && VT == MVT::i64) {
    assert(ResultPair.second == &SP::IntRegsRegClass &&
           "Unexpected register class");
    return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
  }

  return ResultPair;
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
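    // The cycle counter is read from %asr23; %g0 always reads as zero, so it
    // supplies the high word of the 64-bit result.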
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
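    // That is, (i64 (load addr)) becomes (i64 (bitcast (v2i32 (load addr)))).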
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
        Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
        Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);