SDValue
SparcTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+ CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ DebugLoc DL, SelectionDAG &DAG) const {
+ if (Subtarget->is64Bit())
+ return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
+ return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
+}
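+
+// Lower return values for the 32-bit ABI.
+// The two ABIs return values in different registers and with different
+// extension rules, hence the separate _32/_64 implementations.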
+SDValue
+SparcTargetLowering::LowerReturn_32(SDValue Chain,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
// CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs;
// CCState - Info about the registers and stack slot.
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
DAG.getTarget(), RVLocs, *DAG.getContext());
- // Analize return values.
- CCInfo.AnalyzeReturn(Outs, Subtarget->is64Bit() ? CC_Sparc64 : RetCC_Sparc32);
+ // Analyze return values.
+ CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
SDValue Flag;
SmallVector<SDValue, 4> RetOps(1, Chain);
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(),
OutVals[i], Flag);
// Guarantee that all emitted copies are stuck together with flags.
unsigned Reg = SFI->getSRetReturnReg();
if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
- SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
- Chain = DAG.getCopyToReg(Chain, dl, SP::I0, Val, Flag);
+ SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
+ Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(SP::I0, getPointerTy()));
RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other,
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
+ &RetOps[0], RetOps.size());
+}
+
+// Lower return values for the 64-bit ABI.
+// Return values are passed exactly the same way as function arguments.
+SDValue
+SparcTargetLowering::LowerReturn_64(SDValue Chain,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc DL, SelectionDAG &DAG) const {
+ // CCValAssign - represent the assignment of the return value to locations.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ // CCState - Info about the registers and stack slot.
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
+ DAG.getTarget(), RVLocs, *DAG.getContext());
+
+ // Analyze return values.
+ CCInfo.AnalyzeReturn(Outs, CC_Sparc64);
+
+ SDValue Flag;
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+
+ // The second operand on the return instruction is the return address offset.
+ // The return address is always %i7+8 with the 64-bit ABI.
+ RetOps.push_back(DAG.getConstant(8, MVT::i32));
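+ // (The 32-bit ABI above instead uses an offset of 12 for struct returns,
+ // to skip the unimp word after the call; see RetAddrOffset.)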
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+ SDValue OutVal = OutVals[i];
+
+ // Integer return values must be sign or zero extended by the callee.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::SExt:
+ OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ case CCValAssign::ZExt:
+ OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ case CCValAssign::AExt:
+ OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ default:
+ break;
+ }
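+ // E.g. an i32 'signext' result reaches this point wrapped in a SIGN_EXTEND
+ // to i64, later selected to 'sra %reg, 0' (see the ret_sext test below).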
+
+ // The custom bit on an i32 return value indicates that it should be passed
+ // in the high bits of the register.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
+ OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
+ DAG.getConstant(32, MVT::i32));
+
+ // The next value may go in the low bits of the same register.
+ // Handle both at once.
+ if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
+ SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
+ OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
+ // Skip the next value, it's already done.
+ ++i;
+ }
+ }
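+ // E.g. an inreg { i32, i32 } return packs both fields into %i0: field 0 is
+ // shifted into the high word, field 1 OR'd into the low word (see the
+ // ret_i32_packed test below).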
+
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
+
+ // Guarantee that all emitted copies are stuck together with flags.
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ }
+
+ RetOps[0] = Chain; // Update chain.
+
+ // Add the flag if we have it.
+ if (Flag.getNode())
+ RetOps.push_back(Flag);
+
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
&RetOps[0], RetOps.size());
}
def : Pat<(i64 (zext i32:$val)), (SRLri $val, 0)>;
def : Pat<(i64 (sext i32:$val)), (SRAri $val, 0)>;
+def : Pat<(i64 (and i64:$val, 0xffffffff)), (SRLri $val, 0)>;
+def : Pat<(i64 (sext_inreg i64:$val, i32)), (SRAri $val, 0)>;
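+// A 32-bit srl/sra on v9 reads only the low 32 bits of the source and zero-
+// or sign-extends the 32-bit result, so a shift count of zero implements
+// i32-to-i64 zext/sext in a single instruction.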
+
defm SLLX : F3_S<"sllx", 0b100101, 1, shl, i64, I64Regs>;
defm SRLX : F3_S<"srlx", 0b100110, 1, srl, i64, I64Regs>;
defm SRAX : F3_S<"srax", 0b100111, 1, sra, i64, I64Regs>;
// Extending loads to i64.
def : Pat<(i64 (zextloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
def : Pat<(i64 (zextloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
def : Pat<(i64 (sextloadi8 ADDRrr:$addr)), (LDSBrr ADDRrr:$addr)>;
def : Pat<(i64 (sextloadi8 ADDRri:$addr)), (LDSBri ADDRri:$addr)>;
def : Pat<(i64 (zextloadi16 ADDRrr:$addr)), (LDUHrr ADDRrr:$addr)>;
def : Pat<(i64 (zextloadi16 ADDRri:$addr)), (LDUHri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi16 ADDRrr:$addr)), (LDUHrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi16 ADDRri:$addr)), (LDUHri ADDRri:$addr)>;
def : Pat<(i64 (sextloadi16 ADDRrr:$addr)), (LDSHrr ADDRrr:$addr)>;
def : Pat<(i64 (sextloadi16 ADDRri:$addr)), (LDSHri ADDRri:$addr)>;
def : Pat<(i64 (zextloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>;
def : Pat<(i64 (zextloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>;
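+// An extload (anyext load) leaves the high bits unspecified, so selecting the
+// zero-extending loads above is always safe.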
// Sign-extending load of i32 into i64 is a new SPARC v9 instruction.
def LDSWrr : F3_1<3, 0b001011,
%rv = sub i32 %a1, %a0
ret i32 %rv
}
+
+; Structs up to 32 bytes in size can be returned in registers.
+; CHECK: ret_i64_pair
+; CHECK: ldx [%i2], %i0
+; CHECK: ldx [%i3], %i1
+define { i64, i64 } @ret_i64_pair(i32 %a0, i32 %a1, i64* %p, i64* %q) {
+ %r1 = load i64* %p
+ %rv1 = insertvalue { i64, i64 } undef, i64 %r1, 0
+ store i64 0, i64* %p
+ %r2 = load i64* %q
+ %rv2 = insertvalue { i64, i64 } %rv1, i64 %r2, 1
+ ret { i64, i64 } %rv2
+}
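+
+; On the caller's side the two words arrive in %o0/%o1 through the register
+; window; a hypothetical caller (not part of this test) would look like:
+;   %pair = call { i64, i64 } @ret_i64_pair(i32 0, i32 0, i64* %p, i64* %q)
+;   %first = extractvalue { i64, i64 } %pair, 0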
+
+; This is not a C struct; each member occupies its own 8-byte slot.
+; CHECK: ret_i32_float_pair
+; CHECK: ld [%i2], %i0
+; CHECK: ld [%i3], %f3
+define { i32, float } @ret_i32_float_pair(i32 %a0, i32 %a1,
+ i32* %p, float* %q) {
+ %r1 = load i32* %p
+ %rv1 = insertvalue { i32, float } undef, i32 %r1, 0
+ store i32 0, i32* %p
+ %r2 = load float* %q
+ %rv2 = insertvalue { i32, float } %rv1, float %r2, 1
+ ret { i32, float } %rv2
+}
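+
+; The float maps to %f3 because it is right-justified in the second 8-byte
+; slot (byte offset 12), which corresponds to an odd float register.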
+
+; This is a C struct; each member uses 4 bytes.
+; CHECK: ret_i32_float_packed
+; CHECK: ld [%i2], [[R:%[gilo][0-7]]]
+; CHECK: sllx [[R]], 32, %i0
+; CHECK: ld [%i3], %f1
+define inreg { i32, float } @ret_i32_float_packed(i32 %a0, i32 %a1,
+ i32* %p, float* %q) {
+ %r1 = load i32* %p
+ %rv1 = insertvalue { i32, float } undef, i32 %r1, 0
+ store i32 0, i32* %p
+ %r2 = load float* %q
+ %rv2 = insertvalue { i32, float } %rv1, float %r2, 1
+ ret { i32, float } %rv2
+}
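+
+; Here inreg packs both members into one 8-byte slot: the i32 is shifted into
+; the high word of %i0 and the float is returned in %f1.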
+
+; The C frontend should use i64 to return { i32, i32 } structs, but verify that
+; we don't miscompile this case where both struct elements are placed in %i0.
+; CHECK: ret_i32_packed
+; CHECK: ld [%i2], [[R1:%[gilo][0-7]]]
+; CHECK: ld [%i3], [[R2:%[gilo][0-7]]]
+; CHECK: sllx [[R2]], 32, [[R3:%[gilo][0-7]]]
+; CHECK: or [[R3]], [[R1]], %i0
+define inreg { i32, i32 } @ret_i32_packed(i32 %a0, i32 %a1,
+ i32* %p, i32* %q) {
+ %r1 = load i32* %p
+ %rv1 = insertvalue { i32, i32 } undef, i32 %r1, 1
+ store i32 0, i32* %p
+ %r2 = load i32* %q
+ %rv2 = insertvalue { i32, i32 } %rv1, i32 %r2, 0
+ ret { i32, i32 } %rv2
+}
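+
+; This exercises the "handle both at once" path in LowerReturn_64, where the
+; second i32 is zero-extended and OR'd into the low word of %i0.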
+
+; The return value must be sign-extended to 64 bits.
+; CHECK: ret_sext
+; CHECK: sra %i0, 0, %i0
+define signext i32 @ret_sext(i32 %a0) {
+ ret i32 %a0
+}
+
+; CHECK: ret_zext
+; CHECK: srl %i0, 0, %i0
+define zeroext i32 @ret_zext(i32 %a0) {
+ ret i32 %a0
+}
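+
+; The zeroext return is selected to 'srl ..., 0' via the zext/and patterns in
+; the instruction-selection changes above.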
+
+; CHECK: ret_nosext
+; CHECK-NOT: sra
+define signext i32 @ret_nosext(i32 signext %a0) {
+ ret i32 %a0
+}
+
+; CHECK: ret_nozext
+; CHECK-NOT: srl
+define zeroext i32 @ret_nozext(i32 zeroext %a0) {
+ ret i32 %a0
+}