}
};
+struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
+private:
+ // The function used internally to assign args - we ignore the AssignFn stored
+ // by IncomingValueAssigner since RISC-V implements its CC using a custom
+ // function with a different signature.
+ RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;
+
+ // Whether this is assigning args from a return.
+ bool IsRet;
+
+public:
+ // Pass nullptr as the base-class AssignFn: it is never consulted because
+ // assignArg below always dispatches to RISCVAssignFn instead.
+ RISCVIncomingValueAssigner(
+ RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
+ : CallLowering::IncomingValueAssigner(nullptr),
+ RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
+
+ // Assign one incoming value to a location by delegating to the RISC-V CC
+ // function. The result follows the CCAssignFn convention (presumably true
+ // on failure — confirm against RISCVCCAssignFn's contract).
+ bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+ CCState &State) override {
+ MachineFunction &MF = State.getMachineFunction();
+ const DataLayout &DL = MF.getDataLayout();
+ const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+
+ // IsFixed=true: only fixed (non-vararg) arguments reach this assigner.
+ // FirstMaskArgument=nullopt: no vector mask argument handling yet.
+ return RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
+ LocInfo, Flags, State, /*IsFixed=*/true, IsRet,
+ Info.Ty, *Subtarget.getTargetLowering(),
+ /*FirstMaskArgument=*/std::nullopt);
+ }
+};
+
+// Handler for incoming formal arguments: for each location chosen by the CC
+// assigner, materialize the incoming value into the desired virtual register.
+struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
+ RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
+ : IncomingValueHandler(B, MRI) {}
+
+ // Stack-passed arguments are not supported yet; callers are expected to
+ // bail out before any argument would be assigned to a stack slot.
+ Register getStackAddress(uint64_t MemSize, int64_t Offset,
+ MachinePointerInfo &MPO,
+ ISD::ArgFlagsTy Flags) override {
+ llvm_unreachable("not implemented");
+ }
+
+ // See getStackAddress: no memory-located incoming arguments yet.
+ void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
+ MachinePointerInfo &MPO, CCValAssign &VA) override {
+ llvm_unreachable("not implemented");
+ }
+
+ void assignValueToReg(Register ValVReg, Register PhysReg,
+ CCValAssign VA) override {
+ // Copy argument received in physical register to desired VReg. The
+ // physical register must first be marked live-in to the block.
+ MIRBuilder.getMBB().addLiveIn(PhysReg);
+ MIRBuilder.buildCopy(ValVReg, PhysReg);
+ }
+};
+
} // namespace
RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
const Function &F,
ArrayRef<ArrayRef<Register>> VRegs,
FunctionLoweringInfo &FLI) const {
-
+ // Early exit if there are no arguments.
if (F.arg_empty())
return true;
- return false;
+ // TODO: Support vararg functions.
+ if (F.isVarArg())
+ return false;
+
+ // TODO: Support all argument types. Only integer and pointer arguments are
+ // handled for now; anything else falls back to SelectionDAG.
+ for (auto &Arg : F.args()) {
+ if (Arg.getType()->isIntegerTy())
+ continue;
+ if (Arg.getType()->isPointerTy())
+ continue;
+ return false;
+ }
+
+ MachineFunction &MF = MIRBuilder.getMF();
+ const DataLayout &DL = MF.getDataLayout();
+ CallingConv::ID CC = F.getCallingConv();
+
+ // One ArgInfo per value produced by splitting each IR-level argument.
+ SmallVector<ArgInfo, 32> SplitArgInfos;
+ unsigned Index = 0;
+ for (auto &Arg : F.args()) {
+ // Construct the ArgInfo object from destination register and argument type.
+ ArgInfo AInfo(VRegs[Index], Arg.getType(), Index);
+ setArgFlags(AInfo, Index + AttributeList::FirstArgIndex, DL, F);
+
+ // Handle any required merging from split value types from physical
+ // registers into the desired VReg. ArgInfo objects are constructed
+ // correspondingly and appended to SplitArgInfos.
+ splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
+
+ ++Index;
+ }
+
+ // Formal arguments use the standard CC unless the function is fastcc;
+ // IsRet=false since these are incoming arguments, not return values.
+ RISCVIncomingValueAssigner Assigner(
+ CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
+ /*IsRet=*/false);
+ RISCVIncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
+
+ return determineAndHandleAssignments(Handler, Assigner, SplitArgInfos,
+ MIRBuilder, CC, F.isVarArg());
}
bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+
+; Check IRTranslator lowering of scalar integer and pointer formal arguments
+; on RV32 and RV64.
+
+; An i8 argument arrives in a full-width GPR ($x10) and is G_TRUNC'ed to s8.
+define void @test_args_i8(i8 %a) {
+
+ ; RV32I-LABEL: name: test_args_i8
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[C]]
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_i8
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[C]]
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = add i8 %a, 1
+ ret void
+}
+
+; An i16 argument is likewise passed in a full GPR and truncated to s16.
+define void @test_args_i16(i16 %a) {
+
+ ; RV32I-LABEL: name: test_args_i16
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[C]]
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_i16
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[C]]
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = add i16 %a, 1
+ ret void
+}
+
+; i32 is register-width on RV32 (no trunc needed); on RV64 it arrives as s64
+; and is truncated to s32.
+define void @test_args_i32(i32 %a) {
+
+ ; RV32I-LABEL: name: test_args_i32
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C]]
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_i32
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[C]]
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = add i32 %a, 1
+ ret void
+}
+
+; On RV32 an i64 is split across $x10/$x11 and rebuilt with G_MERGE_VALUES;
+; on RV64 it fits a single GPR.
+define void @test_args_i64(i64 %a) {
+
+ ; RV32I-LABEL: name: test_args_i64
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[C]]
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_i64
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = add i64 %a, 1
+ ret void
+}
+
+; A pointer argument arrives as a p0 value in a single GPR on both targets.
+define void @test_args_i8_ptr(ptr %a) {
+
+ ; RV32I-LABEL: name: test_args_i8_ptr
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s8) from %ir.a)
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_i8_ptr
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s8) from %ir.a)
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = load i8, ptr %a
+ ret void
+}
+
+; Two i8 arguments occupy consecutive GPRs $x10 and $x11.
+define void @test_args_2xi8(i8 %a, i8 %b) {
+
+ ; RV32I-LABEL: name: test_args_2xi8
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]]
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_2xi8
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]]
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = add i8 %a, %b
+ ret void
+}
+
+; Two i16 arguments occupy consecutive GPRs $x10 and $x11.
+define void @test_args_2xi16(i16 %a, i16 %b) {
+
+ ; RV32I-LABEL: name: test_args_2xi16
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[TRUNC1]]
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_2xi16
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[TRUNC1]]
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = add i16 %a, %b
+ ret void
+}
+
+; Two i32 arguments occupy $x10 and $x11; on RV64 each is truncated from s64.
+define void @test_args_2xi32(i32 %a, i32 %b) {
+
+ ; RV32I-LABEL: name: test_args_2xi32
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_2xi32
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = add i32 %a, %b
+ ret void
+}
+
+; Two i64 arguments take four GPRs ($x10-$x13) on RV32 and two on RV64.
+define void @test_args_2xi64(i64 %a, i64 %b) {
+
+ ; RV32I-LABEL: name: test_args_2xi64
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10, $x11, $x12, $x13
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; RV32I-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[MV1]]
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_2xi64
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = add i64 %a, %b
+ ret void
+}
+
+; Two pointer arguments arrive as p0 values in $x10 and $x11.
+define void @test_args_2xi8_ptr(ptr %a, ptr %b) {
+
+ ; RV32I-LABEL: name: test_args_2xi8_ptr
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s8) from %ir.a)
+ ; RV32I-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[COPY1]](p0) :: (load (s8) from %ir.b)
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_2xi8_ptr
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s8) from %ir.a)
+ ; RV64I-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[COPY1]](p0) :: (load (s8) from %ir.b)
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = load i8, ptr %a
+ %1 = load i8, ptr %b
+ ret void
+}
+
+; A byval(i8) pointer is lowered here the same as a plain pointer in $x10.
+define void @test_args_ptr_byval(ptr byval(i8) %a) {
+ ; RV32I-LABEL: name: test_args_ptr_byval
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (dereferenceable load (s8) from %ir.a)
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_ptr_byval
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (dereferenceable load (s8) from %ir.a)
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = load i8, ptr %a
+ ret void
+}
+
+; An sret(i8) pointer is likewise passed as a plain p0 value in $x10.
+define void @test_args_ptr_sret(ptr sret(i8) %a) {
+ ; RV32I-LABEL: name: test_args_ptr_sret
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (dereferenceable load (s8) from %ir.a)
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_args_ptr_sret
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (dereferenceable load (s8) from %ir.a)
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %0 = load i8, ptr %a
+ ret void
+}