From d53d842d12ceb182f1f5c12cc001b09f9000dbf4 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Tue, 18 Jul 2023 09:50:30 -0700
Subject: [PATCH] [RISCV][AArch64][IRGen] Add a special case to
 CodeGenFunction::EmitCall for scalable vector return being coerced to fixed
 vector.

Before falling back to CreateCoercedStore, detect a scalable vector
return being coerced to a fixed vector. Handle it using a vector.extract
intrinsic without going through memory.

Reviewed By: c-rhodes

Differential Revision: https://reviews.llvm.org/D155495
---
 clang/lib/CodeGen/CGCall.cpp                         | 14 ++++++++++++++
 clang/test/CodeGen/attr-arm-sve-vector-bits-call.c   |  6 +-----
 clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c |  6 +-----
 3 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 9012395..bd272e0 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -5743,6 +5743,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
       llvm_unreachable("bad evaluation kind");
     }
 
+    // If coercing a fixed vector from a scalable vector for ABI
+    // compatibility, and the types match, use the llvm.vector.extract
+    // intrinsic to perform the conversion.
+    if (auto *FixedDst = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
+      llvm::Value *V = CI;
+      if (auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(V->getType())) {
+        if (FixedDst->getElementType() == ScalableSrc->getElementType()) {
+          llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
+          V = Builder.CreateExtractVector(FixedDst, V, Zero, "cast.fixed");
+          return RValue::get(V);
+        }
+      }
+    }
+
     Address DestPtr = ReturnValue.getValue();
     bool DestIsVolatile = ReturnValue.isVolatile();
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
index 3d72c1f..6685fe0 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -41,11 +41,7 @@ fixed_int32_t fixed_callee(fixed_int32_t x) {
 
 // CHECK-LABEL: @sizeless_caller(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[COERCE1:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT:    store <vscale x 4 x i32> [[X:%.*]], ptr [[COERCE1]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[COERCE1]], align 16, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP1]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE2]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[X:%.*]]
 //
 svint32_t sizeless_caller(svint32_t x) {
   return fixed_callee(x);
diff --git a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c
index 330e4cd1..70e1aef 100644
--- a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-call.c
@@ -38,11 +38,7 @@ fixed_int32m1_t fixed_callee(fixed_int32m1_t x) {
 
 // CHECK-LABEL: @sizeless_caller(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[COERCE1:%.*]] = alloca <8 x i32>, align 8
-// CHECK-NEXT:    store <vscale x 2 x i32> [[X:%.*]], ptr [[COERCE1]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr [[COERCE1]], align 8, !tbaa [[TBAA4:![0-9]+]]
-// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP0]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x i32> [[CASTSCALABLESVE2]]
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[X:%.*]]
 //
 vint32m1_t sizeless_caller(vint32m1_t x) {
   return fixed_callee(x);
-- 
2.7.4
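
Note on reproducing the change: the sketch below is a minimal standalone
version of the pattern the SVE test exercises (a fixed-length callee whose
result is returned by a sizeless caller). The file name and compile flags are
illustrative assumptions, not part of the patch; the authoritative RUN lines
are in clang/test/CodeGen/attr-arm-sve-vector-bits-call.c.

    // reproducer.c -- sketch only, assuming SVE and -msve-vector-bits=512.
    // Assumed invocation:
    //   clang --target=aarch64-linux-gnu -march=armv8-a+sve \
    //         -msve-vector-bits=512 -O2 -S -emit-llvm reproducer.c
    #include <arm_sve.h>

    #define N __ARM_FEATURE_SVE_BITS

    // Fixed-length vector type derived from the sizeless SVE type.
    typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));

    // The callee is declared to return a fixed-length vector, but the AArch64
    // ABI returns it in a scalable register, so at the IR level the call
    // yields <vscale x 4 x i32> while the declared return type is <16 x i32>.
    fixed_int32_t fixed_callee(fixed_int32_t x) { return x; }

    // The caller converts the fixed-length result back to the sizeless type.
    // Before this patch the scalable-to-fixed coercion of the call result went
    // through an alloca (store + load); with the patch it uses
    // llvm.vector.extract, and the body folds down to a plain return.
    svint32_t sizeless_caller(svint32_t x) {
      return fixed_callee(x);
    }

With the patch applied, the IR emitted for sizeless_caller should match the
updated CHECK lines above: the alloca/store/load sequence disappears and the
function reduces to the single "ret <vscale x 4 x i32> [[X:%.*]]" return of
the incoming scalable value.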