From a584f0f553453627e10d870c74530576338b1c1e Mon Sep 17 00:00:00 2001 From: eopXD Date: Fri, 2 Jun 2023 05:35:56 -0700 Subject: [PATCH] [Clang][RISCV] Add test coverage for the `_mu` overloaded variant of the load intrinsics. NFC Maps to the amendment to the specification: riscv-non-isa/rvv-intrinsic-doc#233 Signed-off-by: eop Chen --- .../policy/overloaded/vid.c | 220 ++++++++++++++++++ .../policy/overloaded/viota.c | 220 ++++++++++++++++++ .../policy/overloaded/vle16.c | 180 +++++++++++++++ .../policy/overloaded/vle16ff.c | 234 +++++++++++++++++++ .../policy/overloaded/vle32.c | 150 ++++++++++++ .../policy/overloaded/vle32ff.c | 195 ++++++++++++++++ .../policy/overloaded/vle64.c | 120 ++++++++++ .../policy/overloaded/vle64ff.c | 156 +++++++++++++ .../policy/overloaded/vle8.c | 140 ++++++++++++ .../policy/overloaded/vle8ff.c | 182 +++++++++++++++ .../policy/overloaded/vlse16.c | 180 +++++++++++++++ .../policy/overloaded/vlse32.c | 150 ++++++++++++ .../policy/overloaded/vlse64.c | 120 ++++++++++ .../policy/overloaded/vlse8.c | 140 ++++++++++++ .../policy/overloaded/vlseg2e16.c | 210 +++++++++++++++++ .../policy/overloaded/vlseg2e16ff.c | 240 ++++++++++++++++++++ .../policy/overloaded/vlseg2e32.c | 168 ++++++++++++++ .../policy/overloaded/vlseg2e32ff.c | 192 ++++++++++++++++ .../policy/overloaded/vlseg2e64.c | 126 +++++++++++ .../policy/overloaded/vlseg2e64ff.c | 144 ++++++++++++ .../policy/overloaded/vlseg2e8.c | 168 ++++++++++++++ .../policy/overloaded/vlseg2e8ff.c | 192 ++++++++++++++++ .../policy/overloaded/vlseg3e16.c | 192 ++++++++++++++++ .../policy/overloaded/vlseg3e16ff.c | 216 ++++++++++++++++++ .../policy/overloaded/vlseg3e32.c | 144 ++++++++++++ .../policy/overloaded/vlseg3e32ff.c | 162 +++++++++++++ .../policy/overloaded/vlseg3e64.c | 96 ++++++++ .../policy/overloaded/vlseg3e64ff.c | 108 +++++++++ .../policy/overloaded/vlseg3e8.c | 160 +++++++++++++ .../policy/overloaded/vlseg3e8ff.c | 180 +++++++++++++++ .../policy/overloaded/vlseg4e16.c | 216 ++++++++++++++++++ .../policy/overloaded/vlseg4e16ff.c | 240 ++++++++++++++++++++ .../policy/overloaded/vlseg4e32.c | 162 +++++++++++++ .../policy/overloaded/vlseg4e32ff.c | 180 +++++++++++++++ .../policy/overloaded/vlseg4e64.c | 108 +++++++++ .../policy/overloaded/vlseg4e64ff.c | 120 ++++++++++ .../policy/overloaded/vlseg4e8.c | 180 +++++++++++++++ .../policy/overloaded/vlseg4e8ff.c | 200 ++++++++++++++++ .../policy/overloaded/vlseg5e16.c | 180 +++++++++++++++ .../policy/overloaded/vlseg5e16ff.c | 198 ++++++++++++++++ .../policy/overloaded/vlseg5e32.c | 120 ++++++++++ .../policy/overloaded/vlseg5e32ff.c | 132 +++++++++++ .../policy/overloaded/vlseg5e64.c | 60 +++++ .../policy/overloaded/vlseg5e64ff.c | 66 ++++++ .../policy/overloaded/vlseg5e8.c | 160 +++++++++++++ .../policy/overloaded/vlseg5e8ff.c | 176 ++++++++++++++ .../policy/overloaded/vlseg6e16.c | 198 ++++++++++++++++ .../policy/overloaded/vlseg6e16ff.c | 216 ++++++++++++++++++ .../policy/overloaded/vlseg6e32.c | 132 +++++++++++ .../policy/overloaded/vlseg6e32ff.c | 144 ++++++++++++ .../policy/overloaded/vlseg6e64.c | 66 ++++++ .../policy/overloaded/vlseg6e64ff.c | 72 ++++++ .../policy/overloaded/vlseg6e8.c | 176 ++++++++++++++ .../policy/overloaded/vlseg6e8ff.c | 192 ++++++++++++++++ .../policy/overloaded/vlseg7e16.c | 216 ++++++++++++++++++ .../policy/overloaded/vlseg7e16ff.c | 234 +++++++++++++++++++ .../policy/overloaded/vlseg7e32.c | 144 ++++++++++++ .../policy/overloaded/vlseg7e32ff.c | 156 +++++++++++++ .../policy/overloaded/vlseg7e64.c | 72 ++++++ 
.../policy/overloaded/vlseg7e64ff.c | 78 +++++++ .../policy/overloaded/vlseg7e8.c | 192 ++++++++++++++++ .../policy/overloaded/vlseg7e8ff.c | 208 +++++++++++++++++ .../policy/overloaded/vlseg8e16.c | 234 +++++++++++++++++++ .../policy/overloaded/vlseg8e16ff.c | 252 +++++++++++++++++++++ .../policy/overloaded/vlseg8e32.c | 156 +++++++++++++ .../policy/overloaded/vlseg8e32ff.c | 168 ++++++++++++++ .../policy/overloaded/vlseg8e64.c | 78 +++++++ .../policy/overloaded/vlseg8e64ff.c | 84 +++++++ .../policy/overloaded/vlseg8e8.c | 208 +++++++++++++++++ .../policy/overloaded/vlseg8e8ff.c | 224 ++++++++++++++++++ .../policy/overloaded/vlsseg2e16.c | 210 +++++++++++++++++ .../policy/overloaded/vlsseg2e32.c | 168 ++++++++++++++ .../policy/overloaded/vlsseg2e64.c | 126 +++++++++++ .../policy/overloaded/vlsseg2e8.c | 168 ++++++++++++++ .../policy/overloaded/vlsseg3e16.c | 192 ++++++++++++++++ .../policy/overloaded/vlsseg3e32.c | 144 ++++++++++++ .../policy/overloaded/vlsseg3e64.c | 96 ++++++++ .../policy/overloaded/vlsseg3e8.c | 160 +++++++++++++ .../policy/overloaded/vlsseg4e16.c | 216 ++++++++++++++++++ .../policy/overloaded/vlsseg4e32.c | 162 +++++++++++++ .../policy/overloaded/vlsseg4e64.c | 108 +++++++++ .../policy/overloaded/vlsseg4e8.c | 180 +++++++++++++++ .../policy/overloaded/vlsseg5e16.c | 180 +++++++++++++++ .../policy/overloaded/vlsseg5e32.c | 120 ++++++++++ .../policy/overloaded/vlsseg5e64.c | 60 +++++ .../policy/overloaded/vlsseg5e8.c | 160 +++++++++++++ .../policy/overloaded/vlsseg6e16.c | 198 ++++++++++++++++ .../policy/overloaded/vlsseg6e32.c | 132 +++++++++++ .../policy/overloaded/vlsseg6e64.c | 66 ++++++ .../policy/overloaded/vlsseg6e8.c | 176 ++++++++++++++ .../policy/overloaded/vlsseg7e16.c | 216 ++++++++++++++++++ .../policy/overloaded/vlsseg7e32.c | 144 ++++++++++++ .../policy/overloaded/vlsseg7e64.c | 72 ++++++ .../policy/overloaded/vlsseg7e8.c | 192 ++++++++++++++++ .../policy/overloaded/vlsseg8e16.c | 234 +++++++++++++++++++ .../policy/overloaded/vlsseg8e32.c | 156 +++++++++++++ .../policy/overloaded/vlsseg8e64.c | 78 +++++++ .../policy/overloaded/vlsseg8e8.c | 208 +++++++++++++++++ 98 files changed, 15805 insertions(+) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c index 6e67ba57..ae74ff8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vid.c @@ -666,3 +666,223 @@ vuint64m8_t test_vid_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, size_t v return __riscv_vid_tumu(mask, maskedoff, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u8mf8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vid_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u8mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vid_v_u8mf4_mu(vbool32_t mask, 
vuint8mf4_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u8mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vid_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u8m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vid_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u8m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vid_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u8m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vid_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u8m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vid_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vid_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vid_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vid_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vid_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vid_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vid_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vid_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vid_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vid_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vid_v_u32m4_mu(vbool8_t mask, 
vuint32m4_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vid_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vid_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vid_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vid_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vid_v_u64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vid_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { + return __riscv_vid_mu(mask, maskedoff, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c index 8d247cc..a721cba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/viota.c @@ -666,3 +666,223 @@ vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vbool8 return __riscv_viota_tumu(mask, maskedoff, op1, vl); } +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u8mf8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_viota_m_u8mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u8mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_viota_m_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u8m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_viota_m_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u8m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_viota_m_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u8m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_viota_m_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u8m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_viota_m_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { + 
return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_viota_m_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_viota_m_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_viota_m_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_viota_m_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_viota_m_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m1_t test_viota_m_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_viota_m_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_viota_m_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_viota_m_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_viota_m_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_viota_m_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_viota_m_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_viota_m_u64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.viota.mask.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_viota_m_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_viota_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c index 82ababc..74fa4f8a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16.c @@ -547,3 +547,183 @@ vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const return __riscv_vle16_tumu(mask, maskedoff, base, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_f16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vle16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_f16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_f16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_f16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_f16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { + return __riscv_vle16_mu(mask, 
maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_f16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_i16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_i16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_i16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vle16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_i16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vle16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_i16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vle16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_i16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vle16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_u16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_u16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_u16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_u16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_u16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { + return __riscv_vle16_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16_v_u16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vle16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { + return 
__riscv_vle16_mu(mask, maskedoff, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c index 4bb713a..76674f0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle16ff.c @@ -709,3 +709,237 @@ vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, cons return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_f16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_f16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_f16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_f16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_f16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_f16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32f16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_i16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_i16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, 
const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_i16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_i16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_i16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_i16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_u16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], 
ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_u16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vle16ff_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_u16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_u16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_u16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle16ff_v_u16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vle16ff_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c index 7f5902f..0f51dbe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32.c @@ -457,3 +457,153 @@ vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const return __riscv_vle32_tumu(mask, maskedoff, base, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_f32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_f32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_f32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_f32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_f32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vle32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_i32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_i32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vle32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_i32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vle32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_i32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vle32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_i32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint32m8_t test_vle32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_u32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_u32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_u32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_u32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32_v_u32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vle32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { + return __riscv_vle32_mu(mask, maskedoff, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c index a8a24aad..94adfcc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle32ff.c @@ -592,3 +592,198 @@ vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, cons return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_f32mf2_mu +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_f32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_f32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vle32ff_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_f32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_f32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( [[MASKEDOFF]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_i32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_i32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_i32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_i32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_i32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_u32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_u32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_u32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vle32ff_v_u32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle32ff_v_u32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vle32ff_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c index 6ad67d7..823823e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64.c @@ -367,3 +367,123 @@ vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const return __riscv_vle64_tumu(mask, maskedoff, base, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_f64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_f64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_f64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( 
[[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_f64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_i64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vle64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_i64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vle64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_i64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vle64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_i64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vle64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_u64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vle64_v_u64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_u64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64_v_u64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vle64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { + return __riscv_vle64_mu(mask, maskedoff, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c index 1084e2f..92563dc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle64ff.c @@ -475,3 +475,159 @@ vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, cons return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_f64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_f64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_f64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_f64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_i64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_i64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, 
maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_i64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_i64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_u64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_u64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_u64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle64ff_v_u64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vle64ff_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c index 7c63d96..63ace90 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8.c @@ -427,3 +427,143 @@ vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint return __riscv_vle8_tumu(mask, maskedoff, base, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_i8mf8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_i8mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_i8mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { + return 
__riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_i8m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vle8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_i8m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vle8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_i8m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vle8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_i8m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vle8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_u8mf8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_u8mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vle8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_u8mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_u8m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_u8m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_u8m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8_v_u8m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vle8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { + return __riscv_vle8_mu(mask, maskedoff, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c index a331111..4fd0ce7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vle8ff.c @@ -553,3 +553,185 @@ vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const ui return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_i8mf8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 
[[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_i8mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vle8ff_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_i8mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_i8m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_i8m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vle8ff_v_i8m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_i8m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_u8mf8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_u8mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_u8mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } 
@llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_u8m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vle8ff_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_u8m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_u8m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vle8ff_v_u8m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vle8ff_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c index d9d3518..764ae09 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse16.c @@ -547,3 +547,183 @@ vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_f16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_f16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_f16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_f16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_f16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_f16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_i16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_i16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_i16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_i16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vlse16_v_i16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_i16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_u16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlse16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_u16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_u16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_u16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t 
test_vlse16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_u16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse16_v_u16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlse16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c index d880cd4..2eca352 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse32.c @@ -457,3 +457,153 @@ vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_f32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_f32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_f32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( 
[[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_f32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vlse32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_f32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_i32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlse32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_i32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_i32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_i32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_i32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_u32mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_u32m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_u32m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_u32m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t 
vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse32_v_u32m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vlse32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c index 434168c..6ec1e01 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse64.c @@ -367,3 +367,123 @@ vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_f64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlse64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_f64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_f64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_f64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_i64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_i64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_i64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_i64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_u64m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vlse64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_u64m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_u64m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse64_v_u64m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vlse64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c index 08d00b0..bab9c58 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlse8.c @@ -426,3 +426,143 @@ vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uin return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_i8mf8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_i8mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_i8mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_i8m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_i8m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_i8m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_i8m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_u8mf8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, 
bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_u8mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_u8mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_u8m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_u8m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_u8m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vlse8_v_u8m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t 
test_vlse8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c index 01c300b..a2ea835 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16.c @@ -637,3 +637,213 @@ void test_vlseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask return __riscv_vlseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t 
maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_f16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_i16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_i16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret 
void +// +void test_vlseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16_v_u16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { + return __riscv_vlseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c index af5d260..46cb85f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c @@ -727,3 +727,243 @@ void test_vlseg2e16ff_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma return __riscv_vlseg2e16ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_f16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_i16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_i16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e16ff_v_u16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c index 13d392e..3a8212e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32.c @@ -511,3 +511,171 @@ void test_vlseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask return __riscv_vlseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_f32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_i32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { + return __riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32_v_u32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { + return 
__riscv_vlseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c index 42a6764..7357452 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c @@ -583,3 +583,195 @@ void test_vlseg2e32ff_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma return __riscv_vlseg2e32ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_f32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_i32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e32ff_v_u32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c index 2ecb62b..14819e8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64.c @@ -385,3 +385,129 @@ void test_vlseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mas return __riscv_vlseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_f64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_i64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, 
size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64_v_u64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { + return __riscv_vlseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c index ebb24f2..2cd70ae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c @@ -439,3 +439,147 @@ void test_vlseg2e64ff_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m return 
__riscv_vlseg2e64ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_f64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t 
*new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_i64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, 
size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e64ff_v_u64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, 
const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c index af3ad5f..9d8a3ae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8.c @@ -510,3 +510,171 @@ void test_vlseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vu return __riscv_vlseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { + return 
__riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_i8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_i8m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8_v_u8m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { + return __riscv_vlseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c index d494adf..6e21ab5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c @@ -583,3 +583,195 @@ void test_vlseg2e8ff_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, return __riscv_vlseg2e8ff_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_i8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], 
[[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_i8m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg2e8ff_v_u8m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c index 6a1d16b..c5714e5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16.c @@ -583,3 +583,195 @@ void test_vlseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * return __riscv_vlseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_i16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { + return __riscv_vlseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c index 0944fb4..ad501f5 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c @@ -655,3 +655,219 @@ void test_vlseg3e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t return __riscv_vlseg3e16ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_i16m2_mu +// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e16ff_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c index 8afae2c..95ed62c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32.c @@ -439,3 +439,147 @@ void test_vlseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * return __riscv_vlseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { + return __riscv_vlseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c index c77bb1f..ead18fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c @@ -493,3 +493,165 @@ void test_vlseg3e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t return __riscv_vlseg3e32ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } 
@llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr 
[[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e32ff_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c index 819a877..186c43d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64.c @@ -295,3 +295,99 @@ void test_vlseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * return __riscv_vlseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { + return __riscv_vlseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { + return __riscv_vlseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { + return __riscv_vlseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { + return __riscv_vlseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, 
const uint64_t *base, size_t vl) { + return __riscv_vlseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { + return __riscv_vlseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c index 7f094da..528d4d0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c @@ -331,3 +331,111 @@ void test_vlseg3e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t return __riscv_vlseg3e64ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64ff_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64ff_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64ff_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64ff_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], 
ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64ff_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e64ff_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c index 8390c60..0b5eb32 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8.c @@ -486,3 +486,163 @@ void test_vlseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v return __riscv_vlseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_i8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { + return __riscv_vlseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c index bb7a264..2519253 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c @@ -547,3 +547,183 @@ void test_vlseg3e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, return __riscv_vlseg3e8ff_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_i8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local void @test_vlseg3e8ff_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg3e8ff_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c index 211994b..fc74430 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16.c @@ -655,3 +655,219 @@ void test_vlseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * return __riscv_vlseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_f16mf4_mu +// CHECK-RV64-SAME: 
(ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t 
mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_i16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], 
align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { + return __riscv_vlseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c index cff856e..7dbbd18 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c @@ -727,3 +727,243 @@ void test_vlseg4e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t return __riscv_vlseg4e16ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void 
+// +void test_vlseg4e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_i16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + 
return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e16ff_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c index d7d848d..f8bcfd4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32.c @@ -493,3 +493,165 @@ void test_vlseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * return __riscv_vlseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void 
@test_vlseg4e32_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { + return __riscv_vlseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c index 37c19cd..d2dddaf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c @@ -547,3 +547,183 @@ void test_vlseg4e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t return __riscv_vlseg4e32ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, 
vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_u32mf2_mu +// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e32ff_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], 
[[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c index 364bdaa..2b01c6d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64.c @@ -331,3 +331,111 @@ void test_vlseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * return __riscv_vlseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { + return __riscv_vlseg4e64_mu(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { + return __riscv_vlseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { + return __riscv_vlseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { + return __riscv_vlseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { + return __riscv_vlseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg4e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { + return __riscv_vlseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c index 5a38056..f7ebf55 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c @@ -367,3 +367,123 @@ void test_vlseg4e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t return __riscv_vlseg4e64ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64ff_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64ff_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], 
ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64ff_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64ff_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64ff_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e64ff_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], 
align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c index f14fa57..8d9f1f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8.c @@ -546,3 +546,183 @@ void test_vlseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v return __riscv_vlseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_i8m2_mu 
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t 
maskedoff3, const uint8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { + return __riscv_vlseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c index 07b0535..d5763ff 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c @@ -607,3 +607,203 @@ void test_vlseg4e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, return __riscv_vlseg4e8ff_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret 
void +// +void test_vlseg4e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t 
maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_i8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_u8mf2_mu +// CHECK-RV64-SAME: 
(ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg4e8ff_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c index a72e987..38e8a39 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16.c @@ -547,3 +547,183 @@ void test_vlseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * return __riscv_vlseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t 
maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const 
int16_t *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { + return __riscv_vlseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); 
+} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c index 76e5a9d..aed9e2d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c @@ -601,3 +601,201 @@ void test_vlseg5e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t return __riscv_vlseg5e16ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_i16m1_mu 
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t 
mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e16ff_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c index dc24e87..5b725d4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32.c @@ -367,3 +367,123 @@ void test_vlseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * return __riscv_vlseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { + return __riscv_vlseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { + return __riscv_vlseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { + return __riscv_vlseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { + return __riscv_vlseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { + return __riscv_vlseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { + return __riscv_vlseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c index ded7953..f311044 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c @@ -403,3 +403,135 @@ void test_vlseg5e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t return __riscv_vlseg5e32ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32ff_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t 
maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32ff_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32ff_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 
+// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32ff_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32ff_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e32ff_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c index b336587..e43cf78 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64.c @@ -187,3 +187,63 @@ void test_vlseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * 
   return __riscv_vlseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e64_v_f64m1_mu
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg5.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
+  return __riscv_vlseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e64_v_i64m1_mu
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) {
+  return __riscv_vlseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e64_v_u64m1_mu
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) {
+  return __riscv_vlseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
index fab97e0..b99a869 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
@@ -205,3 +205,69 @@ void test_vlseg5e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t
   return __riscv_vlseg5e64ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e64ff_v_f64m1_mu
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e64ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e64ff_v_i64m1_mu
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e64ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e64ff_v_u64m1_mu
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e64ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c
index 98a606a..2d433d3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8.c
@@ -486,3 +486,163 @@ void test_vlseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v
   return __riscv_vlseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8_v_i8mf8_mu
+// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) {
+  return __riscv_vlseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3,
maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { + return __riscv_vlseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { + return __riscv_vlseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8_v_i8m1_mu +// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { + return __riscv_vlseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { + return __riscv_vlseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr 
noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { + return __riscv_vlseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { + return __riscv_vlseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { + return __riscv_vlseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c index e444517..d3d3fe3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c @@ -535,3 +535,179 @@ void test_vlseg5e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, return __riscv_vlseg5e8ff_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8ff_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], 
align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8ff_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8ff_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8ff_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8ff_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8ff_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8ff_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg5e8ff_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c index b4468ad..e0a99dc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16.c @@ -601,3 +601,201 @@ void test_vlseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * return __riscv_vlseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: 
ret void +// +void test_vlseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { + return __riscv_vlseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c index e354e6e..5e85cf2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c @@ -655,3 +655,219 @@ void test_vlseg6e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t return __riscv_vlseg6e16ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, 
vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e16ff_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } 
[[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c index f1394a2..f71e7b5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32.c @@ -403,3 +403,135 @@ void test_vlseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * return __riscv_vlseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { + return __riscv_vlseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { + return __riscv_vlseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const 
int32_t *base, size_t vl) { + return __riscv_vlseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { + return __riscv_vlseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { + return __riscv_vlseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { + return __riscv_vlseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c index dcaece3..a2b8a94 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c @@ -439,3 +439,147 @@ void test_vlseg6e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t return __riscv_vlseg6e32ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32ff_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32ff_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32ff_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32ff_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32ff_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, 
vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e32ff_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c index 95f38c5..0d59a90 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64.c @@ -205,3 +205,69 @@ void test_vlseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * return __riscv_vlseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { + return __riscv_vlseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { + return __riscv_vlseg6e64_mu(v0, v1, v2, 
v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { + return __riscv_vlseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c index 6aae166..dc63f93 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c @@ -223,3 +223,75 @@ void test_vlseg6e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t return __riscv_vlseg6e64ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e64ff_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e64ff_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg6e64ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e64ff_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c index db98dd5..917a781 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8.c @@ -534,3 +534,179 @@ void test_vlseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v return __riscv_vlseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { + return __riscv_vlseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { + return __riscv_vlseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { + return __riscv_vlseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, 
vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { + return __riscv_vlseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { + return __riscv_vlseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { + return __riscv_vlseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { + return __riscv_vlseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { + return __riscv_vlseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c index 07cfe82..b7c177a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c @@ -583,3 +583,195 @@ void test_vlseg6e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, return __riscv_vlseg6e8ff_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8ff_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg6e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8ff_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8ff_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8ff_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8ff_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8ff_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr 
[[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8ff_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg6e8ff_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c index ed60684..2d46494 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16.c @@ -655,3 +655,219 @@ void test_vlseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * return __riscv_vlseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], 
ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], 
[[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { + return __riscv_vlseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c index 8af8a9e..ff0e6dc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c @@ -709,3 +709,237 @@ void test_vlseg7e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t return __riscv_vlseg7e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t 
maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , 
, , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr 
noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e16ff_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t 
maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c index 2fff47f..5ebb288 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32.c @@ -439,3 +439,147 @@ void test_vlseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * return __riscv_vlseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { + return __riscv_vlseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { + return __riscv_vlseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { + return __riscv_vlseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { + return __riscv_vlseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { + return __riscv_vlseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, 
vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { + return __riscv_vlseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c index 22cbbf2..a5897f3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c @@ -475,3 +475,159 @@ void test_vlseg7e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t return __riscv_vlseg7e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32ff_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32ff_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32ff_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 
+// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32ff_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, 
vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32ff_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e32ff_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c index 46d503b..1b956bf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64.c @@ -223,3 +223,75 @@ void test_vlseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * return __riscv_vlseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { + return __riscv_vlseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { + return __riscv_vlseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { + return __riscv_vlseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c index eddc77e..0c1ac1d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c @@ -241,3 +241,81 @@ void test_vlseg7e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t return __riscv_vlseg7e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e64ff_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e64ff_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], 
align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e64ff_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c index 36a83d5..e870a54 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8.c @@ -582,3 +582,195 @@ void test_vlseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v return __riscv_vlseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { + return __riscv_vlseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { + return __riscv_vlseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, 
size_t vl) { + return __riscv_vlseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { + return __riscv_vlseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { + return __riscv_vlseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { + return __riscv_vlseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr 
noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { + return __riscv_vlseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { + return __riscv_vlseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c index 1cb2d4d..3c37942 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c @@ -631,3 +631,211 @@ void test_vlseg7e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, return __riscv_vlseg7e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8ff_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8ff_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8ff_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8ff_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store 
[[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8ff_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8ff_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8ff_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr 
[[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg7e8ff_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_mu(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c index 046fb5e..f37bc985 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16.c @@ -709,3 +709,237 @@ void test_vlseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * return __riscv_vlseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr 
noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef 
[[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { + return __riscv_vlseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c index 2f2055b..6ce8d95 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c @@ -763,3 +763,255 @@ void test_vlseg8e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t return __riscv_vlseg8e16ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , 
, , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, 
v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], 
ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e16ff_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c index 03bbe59..da8df38 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32.c @@ -475,3 +475,159 @@ void test_vlseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * return __riscv_vlseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { + return __riscv_vlseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { + return __riscv_vlseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { + return __riscv_vlseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { + return 
__riscv_vlseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { + return __riscv_vlseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { + return __riscv_vlseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c index 34eead4..800f944 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c @@ -511,3 +511,171 @@ void test_vlseg8e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t return __riscv_vlseg8e32ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32ff_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32ff_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32ff_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg8e32ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32ff_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32ff_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e32ff_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c index 1f447b4..90e3772 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64.c @@ -241,3 +241,81 @@ void test_vlseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * return __riscv_vlseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { + return __riscv_vlseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { + return __riscv_vlseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { + return __riscv_vlseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c index d7cd265..c1e59a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c @@ -259,3 +259,87 @@ void test_vlseg8e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t return __riscv_vlseg8e64ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e64ff_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr 
noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e64ff_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e64ff_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c index 019a723..fee7c63 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8.c @@ -630,3 +630,211 @@ void test_vlseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v return __riscv_vlseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { + return __riscv_vlseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { + return __riscv_vlseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { + return __riscv_vlseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { + return __riscv_vlseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { + return __riscv_vlseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { + return __riscv_vlseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { + return __riscv_vlseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8m1_mu(vuint8m1_t 
*v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { + return __riscv_vlseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c index 7f86990..5a5db4f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c @@ -679,3 +679,227 @@ void test_vlseg8e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, return __riscv_vlseg8e8ff_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8ff_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, 
vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8ff_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8ff_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8ff_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8ff_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8ff_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8ff_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlseg8e8ff_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } 
@llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c index 6734a72..8e5c40d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c @@ -637,3 +637,213 @@ void test_vlsseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, 
vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const 
uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c index 7dbc38b..4a7d3ce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c @@ -511,3 +511,171 @@ void test_vlsseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local void @test_vlsseg2e32_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlsseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c index eb354ca..34d00f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c @@ -385,3 +385,129 @@ void test_vlsseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t ma return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, 
size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c index cae7067..c5b31c3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c @@ -510,3 +510,171 @@ void test_vlsseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, v return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr 
noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t 
vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c index 29f8453..9a6e421 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c @@ -583,3 +583,195 @@ void test_vlsseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], 
ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg3e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlsseg3e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c index e7d9182f..f1b315f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c @@ -439,3 +439,147 @@ void test_vlsseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} 
+ +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c index fda921d..4f399d3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c @@ -295,3 +295,99 @@ void test_vlsseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 
+// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c index 6173b24..7e74f5e 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c @@ -486,3 +486,163 @@ void test_vlsseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c index 1a0e271..749deba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c @@ -655,3 +655,219 @@ void test_vlsseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr 
[[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c index 6f9ad72..137a9d0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c @@ -493,3 +493,165 @@ void test_vlsseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c index d8d749c..fa4116d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c @@ -331,3 +331,111 @@ void test_vlsseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], 
align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c index dec9b52..4f8a663 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c @@ -546,3 +546,183 @@ void test_vlsseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c index 2cf5dda..a23b266 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c @@ -547,3 +547,183 @@ void test_vlsseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c index 8a16d87..75c4022 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c @@ -367,3 +367,123 @@ void test_vlsseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: 
ret void +// +void test_vlsseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], 
align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c index 6383fa0..5c97572 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c @@ -187,3 +187,63 @@ void test_vlsseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c index cbff179..fa21ff9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c @@ -486,3 +486,163 @@ void test_vlsseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, 
vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// 
+void test_vlsseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c index 59fa7d3..0bb9934 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c @@ -601,3 +601,201 @@ void test_vlsseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
[[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], 
align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, 
vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c index b9506c7..23deb0a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c @@ -403,3 +403,135 @@ void test_vlsseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t 
vl) { + return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c index 3b7e92d..156f505 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c @@ -205,3 +205,69 @@ void test_vlsseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], 
ptr [[V5]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c index ce8e87c..dbf7a41 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c @@ -534,3 +534,179 @@ void test_vlsseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
[[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c index 30ea2a3..194aed9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c @@ -655,3 +655,219 @@ void test_vlsseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t 
maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 
+// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, 
vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, 
size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c index 688a3d0..83f9b81 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c @@ -439,3 +439,147 @@ void test_vlsseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c index ebc5fde..160edc5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c @@ -223,3 +223,75 @@ void test_vlsseg7e64_v_u64m1_tumu(vuint64m1_t *v0, 
vuint64m1_t *v1, vuint64m1_t return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c index c732189..a14b5a5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c @@ -582,3 +582,195 @@ void test_vlsseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, return __riscv_vlsseg7e8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlsseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c index 99be005..5c4485f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c @@ -709,3 +709,237 @@ void test_vlsseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, 
vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c index bef2feb..066e71e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c @@ -475,3 +475,159 @@ void test_vlsseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr 
[[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_mu +// CHECK-RV64-SAME: 
(ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlsseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c index 2f10b10..33eb4a2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c @@ -241,3 +241,81 @@ void test_vlsseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c index d4ace1f..25d501d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c @@ -630,3 +630,211 @@ void test_vlsseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: 
store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef 
[[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, 
vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_mu +// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], ptr 
[[V3]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + -- 2.7.4
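For context (this note and sketch are not part of the patch): the tests above exercise the overloaded `_mu` (mask-undisturbed) variant of the strided segment loads, where inactive lanes keep the supplied maskedoff values. A minimal usage sketch is below, assuming `<riscv_vector.h>` and a toolchain exposing the same segment-load signatures these tests use (output pointers, mask, eight maskedoff operands, base, byte stride, vl); the helper name and record layout are hypothetical.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical record of eight int16_t fields stored back to back; one record
 * every record_bytes. The _mu overload leaves inactive lanes equal to the
 * maskedoff operands, so v0..v7 fall back to `fallback` where mask is 0. */
static void gather_fields_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2,
                             vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5,
                             vint16mf4_t *v6, vint16mf4_t *v7,
                             vbool64_t mask, vint16mf4_t fallback,
                             const int16_t *records, ptrdiff_t record_bytes,
                             size_t vl) {
  /* Reuse the same fallback value for every field purely for brevity;
   * real code would typically pass the previous contents of v0..v7. */
  __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask,
                        fallback, fallback, fallback, fallback,
                        fallback, fallback, fallback, fallback,
                        records, record_bytes, vl);
}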